// SPDX-License-Identifier: GPL-2.0-only
/*
 * Qualcomm BAM-DMUX WWAN network driver
 * Copyright (c) 2020, Stephan Gerhold <stephan@gerhold.net>
 */

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/if_arp.h>
#include <linux/interrupt.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/soc/qcom/smem_state.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <net/pkt_sched.h>

#define BAM_DMUX_BUFFER_SIZE		SZ_2K
#define BAM_DMUX_HDR_SIZE		sizeof(struct bam_dmux_hdr)
#define BAM_DMUX_MAX_DATA_SIZE		(BAM_DMUX_BUFFER_SIZE - BAM_DMUX_HDR_SIZE)
#define BAM_DMUX_NUM_SKB		32

#define BAM_DMUX_HDR_MAGIC		0x33fc

#define BAM_DMUX_AUTOSUSPEND_DELAY	1000
#define BAM_DMUX_REMOTE_TIMEOUT		msecs_to_jiffies(2000)
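
/*
 * The channel count, command values and header layout below are
 * reconstructed from how BAM_DMUX_NUM_CH, BAM_DMUX_CMD_* and the
 * hdr->magic/cmd/ch/len fields are used further down in this file.
 * Treat the exact field widths/order and the channel count as
 * assumptions rather than authoritative protocol definitions.
 */
#define BAM_DMUX_NUM_CH			8	/* assumed channel count */

enum bam_dmux_cmd {
	BAM_DMUX_CMD_DATA,
	BAM_DMUX_CMD_OPEN,
	BAM_DMUX_CMD_CLOSE,
};

struct bam_dmux_hdr {
	u16 magic;
	u8 signal;
	u8 cmd;
	u8 pad;
	u8 ch;
	u16 len;
};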

struct bam_dmux_skb_dma {
	struct bam_dmux *dmux;
	struct sk_buff *skb;
	dma_addr_t addr;
};

struct bam_dmux {
	struct device *dev;

	int pc_irq;
	bool pc_state, pc_ack_state;
	struct qcom_smem_state *pc, *pc_ack;
	u32 pc_mask, pc_ack_mask;
	wait_queue_head_t pc_wait;
	struct completion pc_ack_completion;

	struct dma_chan *rx, *tx;
	struct bam_dmux_skb_dma rx_skbs[BAM_DMUX_NUM_SKB];
	struct bam_dmux_skb_dma tx_skbs[BAM_DMUX_NUM_SKB];
	spinlock_t tx_lock; /* Protect tx_skbs, tx_next_skb */
	unsigned int tx_next_skb;
	atomic_long_t tx_deferred_skb;
	struct work_struct tx_wakeup_work;

	DECLARE_BITMAP(remote_channels, BAM_DMUX_NUM_CH);
	struct work_struct register_netdev_work;
	struct net_device *netdevs[BAM_DMUX_NUM_CH];
};

struct bam_dmux_netdev {
	struct bam_dmux *dmux;
	u8 ch;
};

static void bam_dmux_pc_vote(struct bam_dmux *dmux, bool enable)
{
	reinit_completion(&dmux->pc_ack_completion);
	qcom_smem_state_update_bits(dmux->pc, dmux->pc_mask,
				    enable ? dmux->pc_mask : 0);
}

static void bam_dmux_pc_ack(struct bam_dmux *dmux)
{
	qcom_smem_state_update_bits(dmux->pc_ack, dmux->pc_ack_mask,
				    dmux->pc_ack_state ? 0 : dmux->pc_ack_mask);
	dmux->pc_ack_state = !dmux->pc_ack_state;
}

static bool bam_dmux_skb_dma_map(struct bam_dmux_skb_dma *skb_dma,
				 enum dma_data_direction dir)
{
	struct device *dev = skb_dma->dmux->dev;

	skb_dma->addr = dma_map_single(dev, skb_dma->skb->data, skb_dma->skb->len, dir);
	if (dma_mapping_error(dev, skb_dma->addr)) {
		dev_err(dev, "Failed to DMA map buffer\n");
		return false;
	}

	return true;
}

static void bam_dmux_skb_dma_unmap(struct bam_dmux_skb_dma *skb_dma,
				   enum dma_data_direction dir)
{
	dma_unmap_single(skb_dma->dmux->dev, skb_dma->addr, skb_dma->skb->len, dir);
	skb_dma->addr = 0;
}

static void bam_dmux_tx_wake_queues(struct bam_dmux *dmux)
{
	int i;

	dev_dbg(dmux->dev, "wake queues\n");

	for (i = 0; i < BAM_DMUX_NUM_CH; ++i) {
		struct net_device *netdev = dmux->netdevs[i];

		if (netdev && netif_running(netdev))
			netif_wake_queue(netdev);
	}
}

static void bam_dmux_tx_stop_queues(struct bam_dmux *dmux)
{
	int i;

	dev_dbg(dmux->dev, "stop queues\n");

	for (i = 0; i < BAM_DMUX_NUM_CH; ++i) {
		struct net_device *netdev = dmux->netdevs[i];

		if (netdev)
			netif_stop_queue(netdev);
	}
}

static void bam_dmux_tx_done(struct bam_dmux_skb_dma *skb_dma)
{
	struct bam_dmux *dmux = skb_dma->dmux;
	unsigned long flags;

	pm_runtime_mark_last_busy(dmux->dev);
	pm_runtime_put_autosuspend(dmux->dev);

	if (skb_dma->addr)
		bam_dmux_skb_dma_unmap(skb_dma, DMA_TO_DEVICE);

	spin_lock_irqsave(&dmux->tx_lock, flags);
	skb_dma->skb = NULL;
	if (skb_dma == &dmux->tx_skbs[dmux->tx_next_skb % BAM_DMUX_NUM_SKB])
		bam_dmux_tx_wake_queues(dmux);
	spin_unlock_irqrestore(&dmux->tx_lock, flags);
}

static void bam_dmux_tx_callback(void *data)
{
	struct bam_dmux_skb_dma *skb_dma = data;
	struct sk_buff *skb = skb_dma->skb;

	bam_dmux_tx_done(skb_dma);
	dev_consume_skb_any(skb);
}

static bool bam_dmux_skb_dma_submit_tx(struct bam_dmux_skb_dma *skb_dma)
{
	struct bam_dmux *dmux = skb_dma->dmux;
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_slave_single(dmux->tx, skb_dma->addr,
					   skb_dma->skb->len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(dmux->dev, "Failed to prepare TX DMA buffer\n");
		return false;
	}

	desc->callback = bam_dmux_tx_callback;
	desc->callback_param = skb_dma;
	desc->cookie = dmaengine_submit(desc);
	return true;
}
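
/*
 * TX buffers live in a fixed ring of BAM_DMUX_NUM_SKB slots:
 * bam_dmux_tx_queue() claims the slot at tx_next_skb and stops the
 * network queues as soon as the following slot is still occupied, so
 * the ring cannot be overrun while transmissions complete out of order.
 */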
static struct bam_dmux_skb_dma *
bam_dmux_tx_queue(struct bam_dmux *dmux, struct sk_buff *skb)
{
	struct bam_dmux_skb_dma *skb_dma;
	unsigned long flags;

	spin_lock_irqsave(&dmux->tx_lock, flags);

	skb_dma = &dmux->tx_skbs[dmux->tx_next_skb % BAM_DMUX_NUM_SKB];
	if (skb_dma->skb) {
		bam_dmux_tx_stop_queues(dmux);
		spin_unlock_irqrestore(&dmux->tx_lock, flags);
		return NULL;
	}
	skb_dma->skb = skb;

	dmux->tx_next_skb++;
	if (dmux->tx_skbs[dmux->tx_next_skb % BAM_DMUX_NUM_SKB].skb)
		bam_dmux_tx_stop_queues(dmux);

	spin_unlock_irqrestore(&dmux->tx_lock, flags);
	return skb_dma;
}

static int bam_dmux_send_cmd(struct bam_dmux_netdev *bndev, u8 cmd)
{
	struct bam_dmux *dmux = bndev->dmux;
	struct bam_dmux_skb_dma *skb_dma;
	struct bam_dmux_hdr *hdr;
	struct sk_buff *skb;
	int ret;

	skb = alloc_skb(sizeof(*hdr), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = skb_put_zero(skb, sizeof(*hdr));
	hdr->magic = BAM_DMUX_HDR_MAGIC;
	hdr->cmd = cmd;
	hdr->ch = bndev->ch;

	skb_dma = bam_dmux_tx_queue(dmux, skb);
	if (!skb_dma) {
		ret = -EAGAIN;
		goto free_skb;
	}

	ret = pm_runtime_get_sync(dmux->dev);
	if (ret < 0)
		goto tx_fail;

	if (!bam_dmux_skb_dma_map(skb_dma, DMA_TO_DEVICE)) {
		ret = -ENOMEM;
		goto tx_fail;
	}

	if (!bam_dmux_skb_dma_submit_tx(skb_dma)) {
		ret = -EIO;
		goto tx_fail;
	}

	dma_async_issue_pending(dmux->tx);
	return 0;

tx_fail:
	bam_dmux_tx_done(skb_dma);
free_skb:
	dev_kfree_skb(skb);
	return ret;
}

static int bam_dmux_netdev_open(struct net_device *netdev)
{
	struct bam_dmux_netdev *bndev = netdev_priv(netdev);
	int ret;

	ret = bam_dmux_send_cmd(bndev, BAM_DMUX_CMD_OPEN);
	if (ret)
		return ret;

	netif_start_queue(netdev);
	return 0;
}

static int bam_dmux_netdev_stop(struct net_device *netdev)
{
	struct bam_dmux_netdev *bndev = netdev_priv(netdev);

	netif_stop_queue(netdev);
	bam_dmux_send_cmd(bndev, BAM_DMUX_CMD_CLOSE);
	return 0;
}

static unsigned int needed_room(unsigned int avail, unsigned int needed)
{
	if (avail >= needed)
		return 0;
	return needed - avail;
}

static int bam_dmux_tx_prepare_skb(struct bam_dmux_netdev *bndev,
				   struct sk_buff *skb)
{
	unsigned int head = needed_room(skb_headroom(skb), BAM_DMUX_HDR_SIZE);
	unsigned int pad = sizeof(u32) - skb->len % sizeof(u32);
	unsigned int tail = needed_room(skb_tailroom(skb), pad);
	struct bam_dmux_hdr *hdr;
	int ret;

	if (head || tail || skb_cloned(skb)) {
		ret = pskb_expand_head(skb, head, tail, GFP_ATOMIC);
		if (ret)
			return ret;
	}

	hdr = skb_push(skb, sizeof(*hdr));
	hdr->magic = BAM_DMUX_HDR_MAGIC;
	hdr->signal = 0;
	hdr->cmd = BAM_DMUX_CMD_DATA;
	hdr->pad = pad;
	hdr->ch = bndev->ch;
	hdr->len = skb->len - sizeof(*hdr);
	skb_put_zero(skb, pad);

	return 0;
}

static netdev_tx_t bam_dmux_netdev_start_xmit(struct sk_buff *skb,
					      struct net_device *netdev)
{
	struct bam_dmux_netdev *bndev = netdev_priv(netdev);
	struct bam_dmux *dmux = bndev->dmux;
	struct bam_dmux_skb_dma *skb_dma;
	int active, ret;

	skb_dma = bam_dmux_tx_queue(dmux, skb);
	if (!skb_dma)
		return NETDEV_TX_BUSY;

	active = pm_runtime_get(dmux->dev);
	if (active < 0 && active != -EINPROGRESS)
		goto drop;

	ret = bam_dmux_tx_prepare_skb(bndev, skb);
	if (ret)
		goto drop;

	if (!bam_dmux_skb_dma_map(skb_dma, DMA_TO_DEVICE))
		goto drop;

	if (active <= 0) {
		/* Cannot sleep here so mark skb for wakeup handler and return */
		if (!atomic_long_fetch_or(BIT(skb_dma - dmux->tx_skbs),
					  &dmux->tx_deferred_skb))
			queue_pm_work(&dmux->tx_wakeup_work);
		return NETDEV_TX_OK;
	}

	if (!bam_dmux_skb_dma_submit_tx(skb_dma))
		goto drop;

	dma_async_issue_pending(dmux->tx);
	return NETDEV_TX_OK;

drop:
	bam_dmux_tx_done(skb_dma);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
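
/*
 * Deferred transmission: start_xmit() above runs in atomic context and
 * cannot wait for runtime resume. Deferred slots are recorded as a bitmask
 * in tx_deferred_skb; the work item below resumes the device and submits
 * whatever was deferred in the meantime.
 */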
static void bam_dmux_tx_wakeup_work(struct work_struct *work)
{
	struct bam_dmux *dmux = container_of(work, struct bam_dmux, tx_wakeup_work);
	unsigned long pending;
	int ret, i;

	ret = pm_runtime_resume_and_get(dmux->dev);
	if (ret < 0) {
		dev_err(dmux->dev, "Failed to resume: %d\n", ret);
		return;
	}

	pending = atomic_long_xchg(&dmux->tx_deferred_skb, 0);
	if (!pending)
		goto out;

	dev_dbg(dmux->dev, "pending skbs after wakeup: %#lx\n", pending);
	for_each_set_bit(i, &pending, BAM_DMUX_NUM_SKB) {
		bam_dmux_skb_dma_submit_tx(&dmux->tx_skbs[i]);
	}
	dma_async_issue_pending(dmux->tx);

out:
	pm_runtime_mark_last_busy(dmux->dev);
	pm_runtime_put_autosuspend(dmux->dev);
}

static const struct net_device_ops bam_dmux_ops = {
	.ndo_open	= bam_dmux_netdev_open,
	.ndo_stop	= bam_dmux_netdev_stop,
	.ndo_start_xmit	= bam_dmux_netdev_start_xmit,
};

static const struct device_type wwan_type = {
	.name = "wwan",
};

static void bam_dmux_netdev_setup(struct net_device *dev)
{
	dev->netdev_ops = &bam_dmux_ops;

	dev->type = ARPHRD_RAWIP;
	SET_NETDEV_DEVTYPE(dev, &wwan_type);
	dev->flags = IFF_POINTOPOINT | IFF_NOARP;

	dev->mtu = ETH_DATA_LEN;
	dev->max_mtu = BAM_DMUX_MAX_DATA_SIZE;
	dev->needed_headroom = sizeof(struct bam_dmux_hdr);
	dev->needed_tailroom = sizeof(u32); /* word-aligned */
	dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;

	/* This perm addr will be used as interface identifier by IPv6 */
	dev->addr_assign_type = NET_ADDR_RANDOM;
	eth_random_addr(dev->perm_addr);
}

static void bam_dmux_register_netdev_work(struct work_struct *work)
{
	struct bam_dmux *dmux = container_of(work, struct bam_dmux, register_netdev_work);
	struct bam_dmux_netdev *bndev;
	struct net_device *netdev;
	int ch, ret;

	for_each_set_bit(ch, dmux->remote_channels, BAM_DMUX_NUM_CH) {
		if (dmux->netdevs[ch])
			continue;

		netdev = alloc_netdev(sizeof(*bndev), "wwan%d", NET_NAME_ENUM,
				      bam_dmux_netdev_setup);
		if (!netdev)
			return;

		SET_NETDEV_DEV(netdev, dmux->dev);
		netdev->dev_port = ch;

		bndev = netdev_priv(netdev);
		bndev->dmux = dmux;
		bndev->ch = ch;

		ret = register_netdev(netdev);
		if (ret) {
			dev_err(dmux->dev, "Failed to register netdev for channel %u: %d\n",
				ch, ret);
			free_netdev(netdev);
			return;
		}

		dmux->netdevs[ch] = netdev;
	}
}

static void bam_dmux_rx_callback(void *data);

static bool bam_dmux_skb_dma_submit_rx(struct bam_dmux_skb_dma *skb_dma)
{
	struct bam_dmux *dmux = skb_dma->dmux;
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_slave_single(dmux->rx, skb_dma->addr,
					   skb_dma->skb->len, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(dmux->dev, "Failed to prepare RX DMA buffer\n");
		return false;
	}

	desc->callback = bam_dmux_rx_callback;
	desc->callback_param = skb_dma;
	desc->cookie = dmaengine_submit(desc);
	return true;
}
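
/*
 * RX buffers are recycled: bam_dmux_skb_dma_queue_rx() only allocates a
 * new skb when the previous one was handed over to the network stack
 * (skb_dma->skb cleared in bam_dmux_cmd_data()); otherwise the same
 * buffer is simply re-mapped and re-submitted to the DMA engine.
 */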
static bool bam_dmux_skb_dma_queue_rx(struct bam_dmux_skb_dma *skb_dma, gfp_t gfp)
{
	if (!skb_dma->skb) {
		skb_dma->skb = __netdev_alloc_skb(NULL, BAM_DMUX_BUFFER_SIZE, gfp);
		if (!skb_dma->skb)
			return false;
		skb_put(skb_dma->skb, BAM_DMUX_BUFFER_SIZE);
	}

	return bam_dmux_skb_dma_map(skb_dma, DMA_FROM_DEVICE) &&
	       bam_dmux_skb_dma_submit_rx(skb_dma);
}

static void bam_dmux_cmd_data(struct bam_dmux_skb_dma *skb_dma)
{
	struct bam_dmux *dmux = skb_dma->dmux;
	struct sk_buff *skb = skb_dma->skb;
	struct bam_dmux_hdr *hdr = (struct bam_dmux_hdr *)skb->data;
	struct net_device *netdev = dmux->netdevs[hdr->ch];

	if (!netdev || !netif_running(netdev)) {
		dev_warn(dmux->dev, "Data for inactive channel %u\n", hdr->ch);
		return;
	}

	if (hdr->len > BAM_DMUX_MAX_DATA_SIZE) {
		dev_err(dmux->dev, "Data larger than buffer? (%u > %u)\n",
			hdr->len, (u16)BAM_DMUX_MAX_DATA_SIZE);
		return;
	}

	skb_dma->skb = NULL; /* Hand over to network stack */

	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, hdr->len);
	skb->dev = netdev;

	/* Only Raw-IP/QMAP is supported by this driver */
	switch (skb->data[0] & 0xf0) {
	case 0x40:
		skb->protocol = htons(ETH_P_IP);
		break;
	case 0x60:
		skb->protocol = htons(ETH_P_IPV6);
		break;
	default:
		skb->protocol = htons(ETH_P_MAP);
		break;
	}

	netif_receive_skb(skb);
}

static void bam_dmux_cmd_open(struct bam_dmux *dmux, struct bam_dmux_hdr *hdr)
{
	struct net_device *netdev = dmux->netdevs[hdr->ch];

	dev_dbg(dmux->dev, "open channel: %u\n", hdr->ch);

	if (__test_and_set_bit(hdr->ch, dmux->remote_channels)) {
		dev_warn(dmux->dev, "Channel already open: %u\n", hdr->ch);
		return;
	}

	if (netdev) {
		netif_device_attach(netdev);
	} else {
		/* Cannot sleep here, schedule work to register the netdev */
		schedule_work(&dmux->register_netdev_work);
	}
}

static void bam_dmux_cmd_close(struct bam_dmux *dmux, struct bam_dmux_hdr *hdr)
{
	struct net_device *netdev = dmux->netdevs[hdr->ch];

	dev_dbg(dmux->dev, "close channel: %u\n", hdr->ch);

	if (!__test_and_clear_bit(hdr->ch, dmux->remote_channels)) {
		dev_err(dmux->dev, "Channel not open: %u\n", hdr->ch);
		return;
	}

	if (netdev)
		netif_device_detach(netdev);
}

static void bam_dmux_rx_callback(void *data)
{
	struct bam_dmux_skb_dma *skb_dma = data;
	struct bam_dmux *dmux = skb_dma->dmux;
	struct sk_buff *skb = skb_dma->skb;
	struct bam_dmux_hdr *hdr = (struct bam_dmux_hdr *)skb->data;

	bam_dmux_skb_dma_unmap(skb_dma, DMA_FROM_DEVICE);

	if (hdr->magic != BAM_DMUX_HDR_MAGIC) {
		dev_err(dmux->dev, "Invalid magic in header: %#x\n", hdr->magic);
		goto out;
	}

	if (hdr->ch >= BAM_DMUX_NUM_CH) {
		dev_dbg(dmux->dev, "Unsupported channel: %u\n", hdr->ch);
		goto out;
	}

	switch (hdr->cmd) {
	case BAM_DMUX_CMD_DATA:
		bam_dmux_cmd_data(skb_dma);
		break;
	case BAM_DMUX_CMD_OPEN:
		bam_dmux_cmd_open(dmux, hdr);
		break;
	case BAM_DMUX_CMD_CLOSE:
		bam_dmux_cmd_close(dmux, hdr);
		break;
	default:
		dev_err(dmux->dev, "Unsupported command %u on channel %u\n",
			hdr->cmd, hdr->ch);
		break;
	}

out:
	if (bam_dmux_skb_dma_queue_rx(skb_dma, GFP_ATOMIC))
		dma_async_issue_pending(dmux->rx);
}

static bool bam_dmux_power_on(struct bam_dmux *dmux)
{
	struct device *dev = dmux->dev;
	struct dma_slave_config dma_rx_conf = {
		.direction = DMA_DEV_TO_MEM,
		.src_maxburst = BAM_DMUX_BUFFER_SIZE,
	};
	int i;

	dmux->rx = dma_request_chan(dev, "rx");
	if (IS_ERR(dmux->rx)) {
		dev_err(dev, "Failed to request RX DMA channel: %pe\n", dmux->rx);
		dmux->rx = NULL;
		return false;
	}
	dmaengine_slave_config(dmux->rx, &dma_rx_conf);

	for (i = 0; i < BAM_DMUX_NUM_SKB; i++) {
		if (!bam_dmux_skb_dma_queue_rx(&dmux->rx_skbs[i], GFP_KERNEL))
			return false;
	}
	dma_async_issue_pending(dmux->rx);

	return true;
}

static void bam_dmux_free_skbs(struct bam_dmux_skb_dma skbs[],
			       enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < BAM_DMUX_NUM_SKB; i++) {
		struct bam_dmux_skb_dma *skb_dma = &skbs[i];

		if (skb_dma->addr)
			bam_dmux_skb_dma_unmap(skb_dma, dir);
		if (skb_dma->skb) {
			dev_kfree_skb(skb_dma->skb);
			skb_dma->skb = NULL;
		}
	}
}

static void bam_dmux_power_off(struct bam_dmux *dmux)
{
	if (dmux->tx) {
		dmaengine_terminate_sync(dmux->tx);
		dma_release_channel(dmux->tx);
		dmux->tx = NULL;
	}

	if (dmux->rx) {
		dmaengine_terminate_sync(dmux->rx);
		dma_release_channel(dmux->rx);
		dmux->rx = NULL;
	}

	bam_dmux_free_skbs(dmux->rx_skbs, DMA_FROM_DEVICE);
}
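
/*
 * Power control handshake: the local side votes through the "pc" smem
 * state bit (bam_dmux_pc_vote()) and acknowledges remote requests by
 * toggling the "pc-ack" bit (bam_dmux_pc_ack()). The remote side's vote
 * arrives as a level change on the "pc" interrupt handled below, which
 * powers the BAM channels up or down accordingly.
 */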
static irqreturn_t bam_dmux_pc_irq(int irq, void *data)
{
	struct bam_dmux *dmux = data;
	bool new_state = !dmux->pc_state;

	dev_dbg(dmux->dev, "pc: %u\n", new_state);

	if (new_state) {
		if (bam_dmux_power_on(dmux))
			bam_dmux_pc_ack(dmux);
		else
			bam_dmux_power_off(dmux);
	} else {
		bam_dmux_power_off(dmux);
		bam_dmux_pc_ack(dmux);
	}

	dmux->pc_state = new_state;
	wake_up_all(&dmux->pc_wait);

	return IRQ_HANDLED;
}

static irqreturn_t bam_dmux_pc_ack_irq(int irq, void *data)
{
	struct bam_dmux *dmux = data;

	dev_dbg(dmux->dev, "pc ack\n");
	complete_all(&dmux->pc_ack_completion);

	return IRQ_HANDLED;
}

static int bam_dmux_runtime_suspend(struct device *dev)
{
	struct bam_dmux *dmux = dev_get_drvdata(dev);

	dev_dbg(dev, "runtime suspend\n");
	bam_dmux_pc_vote(dmux, false);

	return 0;
}

static int __maybe_unused bam_dmux_runtime_resume(struct device *dev)
{
	struct bam_dmux *dmux = dev_get_drvdata(dev);

	dev_dbg(dev, "runtime resume\n");

	/* Wait until previous power down was acked */
	if (!wait_for_completion_timeout(&dmux->pc_ack_completion,
					 BAM_DMUX_REMOTE_TIMEOUT))
		return -ETIMEDOUT;

	/* Vote for power state */
	bam_dmux_pc_vote(dmux, true);

	/* Wait for ack */
	if (!wait_for_completion_timeout(&dmux->pc_ack_completion,
					 BAM_DMUX_REMOTE_TIMEOUT)) {
		bam_dmux_pc_vote(dmux, false);
		return -ETIMEDOUT;
	}

	/* Wait until we're up */
	if (!wait_event_timeout(dmux->pc_wait, dmux->pc_state,
				BAM_DMUX_REMOTE_TIMEOUT)) {
		bam_dmux_pc_vote(dmux, false);
		return -ETIMEDOUT;
	}

	/* Ensure that we actually initialized successfully */
	if (!dmux->rx) {
		bam_dmux_pc_vote(dmux, false);
		return -ENXIO;
	}

	/* Request TX channel if necessary */
	if (dmux->tx)
		return 0;

	dmux->tx = dma_request_chan(dev, "tx");
	if (IS_ERR(dmux->tx)) {
		dev_err(dev, "Failed to request TX DMA channel: %pe\n", dmux->tx);
		dmux->tx = NULL;
		bam_dmux_runtime_suspend(dev);
		return -ENXIO;
	}

	return 0;
}

static int bam_dmux_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct bam_dmux *dmux;
	int ret, pc_ack_irq, i;
	unsigned int bit;

	dmux = devm_kzalloc(dev, sizeof(*dmux), GFP_KERNEL);
	if (!dmux)
		return -ENOMEM;

	dmux->dev = dev;
	platform_set_drvdata(pdev, dmux);

	dmux->pc_irq = platform_get_irq_byname(pdev, "pc");
	if (dmux->pc_irq < 0)
		return dmux->pc_irq;

	pc_ack_irq = platform_get_irq_byname(pdev, "pc-ack");
	if (pc_ack_irq < 0)
		return pc_ack_irq;

	dmux->pc = devm_qcom_smem_state_get(dev, "pc", &bit);
	if (IS_ERR(dmux->pc))
		return dev_err_probe(dev, PTR_ERR(dmux->pc),
				     "Failed to get pc state\n");
	dmux->pc_mask = BIT(bit);

	dmux->pc_ack = devm_qcom_smem_state_get(dev, "pc-ack", &bit);
	if (IS_ERR(dmux->pc_ack))
		return dev_err_probe(dev, PTR_ERR(dmux->pc_ack),
				     "Failed to get pc-ack state\n");
	dmux->pc_ack_mask = BIT(bit);

	init_waitqueue_head(&dmux->pc_wait);
	init_completion(&dmux->pc_ack_completion);
	complete_all(&dmux->pc_ack_completion);

	spin_lock_init(&dmux->tx_lock);
	INIT_WORK(&dmux->tx_wakeup_work, bam_dmux_tx_wakeup_work);
	INIT_WORK(&dmux->register_netdev_work, bam_dmux_register_netdev_work);

	for (i = 0; i < BAM_DMUX_NUM_SKB; i++) {
		dmux->rx_skbs[i].dmux = dmux;
		dmux->tx_skbs[i].dmux = dmux;
	}

	/* Runtime PM manages our own power vote.
	 * Note that the RX path may be active even if we are runtime suspended,
	 * since it is controlled by the remote side.
	 */
	pm_runtime_set_autosuspend_delay(dev, BAM_DMUX_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);

	ret = devm_request_threaded_irq(dev, pc_ack_irq, NULL, bam_dmux_pc_ack_irq,
					IRQF_ONESHOT, NULL, dmux);
	if (ret)
		goto err_disable_pm;

	ret = devm_request_threaded_irq(dev, dmux->pc_irq, NULL, bam_dmux_pc_irq,
					IRQF_ONESHOT, NULL, dmux);
	if (ret)
		goto err_disable_pm;

	ret = irq_get_irqchip_state(dmux->pc_irq, IRQCHIP_STATE_LINE_LEVEL,
				    &dmux->pc_state);
	if (ret)
		goto err_disable_pm;

	/* Check if remote finished initialization before us */
	if (dmux->pc_state) {
		if (bam_dmux_power_on(dmux))
			bam_dmux_pc_ack(dmux);
		else
			bam_dmux_power_off(dmux);
	}

	return 0;

err_disable_pm:
	pm_runtime_disable(dev);
	pm_runtime_dont_use_autosuspend(dev);
	return ret;
}

static void bam_dmux_remove(struct platform_device *pdev)
{
	struct bam_dmux *dmux = platform_get_drvdata(pdev);
	struct device *dev = dmux->dev;
	LIST_HEAD(list);
	int i;

	/* Unregister network interfaces */
	cancel_work_sync(&dmux->register_netdev_work);
	rtnl_lock();
	for (i = 0; i < BAM_DMUX_NUM_CH; ++i)
		if (dmux->netdevs[i])
			unregister_netdevice_queue(dmux->netdevs[i], &list);
	unregister_netdevice_many(&list);
	rtnl_unlock();
	cancel_work_sync(&dmux->tx_wakeup_work);

	/* Drop our own power vote */
	pm_runtime_disable(dev);
	pm_runtime_dont_use_autosuspend(dev);
	bam_dmux_runtime_suspend(dev);
	pm_runtime_set_suspended(dev);

	/* Try to wait for remote side to drop power vote */
	if (!wait_event_timeout(dmux->pc_wait, !dmux->rx, BAM_DMUX_REMOTE_TIMEOUT))
		dev_err(dev, "Timed out waiting for remote side to suspend\n");

	/* Make sure everything is cleaned up before we return */
	disable_irq(dmux->pc_irq);
	bam_dmux_power_off(dmux);
	bam_dmux_free_skbs(dmux->tx_skbs, DMA_TO_DEVICE);
}

static const struct dev_pm_ops bam_dmux_pm_ops = {
	SET_RUNTIME_PM_OPS(bam_dmux_runtime_suspend, bam_dmux_runtime_resume, NULL)
};

static const struct of_device_id bam_dmux_of_match[] = {
	{ .compatible = "qcom,bam-dmux" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, bam_dmux_of_match);

static struct platform_driver bam_dmux_driver = {
	.probe = bam_dmux_probe,
	.remove = bam_dmux_remove,
	.driver = {
		.name = "bam-dmux",
		.pm = &bam_dmux_pm_ops,
		.of_match_table = bam_dmux_of_match,
	},
};
module_platform_driver(bam_dmux_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm BAM-DMUX WWAN Network Driver");
MODULE_AUTHOR("Stephan Gerhold <stephan@gerhold.net>");