drivers/net/wireless/ath/ath10k/snoc.c
1 /*
2 * Copyright (c) 2018 The Linux Foundation. All rights reserved.
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/module.h>
20 #include <linux/of.h>
21 #include <linux/of_device.h>
22 #include <linux/platform_device.h>
23 #include <linux/regulator/consumer.h>
25 #include "ce.h"
26 #include "debug.h"
27 #include "hif.h"
28 #include "htc.h"
29 #include "snoc.h"
31 #define ATH10K_SNOC_RX_POST_RETRY_MS 50
32 #define CE_POLL_PIPE 4
34 static char *const ce_name[] = {
35 "WLAN_CE_0",
36 "WLAN_CE_1",
37 "WLAN_CE_2",
38 "WLAN_CE_3",
39 "WLAN_CE_4",
40 "WLAN_CE_5",
41 "WLAN_CE_6",
42 "WLAN_CE_7",
43 "WLAN_CE_8",
44 "WLAN_CE_9",
45 "WLAN_CE_10",
46 "WLAN_CE_11",
49 static struct ath10k_wcn3990_vreg_info vreg_cfg[] = {
50 {NULL, "vdd-0.8-cx-mx", 800000, 800000, 0, 0, false},
51 {NULL, "vdd-1.8-xo", 1800000, 1800000, 0, 0, false},
52 {NULL, "vdd-1.3-rfa", 1304000, 1304000, 0, 0, false},
53 {NULL, "vdd-3.3-ch0", 3312000, 3312000, 0, 0, false},
56 static struct ath10k_wcn3990_clk_info clk_cfg[] = {
57 {NULL, "cxo_ref_clk_pin", 0, false},
60 static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
61 static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
62 static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
63 static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
64 static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
65 static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);
67 static const struct ath10k_snoc_drv_priv drv_priv = {
68 .hw_rev = ATH10K_HW_WCN3990,
69 .dma_mask = DMA_BIT_MASK(37),
70 .msa_size = 0x100000,
71 };
73 #define WCN3990_SRC_WR_IDX_OFFSET 0x3C
74 #define WCN3990_DST_WR_IDX_OFFSET 0x40
76 static struct ath10k_shadow_reg_cfg target_shadow_reg_cfg_map[] = {
78 .ce_id = __cpu_to_le16(0),
79 .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
83 .ce_id = __cpu_to_le16(3),
84 .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
88 .ce_id = __cpu_to_le16(4),
89 .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
93 .ce_id = __cpu_to_le16(5),
94 .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
98 .ce_id = __cpu_to_le16(7),
99 .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
103 .ce_id = __cpu_to_le16(1),
104 .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
108 .ce_id = __cpu_to_le16(2),
109 .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
113 .ce_id = __cpu_to_le16(7),
114 .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
118 .ce_id = __cpu_to_le16(8),
119 .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
123 .ce_id = __cpu_to_le16(9),
124 .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
128 .ce_id = __cpu_to_le16(10),
129 .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
133 .ce_id = __cpu_to_le16(11),
134 .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
135 },
136 };
138 static struct ce_attr host_ce_config_wlan[] = {
139 /* CE0: host->target HTC control streams */
141 .flags = CE_ATTR_FLAGS,
142 .src_nentries = 16,
143 .src_sz_max = 2048,
144 .dest_nentries = 0,
145 .send_cb = ath10k_snoc_htc_tx_cb,
148 /* CE1: target->host HTT + HTC control */
150 .flags = CE_ATTR_FLAGS,
151 .src_nentries = 0,
152 .src_sz_max = 2048,
153 .dest_nentries = 512,
154 .recv_cb = ath10k_snoc_htt_htc_rx_cb,
157 /* CE2: target->host WMI */
159 .flags = CE_ATTR_FLAGS,
160 .src_nentries = 0,
161 .src_sz_max = 2048,
162 .dest_nentries = 64,
163 .recv_cb = ath10k_snoc_htc_rx_cb,
166 /* CE3: host->target WMI */
168 .flags = CE_ATTR_FLAGS,
169 .src_nentries = 32,
170 .src_sz_max = 2048,
171 .dest_nentries = 0,
172 .send_cb = ath10k_snoc_htc_tx_cb,
175 /* CE4: host->target HTT */
177 .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
178 .src_nentries = 256,
179 .src_sz_max = 256,
180 .dest_nentries = 0,
181 .send_cb = ath10k_snoc_htt_tx_cb,
184 /* CE5: target->host HTT (ipa_uc->target ) */
186 .flags = CE_ATTR_FLAGS,
187 .src_nentries = 0,
188 .src_sz_max = 512,
189 .dest_nentries = 512,
190 .recv_cb = ath10k_snoc_htt_rx_cb,
193 /* CE6: target autonomous hif_memcpy */
195 .flags = CE_ATTR_FLAGS,
196 .src_nentries = 0,
197 .src_sz_max = 0,
198 .dest_nentries = 0,
201 /* CE7: ce_diag, the Diagnostic Window */
203 .flags = CE_ATTR_FLAGS,
204 .src_nentries = 2,
205 .src_sz_max = 2048,
206 .dest_nentries = 2,
209 /* CE8: Target to uMC */
211 .flags = CE_ATTR_FLAGS,
212 .src_nentries = 0,
213 .src_sz_max = 2048,
214 .dest_nentries = 128,
217 /* CE9 target->host HTT */
219 .flags = CE_ATTR_FLAGS,
220 .src_nentries = 0,
221 .src_sz_max = 2048,
222 .dest_nentries = 512,
223 .recv_cb = ath10k_snoc_htt_htc_rx_cb,
226 /* CE10: target->host HTT */
228 .flags = CE_ATTR_FLAGS,
229 .src_nentries = 0,
230 .src_sz_max = 2048,
231 .dest_nentries = 512,
232 .recv_cb = ath10k_snoc_htt_htc_rx_cb,
235 /* CE11: target -> host PKTLOG */
237 .flags = CE_ATTR_FLAGS,
238 .src_nentries = 0,
239 .src_sz_max = 2048,
240 .dest_nentries = 512,
241 .recv_cb = ath10k_snoc_pktlog_rx_cb,
242 },
243 };
245 static struct ce_pipe_config target_ce_config_wlan[] = {
246 /* CE0: host->target HTC control and raw streams */
248 .pipenum = __cpu_to_le32(0),
249 .pipedir = __cpu_to_le32(PIPEDIR_OUT),
250 .nentries = __cpu_to_le32(32),
251 .nbytes_max = __cpu_to_le32(2048),
252 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
253 .reserved = __cpu_to_le32(0),
256 /* CE1: target->host HTT + HTC control */
258 .pipenum = __cpu_to_le32(1),
259 .pipedir = __cpu_to_le32(PIPEDIR_IN),
260 .nentries = __cpu_to_le32(32),
261 .nbytes_max = __cpu_to_le32(2048),
262 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
263 .reserved = __cpu_to_le32(0),
266 /* CE2: target->host WMI */
268 .pipenum = __cpu_to_le32(2),
269 .pipedir = __cpu_to_le32(PIPEDIR_IN),
270 .nentries = __cpu_to_le32(64),
271 .nbytes_max = __cpu_to_le32(2048),
272 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
273 .reserved = __cpu_to_le32(0),
276 /* CE3: host->target WMI */
278 .pipenum = __cpu_to_le32(3),
279 .pipedir = __cpu_to_le32(PIPEDIR_OUT),
280 .nentries = __cpu_to_le32(32),
281 .nbytes_max = __cpu_to_le32(2048),
282 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
283 .reserved = __cpu_to_le32(0),
286 /* CE4: host->target HTT */
288 .pipenum = __cpu_to_le32(4),
289 .pipedir = __cpu_to_le32(PIPEDIR_OUT),
290 .nentries = __cpu_to_le32(256),
291 .nbytes_max = __cpu_to_le32(256),
292 .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
293 .reserved = __cpu_to_le32(0),
296 /* CE5: target->host HTT (HIF->HTT) */
298 .pipenum = __cpu_to_le32(5),
299 .pipedir = __cpu_to_le32(PIPEDIR_OUT),
300 .nentries = __cpu_to_le32(1024),
301 .nbytes_max = __cpu_to_le32(64),
302 .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
303 .reserved = __cpu_to_le32(0),
306 /* CE6: Reserved for target autonomous hif_memcpy */
308 .pipenum = __cpu_to_le32(6),
309 .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
310 .nentries = __cpu_to_le32(32),
311 .nbytes_max = __cpu_to_le32(16384),
312 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
313 .reserved = __cpu_to_le32(0),
316 /* CE7 used only by Host */
318 .pipenum = __cpu_to_le32(7),
319 .pipedir = __cpu_to_le32(4),
320 .nentries = __cpu_to_le32(0),
321 .nbytes_max = __cpu_to_le32(0),
322 .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
323 .reserved = __cpu_to_le32(0),
326 /* CE8 Target to uMC */
328 .pipenum = __cpu_to_le32(8),
329 .pipedir = __cpu_to_le32(PIPEDIR_IN),
330 .nentries = __cpu_to_le32(32),
331 .nbytes_max = __cpu_to_le32(2048),
332 .flags = __cpu_to_le32(0),
333 .reserved = __cpu_to_le32(0),
336 /* CE9 target->host HTT */
338 .pipenum = __cpu_to_le32(9),
339 .pipedir = __cpu_to_le32(PIPEDIR_IN),
340 .nentries = __cpu_to_le32(32),
341 .nbytes_max = __cpu_to_le32(2048),
342 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
343 .reserved = __cpu_to_le32(0),
346 /* CE10 target->host HTT */
348 .pipenum = __cpu_to_le32(10),
349 .pipedir = __cpu_to_le32(PIPEDIR_IN),
350 .nentries = __cpu_to_le32(32),
351 .nbytes_max = __cpu_to_le32(2048),
352 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
353 .reserved = __cpu_to_le32(0),
356 /* CE11 target autonomous qcache memcpy */
358 .pipenum = __cpu_to_le32(11),
359 .pipedir = __cpu_to_le32(PIPEDIR_IN),
360 .nentries = __cpu_to_le32(32),
361 .nbytes_max = __cpu_to_le32(2048),
362 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
363 .reserved = __cpu_to_le32(0),
364 },
365 };
367 static struct service_to_pipe target_service_to_ce_map_wlan[] = {
369 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
370 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
371 __cpu_to_le32(3),
374 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
375 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
376 __cpu_to_le32(2),
379 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
380 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
381 __cpu_to_le32(3),
384 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
385 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
386 __cpu_to_le32(2),
389 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
390 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
391 __cpu_to_le32(3),
394 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
395 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
396 __cpu_to_le32(2),
399 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
400 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
401 __cpu_to_le32(3),
404 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
405 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
406 __cpu_to_le32(2),
409 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
410 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
411 __cpu_to_le32(3),
414 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
415 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
416 __cpu_to_le32(2),
419 __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
420 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
421 __cpu_to_le32(0),
424 __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
425 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
426 __cpu_to_le32(2),
428 { /* not used */
429 __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
430 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
431 __cpu_to_le32(0),
433 { /* not used */
434 __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
435 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
436 __cpu_to_le32(2),
439 __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
440 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
441 __cpu_to_le32(4),
444 __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
445 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
446 __cpu_to_le32(1),
448 { /* not used */
449 __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
450 __cpu_to_le32(PIPEDIR_OUT),
451 __cpu_to_le32(5),
453 { /* in = DL = target -> host */
454 __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA2_MSG),
455 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
456 __cpu_to_le32(9),
458 { /* in = DL = target -> host */
459 __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA3_MSG),
460 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
461 __cpu_to_le32(10),
463 { /* in = DL = target -> host pktlog */
464 __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_LOG_MSG),
465 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
466 __cpu_to_le32(11),
468 /* (Additions here) */
470 { /* must be last */
471 __cpu_to_le32(0),
472 __cpu_to_le32(0),
473 __cpu_to_le32(0),
474 },
475 };
477 void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value)
479 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
481 iowrite32(value, ar_snoc->mem + offset);
484 u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset)
486 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
487 u32 val;
489 val = ioread32(ar_snoc->mem + offset);
491 return val;
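/* Allocate a receive skb, DMA-map it for the device and post it to the CE
 * destination ring under ce_lock; the buffer is unmapped and freed again if
 * posting fails.
 */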
494 static int __ath10k_snoc_rx_post_buf(struct ath10k_snoc_pipe *pipe)
496 struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
497 struct ath10k *ar = pipe->hif_ce_state;
498 struct ath10k_ce *ce = ath10k_ce_priv(ar);
499 struct sk_buff *skb;
500 dma_addr_t paddr;
501 int ret;
503 skb = dev_alloc_skb(pipe->buf_sz);
504 if (!skb)
505 return -ENOMEM;
507 WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
509 paddr = dma_map_single(ar->dev, skb->data,
510 skb->len + skb_tailroom(skb),
511 DMA_FROM_DEVICE);
512 if (unlikely(dma_mapping_error(ar->dev, paddr))) {
513 ath10k_warn(ar, "failed to dma map snoc rx buf\n");
514 dev_kfree_skb_any(skb);
515 return -EIO;
518 ATH10K_SKB_RXCB(skb)->paddr = paddr;
520 spin_lock_bh(&ce->ce_lock);
521 ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
522 spin_unlock_bh(&ce->ce_lock);
523 if (ret) {
524 dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
525 DMA_FROM_DEVICE);
526 dev_kfree_skb_any(skb);
527 return ret;
530 return 0;
533 static void ath10k_snoc_rx_post_pipe(struct ath10k_snoc_pipe *pipe)
535 struct ath10k *ar = pipe->hif_ce_state;
536 struct ath10k_ce *ce = ath10k_ce_priv(ar);
537 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
538 struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
539 int ret, num;
541 if (pipe->buf_sz == 0)
542 return;
544 if (!ce_pipe->dest_ring)
545 return;
547 spin_lock_bh(&ce->ce_lock);
548 num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
549 spin_unlock_bh(&ce->ce_lock);
550 while (num--) {
551 ret = __ath10k_snoc_rx_post_buf(pipe);
552 if (ret) {
553 if (ret == -ENOSPC)
554 break;
555 ath10k_warn(ar, "failed to post rx buf: %d\n", ret);
556 mod_timer(&ar_snoc->rx_post_retry, jiffies +
557 ATH10K_SNOC_RX_POST_RETRY_MS);
558 break;
563 static void ath10k_snoc_rx_post(struct ath10k *ar)
565 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
566 int i;
568 for (i = 0; i < CE_COUNT; i++)
569 ath10k_snoc_rx_post_pipe(&ar_snoc->pipe_info[i]);
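/* Generic rx completion handler: unmap each completed buffer, hand it to the
 * supplied callback (HTC or HTT) and then replenish the rx pipe.
 */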
572 static void ath10k_snoc_process_rx_cb(struct ath10k_ce_pipe *ce_state,
573 void (*callback)(struct ath10k *ar,
574 struct sk_buff *skb))
576 struct ath10k *ar = ce_state->ar;
577 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
578 struct ath10k_snoc_pipe *pipe_info = &ar_snoc->pipe_info[ce_state->id];
579 struct sk_buff *skb;
580 struct sk_buff_head list;
581 void *transfer_context;
582 unsigned int nbytes, max_nbytes;
584 __skb_queue_head_init(&list);
585 while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
586 &nbytes) == 0) {
587 skb = transfer_context;
588 max_nbytes = skb->len + skb_tailroom(skb);
589 dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
590 max_nbytes, DMA_FROM_DEVICE);
592 if (unlikely(max_nbytes < nbytes)) {
593 ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
594 nbytes, max_nbytes);
595 dev_kfree_skb_any(skb);
596 continue;
599 skb_put(skb, nbytes);
600 __skb_queue_tail(&list, skb);
603 while ((skb = __skb_dequeue(&list))) {
604 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc rx ce pipe %d len %d\n",
605 ce_state->id, skb->len);
607 callback(ar, skb);
610 ath10k_snoc_rx_post_pipe(pipe_info);
613 static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
615 ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
618 static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
620 /* CE4 polling needs to be done whenever CE pipe which transports
621 * HTT Rx (target->host) is processed.
622 */
623 ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);
625 ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
628 /* Called by lower (CE) layer when data is received from the Target.
629 * WCN3990 firmware uses separate CE(CE11) to transfer pktlog data.
630 */
631 static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
633 ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
636 static void ath10k_snoc_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
638 skb_pull(skb, sizeof(struct ath10k_htc_hdr));
639 ath10k_htt_t2h_msg_handler(ar, skb);
642 static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
644 ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);
645 ath10k_snoc_process_rx_cb(ce_state, ath10k_snoc_htt_rx_deliver);
648 static void ath10k_snoc_rx_replenish_retry(struct timer_list *t)
650 struct ath10k_snoc *ar_snoc = from_timer(ar_snoc, t, rx_post_retry);
651 struct ath10k *ar = ar_snoc->ar;
653 ath10k_snoc_rx_post(ar);
656 static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
658 struct ath10k *ar = ce_state->ar;
659 struct sk_buff_head list;
660 struct sk_buff *skb;
662 __skb_queue_head_init(&list);
663 while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
664 if (!skb)
665 continue;
667 __skb_queue_tail(&list, skb);
670 while ((skb = __skb_dequeue(&list)))
671 ath10k_htc_tx_completion_handler(ar, skb);
674 static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
676 struct ath10k *ar = ce_state->ar;
677 struct sk_buff *skb;
679 while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
680 if (!skb)
681 continue;
683 dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
684 skb->len, DMA_TO_DEVICE);
685 ath10k_htt_hif_tx_complete(ar, skb);
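/* Queue a scatter-gather list on a CE pipe: every fragment but the last is
 * submitted with CE_SEND_FLAG_GATHER; on failure the already queued
 * descriptors are reverted under ce_lock and the error is returned.
 */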
689 static int ath10k_snoc_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
690 struct ath10k_hif_sg_item *items, int n_items)
692 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
693 struct ath10k_ce *ce = ath10k_ce_priv(ar);
694 struct ath10k_snoc_pipe *snoc_pipe;
695 struct ath10k_ce_pipe *ce_pipe;
696 int err, i = 0;
698 snoc_pipe = &ar_snoc->pipe_info[pipe_id];
699 ce_pipe = snoc_pipe->ce_hdl;
700 spin_lock_bh(&ce->ce_lock);
702 for (i = 0; i < n_items - 1; i++) {
703 ath10k_dbg(ar, ATH10K_DBG_SNOC,
704 "snoc tx item %d paddr %pad len %d n_items %d\n",
705 i, &items[i].paddr, items[i].len, n_items);
707 err = ath10k_ce_send_nolock(ce_pipe,
708 items[i].transfer_context,
709 items[i].paddr,
710 items[i].len,
711 items[i].transfer_id,
712 CE_SEND_FLAG_GATHER);
713 if (err)
714 goto err;
717 ath10k_dbg(ar, ATH10K_DBG_SNOC,
718 "snoc tx item %d paddr %pad len %d n_items %d\n",
719 i, &items[i].paddr, items[i].len, n_items);
721 err = ath10k_ce_send_nolock(ce_pipe,
722 items[i].transfer_context,
723 items[i].paddr,
724 items[i].len,
725 items[i].transfer_id,
726 0);
727 if (err)
728 goto err;
730 spin_unlock_bh(&ce->ce_lock);
732 return 0;
734 err:
735 for (; i > 0; i--)
736 __ath10k_ce_send_revert(ce_pipe);
738 spin_unlock_bh(&ce->ce_lock);
739 return err;
742 static int ath10k_snoc_hif_get_target_info(struct ath10k *ar,
743 struct bmi_target_info *target_info)
745 target_info->version = ATH10K_HW_WCN3990;
746 target_info->type = ATH10K_HW_WCN3990;
748 return 0;
751 static u16 ath10k_snoc_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
753 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
755 ath10k_dbg(ar, ATH10K_DBG_SNOC, "hif get free queue number\n");
757 return ath10k_ce_num_free_src_entries(ar_snoc->pipe_info[pipe].ce_hdl);
760 static void ath10k_snoc_hif_send_complete_check(struct ath10k *ar, u8 pipe,
761 int force)
763 int resources;
765 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif send complete check\n");
767 if (!force) {
768 resources = ath10k_snoc_hif_get_free_queue_number(ar, pipe);
770 if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
771 return;
773 ath10k_ce_per_engine_service(ar, pipe);
776 static int ath10k_snoc_hif_map_service_to_pipe(struct ath10k *ar,
777 u16 service_id,
778 u8 *ul_pipe, u8 *dl_pipe)
780 const struct service_to_pipe *entry;
781 bool ul_set = false, dl_set = false;
782 int i;
784 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif map service\n");
786 for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
787 entry = &target_service_to_ce_map_wlan[i];
789 if (__le32_to_cpu(entry->service_id) != service_id)
790 continue;
792 switch (__le32_to_cpu(entry->pipedir)) {
793 case PIPEDIR_NONE:
794 break;
795 case PIPEDIR_IN:
796 WARN_ON(dl_set);
797 *dl_pipe = __le32_to_cpu(entry->pipenum);
798 dl_set = true;
799 break;
800 case PIPEDIR_OUT:
801 WARN_ON(ul_set);
802 *ul_pipe = __le32_to_cpu(entry->pipenum);
803 ul_set = true;
804 break;
805 case PIPEDIR_INOUT:
806 WARN_ON(dl_set);
807 WARN_ON(ul_set);
808 *dl_pipe = __le32_to_cpu(entry->pipenum);
809 *ul_pipe = __le32_to_cpu(entry->pipenum);
810 dl_set = true;
811 ul_set = true;
812 break;
816 if (!ul_set || !dl_set)
817 return -ENOENT;
819 return 0;
822 static void ath10k_snoc_hif_get_default_pipe(struct ath10k *ar,
823 u8 *ul_pipe, u8 *dl_pipe)
825 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif get default pipe\n");
827 (void)ath10k_snoc_hif_map_service_to_pipe(ar,
828 ATH10K_HTC_SVC_ID_RSVD_CTRL,
829 ul_pipe, dl_pipe);
832 static inline void ath10k_snoc_irq_disable(struct ath10k *ar)
834 ath10k_ce_disable_interrupts(ar);
837 static inline void ath10k_snoc_irq_enable(struct ath10k *ar)
839 ath10k_ce_enable_interrupts(ar);
842 static void ath10k_snoc_rx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
844 struct ath10k_ce_pipe *ce_pipe;
845 struct ath10k_ce_ring *ce_ring;
846 struct sk_buff *skb;
847 struct ath10k *ar;
848 int i;
850 ar = snoc_pipe->hif_ce_state;
851 ce_pipe = snoc_pipe->ce_hdl;
852 ce_ring = ce_pipe->dest_ring;
854 if (!ce_ring)
855 return;
857 if (!snoc_pipe->buf_sz)
858 return;
860 for (i = 0; i < ce_ring->nentries; i++) {
861 skb = ce_ring->per_transfer_context[i];
862 if (!skb)
863 continue;
865 ce_ring->per_transfer_context[i] = NULL;
867 dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
868 skb->len + skb_tailroom(skb),
869 DMA_FROM_DEVICE);
870 dev_kfree_skb_any(skb);
874 static void ath10k_snoc_tx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
876 struct ath10k_ce_pipe *ce_pipe;
877 struct ath10k_ce_ring *ce_ring;
878 struct ath10k_snoc *ar_snoc;
879 struct sk_buff *skb;
880 struct ath10k *ar;
881 int i;
883 ar = snoc_pipe->hif_ce_state;
884 ar_snoc = ath10k_snoc_priv(ar);
885 ce_pipe = snoc_pipe->ce_hdl;
886 ce_ring = ce_pipe->src_ring;
888 if (!ce_ring)
889 return;
891 if (!snoc_pipe->buf_sz)
892 return;
894 for (i = 0; i < ce_ring->nentries; i++) {
895 skb = ce_ring->per_transfer_context[i];
896 if (!skb)
897 continue;
899 ce_ring->per_transfer_context[i] = NULL;
901 ath10k_htc_tx_completion_handler(ar, skb);
905 static void ath10k_snoc_buffer_cleanup(struct ath10k *ar)
907 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
908 struct ath10k_snoc_pipe *pipe_info;
909 int pipe_num;
911 del_timer_sync(&ar_snoc->rx_post_retry);
912 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
913 pipe_info = &ar_snoc->pipe_info[pipe_num];
914 ath10k_snoc_rx_pipe_cleanup(pipe_info);
915 ath10k_snoc_tx_pipe_cleanup(pipe_info);
919 static void ath10k_snoc_hif_stop(struct ath10k *ar)
921 ath10k_snoc_irq_disable(ar);
922 napi_synchronize(&ar->napi);
923 napi_disable(&ar->napi);
924 ath10k_snoc_buffer_cleanup(ar);
925 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
928 static int ath10k_snoc_hif_start(struct ath10k *ar)
930 napi_enable(&ar->napi);
931 ath10k_snoc_irq_enable(ar);
932 ath10k_snoc_rx_post(ar);
934 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
936 return 0;
939 static int ath10k_snoc_init_pipes(struct ath10k *ar)
941 int i, ret;
943 for (i = 0; i < CE_COUNT; i++) {
944 ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
945 if (ret) {
946 ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
947 i, ret);
948 return ret;
952 return 0;
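/* Pack the target CE pipe, service-to-pipe and shadow register tables into a
 * QMI wlan enable request and bring the firmware up in mission mode.
 */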
955 static int ath10k_snoc_wlan_enable(struct ath10k *ar)
957 struct ath10k_tgt_pipe_cfg tgt_cfg[CE_COUNT_MAX];
958 struct ath10k_qmi_wlan_enable_cfg cfg;
959 enum wlfw_driver_mode_enum_v01 mode;
960 int pipe_num;
962 for (pipe_num = 0; pipe_num < CE_COUNT_MAX; pipe_num++) {
963 tgt_cfg[pipe_num].pipe_num =
964 target_ce_config_wlan[pipe_num].pipenum;
965 tgt_cfg[pipe_num].pipe_dir =
966 target_ce_config_wlan[pipe_num].pipedir;
967 tgt_cfg[pipe_num].nentries =
968 target_ce_config_wlan[pipe_num].nentries;
969 tgt_cfg[pipe_num].nbytes_max =
970 target_ce_config_wlan[pipe_num].nbytes_max;
971 tgt_cfg[pipe_num].flags =
972 target_ce_config_wlan[pipe_num].flags;
973 tgt_cfg[pipe_num].reserved = 0;
976 cfg.num_ce_tgt_cfg = sizeof(target_ce_config_wlan) /
977 sizeof(struct ath10k_tgt_pipe_cfg);
978 cfg.ce_tgt_cfg = (struct ath10k_tgt_pipe_cfg *)
979 &tgt_cfg;
980 cfg.num_ce_svc_pipe_cfg = sizeof(target_service_to_ce_map_wlan) /
981 sizeof(struct ath10k_svc_pipe_cfg);
982 cfg.ce_svc_cfg = (struct ath10k_svc_pipe_cfg *)
983 &target_service_to_ce_map_wlan;
984 cfg.num_shadow_reg_cfg = sizeof(target_shadow_reg_cfg_map) /
985 sizeof(struct ath10k_shadow_reg_cfg);
986 cfg.shadow_reg_cfg = (struct ath10k_shadow_reg_cfg *)
987 &target_shadow_reg_cfg_map;
989 mode = QMI_WLFW_MISSION_V01;
991 return ath10k_qmi_wlan_enable(ar, &cfg, mode,
992 NULL);
995 static void ath10k_snoc_wlan_disable(struct ath10k *ar)
997 ath10k_qmi_wlan_disable(ar);
1000 static void ath10k_snoc_hif_power_down(struct ath10k *ar)
1002 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
1004 ath10k_snoc_wlan_disable(ar);
1005 ath10k_ce_free_rri(ar);
1008 static int ath10k_snoc_hif_power_up(struct ath10k *ar)
1010 int ret;
1012 ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s:WCN3990 driver state = %d\n",
1013 __func__, ar->state);
1015 ret = ath10k_snoc_wlan_enable(ar);
1016 if (ret) {
1017 ath10k_err(ar, "failed to enable wcn3990: %d\n", ret);
1018 return ret;
1021 ath10k_ce_alloc_rri(ar);
1023 ret = ath10k_snoc_init_pipes(ar);
1024 if (ret) {
1025 ath10k_err(ar, "failed to initialize CE: %d\n", ret);
1026 goto err_wlan_enable;
1029 return 0;
1031 err_wlan_enable:
1032 ath10k_snoc_wlan_disable(ar);
1034 return ret;
1037 static const struct ath10k_hif_ops ath10k_snoc_hif_ops = {
1038 .read32 = ath10k_snoc_read32,
1039 .write32 = ath10k_snoc_write32,
1040 .start = ath10k_snoc_hif_start,
1041 .stop = ath10k_snoc_hif_stop,
1042 .map_service_to_pipe = ath10k_snoc_hif_map_service_to_pipe,
1043 .get_default_pipe = ath10k_snoc_hif_get_default_pipe,
1044 .power_up = ath10k_snoc_hif_power_up,
1045 .power_down = ath10k_snoc_hif_power_down,
1046 .tx_sg = ath10k_snoc_hif_tx_sg,
1047 .send_complete_check = ath10k_snoc_hif_send_complete_check,
1048 .get_free_queue_number = ath10k_snoc_hif_get_free_queue_number,
1049 .get_target_info = ath10k_snoc_hif_get_target_info,
1052 static const struct ath10k_bus_ops ath10k_snoc_bus_ops = {
1053 .read32 = ath10k_snoc_read32,
1054 .write32 = ath10k_snoc_write32,
1057 static int ath10k_snoc_get_ce_id_from_irq(struct ath10k *ar, int irq)
1059 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1060 int i;
1062 for (i = 0; i < CE_COUNT_MAX; i++) {
1063 if (ar_snoc->ce_irqs[i].irq_line == irq)
1064 return i;
1066 ath10k_err(ar, "No matching CE id for irq %d\n", irq);
1068 return -EINVAL;
1071 static irqreturn_t ath10k_snoc_per_engine_handler(int irq, void *arg)
1073 struct ath10k *ar = arg;
1074 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1075 int ce_id = ath10k_snoc_get_ce_id_from_irq(ar, irq);
1077 if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_snoc->pipe_info)) {
1078 ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
1079 ce_id);
1080 return IRQ_HANDLED;
1083 ath10k_snoc_irq_disable(ar);
1084 napi_schedule(&ar->napi);
1086 return IRQ_HANDLED;
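/* NAPI poll handler: service the copy engines, run HTT tx/rx completion work
 * up to the budget and re-enable CE interrupts once the budget is not
 * exhausted.
 */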
1089 static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget)
1091 struct ath10k *ar = container_of(ctx, struct ath10k, napi);
1092 int done = 0;
1094 ath10k_ce_per_engine_service_any(ar);
1095 done = ath10k_htt_txrx_compl_task(ar, budget);
1097 if (done < budget) {
1098 napi_complete(ctx);
1099 ath10k_snoc_irq_enable(ar);
1102 return done;
1105 static void ath10k_snoc_init_napi(struct ath10k *ar)
1107 netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll,
1108 ATH10K_NAPI_BUDGET);
1111 static int ath10k_snoc_request_irq(struct ath10k *ar)
1113 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1114 int irqflags = IRQF_TRIGGER_RISING;
1115 int ret, id;
1117 for (id = 0; id < CE_COUNT_MAX; id++) {
1118 ret = request_irq(ar_snoc->ce_irqs[id].irq_line,
1119 ath10k_snoc_per_engine_handler,
1120 irqflags, ce_name[id], ar);
1121 if (ret) {
1122 ath10k_err(ar,
1123 "failed to register IRQ handler for CE %d: %d",
1124 id, ret);
1125 goto err_irq;
1129 return 0;
1131 err_irq:
1132 for (id -= 1; id >= 0; id--)
1133 free_irq(ar_snoc->ce_irqs[id].irq_line, ar);
1135 return ret;
1138 static void ath10k_snoc_free_irq(struct ath10k *ar)
1140 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1141 int id;
1143 for (id = 0; id < CE_COUNT_MAX; id++)
1144 free_irq(ar_snoc->ce_irqs[id].irq_line, ar);
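/* Map the "membase" MMIO region from the platform resources and record the
 * interrupt line assigned to each copy engine.
 */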
1147 static int ath10k_snoc_resource_init(struct ath10k *ar)
1149 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1150 struct platform_device *pdev;
1151 struct resource *res;
1152 int i, ret = 0;
1154 pdev = ar_snoc->dev;
1155 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "membase");
1156 if (!res) {
1157 ath10k_err(ar, "Memory base not found in DT\n");
1158 return -EINVAL;
1161 ar_snoc->mem_pa = res->start;
1162 ar_snoc->mem = devm_ioremap(&pdev->dev, ar_snoc->mem_pa,
1163 resource_size(res));
1164 if (!ar_snoc->mem) {
1165 ath10k_err(ar, "Memory base ioremap failed with physical address %pa\n",
1166 &ar_snoc->mem_pa);
1167 return -EINVAL;
1170 for (i = 0; i < CE_COUNT; i++) {
1171 res = platform_get_resource(ar_snoc->dev, IORESOURCE_IRQ, i);
1172 if (!res) {
1173 ath10k_err(ar, "failed to get IRQ%d\n", i);
1174 ret = -ENODEV;
1175 goto out;
1177 ar_snoc->ce_irqs[i].irq_line = res->start;
1180 out:
1181 return ret;
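/* Handle firmware state indications from the QMI layer: FW_READY triggers
 * registration of the ath10k core, FW_DOWN is ignored here.
 */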
1184 int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type)
1186 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1187 struct ath10k_bus_params bus_params;
1188 int ret;
1190 switch (type) {
1191 case ATH10K_QMI_EVENT_FW_READY_IND:
1192 bus_params.dev_type = ATH10K_DEV_TYPE_LL;
1193 bus_params.chip_id = ar_snoc->target_info.soc_version;
1194 ret = ath10k_core_register(ar, &bus_params);
1195 if (ret) {
1196 ath10k_err(ar, "failed to register driver core: %d\n",
1197 ret);
1199 break;
1200 case ATH10K_QMI_EVENT_FW_DOWN_IND:
1201 break;
1202 default:
1203 ath10k_err(ar, "invalid fw indication: %llx\n", type);
1204 return -EINVAL;
1207 return 0;
1210 static int ath10k_snoc_setup_resource(struct ath10k *ar)
1212 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1213 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1214 struct ath10k_snoc_pipe *pipe;
1215 int i, ret;
1217 timer_setup(&ar_snoc->rx_post_retry, ath10k_snoc_rx_replenish_retry, 0);
1218 spin_lock_init(&ce->ce_lock);
1219 for (i = 0; i < CE_COUNT; i++) {
1220 pipe = &ar_snoc->pipe_info[i];
1221 pipe->ce_hdl = &ce->ce_states[i];
1222 pipe->pipe_num = i;
1223 pipe->hif_ce_state = ar;
1225 ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
1226 if (ret) {
1227 ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
1228 i, ret);
1229 return ret;
1232 pipe->buf_sz = host_ce_config_wlan[i].src_sz_max;
1234 ath10k_snoc_init_napi(ar);
1236 return 0;
1239 static void ath10k_snoc_release_resource(struct ath10k *ar)
1241 int i;
1243 netif_napi_del(&ar->napi);
1244 for (i = 0; i < CE_COUNT; i++)
1245 ath10k_ce_free_pipe(ar, i);
1248 static int ath10k_get_vreg_info(struct ath10k *ar, struct device *dev,
1249 struct ath10k_wcn3990_vreg_info *vreg_info)
1251 struct regulator *reg;
1252 int ret = 0;
1254 reg = devm_regulator_get_optional(dev, vreg_info->name);
1256 if (IS_ERR(reg)) {
1257 ret = PTR_ERR(reg);
1259 if (ret == -EPROBE_DEFER) {
1260 ath10k_err(ar, "EPROBE_DEFER for regulator: %s\n",
1261 vreg_info->name);
1262 return ret;
1264 if (vreg_info->required) {
1265 ath10k_err(ar, "Regulator %s doesn't exist: %d\n",
1266 vreg_info->name, ret);
1267 return ret;
1269 ath10k_dbg(ar, ATH10K_DBG_SNOC,
1270 "Optional regulator %s doesn't exist: %d\n",
1271 vreg_info->name, ret);
1272 goto done;
1275 vreg_info->reg = reg;
1277 done:
1278 ath10k_dbg(ar, ATH10K_DBG_SNOC,
1279 "snog vreg %s min_v %u max_v %u load_ua %u settle_delay %lu\n",
1280 vreg_info->name, vreg_info->min_v, vreg_info->max_v,
1281 vreg_info->load_ua, vreg_info->settle_delay);
1283 return 0;
1286 static int ath10k_get_clk_info(struct ath10k *ar, struct device *dev,
1287 struct ath10k_wcn3990_clk_info *clk_info)
1289 struct clk *handle;
1290 int ret = 0;
1292 handle = devm_clk_get(dev, clk_info->name);
1293 if (IS_ERR(handle)) {
1294 ret = PTR_ERR(handle);
1295 if (clk_info->required) {
1296 ath10k_err(ar, "snoc clock %s isn't available: %d\n",
1297 clk_info->name, ret);
1298 return ret;
1300 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc ignoring clock %s: %d\n",
1301 clk_info->name,
1302 ret);
1303 return 0;
1306 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s freq %u\n",
1307 clk_info->name, clk_info->freq);
1309 clk_info->handle = handle;
1311 return ret;
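/* Enable each configured regulator in order, programming voltage and load
 * first; a failure unwinds the regulators that were already enabled.
 */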
1314 static int ath10k_wcn3990_vreg_on(struct ath10k *ar)
1316 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1317 struct ath10k_wcn3990_vreg_info *vreg_info;
1318 int ret = 0;
1319 int i;
1321 for (i = 0; i < ARRAY_SIZE(vreg_cfg); i++) {
1322 vreg_info = &ar_snoc->vreg[i];
1324 if (!vreg_info->reg)
1325 continue;
1327 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being enabled\n",
1328 vreg_info->name);
1330 ret = regulator_set_voltage(vreg_info->reg, vreg_info->min_v,
1331 vreg_info->max_v);
1332 if (ret) {
1333 ath10k_err(ar,
1334 "failed to set regulator %s voltage-min: %d voltage-max: %d\n",
1335 vreg_info->name, vreg_info->min_v, vreg_info->max_v);
1336 goto err_reg_config;
1339 if (vreg_info->load_ua) {
1340 ret = regulator_set_load(vreg_info->reg,
1341 vreg_info->load_ua);
1342 if (ret < 0) {
1343 ath10k_err(ar,
1344 "failed to set regulator %s load: %d\n",
1345 vreg_info->name,
1346 vreg_info->load_ua);
1347 goto err_reg_config;
1351 ret = regulator_enable(vreg_info->reg);
1352 if (ret) {
1353 ath10k_err(ar, "failed to enable regulator %s\n",
1354 vreg_info->name);
1355 goto err_reg_config;
1358 if (vreg_info->settle_delay)
1359 udelay(vreg_info->settle_delay);
1362 return 0;
1364 err_reg_config:
1365 for (; i >= 0; i--) {
1366 vreg_info = &ar_snoc->vreg[i];
1368 if (!vreg_info->reg)
1369 continue;
1371 regulator_disable(vreg_info->reg);
1372 regulator_set_load(vreg_info->reg, 0);
1373 regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v);
1376 return ret;
1379 static int ath10k_wcn3990_vreg_off(struct ath10k *ar)
1381 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1382 struct ath10k_wcn3990_vreg_info *vreg_info;
1383 int ret = 0;
1384 int i;
1386 for (i = ARRAY_SIZE(vreg_cfg) - 1; i >= 0; i--) {
1387 vreg_info = &ar_snoc->vreg[i];
1389 if (!vreg_info->reg)
1390 continue;
1392 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being disabled\n",
1393 vreg_info->name);
1395 ret = regulator_disable(vreg_info->reg);
1396 if (ret)
1397 ath10k_err(ar, "failed to disable regulator %s\n",
1398 vreg_info->name);
1400 ret = regulator_set_load(vreg_info->reg, 0);
1401 if (ret < 0)
1402 ath10k_err(ar, "failed to set load %s\n",
1403 vreg_info->name);
1405 ret = regulator_set_voltage(vreg_info->reg, 0,
1406 vreg_info->max_v);
1407 if (ret)
1408 ath10k_err(ar, "failed to set voltage %s\n",
1409 vreg_info->name);
1412 return ret;
1415 static int ath10k_wcn3990_clk_init(struct ath10k *ar)
1417 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1418 struct ath10k_wcn3990_clk_info *clk_info;
1419 int ret = 0;
1420 int i;
1422 for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
1423 clk_info = &ar_snoc->clk[i];
1425 if (!clk_info->handle)
1426 continue;
1428 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s being enabled\n",
1429 clk_info->name);
1431 if (clk_info->freq) {
1432 ret = clk_set_rate(clk_info->handle, clk_info->freq);
1434 if (ret) {
1435 ath10k_err(ar, "failed to set clock %s freq %u\n",
1436 clk_info->name, clk_info->freq);
1437 goto err_clock_config;
1441 ret = clk_prepare_enable(clk_info->handle);
1442 if (ret) {
1443 ath10k_err(ar, "failed to enable clock %s\n",
1444 clk_info->name);
1445 goto err_clock_config;
1449 return 0;
1451 err_clock_config:
1452 for (; i >= 0; i--) {
1453 clk_info = &ar_snoc->clk[i];
1455 if (!clk_info->handle)
1456 continue;
1458 clk_disable_unprepare(clk_info->handle);
1461 return ret;
1464 static int ath10k_wcn3990_clk_deinit(struct ath10k *ar)
1466 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1467 struct ath10k_wcn3990_clk_info *clk_info;
1468 int i;
1470 for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
1471 clk_info = &ar_snoc->clk[i];
1473 if (!clk_info->handle)
1474 continue;
1476 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s being disabled\n",
1477 clk_info->name);
1479 clk_disable_unprepare(clk_info->handle);
1482 return 0;
1485 static int ath10k_hw_power_on(struct ath10k *ar)
1487 int ret;
1489 ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power on\n");
1491 ret = ath10k_wcn3990_vreg_on(ar);
1492 if (ret)
1493 return ret;
1495 ret = ath10k_wcn3990_clk_init(ar);
1496 if (ret)
1497 goto vreg_off;
1499 return ret;
1501 vreg_off:
1502 ath10k_wcn3990_vreg_off(ar);
1503 return ret;
1506 static int ath10k_hw_power_off(struct ath10k *ar)
1508 int ret;
1510 ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power off\n");
1512 ath10k_wcn3990_clk_deinit(ar);
1514 ret = ath10k_wcn3990_vreg_off(ar);
1516 return ret;
1519 static const struct of_device_id ath10k_snoc_dt_match[] = {
1520 { .compatible = "qcom,wcn3990-wifi",
1521 .data = &drv_priv,
1525 MODULE_DEVICE_TABLE(of, ath10k_snoc_dt_match);
1527 static int ath10k_snoc_probe(struct platform_device *pdev)
1529 const struct ath10k_snoc_drv_priv *drv_data;
1530 const struct of_device_id *of_id;
1531 struct ath10k_snoc *ar_snoc;
1532 struct device *dev;
1533 struct ath10k *ar;
1534 u32 msa_size;
1535 int ret;
1536 u32 i;
1538 of_id = of_match_device(ath10k_snoc_dt_match, &pdev->dev);
1539 if (!of_id) {
1540 dev_err(&pdev->dev, "failed to find matching device tree id\n");
1541 return -EINVAL;
1544 drv_data = of_id->data;
1545 dev = &pdev->dev;
1547 ret = dma_set_mask_and_coherent(dev, drv_data->dma_mask);
1548 if (ret) {
1549 dev_err(dev, "failed to set dma mask: %d", ret);
1550 return ret;
1553 ar = ath10k_core_create(sizeof(*ar_snoc), dev, ATH10K_BUS_SNOC,
1554 drv_data->hw_rev, &ath10k_snoc_hif_ops);
1555 if (!ar) {
1556 dev_err(dev, "failed to allocate core\n");
1557 return -ENOMEM;
1560 ar_snoc = ath10k_snoc_priv(ar);
1561 ar_snoc->dev = pdev;
1562 platform_set_drvdata(pdev, ar);
1563 ar_snoc->ar = ar;
1564 ar_snoc->ce.bus_ops = &ath10k_snoc_bus_ops;
1565 ar->ce_priv = &ar_snoc->ce;
1566 msa_size = drv_data->msa_size;
1568 ret = ath10k_snoc_resource_init(ar);
1569 if (ret) {
1570 ath10k_warn(ar, "failed to initialize resource: %d\n", ret);
1571 goto err_core_destroy;
1574 ret = ath10k_snoc_setup_resource(ar);
1575 if (ret) {
1576 ath10k_warn(ar, "failed to setup resource: %d\n", ret);
1577 goto err_core_destroy;
1579 ret = ath10k_snoc_request_irq(ar);
1580 if (ret) {
1581 ath10k_warn(ar, "failed to request irqs: %d\n", ret);
1582 goto err_release_resource;
1585 ar_snoc->vreg = vreg_cfg;
1586 for (i = 0; i < ARRAY_SIZE(vreg_cfg); i++) {
1587 ret = ath10k_get_vreg_info(ar, dev, &ar_snoc->vreg[i]);
1588 if (ret)
1589 goto err_free_irq;
1592 ar_snoc->clk = clk_cfg;
1593 for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
1594 ret = ath10k_get_clk_info(ar, dev, &ar_snoc->clk[i]);
1595 if (ret)
1596 goto err_free_irq;
1599 ret = ath10k_hw_power_on(ar);
1600 if (ret) {
1601 ath10k_err(ar, "failed to power on device: %d\n", ret);
1602 goto err_free_irq;
1605 ret = ath10k_qmi_init(ar, msa_size);
1606 if (ret) {
1607 ath10k_warn(ar, "failed to register wlfw qmi client: %d\n", ret);
1608 goto err_core_destroy;
1611 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n");
1612 ath10k_warn(ar, "Warning: SNOC support is still work-in-progress, it will not work properly!");
1614 return 0;
1616 err_free_irq:
1617 ath10k_snoc_free_irq(ar);
1619 err_release_resource:
1620 ath10k_snoc_release_resource(ar);
1622 err_core_destroy:
1623 ath10k_core_destroy(ar);
1625 return ret;
1628 static int ath10k_snoc_remove(struct platform_device *pdev)
1630 struct ath10k *ar = platform_get_drvdata(pdev);
1632 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc remove\n");
1633 ath10k_core_unregister(ar);
1634 ath10k_hw_power_off(ar);
1635 ath10k_snoc_free_irq(ar);
1636 ath10k_snoc_release_resource(ar);
1637 ath10k_qmi_deinit(ar);
1638 ath10k_core_destroy(ar);
1640 return 0;
1643 static struct platform_driver ath10k_snoc_driver = {
1644 .probe = ath10k_snoc_probe,
1645 .remove = ath10k_snoc_remove,
1646 .driver = {
1647 .name = "ath10k_snoc",
1648 .of_match_table = ath10k_snoc_dt_match,
1651 module_platform_driver(ath10k_snoc_driver);
1653 MODULE_AUTHOR("Qualcomm");
1654 MODULE_LICENSE("Dual BSD/GPL");
1655 MODULE_DESCRIPTION("Driver support for Atheros WCN3990 SNOC devices");