/* Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/* DXE - DMA transfer engine
 * We have 2 channels (high prio and low prio) for TX and 2 channels for RX.
 * Data packets are transferred through the low-priority channels,
 * management packets through the high-priority ones.
 */
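/*
 * A note on the layout used below: each channel owns a circular list of
 * wcn36xx_dxe_ctl blocks, and each ctl block points at one hardware
 * descriptor (wcn36xx_dxe_desc) inside a DMA-coherent ring. head_blk_ctl
 * is where the driver posts new work, tail_blk_ctl is where completed
 * descriptors are reaped.
 */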
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/interrupt.h>
#include "wcn36xx.h"
#include "txrx.h"
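/*
 * wcn36xx_dxe_get_next_bd() hands out the CPU address of the BD chunk
 * attached to the current TX ring head; the TX path is expected to build
 * the buffer descriptor in place there before posting the frame with
 * wcn36xx_dxe_tx_frame().
 */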
void *wcn36xx_dxe_get_next_bd(struct wcn36xx *wcn, bool is_low)
{
        struct wcn36xx_dxe_ch *ch = is_low ?
                &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;

        return ch->head_blk_ctl->bd_cpu_addr;
}
static void wcn36xx_ccu_write_register(struct wcn36xx *wcn, int addr, int data)
{
        wcn36xx_dbg(WCN36XX_DBG_DXE,
                    "wcn36xx_ccu_write_register: addr=%x, data=%x\n",
                    addr, data);

        writel(data, wcn->ccu_base + addr);
}
static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
{
        wcn36xx_dbg(WCN36XX_DBG_DXE,
                    "wcn36xx_dxe_write_register: addr=%x, data=%x\n",
                    addr, data);

        writel(data, wcn->dxe_base + addr);
}
static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data)
{
        *data = readl(wcn->dxe_base + addr);

        wcn36xx_dbg(WCN36XX_DBG_DXE,
                    "wcn36xx_dxe_read_register: addr=%x, data=%x\n",
                    addr, *data);
}
static void wcn36xx_dxe_free_ctl_block(struct wcn36xx_dxe_ch *ch)
{
        struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl, *next;
        int i;

        for (i = 0; i < ch->desc_num && ctl; i++) {
                next = ctl->next;
                kfree(ctl);
                ctl = next;
        }
}
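/*
 * The allocator below links the last ctl block (i == desc_num - 1) back to
 * head_blk_ctl, closing the list into a ring; this is also why the free
 * routine above bounds its walk with desc_num instead of relying on a NULL
 * terminator.
 */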
static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch)
{
        struct wcn36xx_dxe_ctl *prev_ctl = NULL;
        struct wcn36xx_dxe_ctl *cur_ctl = NULL;
        int i;

        spin_lock_init(&ch->lock);
        for (i = 0; i < ch->desc_num; i++) {
                cur_ctl = kzalloc(sizeof(*cur_ctl), GFP_KERNEL);
                if (!cur_ctl)
                        goto out_fail;

                spin_lock_init(&cur_ctl->skb_lock);
                cur_ctl->ctl_blk_order = i;
                if (i == 0) {
                        ch->head_blk_ctl = cur_ctl;
                        ch->tail_blk_ctl = cur_ctl;
                } else if (ch->desc_num - 1 == i) {
                        prev_ctl->next = cur_ctl;
                        cur_ctl->next = ch->head_blk_ctl;
                } else {
                        prev_ctl->next = cur_ctl;
                }
                prev_ctl = cur_ctl;
        }

        return 0;

out_fail:
        wcn36xx_dxe_free_ctl_block(ch);
        return -ENOMEM;
}
int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn)
{
        int ret;

        wcn->dxe_tx_l_ch.ch_type = WCN36XX_DXE_CH_TX_L;
        wcn->dxe_tx_h_ch.ch_type = WCN36XX_DXE_CH_TX_H;
        wcn->dxe_rx_l_ch.ch_type = WCN36XX_DXE_CH_RX_L;
        wcn->dxe_rx_h_ch.ch_type = WCN36XX_DXE_CH_RX_H;

        wcn->dxe_tx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_L;
        wcn->dxe_tx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_H;
        wcn->dxe_rx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_L;
        wcn->dxe_rx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_H;

        wcn->dxe_tx_l_ch.dxe_wq = WCN36XX_DXE_WQ_TX_L;
        wcn->dxe_tx_h_ch.dxe_wq = WCN36XX_DXE_WQ_TX_H;

        wcn->dxe_tx_l_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_L_BD;
        wcn->dxe_tx_h_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_H_BD;

        wcn->dxe_tx_l_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_L_SKB;
        wcn->dxe_tx_h_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_H_SKB;

        wcn->dxe_tx_l_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_L;
        wcn->dxe_tx_h_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_H;

        wcn->dxe_tx_l_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_L;
        wcn->dxe_tx_h_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_H;

        /* DXE control block allocation */
        ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_l_ch);
        if (ret)
                goto out_err;
        ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_h_ch);
        if (ret)
                goto out_err;
        ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_l_ch);
        if (ret)
                goto out_err;
        ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_h_ch);
        if (ret)
                goto out_err;

        /* Initialize SMSM state: clear TX_ENABLE, set TX_RINGS_EMPTY */
        ret = wcn->ctrl_ops->smsm_change_state(
                WCN36XX_SMSM_WLAN_TX_ENABLE,
                WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY);

        return ret;

out_err:
        wcn36xx_err("Failed to allocate DXE control blocks\n");
        wcn36xx_dxe_free_ctl_blks(wcn);
        return -ENOMEM;
}
void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn)
{
        wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_l_ch);
        wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_h_ch);
        wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_l_ch);
        wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_h_ch);
}
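/*
 * wcn36xx_dxe_init_descs() lays all hardware descriptors of a channel out
 * in a single dma_alloc_coherent() block and chains them through
 * phy_next_l, with the last descriptor pointing back at the first - the
 * hardware-visible twin of the ctl-block ring built above.
 */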
static int wcn36xx_dxe_init_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn_ch)
{
        struct wcn36xx_dxe_desc *cur_dxe = NULL;
        struct wcn36xx_dxe_desc *prev_dxe = NULL;
        struct wcn36xx_dxe_ctl *cur_ctl = NULL;
        size_t size;
        int i;

        size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
        wcn_ch->cpu_addr = dma_alloc_coherent(dev, size, &wcn_ch->dma_addr,
                                              GFP_KERNEL);
        if (!wcn_ch->cpu_addr)
                return -ENOMEM;

        memset(wcn_ch->cpu_addr, 0, size);

        cur_dxe = (struct wcn36xx_dxe_desc *)wcn_ch->cpu_addr;
        cur_ctl = wcn_ch->head_blk_ctl;

        for (i = 0; i < wcn_ch->desc_num; i++) {
                cur_ctl->desc = cur_dxe;
                cur_ctl->desc_phy_addr = wcn_ch->dma_addr +
                        i * sizeof(struct wcn36xx_dxe_desc);

                switch (wcn_ch->ch_type) {
                case WCN36XX_DXE_CH_TX_L:
                        cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_L;
                        cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_L;
                        break;
                case WCN36XX_DXE_CH_TX_H:
                        cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_H;
                        cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_H;
                        break;
                case WCN36XX_DXE_CH_RX_L:
                        cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
                        cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_L;
                        break;
                case WCN36XX_DXE_CH_RX_H:
                        cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
                        cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_H;
                        break;
                }
                if (0 == i) {
                        cur_dxe->phy_next_l = 0;
                } else if ((0 < i) && (i < wcn_ch->desc_num - 1)) {
                        prev_dxe->phy_next_l =
                                cur_ctl->desc_phy_addr;
                } else if (i == (wcn_ch->desc_num - 1)) {
                        prev_dxe->phy_next_l =
                                cur_ctl->desc_phy_addr;
                        cur_dxe->phy_next_l =
                                wcn_ch->head_blk_ctl->desc_phy_addr;
                }
                cur_ctl = cur_ctl->next;
                prev_dxe = cur_dxe;
                cur_dxe++;
        }

        return 0;
}
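/*
 * TX descriptors are consumed in pairs: even ring slots carry a buffer
 * descriptor (BD) chunk from the mem pool, odd slots carry the skb payload
 * itself (see wcn36xx_dxe_tx_frame()). The helper below pre-assigns the BD
 * chunks to every second ctl block accordingly.
 */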
static void wcn36xx_dxe_init_tx_bd(struct wcn36xx_dxe_ch *ch,
                                   struct wcn36xx_dxe_mem_pool *pool)
{
        int i, chunk_size = pool->chunk_size;
        dma_addr_t bd_phy_addr = pool->phy_addr;
        void *bd_cpu_addr = pool->virt_addr;
        struct wcn36xx_dxe_ctl *cur = ch->head_blk_ctl;

        for (i = 0; i < ch->desc_num; i++) {
                /* Only every second dxe needs a bd pointer,
                 * the other will point to the skb data
                 */
                if (!(i & 1)) {
                        cur->bd_phy_addr = bd_phy_addr;
                        cur->bd_cpu_addr = bd_cpu_addr;
                        bd_phy_addr += chunk_size;
                        bd_cpu_addr += chunk_size;
                } else {
                        cur->bd_phy_addr = 0;
                        cur->bd_cpu_addr = NULL;
                }
                cur = cur->next;
        }
}
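/*
 * Read-modify-write of the DXE interrupt mask register: the channel bit in
 * wcn_ch is OR-ed in so that enabling one channel's interrupt does not
 * clobber the others.
 */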
static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
{
        int reg_data = 0;

        wcn36xx_dxe_read_register(wcn,
                                  WCN36XX_DXE_INT_MASK_REG,
                                  &reg_data);

        reg_data |= wcn_ch;

        wcn36xx_dxe_write_register(wcn,
                                   WCN36XX_DXE_INT_MASK_REG,
                                   (int)reg_data);
        return 0;
}
static int wcn36xx_dxe_fill_skb(struct device *dev, struct wcn36xx_dxe_ctl *ctl)
{
        struct wcn36xx_dxe_desc *dxe = ctl->desc;
        struct sk_buff *skb;

        skb = alloc_skb(WCN36XX_PKT_SIZE, GFP_ATOMIC);
        if (skb == NULL)
                return -ENOMEM;

        dxe->dst_addr_l = dma_map_single(dev,
                                         skb_tail_pointer(skb),
                                         WCN36XX_PKT_SIZE,
                                         DMA_FROM_DEVICE);
        ctl->skb = skb;

        return 0;
}
static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
                                    struct wcn36xx_dxe_ch *wcn_ch)
{
        int i;
        struct wcn36xx_dxe_ctl *cur_ctl = NULL;

        cur_ctl = wcn_ch->head_blk_ctl;

        for (i = 0; i < wcn_ch->desc_num; i++) {
                wcn36xx_dxe_fill_skb(wcn->dev, cur_ctl);
                cur_ctl = cur_ctl->next;
        }

        return 0;
}
static void wcn36xx_dxe_ch_free_skbs(struct wcn36xx *wcn,
                                     struct wcn36xx_dxe_ch *wcn_ch)
{
        struct wcn36xx_dxe_ctl *cur = wcn_ch->head_blk_ctl;
        int i;

        for (i = 0; i < wcn_ch->desc_num; i++) {
                kfree_skb(cur->skb);
                cur = cur->next;
        }
}
void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
{
        struct ieee80211_tx_info *info;
        struct sk_buff *skb;
        unsigned long flags;

        spin_lock_irqsave(&wcn->dxe_lock, flags);
        skb = wcn->tx_ack_skb;
        wcn->tx_ack_skb = NULL;
        spin_unlock_irqrestore(&wcn->dxe_lock, flags);

        if (!skb) {
                wcn36xx_warn("Spurious TX complete indication\n");
                return;
        }

        info = IEEE80211_SKB_CB(skb);

        if (status == 1)
                info->flags |= IEEE80211_TX_STAT_ACK;

        wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ack status: %d\n", status);

        ieee80211_tx_status_irqsafe(wcn->hw, skb);
        ieee80211_wake_queues(wcn->hw);
}
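/*
 * reap_tx_dxes() advances tail_blk_ctl towards head_blk_ctl, releasing the
 * DMA mapping and skb of every descriptor whose VALID bit the hardware has
 * already cleared (i.e. every completed transfer).
 */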
static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
{
        struct wcn36xx_dxe_ctl *ctl;
        struct ieee80211_tx_info *info;
        unsigned long flags;

        /*
         * Make at least one loop of do-while because in case ring is
         * completely full head and tail are pointing to the same element
         * and while-do will not make any cycles.
         */
        spin_lock_irqsave(&ch->lock, flags);
        ctl = ch->tail_blk_ctl;
        do {
                if (ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)
                        break;
                if (ctl->skb) {
                        dma_unmap_single(wcn->dev, ctl->desc->src_addr_l,
                                         ctl->skb->len, DMA_TO_DEVICE);
                        info = IEEE80211_SKB_CB(ctl->skb);
                        if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
                                /* Free now; frames that requested TX status
                                 * are kept until the status indication comes.
                                 */
                                ieee80211_free_txskb(wcn->hw, ctl->skb);
                        }
                        spin_lock(&ctl->skb_lock);
                        if (wcn->queues_stopped) {
                                wcn->queues_stopped = false;
                                ieee80211_wake_queues(wcn->hw);
                        }
                        spin_unlock(&ctl->skb_lock);

                        ctl->skb = NULL;
                }
                ctl = ctl->next;
        } while (ctl != ch->head_blk_ctl &&
                 !(ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK));

        ch->tail_blk_ctl = ctl;
        spin_unlock_irqrestore(&ch->lock, flags);
}
static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
{
        struct wcn36xx *wcn = (struct wcn36xx *)dev;
        int int_src, int_reason;

        wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);

        if (int_src & WCN36XX_INT_MASK_CHAN_TX_H) {
                wcn36xx_dxe_read_register(wcn,
                                          WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H,
                                          &int_reason);

                /* TODO: Check int_reason */

                wcn36xx_dxe_write_register(wcn,
                                           WCN36XX_DXE_0_INT_CLR,
                                           WCN36XX_INT_MASK_CHAN_TX_H);

                wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR,
                                           WCN36XX_INT_MASK_CHAN_TX_H);
                wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high\n");
                reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
        }

        if (int_src & WCN36XX_INT_MASK_CHAN_TX_L) {
                wcn36xx_dxe_read_register(wcn,
                                          WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L,
                                          &int_reason);

                /* TODO: Check int_reason */

                wcn36xx_dxe_write_register(wcn,
                                           WCN36XX_DXE_0_INT_CLR,
                                           WCN36XX_INT_MASK_CHAN_TX_L);

                wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR,
                                           WCN36XX_INT_MASK_CHAN_TX_L);
                wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low\n");
                reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
        }

        return IRQ_HANDLED;
}
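/*
 * The RX handler below masks its own interrupt for the duration of
 * wcn36xx_dxe_rx_frame() and re-enables it afterwards, so ring draining is
 * not re-entered by the same interrupt.
 */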
static irqreturn_t wcn36xx_irq_rx_ready(int irq, void *dev)
{
        struct wcn36xx *wcn = (struct wcn36xx *)dev;

        disable_irq_nosync(wcn->rx_irq);
        wcn36xx_dxe_rx_frame(wcn);
        enable_irq(wcn->rx_irq);

        return IRQ_HANDLED;
}
static int wcn36xx_dxe_request_irqs(struct wcn36xx *wcn)
{
        int ret;

        ret = request_irq(wcn->tx_irq, wcn36xx_irq_tx_complete,
                          IRQF_TRIGGER_HIGH, "wcn36xx_tx", wcn);
        if (ret) {
                wcn36xx_err("failed to alloc tx irq\n");
                goto out_err;
        }

        ret = request_irq(wcn->rx_irq, wcn36xx_irq_rx_ready, IRQF_TRIGGER_HIGH,
                          "wcn36xx_rx", wcn);
        if (ret) {
                wcn36xx_err("failed to alloc rx irq\n");
                goto out_txirq;
        }

        enable_irq_wake(wcn->rx_irq);

        return 0;

out_txirq:
        free_irq(wcn->tx_irq, wcn);
out_err:
        return ret;
}
static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
                                     struct wcn36xx_dxe_ch *ch)
{
        struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl;
        struct wcn36xx_dxe_desc *dxe = ctl->desc;
        dma_addr_t dma_addr;
        struct sk_buff *skb;
        int ret = 0, int_mask;
        u32 value;

        if (ch->ch_type == WCN36XX_DXE_CH_RX_L) {
                value = WCN36XX_DXE_CTRL_RX_L;
                int_mask = WCN36XX_DXE_INT_CH1_MASK;
        } else {
                value = WCN36XX_DXE_CTRL_RX_H;
                int_mask = WCN36XX_DXE_INT_CH3_MASK;
        }

        while (!(dxe->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)) {
                skb = ctl->skb;
                dma_addr = dxe->dst_addr_l;
                ret = wcn36xx_dxe_fill_skb(wcn->dev, ctl);
                if (0 == ret) {
                        /* new skb allocation ok. Use the new one and queue
                         * the old one to network system.
                         */
                        dma_unmap_single(wcn->dev, dma_addr, WCN36XX_PKT_SIZE,
                                         DMA_FROM_DEVICE);
                        wcn36xx_rx_skb(wcn, skb);
                } /* else keep old skb not submitted and use it for rx DMA */

                dxe->ctrl = value;
                ctl = ctl->next;
                dxe = ctl->desc;
        }
        wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR, int_mask);

        ch->head_blk_ctl = ctl;

        return 0;
}
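/*
 * DXE channel 1 carries RX low prio and channel 3 RX high prio (matching
 * the int_mask selection above); wcn36xx_dxe_rx_frame() dispatches on
 * exactly those masks.
 */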
void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn)
{
        int int_src;

        wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);

        /* RX_LOW_PRI */
        if (int_src & WCN36XX_DXE_INT_CH1_MASK) {
                wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
                                           WCN36XX_DXE_INT_CH1_MASK);
                wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_l_ch));
        }

        /* RX_HIGH_PRI */
        if (int_src & WCN36XX_DXE_INT_CH3_MASK) {
                /* Clean up all the INT within this channel */
                wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
                                           WCN36XX_DXE_INT_CH3_MASK);
                wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_h_ch));
        }

        if (!int_src)
                wcn36xx_warn("No DXE interrupt pending\n");
}
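/*
 * Chunk size arithmetic below: WCN36XX_BD_CHUNK_SIZE + 16 - (size % 8)
 * adds between 9 and 16 bytes of padding, which makes every chunk length a
 * multiple of 8 and keeps the BD chunks 8-byte aligned within the pool.
 */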
int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
{
        size_t s;
        void *cpu_addr;

        /* Allocate BD headers for MGMT frames */

        /* Where this comes from, ask QC */
        wcn->mgmt_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
                16 - (WCN36XX_BD_CHUNK_SIZE % 8);

        s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
        cpu_addr = dma_alloc_coherent(wcn->dev, s, &wcn->mgmt_mem_pool.phy_addr,
                                      GFP_KERNEL);
        if (!cpu_addr)
                goto out_err;

        wcn->mgmt_mem_pool.virt_addr = cpu_addr;
        memset(cpu_addr, 0, s);

        /* Allocate BD headers for DATA frames */

        /* Where this comes from, ask QC */
        wcn->data_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
                16 - (WCN36XX_BD_CHUNK_SIZE % 8);

        s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
        cpu_addr = dma_alloc_coherent(wcn->dev, s, &wcn->data_mem_pool.phy_addr,
                                      GFP_KERNEL);
        if (!cpu_addr)
                goto out_err;

        wcn->data_mem_pool.virt_addr = cpu_addr;
        memset(cpu_addr, 0, s);

        return 0;

out_err:
        wcn36xx_dxe_free_mem_pools(wcn);
        wcn36xx_err("Failed to allocate BD mempool\n");
        return -ENOMEM;
}
void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn)
{
        if (wcn->mgmt_mem_pool.virt_addr)
                dma_free_coherent(wcn->dev, wcn->mgmt_mem_pool.chunk_size *
                                  WCN36XX_DXE_CH_DESC_NUMB_TX_H,
                                  wcn->mgmt_mem_pool.virt_addr,
                                  wcn->mgmt_mem_pool.phy_addr);

        if (wcn->data_mem_pool.virt_addr) {
                dma_free_coherent(wcn->dev, wcn->data_mem_pool.chunk_size *
                                  WCN36XX_DXE_CH_DESC_NUMB_TX_L,
                                  wcn->data_mem_pool.virt_addr,
                                  wcn->data_mem_pool.phy_addr);
        }
}
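/*
 * Note that one wcn36xx_dxe_tx_frame() call consumes two ring slots: the
 * first descriptor posts the prebuilt BD (ctl->bd_phy_addr), the second
 * posts the skb itself, and head_blk_ctl then skips past both. That is why
 * the ring-full check looks one slot ahead at ctl->next->skb.
 */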
int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
                         struct wcn36xx_vif *vif_priv,
                         struct sk_buff *skb,
                         bool is_low)
{
        struct wcn36xx_dxe_ctl *ctl = NULL;
        struct wcn36xx_dxe_desc *desc = NULL;
        struct wcn36xx_dxe_ch *ch = NULL;
        unsigned long flags;
        int ret;

        ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;

        spin_lock_irqsave(&ch->lock, flags);
        ctl = ch->head_blk_ctl;

        spin_lock(&ctl->next->skb_lock);

        /*
         * If skb is not null that means that we reached the tail of the ring
         * hence ring is full. Stop queues to let mac80211 back off until ring
         * has an empty slot again.
         */
        if (NULL != ctl->next->skb) {
                ieee80211_stop_queues(wcn->hw);
                wcn->queues_stopped = true;
                spin_unlock(&ctl->next->skb_lock);
                spin_unlock_irqrestore(&ch->lock, flags);
                return -EBUSY;
        }
        spin_unlock(&ctl->next->skb_lock);

        ctl->skb = NULL;
        desc = ctl->desc;

        /* Set source address of the BD we send */
        desc->src_addr_l = ctl->bd_phy_addr;

        desc->dst_addr_l = ch->dxe_wq;
        desc->fr_len = sizeof(struct wcn36xx_tx_bd);
        desc->ctrl = ch->ctrl_bd;

        wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX\n");

        wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ",
                         (char *)desc, sizeof(*desc));
        wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP,
                         "BD >>> ", (char *)ctl->bd_cpu_addr,
                         sizeof(struct wcn36xx_tx_bd));

        /* Set source address of the SKB we send */
        ctl = ctl->next;
        ctl->skb = skb;
        desc = ctl->desc;
        if (ctl->bd_cpu_addr) {
                wcn36xx_err("bd_cpu_addr cannot be NULL for skb DXE\n");
                ret = -EINVAL;
                goto unlock;
        }

        desc->src_addr_l = dma_map_single(wcn->dev,
                                          ctl->skb->data,
                                          ctl->skb->len,
                                          DMA_TO_DEVICE);

        desc->dst_addr_l = ch->dxe_wq;
        desc->fr_len = ctl->skb->len;

        /* set dxe descriptor to VALID */
        desc->ctrl = ch->ctrl_skb;

        wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC2 >>> ",
                         (char *)desc, sizeof(*desc));
        wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "SKB >>> ",
                         (char *)ctl->skb->data, ctl->skb->len);

        /* Move the head of the ring to the next empty descriptor */
        ch->head_blk_ctl = ctl->next;

        /*
         * When connected and trying to send data frame chip can be in sleep
         * mode and writing to the register will not wake up the chip. Instead
         * notify chip about new frame through SMSM bus.
         */
        if (is_low && vif_priv->pw_state == WCN36XX_BMPS) {
                wcn->ctrl_ops->smsm_change_state(
                                  0,
                                  WCN36XX_SMSM_WLAN_TX_ENABLE);
        } else {
                /* indicate End Of Packet and generate interrupt on descriptor
                 * done.
                 */
                wcn36xx_dxe_write_register(wcn,
                        ch->reg_ctrl, ch->def_ctrl);
        }

        ret = 0;
unlock:
        spin_unlock_irqrestore(&ch->lock, flags);
        return ret;
}
int wcn36xx_dxe_init(struct wcn36xx *wcn)
{
        int reg_data = 0, ret;

        reg_data = WCN36XX_DXE_REG_RESET;
        wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);

        /* Select channels for rx avail and xfer done interrupts... */
        reg_data = (WCN36XX_DXE_INT_CH3_MASK | WCN36XX_DXE_INT_CH1_MASK) << 16 |
                    WCN36XX_DXE_INT_CH0_MASK | WCN36XX_DXE_INT_CH4_MASK;
        if (wcn->is_pronto)
                wcn36xx_ccu_write_register(wcn, WCN36XX_CCU_DXE_INT_SELECT_PRONTO, reg_data);
        else
                wcn36xx_ccu_write_register(wcn, WCN36XX_CCU_DXE_INT_SELECT_RIVA, reg_data);

        /***************************************/
        /* Init descriptors for TX LOW channel */
        /***************************************/
        wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_l_ch);
        wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_l_ch, &wcn->data_mem_pool);

        /* Write channel head to a NEXT register */
        wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_L,
                wcn->dxe_tx_l_ch.head_blk_ctl->desc_phy_addr);

        /* Program DMA destination addr for TX LOW */
        wcn36xx_dxe_write_register(wcn,
                WCN36XX_DXE_CH_DEST_ADDR_TX_L,
                WCN36XX_DXE_WQ_TX_L);

        wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
        wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);

        /****************************************/
        /* Init descriptors for TX HIGH channel */
        /****************************************/
        wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_h_ch);
        wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_h_ch, &wcn->mgmt_mem_pool);

        /* Write channel head to a NEXT register */
        wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_H,
                wcn->dxe_tx_h_ch.head_blk_ctl->desc_phy_addr);

        /* Program DMA destination addr for TX HIGH */
        wcn36xx_dxe_write_register(wcn,
                WCN36XX_DXE_CH_DEST_ADDR_TX_H,
                WCN36XX_DXE_WQ_TX_H);

        wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);

        /* Enable channel interrupts */
        wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);

        /***************************************/
        /* Init descriptors for RX LOW channel */
        /***************************************/
        wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_l_ch);

        /* For RX we need to preallocate buffers */
        wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);

        /* Write channel head to a NEXT register */
        wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_L,
                wcn->dxe_rx_l_ch.head_blk_ctl->desc_phy_addr);

        /* Write DMA source address */
        wcn36xx_dxe_write_register(wcn,
                WCN36XX_DXE_CH_SRC_ADDR_RX_L,
                WCN36XX_DXE_WQ_RX_L);

        /* Program preallocated destination address */
        wcn36xx_dxe_write_register(wcn,
                WCN36XX_DXE_CH_DEST_ADDR_RX_L,
                wcn->dxe_rx_l_ch.head_blk_ctl->desc->phy_next_l);

        /* Enable default control registers */
        wcn36xx_dxe_write_register(wcn,
                WCN36XX_DXE_REG_CTL_RX_L,
                WCN36XX_DXE_CH_DEFAULT_CTL_RX_L);

        /* Enable channel interrupts */
        wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);

        /****************************************/
        /* Init descriptors for RX HIGH channel */
        /****************************************/
        wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_h_ch);

        /* For RX we need to preallocate buffers */
        wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_h_ch);

        /* Write channel head to a NEXT register */
        wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_H,
                wcn->dxe_rx_h_ch.head_blk_ctl->desc_phy_addr);

        /* Write DMA source address */
        wcn36xx_dxe_write_register(wcn,
                WCN36XX_DXE_CH_SRC_ADDR_RX_H,
                WCN36XX_DXE_WQ_RX_H);

        /* Program preallocated destination address */
        wcn36xx_dxe_write_register(wcn,
                WCN36XX_DXE_CH_DEST_ADDR_RX_H,
                wcn->dxe_rx_h_ch.head_blk_ctl->desc->phy_next_l);

        /* Enable default control registers */
        wcn36xx_dxe_write_register(wcn,
                WCN36XX_DXE_REG_CTL_RX_H,
                WCN36XX_DXE_CH_DEFAULT_CTL_RX_H);

        /* Enable channel interrupts */
        wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);

        ret = wcn36xx_dxe_request_irqs(wcn);
        if (ret < 0)
                goto out_err;

        return 0;

out_err:
        return ret;
}
void wcn36xx_dxe_deinit(struct wcn36xx *wcn)
{
        free_irq(wcn->tx_irq, wcn);
        free_irq(wcn->rx_irq, wcn);

        if (wcn->tx_ack_skb) {
                ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
                wcn->tx_ack_skb = NULL;
        }

        wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch);
        wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch);
}