/*
 * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/* DXE - DMA transfer engine
 * We have 2 channels (high priority and low priority) for TX and 2 channels
 * for RX. Data packets are transferred through the low-priority channels,
 * management packets through the high-priority channels.
 */
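
/*
 * Channel-to-traffic mapping used throughout this file. This is a summary
 * derived from the constants assigned below (the data/mgmt split for RX
 * mirrors the TX convention described above, not vendor documentation):
 *
 *      dxe_tx_l_ch - TX low priority,  data frames, BDs from data_mem_pool
 *      dxe_tx_h_ch - TX high priority, mgmt frames, BDs from mgmt_mem_pool
 *      dxe_rx_l_ch - RX low priority,  data frames
 *      dxe_rx_h_ch - RX high priority, mgmt frames
 */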
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/interrupt.h>
#include "wcn36xx.h"
#include "txrx.h"
void *wcn36xx_dxe_get_next_bd(struct wcn36xx *wcn, bool is_low)
{
        struct wcn36xx_dxe_ch *ch = is_low ?
                &wcn->dxe_tx_l_ch :
                &wcn->dxe_tx_h_ch;

        return ch->head_blk_ctl->bd_cpu_addr;
}
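
/*
 * Sketch of the expected caller (the TX path in txrx.c; illustrative only,
 * the local variable names are assumptions):
 *
 *      struct wcn36xx_tx_bd *bd = wcn36xx_dxe_get_next_bd(wcn, is_low);
 *
 *      memset(bd, 0, sizeof(*bd));
 *      ... fill BD fields, then hand the skb to wcn36xx_dxe_tx_frame() ...
 *
 * The BD returned here lives in the head control block of the selected TX
 * channel, i.e. the slot that wcn36xx_dxe_tx_frame() will submit next.
 */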
static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
{
        wcn36xx_dbg(WCN36XX_DBG_DXE,
                    "wcn36xx_dxe_write_register: addr=%x, data=%x\n",
                    addr, data);

        writel(data, wcn->mmio + addr);
}
#define wcn36xx_dxe_write_register_x(wcn, reg, reg_data)                 \
do {                                                                     \
        if (wcn->chip_version == WCN36XX_CHIP_3680)                      \
                wcn36xx_dxe_write_register(wcn, reg ## _3680, reg_data); \
        else                                                             \
                wcn36xx_dxe_write_register(wcn, reg ## _3660, reg_data); \
} while (0)
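
/*
 * The _x variant picks the chip-specific register offset at run time. For
 * example, the call in wcn36xx_dxe_init() below,
 *
 *      wcn36xx_dxe_write_register_x(wcn, WCN36XX_DXE_REG_CCU_INT, reg_data);
 *
 * expands to a write of either WCN36XX_DXE_REG_CCU_INT_3680 or
 * WCN36XX_DXE_REG_CCU_INT_3660, depending on wcn->chip_version.
 */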
static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data)
{
        *data = readl(wcn->mmio + addr);

        wcn36xx_dbg(WCN36XX_DBG_DXE,
                    "wcn36xx_dxe_read_register: addr=%x, data=%x\n",
                    addr, *data);
}
static void wcn36xx_dxe_free_ctl_block(struct wcn36xx_dxe_ch *ch)
{
        struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl, *next;
        int i;

        for (i = 0; i < ch->desc_num && ctl; i++) {
                next = ctl->next;
                kfree(ctl);
                ctl = next;
        }
}
static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch)
{
        struct wcn36xx_dxe_ctl *prev_ctl = NULL;
        struct wcn36xx_dxe_ctl *cur_ctl = NULL;
        int i;

        spin_lock_init(&ch->lock);
        for (i = 0; i < ch->desc_num; i++) {
                cur_ctl = kzalloc(sizeof(*cur_ctl), GFP_KERNEL);
                if (!cur_ctl)
                        goto out_fail;

                spin_lock_init(&cur_ctl->skb_lock);
                cur_ctl->ctl_blk_order = i;
                if (i == 0) {
                        ch->head_blk_ctl = cur_ctl;
                        ch->tail_blk_ctl = cur_ctl;
                } else if (ch->desc_num - 1 == i) {
                        prev_ctl->next = cur_ctl;
                        cur_ctl->next = ch->head_blk_ctl;
                } else {
                        prev_ctl->next = cur_ctl;
                }
                prev_ctl = cur_ctl;
        }

        return 0;

out_fail:
        wcn36xx_dxe_free_ctl_block(ch);
        return -ENOMEM;
}
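
/*
 * The control blocks allocated above form a circular, singly linked list;
 * e.g. for desc_num == 4:
 *
 *      head_blk_ctl --> ctl0 --> ctl1 --> ctl2 --> ctl3
 *                        ^                           |
 *                        +---------------------------+
 *
 * On TX, head_blk_ctl advances as frames are queued and tail_blk_ctl
 * advances as reap_tx_dxes() completes them; head == tail can therefore
 * mean either "empty" or "full", which is disambiguated by checking
 * WCN36XX_DXE_CTRL_VALID_MASK in the descriptor (see reap_tx_dxes()).
 */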
int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn)
{
        int ret;

        wcn->dxe_tx_l_ch.ch_type = WCN36XX_DXE_CH_TX_L;
        wcn->dxe_tx_h_ch.ch_type = WCN36XX_DXE_CH_TX_H;
        wcn->dxe_rx_l_ch.ch_type = WCN36XX_DXE_CH_RX_L;
        wcn->dxe_rx_h_ch.ch_type = WCN36XX_DXE_CH_RX_H;

        wcn->dxe_tx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_L;
        wcn->dxe_tx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_H;
        wcn->dxe_rx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_L;
        wcn->dxe_rx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_H;

        wcn->dxe_tx_l_ch.dxe_wq = WCN36XX_DXE_WQ_TX_L;
        wcn->dxe_tx_h_ch.dxe_wq = WCN36XX_DXE_WQ_TX_H;

        wcn->dxe_tx_l_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_L_BD;
        wcn->dxe_tx_h_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_H_BD;

        wcn->dxe_tx_l_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_L_SKB;
        wcn->dxe_tx_h_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_H_SKB;

        wcn->dxe_tx_l_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_L;
        wcn->dxe_tx_h_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_H;

        wcn->dxe_tx_l_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_L;
        wcn->dxe_tx_h_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_H;

        /* DXE control block allocation */
        ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_l_ch);
        if (ret)
                goto out_err;
        ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_h_ch);
        if (ret)
                goto out_err;
        ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_l_ch);
        if (ret)
                goto out_err;
        ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_h_ch);
        if (ret)
                goto out_err;

        /* Initialize SMSM state: clear TX Enable, signal TX rings empty */
        ret = wcn->ctrl_ops->smsm_change_state(
                WCN36XX_SMSM_WLAN_TX_ENABLE,
                WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY);
        if (ret)
                goto out_err;

        return 0;

out_err:
        wcn36xx_err("Failed to allocate DXE control blocks\n");
        wcn36xx_dxe_free_ctl_blks(wcn);
        return -ENOMEM;
}
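
/*
 * Note on the SMSM call above: smsm_change_state(clear_mask, set_mask) is
 * assumed to follow the usual Qualcomm SMSM convention of clearing the
 * first mask and setting the second, so this handshake clears TX_ENABLE
 * and advertises empty TX rings to the firmware. The same callback is used
 * in wcn36xx_dxe_tx_frame() to wake the chip when it sleeps in BMPS.
 */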
void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn)
{
        wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_l_ch);
        wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_h_ch);
        wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_l_ch);
        wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_h_ch);
}
static int wcn36xx_dxe_init_descs(struct device *dev,
                                  struct wcn36xx_dxe_ch *wcn_ch)
{
        struct wcn36xx_dxe_desc *cur_dxe = NULL;
        struct wcn36xx_dxe_desc *prev_dxe = NULL;
        struct wcn36xx_dxe_ctl *cur_ctl = NULL;
        size_t size;
        int i;

        size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
        wcn_ch->cpu_addr = dma_alloc_coherent(dev, size, &wcn_ch->dma_addr,
                                              GFP_KERNEL);
        if (!wcn_ch->cpu_addr)
                return -ENOMEM;

        memset(wcn_ch->cpu_addr, 0, size);

        cur_dxe = (struct wcn36xx_dxe_desc *)wcn_ch->cpu_addr;
        cur_ctl = wcn_ch->head_blk_ctl;

        for (i = 0; i < wcn_ch->desc_num; i++) {
                cur_ctl->desc = cur_dxe;
                cur_ctl->desc_phy_addr = wcn_ch->dma_addr +
                        i * sizeof(struct wcn36xx_dxe_desc);

                switch (wcn_ch->ch_type) {
                case WCN36XX_DXE_CH_TX_L:
                        cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_L;
                        cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_L;
                        break;
                case WCN36XX_DXE_CH_TX_H:
                        cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_H;
                        cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_H;
                        break;
                case WCN36XX_DXE_CH_RX_L:
                        cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
                        cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_L;
                        break;
                case WCN36XX_DXE_CH_RX_H:
                        cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
                        cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_H;
                        break;
                }

                if (0 == i) {
                        cur_dxe->phy_next_l = 0;
                } else if ((0 < i) && (i < wcn_ch->desc_num - 1)) {
                        prev_dxe->phy_next_l =
                                cur_ctl->desc_phy_addr;
                } else if (i == (wcn_ch->desc_num - 1)) {
                        prev_dxe->phy_next_l =
                                cur_ctl->desc_phy_addr;
                        cur_dxe->phy_next_l =
                                wcn_ch->head_blk_ctl->desc_phy_addr;
                }

                cur_ctl = cur_ctl->next;
                prev_dxe = cur_dxe;
                cur_dxe++;
        }

        return 0;
}
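
/*
 * After wcn36xx_dxe_init_descs() the DMA-coherent descriptor array is
 * chained through phy_next_l into a hardware-visible ring that mirrors the
 * control-block list, roughly:
 *
 *      desc[0].phy_next_l   = dma_addr + 1 * sizeof(desc)
 *      desc[1].phy_next_l   = dma_addr + 2 * sizeof(desc)
 *      ...
 *      desc[n-1].phy_next_l = dma_addr          (back to the head)
 *
 * so the DXE engine can walk the ring without CPU intervention.
 */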
static void wcn36xx_dxe_init_tx_bd(struct wcn36xx_dxe_ch *ch,
                                   struct wcn36xx_dxe_mem_pool *pool)
{
        int i, chunk_size = pool->chunk_size;
        dma_addr_t bd_phy_addr = pool->phy_addr;
        void *bd_cpu_addr = pool->virt_addr;
        struct wcn36xx_dxe_ctl *cur = ch->head_blk_ctl;

        for (i = 0; i < ch->desc_num; i++) {
                /* Only every second dxe needs a bd pointer,
                 * the other will point to the skb data */
                if (!(i & 1)) {
                        cur->bd_phy_addr = bd_phy_addr;
                        cur->bd_cpu_addr = bd_cpu_addr;
                        bd_phy_addr += chunk_size;
                        bd_cpu_addr += chunk_size;
                } else {
                        cur->bd_phy_addr = 0;
                        cur->bd_cpu_addr = NULL;
                }
                cur = cur->next;
        }
}
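
/*
 * Resulting TX ring layout (BD chunks from the pool land on every second
 * control block; shown for the first four slots):
 *
 *      ctl0: bd_cpu_addr = pool + 0 * chunk_size   (BD descriptor)
 *      ctl1: bd_cpu_addr = NULL                    (skb descriptor)
 *      ctl2: bd_cpu_addr = pool + 1 * chunk_size   (BD descriptor)
 *      ctl3: bd_cpu_addr = NULL                    (skb descriptor)
 *
 * wcn36xx_dxe_tx_frame() relies on this pairing: the head slot carries the
 * BD, head->next carries the skb payload.
 */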
static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
{
        int reg_data = 0;

        wcn36xx_dxe_read_register(wcn,
                                  WCN36XX_DXE_INT_MASK_REG,
                                  &reg_data);

        reg_data |= wcn_ch;

        wcn36xx_dxe_write_register(wcn,
                                   WCN36XX_DXE_INT_MASK_REG,
                                   (int)reg_data);
        return 0;
}
static int wcn36xx_dxe_fill_skb(struct device *dev, struct wcn36xx_dxe_ctl *ctl)
{
        struct wcn36xx_dxe_desc *dxe = ctl->desc;
        struct sk_buff *skb;

        skb = alloc_skb(WCN36XX_PKT_SIZE, GFP_ATOMIC);
        if (skb == NULL)
                return -ENOMEM;

        dxe->dst_addr_l = dma_map_single(dev,
                                         skb_tail_pointer(skb),
                                         WCN36XX_PKT_SIZE,
                                         DMA_FROM_DEVICE);
        ctl->skb = skb;

        return 0;
}
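
/*
 * wcn36xx_dxe_fill_skb() arms one RX slot: the freshly allocated buffer is
 * mapped DMA_FROM_DEVICE and its bus address stored in dst_addr_l for the
 * engine to write into. It runs once per descriptor at init time (via
 * wcn36xx_dxe_ch_alloc_skb()) and again from wcn36xx_rx_handle_packets()
 * to replace each buffer that has been handed up the stack.
 */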
static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
                                    struct wcn36xx_dxe_ch *wcn_ch)
{
        int i;
        struct wcn36xx_dxe_ctl *cur_ctl = NULL;

        cur_ctl = wcn_ch->head_blk_ctl;

        for (i = 0; i < wcn_ch->desc_num; i++) {
                wcn36xx_dxe_fill_skb(wcn->dev, cur_ctl);
                cur_ctl = cur_ctl->next;
        }

        return 0;
}
static void wcn36xx_dxe_ch_free_skbs(struct wcn36xx *wcn,
                                     struct wcn36xx_dxe_ch *wcn_ch)
{
        struct wcn36xx_dxe_ctl *cur = wcn_ch->head_blk_ctl;
        int i;

        for (i = 0; i < wcn_ch->desc_num; i++) {
                kfree_skb(cur->skb);
                cur = cur->next;
        }
}
void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
{
        struct ieee80211_tx_info *info;
        struct sk_buff *skb;
        unsigned long flags;

        spin_lock_irqsave(&wcn->dxe_lock, flags);
        skb = wcn->tx_ack_skb;
        wcn->tx_ack_skb = NULL;
        spin_unlock_irqrestore(&wcn->dxe_lock, flags);

        if (!skb) {
                wcn36xx_warn("Spurious TX complete indication\n");
                return;
        }

        info = IEEE80211_SKB_CB(skb);

        if (status == 1)
                info->flags |= IEEE80211_TX_STAT_ACK;

        wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ack status: %d\n", status);

        ieee80211_tx_status_irqsafe(wcn->hw, skb);
        ieee80211_wake_queues(wcn->hw);
}
static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
{
        struct wcn36xx_dxe_ctl *ctl;
        struct ieee80211_tx_info *info;
        unsigned long flags;

        /*
         * Make at least one loop of do-while because in case ring is
         * completely full head and tail are pointing to the same element
         * and while-do will not make any cycles.
         */
        spin_lock_irqsave(&ch->lock, flags);
        ctl = ch->tail_blk_ctl;
        do {
                if (ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)
                        break;
                if (ctl->skb) {
                        dma_unmap_single(wcn->dev, ctl->desc->src_addr_l,
                                         ctl->skb->len, DMA_TO_DEVICE);
                        info = IEEE80211_SKB_CB(ctl->skb);
                        if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
                                /* No TX status was requested for this frame,
                                 * so it can be freed here. Frames awaiting a
                                 * status report are kept until
                                 * wcn36xx_dxe_tx_ack_ind().
                                 */
                                ieee80211_free_txskb(wcn->hw, ctl->skb);
                        }
                        spin_lock(&ctl->skb_lock);
                        if (wcn->queues_stopped) {
                                wcn->queues_stopped = false;
                                ieee80211_wake_queues(wcn->hw);
                        }
                        spin_unlock(&ctl->skb_lock);

                        ctl->skb = NULL;
                }
                ctl = ctl->next;
        } while (ctl != ch->head_blk_ctl &&
                 !(ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK));

        ch->tail_blk_ctl = ctl;
        spin_unlock_irqrestore(&ch->lock, flags);
}
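
/*
 * Reap invariant: tail_blk_ctl chases head_blk_ctl, stopping at the first
 * descriptor whose VALID bit is still set (i.e. still owned by hardware).
 * The do-while (rather than while) form matters because, as noted above, a
 * completely full ring has head == tail and a plain while loop would reap
 * nothing.
 */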
static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
{
        struct wcn36xx *wcn = (struct wcn36xx *)dev;
        int int_src, int_reason;

        wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);

        if (int_src & WCN36XX_INT_MASK_CHAN_TX_H) {
                wcn36xx_dxe_read_register(wcn,
                                          WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H,
                                          &int_reason);

                /* TODO: Check int_reason */

                wcn36xx_dxe_write_register(wcn,
                                           WCN36XX_DXE_0_INT_CLR,
                                           WCN36XX_INT_MASK_CHAN_TX_H);

                wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR,
                                           WCN36XX_INT_MASK_CHAN_TX_H);
                wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high\n");
                reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
        }

        if (int_src & WCN36XX_INT_MASK_CHAN_TX_L) {
                wcn36xx_dxe_read_register(wcn,
                                          WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L,
                                          &int_reason);

                /* TODO: Check int_reason */

                wcn36xx_dxe_write_register(wcn,
                                           WCN36XX_DXE_0_INT_CLR,
                                           WCN36XX_INT_MASK_CHAN_TX_L);

                wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR,
                                           WCN36XX_INT_MASK_CHAN_TX_L);
                wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low\n");
                reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
        }

        return IRQ_HANDLED;
}
static irqreturn_t wcn36xx_irq_rx_ready(int irq, void *dev)
{
        struct wcn36xx *wcn = (struct wcn36xx *)dev;

        disable_irq_nosync(wcn->rx_irq);
        wcn36xx_dxe_rx_frame(wcn);
        enable_irq(wcn->rx_irq);

        return IRQ_HANDLED;
}
static int wcn36xx_dxe_request_irqs(struct wcn36xx *wcn)
{
        int ret;

        ret = request_irq(wcn->tx_irq, wcn36xx_irq_tx_complete,
                          IRQF_TRIGGER_HIGH, "wcn36xx_tx", wcn);
        if (ret) {
                wcn36xx_err("failed to alloc tx irq\n");
                goto out_err;
        }

        ret = request_irq(wcn->rx_irq, wcn36xx_irq_rx_ready, IRQF_TRIGGER_HIGH,
                          "wcn36xx_rx", wcn);
        if (ret) {
                wcn36xx_err("failed to alloc rx irq\n");
                goto out_txirq;
        }

        enable_irq_wake(wcn->rx_irq);

        return 0;

out_txirq:
        free_irq(wcn->tx_irq, wcn);
out_err:
        return ret;
}
static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
                                     struct wcn36xx_dxe_ch *ch)
{
        struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl;
        struct wcn36xx_dxe_desc *dxe = ctl->desc;
        dma_addr_t dma_addr;
        struct sk_buff *skb;

        while (!(dxe->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)) {
                skb = ctl->skb;
                dma_addr = dxe->dst_addr_l;
                wcn36xx_dxe_fill_skb(wcn->dev, ctl);

                switch (ch->ch_type) {
                case WCN36XX_DXE_CH_RX_L:
                        dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
                        wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR,
                                                   WCN36XX_DXE_INT_CH1_MASK);
                        break;
                case WCN36XX_DXE_CH_RX_H:
                        dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
                        wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR,
                                                   WCN36XX_DXE_INT_CH3_MASK);
                        break;
                default:
                        wcn36xx_warn("Unknown channel\n");
                }

                dma_unmap_single(wcn->dev, dma_addr, WCN36XX_PKT_SIZE,
                                 DMA_FROM_DEVICE);
                wcn36xx_rx_skb(wcn, skb);
                ctl = ctl->next;
                dxe = ctl->desc;
        }

        ch->head_blk_ctl = ctl;

        return 0;
}
void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn)
{
        int int_src;

        wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);

        /* RX_LOW_PRI */
        if (int_src & WCN36XX_DXE_INT_CH1_MASK) {
                wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
                                           WCN36XX_DXE_INT_CH1_MASK);
                wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_l_ch));
        }

        /* RX_HIGH_PRI */
        if (int_src & WCN36XX_DXE_INT_CH3_MASK) {
                /* Clean up all the INT within this channel */
                wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
                                           WCN36XX_DXE_INT_CH3_MASK);
                wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_h_ch));
        }

        if (!int_src)
                wcn36xx_warn("No DXE interrupt pending\n");
}
int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
{
        size_t s;
        void *cpu_addr;

        /* Allocate BD headers for MGMT frames */

        /* Where this formula comes from, ask QC */
        wcn->mgmt_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
                16 - (WCN36XX_BD_CHUNK_SIZE % 8);

        s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
        cpu_addr = dma_alloc_coherent(wcn->dev, s, &wcn->mgmt_mem_pool.phy_addr,
                                      GFP_KERNEL);
        if (!cpu_addr)
                goto out_err;

        wcn->mgmt_mem_pool.virt_addr = cpu_addr;
        memset(cpu_addr, 0, s);

        /* Allocate BD headers for DATA frames */

        /* Where this formula comes from, ask QC */
        wcn->data_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
                16 - (WCN36XX_BD_CHUNK_SIZE % 8);

        s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
        cpu_addr = dma_alloc_coherent(wcn->dev, s, &wcn->data_mem_pool.phy_addr,
                                      GFP_KERNEL);
        if (!cpu_addr)
                goto out_err;

        wcn->data_mem_pool.virt_addr = cpu_addr;
        memset(cpu_addr, 0, s);

        return 0;

out_err:
        wcn36xx_dxe_free_mem_pools(wcn);
        wcn36xx_err("Failed to allocate BD mempool\n");
        return -ENOMEM;
}
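
/*
 * For illustration (WCN36XX_BD_CHUNK_SIZE is defined elsewhere; 100 is a
 * made-up value): chunk_size = 100 + 16 - (100 % 8) = 112. In general the
 * formula yields WCN36XX_BD_CHUNK_SIZE rounded down to a multiple of 8,
 * plus 16 - i.e. the chunk is always 8-byte aligned with 9 to 16 bytes of
 * slack over the raw BD size.
 */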
void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn)
{
        if (wcn->mgmt_mem_pool.virt_addr)
                dma_free_coherent(wcn->dev, wcn->mgmt_mem_pool.chunk_size *
                                  WCN36XX_DXE_CH_DESC_NUMB_TX_H,
                                  wcn->mgmt_mem_pool.virt_addr,
                                  wcn->mgmt_mem_pool.phy_addr);

        if (wcn->data_mem_pool.virt_addr) {
                dma_free_coherent(wcn->dev, wcn->data_mem_pool.chunk_size *
                                  WCN36XX_DXE_CH_DESC_NUMB_TX_L,
                                  wcn->data_mem_pool.virt_addr,
                                  wcn->data_mem_pool.phy_addr);
        }
}
int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
                         struct wcn36xx_vif *vif_priv,
                         struct sk_buff *skb,
                         bool is_low)
{
        struct wcn36xx_dxe_ctl *ctl = NULL;
        struct wcn36xx_dxe_desc *desc = NULL;
        struct wcn36xx_dxe_ch *ch = NULL;
        unsigned long flags;

        ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;

        spin_lock_irqsave(&ch->lock, flags);
        ctl = ch->head_blk_ctl;

        spin_lock(&ctl->next->skb_lock);

        /*
         * If skb is not null that means that we reached the tail of the ring
         * hence ring is full. Stop queues to let mac80211 back off until ring
         * has an empty slot again.
         */
        if (NULL != ctl->next->skb) {
                ieee80211_stop_queues(wcn->hw);
                wcn->queues_stopped = true;
                spin_unlock(&ctl->next->skb_lock);
                spin_unlock_irqrestore(&ch->lock, flags);
                return -EBUSY;
        }
        spin_unlock(&ctl->next->skb_lock);

        ctl->skb = NULL;
        desc = ctl->desc;

        /* Set source address of the BD we send */
        desc->src_addr_l = ctl->bd_phy_addr;

        desc->dst_addr_l = ch->dxe_wq;
        desc->fr_len = sizeof(struct wcn36xx_tx_bd);
        desc->ctrl = ch->ctrl_bd;

        wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX\n");

        wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ",
                         (char *)desc, sizeof(*desc));
        wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP,
                         "BD   >>> ", (char *)ctl->bd_cpu_addr,
                         sizeof(struct wcn36xx_tx_bd));

        /* Set source address of the SKB we send */
        ctl = ctl->next;
        ctl->skb = skb;
        desc = ctl->desc;
        if (ctl->bd_cpu_addr) {
                wcn36xx_err("bd_cpu_addr cannot be NULL for skb DXE\n");
                spin_unlock_irqrestore(&ch->lock, flags);
                return -EINVAL;
        }

        desc->src_addr_l = dma_map_single(wcn->dev,
                                          ctl->skb->data,
                                          ctl->skb->len,
                                          DMA_TO_DEVICE);

        desc->dst_addr_l = ch->dxe_wq;
        desc->fr_len = ctl->skb->len;

        /* set dxe descriptor to VALID */
        desc->ctrl = ch->ctrl_skb;

        wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC2 >>> ",
                         (char *)desc, sizeof(*desc));
        wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "SKB   >>> ",
                         (char *)ctl->skb->data, ctl->skb->len);

        /* Move the head of the ring to the next empty descriptor */
        ch->head_blk_ctl = ctl->next;

        /*
         * When connected and trying to send data frame chip can be in sleep
         * mode and writing to the register will not wake up the chip. Instead
         * notify chip about new frame through SMSM bus.
         */
        if (is_low && vif_priv->pw_state == WCN36XX_BMPS) {
                wcn->ctrl_ops->smsm_change_state(
                          WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY,
                          WCN36XX_SMSM_WLAN_TX_ENABLE);
        } else {
                /* indicate End Of Packet and generate interrupt on descriptor
                 * done.
                 */
                wcn36xx_dxe_write_register(wcn,
                        ch->reg_ctrl, ch->def_ctrl);
        }

        spin_unlock_irqrestore(&ch->lock, flags);

        return 0;
}
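
/*
 * Each frame thus consumes two ring slots, programmed in the order shown
 * above (illustrative summary of the code, not a separate API):
 *
 *      slot N   (ctl):       src = bd_phy_addr, len = sizeof(tx_bd),
 *                            ctrl = ch->ctrl_bd   (BD descriptor)
 *      slot N+1 (ctl->next): src = DMA-mapped skb->data, len = skb->len,
 *                            ctrl = ch->ctrl_skb  (payload, marked VALID)
 *
 * and the ring head then moves on to slot N+2.
 */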
int wcn36xx_dxe_init(struct wcn36xx *wcn)
{
        int reg_data = 0, ret;

        reg_data = WCN36XX_DXE_REG_RESET;
        wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);

        /* Setting interrupt path */
        reg_data = WCN36XX_DXE_CCU_INT;
        wcn36xx_dxe_write_register_x(wcn, WCN36XX_DXE_REG_CCU_INT, reg_data);

        /***************************************/
        /* Init descriptors for TX LOW channel */
        /***************************************/
        wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_l_ch);
        wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_l_ch, &wcn->data_mem_pool);

        /* Write channel head to a NEXT register */
        wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_L,
                wcn->dxe_tx_l_ch.head_blk_ctl->desc_phy_addr);

        /* Program DMA destination addr for TX LOW */
        wcn36xx_dxe_write_register(wcn,
                WCN36XX_DXE_CH_DEST_ADDR_TX_L,
                WCN36XX_DXE_WQ_TX_L);

        wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
        wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);

        /****************************************/
        /* Init descriptors for TX HIGH channel */
        /****************************************/
        wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_h_ch);
        wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_h_ch, &wcn->mgmt_mem_pool);

        /* Write channel head to a NEXT register */
        wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_H,
                wcn->dxe_tx_h_ch.head_blk_ctl->desc_phy_addr);

        /* Program DMA destination addr for TX HIGH */
        wcn36xx_dxe_write_register(wcn,
                WCN36XX_DXE_CH_DEST_ADDR_TX_H,
                WCN36XX_DXE_WQ_TX_H);

        wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);

        /* Enable channel interrupts */
        wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);

        /***************************************/
        /* Init descriptors for RX LOW channel */
        /***************************************/
        wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_l_ch);

        /* For RX we need to preallocate buffers */
        wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);

        /* Write channel head to a NEXT register */
        wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_L,
                wcn->dxe_rx_l_ch.head_blk_ctl->desc_phy_addr);

        /* Write DMA source address */
        wcn36xx_dxe_write_register(wcn,
                WCN36XX_DXE_CH_SRC_ADDR_RX_L,
                WCN36XX_DXE_WQ_RX_L);

        /* Program preallocated destination address */
        wcn36xx_dxe_write_register(wcn,
                WCN36XX_DXE_CH_DEST_ADDR_RX_L,
                wcn->dxe_rx_l_ch.head_blk_ctl->desc->phy_next_l);

        /* Enable default control registers */
        wcn36xx_dxe_write_register(wcn,
                WCN36XX_DXE_REG_CTL_RX_L,
                WCN36XX_DXE_CH_DEFAULT_CTL_RX_L);

        /* Enable channel interrupts */
        wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);

        /****************************************/
        /* Init descriptors for RX HIGH channel */
        /****************************************/
        wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_h_ch);

        /* For RX we need to preallocate buffers */
        wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_h_ch);

        /* Write channel head to a NEXT register */
        wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_H,
                wcn->dxe_rx_h_ch.head_blk_ctl->desc_phy_addr);

        /* Write DMA source address */
        wcn36xx_dxe_write_register(wcn,
                WCN36XX_DXE_CH_SRC_ADDR_RX_H,
                WCN36XX_DXE_WQ_RX_H);

        /* Program preallocated destination address */
        wcn36xx_dxe_write_register(wcn,
                WCN36XX_DXE_CH_DEST_ADDR_RX_H,
                wcn->dxe_rx_h_ch.head_blk_ctl->desc->phy_next_l);

        /* Enable default control registers */
        wcn36xx_dxe_write_register(wcn,
                WCN36XX_DXE_REG_CTL_RX_H,
                WCN36XX_DXE_CH_DEFAULT_CTL_RX_H);

        /* Enable channel interrupts */
        wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);

        ret = wcn36xx_dxe_request_irqs(wcn);
        if (ret < 0)
                goto out_err;

        return 0;

out_err:
        return ret;
}
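
/*
 * Bring-up order above, per channel: init the descriptor ring, (TX only)
 * attach BD chunks, point the NEXT register at the ring head, program the
 * fixed work-queue address, then unmask the channel interrupt. Note that
 * the wcn36xx_dxe_init_descs() return values are not checked here; a
 * failed coherent allocation would only surface later.
 */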
void wcn36xx_dxe_deinit(struct wcn36xx *wcn)
{
        free_irq(wcn->tx_irq, wcn);
        free_irq(wcn->rx_irq, wcn);

        if (wcn->tx_ack_skb) {
                ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
                wcn->tx_ack_skb = NULL;
        }

        wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch);
        wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch);
}