/*
 * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/* DXE - DMA transfer engine
 * We have 2 channels (high prio and low prio) for TX and 2 channels for RX.
 * Data packets are transferred through the low-priority channels,
 * management packets through the high-priority channels.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/interrupt.h>
/* Local driver headers (assumed from the definitions used below). */
#include "wcn36xx.h"
#include "txrx.h"
void *wcn36xx_dxe_get_next_bd(struct wcn36xx *wcn, bool is_low)
{
	struct wcn36xx_dxe_ch *ch = is_low ?
		&wcn->dxe_tx_l_ch :
		&wcn->dxe_tx_h_ch;

	return ch->head_blk_ctl->bd_cpu_addr;
}
static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
{
	wcn36xx_dbg(WCN36XX_DBG_DXE,
		    "wcn36xx_dxe_write_register: addr=%x, data=%x\n",
		    addr, data);

	writel(data, wcn->mmio + addr);
}
static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data)
{
	*data = readl(wcn->mmio + addr);

	wcn36xx_dbg(WCN36XX_DBG_DXE,
		    "wcn36xx_dxe_read_register: addr=%x, data=%x\n",
		    addr, *data);
}
static void wcn36xx_dxe_free_ctl_block(struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl, *next;
	int i;

	for (i = 0; i < ch->desc_num && ctl; i++) {
		next = ctl->next;
		kfree(ctl);
		ctl = next;
	}
}
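/*
 * Build the per-channel list of control blocks, one per descriptor.
 * The blocks are linked into a ring: the last one allocated points back
 * to the head, matching the circular hardware descriptor chain set up
 * later in wcn36xx_dxe_init_descs().
 */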
static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *prev_ctl = NULL;
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
	int i;

	for (i = 0; i < ch->desc_num; i++) {
		cur_ctl = kzalloc(sizeof(*cur_ctl), GFP_KERNEL);
		if (!cur_ctl)
			goto out_fail;

		cur_ctl->ctl_blk_order = i;
		if (i == 0) {
			ch->head_blk_ctl = cur_ctl;
			ch->tail_blk_ctl = cur_ctl;
		} else if (ch->desc_num - 1 == i) {
			prev_ctl->next = cur_ctl;
			cur_ctl->next = ch->head_blk_ctl;
		} else {
			prev_ctl->next = cur_ctl;
		}
		prev_ctl = cur_ctl;
	}

	return 0;

out_fail:
	wcn36xx_dxe_free_ctl_block(ch);
	return -ENOMEM;
}
int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn)
{
	int ret;

	wcn->dxe_tx_l_ch.ch_type = WCN36XX_DXE_CH_TX_L;
	wcn->dxe_tx_h_ch.ch_type = WCN36XX_DXE_CH_TX_H;
	wcn->dxe_rx_l_ch.ch_type = WCN36XX_DXE_CH_RX_L;
	wcn->dxe_rx_h_ch.ch_type = WCN36XX_DXE_CH_RX_H;

	wcn->dxe_tx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_L;
	wcn->dxe_tx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_H;
	wcn->dxe_rx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_L;
	wcn->dxe_rx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_H;

	wcn->dxe_tx_l_ch.dxe_wq = WCN36XX_DXE_WQ_TX_L;
	wcn->dxe_tx_h_ch.dxe_wq = WCN36XX_DXE_WQ_TX_H;

	wcn->dxe_tx_l_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_L_BD;
	wcn->dxe_tx_h_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_H_BD;

	wcn->dxe_tx_l_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_L_SKB;
	wcn->dxe_tx_h_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_H_SKB;

	wcn->dxe_tx_l_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_L;
	wcn->dxe_tx_h_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_H;

	wcn->dxe_tx_l_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_L;
	wcn->dxe_tx_h_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_H;

	/* DXE control block allocation */
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_l_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_h_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_l_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_h_ch);
	if (ret)
		goto out_err;

	/* Initialize SMSM state: clear TX Enable, set TX Rings Empty */
	ret = wcn->ctrl_ops->smsm_change_state(
		WCN36XX_SMSM_WLAN_TX_ENABLE,
		WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY);

	return ret;

out_err:
	wcn36xx_err("Failed to allocate DXE control blocks\n");
	wcn36xx_dxe_free_ctl_blks(wcn);
	return -ENOMEM;
}
void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn)
{
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_l_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_h_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_l_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_h_ch);
}
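/*
 * Allocate the hardware descriptor ring for one channel in coherent DMA
 * memory and attach each descriptor to its control block.  Descriptors are
 * chained through phy_next_l; the last one points back to the first so the
 * DXE engine sees a circular list.
 */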
static int wcn36xx_dxe_init_descs(struct wcn36xx_dxe_ch *wcn_ch)
{
	struct wcn36xx_dxe_desc *cur_dxe = NULL;
	struct wcn36xx_dxe_desc *prev_dxe = NULL;
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
	size_t size;
	int i;

	size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
	wcn_ch->cpu_addr = dma_alloc_coherent(NULL, size, &wcn_ch->dma_addr,
					      GFP_KERNEL);
	if (!wcn_ch->cpu_addr)
		return -ENOMEM;

	memset(wcn_ch->cpu_addr, 0, size);

	cur_dxe = (struct wcn36xx_dxe_desc *)wcn_ch->cpu_addr;
	cur_ctl = wcn_ch->head_blk_ctl;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		cur_ctl->desc = cur_dxe;
		cur_ctl->desc_phy_addr = wcn_ch->dma_addr +
			i * sizeof(struct wcn36xx_dxe_desc);

		switch (wcn_ch->ch_type) {
		case WCN36XX_DXE_CH_TX_L:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_L;
			cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_L;
			break;
		case WCN36XX_DXE_CH_TX_H:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_H;
			cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_H;
			break;
		case WCN36XX_DXE_CH_RX_L:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
			cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_L;
			break;
		case WCN36XX_DXE_CH_RX_H:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
			cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_H;
			break;
		}
		if (i == 0) {
			cur_dxe->phy_next_l = 0;
		} else if ((0 < i) && (i < wcn_ch->desc_num - 1)) {
			prev_dxe->phy_next_l =
				cur_ctl->desc_phy_addr;
		} else if (i == (wcn_ch->desc_num - 1)) {
			prev_dxe->phy_next_l =
				cur_ctl->desc_phy_addr;
			cur_dxe->phy_next_l =
				wcn_ch->head_blk_ctl->desc_phy_addr;
		}
		cur_ctl = cur_ctl->next;
		prev_dxe = cur_dxe;
		cur_dxe++;
	}

	return 0;
}
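/*
 * Carve the preallocated BD pool into per-descriptor chunks for a TX
 * channel.  Frames use two descriptors each (BD first, then skb data),
 * so only every second control block gets a BD chunk assigned.
 */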
static void wcn36xx_dxe_init_tx_bd(struct wcn36xx_dxe_ch *ch,
				   struct wcn36xx_dxe_mem_pool *pool)
{
	int i, chunk_size = pool->chunk_size;
	dma_addr_t bd_phy_addr = pool->phy_addr;
	void *bd_cpu_addr = pool->virt_addr;
	struct wcn36xx_dxe_ctl *cur = ch->head_blk_ctl;

	for (i = 0; i < ch->desc_num; i++) {
		/* Only every second dxe needs a bd pointer,
		 * the other will point to the skb data.
		 */
		if (!(i & 1)) {
			cur->bd_phy_addr = bd_phy_addr;
			cur->bd_cpu_addr = bd_cpu_addr;
			bd_phy_addr += chunk_size;
			bd_cpu_addr += chunk_size;
		} else {
			cur->bd_phy_addr = 0;
			cur->bd_cpu_addr = NULL;
		}
		cur = cur->next;
	}
}
static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
{
	int reg_data = 0;

	wcn36xx_dxe_read_register(wcn,
				  WCN36XX_DXE_INT_MASK_REG,
				  &reg_data);

	reg_data |= wcn_ch;

	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_INT_MASK_REG,
				   (int)reg_data);
	return 0;
}
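/*
 * Attach a freshly allocated receive skb to an RX descriptor: the skb data
 * area is DMA-mapped and its bus address written to dst_addr_l so the DXE
 * engine can fill it directly.
 */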
static int wcn36xx_dxe_fill_skb(struct wcn36xx_dxe_ctl *ctl)
{
	struct wcn36xx_dxe_desc *dxe = ctl->desc;
	struct sk_buff *skb;

	skb = alloc_skb(WCN36XX_PKT_SIZE, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	dxe->dst_addr_l = dma_map_single(NULL,
					 skb_tail_pointer(skb),
					 WCN36XX_PKT_SIZE,
					 DMA_FROM_DEVICE);
	ctl->skb = skb;

	return 0;
}
static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
				    struct wcn36xx_dxe_ch *wcn_ch)
{
	int i;
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;

	cur_ctl = wcn_ch->head_blk_ctl;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		wcn36xx_dxe_fill_skb(cur_ctl);
		cur_ctl = cur_ctl->next;
	}

	return 0;
}
static void wcn36xx_dxe_ch_free_skbs(struct wcn36xx *wcn,
				     struct wcn36xx_dxe_ch *wcn_ch)
{
	struct wcn36xx_dxe_ctl *cur = wcn_ch->head_blk_ctl;
	int i;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		kfree_skb(cur->skb);
		cur = cur->next;
	}
}
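/*
 * Deliver the deferred TX status for the one frame whose status was
 * explicitly requested (wcn->tx_ack_skb).  Called when the firmware
 * reports the ACK result; a status of 1 marks the frame as acknowledged.
 */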
void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
{
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&wcn->dxe_lock, flags);
	skb = wcn->tx_ack_skb;
	wcn->tx_ack_skb = NULL;
	spin_unlock_irqrestore(&wcn->dxe_lock, flags);

	if (!skb) {
		wcn36xx_warn("Spurious TX complete indication\n");
		return;
	}

	info = IEEE80211_SKB_CB(skb);

	if (status == 1)
		info->flags |= IEEE80211_TX_STAT_ACK;

	wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ack status: %d\n", status);

	ieee80211_tx_status_irqsafe(wcn->hw, skb);
	ieee80211_wake_queues(wcn->hw);
}
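/*
 * Reclaim completed TX descriptors, walking from the tail of the ring
 * towards the head and stopping at the first descriptor the hardware
 * still owns (VALID bit set).  Unmaps the DMA buffers, frees skbs whose
 * TX status was not requested, and restarts mac80211 queues if they had
 * been stopped because the ring was full.
 */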
static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *ctl = ch->tail_blk_ctl;
	struct ieee80211_tx_info *info;
	unsigned long flags;

	/*
	 * Make at least one loop of do-while, because when the ring is
	 * completely full head and tail point to the same element and a
	 * while-do loop would not iterate at all.
	 */
	do {
		if (ctl->skb) {
			dma_unmap_single(NULL, ctl->desc->src_addr_l,
					 ctl->skb->len, DMA_TO_DEVICE);
			info = IEEE80211_SKB_CB(ctl->skb);
			if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
				/* No TX status was requested, so the frame
				 * can be freed now; otherwise keep it until
				 * the TX status indication arrives.
				 */
				ieee80211_free_txskb(wcn->hw, ctl->skb);
			}
			spin_lock_irqsave(&ctl->skb_lock, flags);
			if (wcn->queues_stopped) {
				wcn->queues_stopped = false;
				ieee80211_wake_queues(wcn->hw);
			}
			spin_unlock_irqrestore(&ctl->skb_lock, flags);

			ctl->skb = NULL;
		}
		ctl = ctl->next;
	} while (ctl != ch->head_blk_ctl &&
		 !(ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK));

	ch->tail_blk_ctl = ctl;
}
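/*
 * TX completion interrupt handler: check which TX channel raised the
 * interrupt, acknowledge it in the DXE interrupt clear registers and reap
 * the finished descriptors on that channel.
 */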
static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
{
	struct wcn36xx *wcn = (struct wcn36xx *)dev;
	int int_src, int_reason;

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);

	if (int_src & WCN36XX_INT_MASK_CHAN_TX_H) {
		wcn36xx_dxe_read_register(wcn,
					  WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H,
					  &int_reason);

		/* TODO: Check int_reason */

		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_H);

		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_H);
		wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high\n");
		reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
	}

	if (int_src & WCN36XX_INT_MASK_CHAN_TX_L) {
		wcn36xx_dxe_read_register(wcn,
					  WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L,
					  &int_reason);

		/* TODO: Check int_reason */

		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_L);

		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_L);
		wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low\n");
		reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
	}

	return IRQ_HANDLED;
}
static irqreturn_t wcn36xx_irq_rx_ready(int irq, void *dev)
{
	struct wcn36xx *wcn = (struct wcn36xx *)dev;

	disable_irq_nosync(wcn->rx_irq);
	wcn36xx_dxe_rx_frame(wcn);
	enable_irq(wcn->rx_irq);

	return IRQ_HANDLED;
}
static int wcn36xx_dxe_request_irqs(struct wcn36xx *wcn)
{
	int ret;

	ret = request_irq(wcn->tx_irq, wcn36xx_irq_tx_complete,
			  IRQF_TRIGGER_HIGH, "wcn36xx_tx", wcn);
	if (ret) {
		wcn36xx_err("failed to alloc tx irq\n");
		goto out_err;
	}

	ret = request_irq(wcn->rx_irq, wcn36xx_irq_rx_ready, IRQF_TRIGGER_HIGH,
			  "wcn36xx_rx", wcn);
	if (ret) {
		wcn36xx_err("failed to alloc rx irq\n");
		goto out_txirq;
	}

	enable_irq_wake(wcn->rx_irq);

	return 0;

out_txirq:
	free_irq(wcn->tx_irq, wcn);
out_err:
	return ret;
}
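/*
 * Drain one RX channel: process every descriptor the hardware has handed
 * back (VALID bit cleared), pass the filled skb up to mac80211, attach a
 * fresh skb and re-arm the descriptor, then advance the channel head.
 */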
static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
				     struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl;
	struct wcn36xx_dxe_desc *dxe = ctl->desc;
	dma_addr_t dma_addr;
	struct sk_buff *skb;

	while (!(dxe->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)) {
		skb = ctl->skb;
		dma_addr = dxe->dst_addr_l;
		wcn36xx_dxe_fill_skb(ctl);

		switch (ch->ch_type) {
		case WCN36XX_DXE_CH_RX_L:
			dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
			wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR,
						   WCN36XX_DXE_INT_CH1_MASK);
			break;
		case WCN36XX_DXE_CH_RX_H:
			dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
			wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR,
						   WCN36XX_DXE_INT_CH3_MASK);
			break;
		default:
			wcn36xx_warn("Unknown channel\n");
		}

		dma_unmap_single(NULL, dma_addr, WCN36XX_PKT_SIZE,
				 DMA_FROM_DEVICE);
		wcn36xx_rx_skb(wcn, skb);
		ctl = ctl->next;
		dxe = ctl->desc;
	}

	ch->head_blk_ctl = ctl;

	return 0;
}
void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn)
{
	int int_src;

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);

	/* RX_LOW_PRI */
	if (int_src & WCN36XX_DXE_INT_CH1_MASK) {
		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_DXE_INT_CH1_MASK);
		wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_l_ch));
	}

	/* RX_HIGH_PRI */
	if (int_src & WCN36XX_DXE_INT_CH3_MASK) {
		/* Clean up all the INT within this channel */
		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_DXE_INT_CH3_MASK);
		wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_h_ch));
	}

	if (!int_src)
		wcn36xx_warn("No DXE interrupt pending\n");
}
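/*
 * Preallocate the BD (buffer descriptor) header pools that prefix every
 * outgoing frame: one pool sized for the management (TX high) ring and one
 * for the data (TX low) ring.  The chunk size padding follows the vendor
 * formula below; its exact rationale is not documented here.
 */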
int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
{
	size_t s;
	void *cpu_addr;

	/* Allocate BD headers for MGMT frames */

	/* Where this comes from, ask QC */
	wcn->mgmt_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
		16 - (WCN36XX_BD_CHUNK_SIZE % 8);

	s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
	cpu_addr = dma_alloc_coherent(NULL, s, &wcn->mgmt_mem_pool.phy_addr,
				      GFP_KERNEL);
	if (!cpu_addr)
		goto out_err;

	wcn->mgmt_mem_pool.virt_addr = cpu_addr;
	memset(cpu_addr, 0, s);

	/* Allocate BD headers for DATA frames */

	/* Where this comes from, ask QC */
	wcn->data_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
		16 - (WCN36XX_BD_CHUNK_SIZE % 8);

	s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
	cpu_addr = dma_alloc_coherent(NULL, s, &wcn->data_mem_pool.phy_addr,
				      GFP_KERNEL);
	if (!cpu_addr)
		goto out_err;

	wcn->data_mem_pool.virt_addr = cpu_addr;
	memset(cpu_addr, 0, s);

	return 0;

out_err:
	wcn36xx_dxe_free_mem_pools(wcn);
	wcn36xx_err("Failed to allocate BD mempool\n");
	return -ENOMEM;
}
void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn)
{
	if (wcn->mgmt_mem_pool.virt_addr)
		dma_free_coherent(NULL, wcn->mgmt_mem_pool.chunk_size *
				  WCN36XX_DXE_CH_DESC_NUMB_TX_H,
				  wcn->mgmt_mem_pool.virt_addr,
				  wcn->mgmt_mem_pool.phy_addr);

	if (wcn->data_mem_pool.virt_addr) {
		dma_free_coherent(NULL, wcn->data_mem_pool.chunk_size *
				  WCN36XX_DXE_CH_DESC_NUMB_TX_L,
				  wcn->data_mem_pool.virt_addr,
				  wcn->data_mem_pool.phy_addr);
	}
}
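/*
 * Queue one frame for transmission.  Each frame occupies two descriptors
 * on the chosen TX ring: the first carries the preallocated BD header
 * (ctrl_bd), the second the DMA-mapped skb payload (ctrl_skb).  If the
 * next slot still holds an unreaped skb the ring is full, so mac80211
 * queues are stopped until reap_tx_dxes() frees a slot.
 */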
int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
			 struct wcn36xx_vif *vif_priv,
			 struct sk_buff *skb,
			 bool is_low)
{
	struct wcn36xx_dxe_ctl *ctl = NULL;
	struct wcn36xx_dxe_desc *desc = NULL;
	struct wcn36xx_dxe_ch *ch = NULL;
	unsigned long flags;

	ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;

	ctl = ch->head_blk_ctl;

	spin_lock_irqsave(&ctl->next->skb_lock, flags);

	/*
	 * If skb is not NULL that means that we reached the tail of the ring,
	 * hence the ring is full. Stop queues to let mac80211 back off until
	 * the ring has an empty slot again.
	 */
	if (NULL != ctl->next->skb) {
		ieee80211_stop_queues(wcn->hw);
		wcn->queues_stopped = true;
		spin_unlock_irqrestore(&ctl->next->skb_lock, flags);
		return -EBUSY;
	}
	spin_unlock_irqrestore(&ctl->next->skb_lock, flags);

	ctl->skb = NULL;
	desc = ctl->desc;

	/* Set source address of the BD we send */
	desc->src_addr_l = ctl->bd_phy_addr;

	desc->dst_addr_l = ch->dxe_wq;
	desc->fr_len = sizeof(struct wcn36xx_tx_bd);
	desc->ctrl = ch->ctrl_bd;

	wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX\n");

	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ",
			 (char *)desc, sizeof(*desc));
	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP,
			 "BD >>> ", (char *)ctl->bd_cpu_addr,
			 sizeof(struct wcn36xx_tx_bd));

	/* Set source address of the SKB we send */
	ctl = ctl->next;
	ctl->skb = skb;
	desc = ctl->desc;
	if (ctl->bd_cpu_addr) {
		wcn36xx_err("bd_cpu_addr cannot be NULL for skb DXE\n");
		return -EINVAL;
	}

	desc->src_addr_l = dma_map_single(NULL,
					  ctl->skb->data,
					  ctl->skb->len,
					  DMA_TO_DEVICE);

	desc->dst_addr_l = ch->dxe_wq;
	desc->fr_len = ctl->skb->len;

	/* set dxe descriptor to VALID */
	desc->ctrl = ch->ctrl_skb;

	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC2 >>> ",
			 (char *)desc, sizeof(*desc));
	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "SKB >>> ",
			 (char *)ctl->skb->data, ctl->skb->len);

	/* Move the head of the ring to the next empty descriptor */
	ch->head_blk_ctl = ctl->next;

	/*
	 * When connected and trying to send a data frame the chip can be in
	 * sleep mode, and writing to the register will not wake it up.
	 * Instead, notify the chip about the new frame through the SMSM bus.
	 */
	if (is_low && vif_priv->pw_state == WCN36XX_BMPS) {
		wcn->ctrl_ops->smsm_change_state(
				  0,
				  WCN36XX_SMSM_WLAN_TX_ENABLE);
	} else {
		/* Indicate End Of Packet and generate an interrupt on
		 * descriptor done.
		 */
		wcn36xx_dxe_write_register(wcn,
			ch->reg_ctrl, ch->def_ctrl);
	}

	return 0;
}
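/*
 * Bring the DXE engine up: reset it, route its interrupts, then for each
 * of the four channels program the descriptor ring head, the source or
 * destination work queue and the control register, enable the per-channel
 * interrupts, and finally request the TX/RX IRQ lines.
 */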
int wcn36xx_dxe_init(struct wcn36xx *wcn)
{
	int reg_data = 0, ret;

	reg_data = WCN36XX_DXE_REG_RESET;
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);

	/* Setting interrupt path */
	reg_data = WCN36XX_DXE_CCU_INT;
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CCU_INT, reg_data);

	/***************************************/
	/* Init descriptors for TX LOW channel */
	/***************************************/
	wcn36xx_dxe_init_descs(&wcn->dxe_tx_l_ch);
	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_l_ch, &wcn->data_mem_pool);

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_L,
		wcn->dxe_tx_l_ch.head_blk_ctl->desc_phy_addr);

	/* Program DMA destination addr for TX LOW */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_DEST_ADDR_TX_L,
		WCN36XX_DXE_WQ_TX_L);

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);

	/****************************************/
	/* Init descriptors for TX HIGH channel */
	/****************************************/
	wcn36xx_dxe_init_descs(&wcn->dxe_tx_h_ch);
	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_h_ch, &wcn->mgmt_mem_pool);

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_H,
		wcn->dxe_tx_h_ch.head_blk_ctl->desc_phy_addr);

	/* Program DMA destination addr for TX HIGH */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_DEST_ADDR_TX_H,
		WCN36XX_DXE_WQ_TX_H);

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);

	/***************************************/
	/* Init descriptors for RX LOW channel */
	/***************************************/
	wcn36xx_dxe_init_descs(&wcn->dxe_rx_l_ch);

	/* For RX we need to preallocate buffers */
	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_L,
		wcn->dxe_rx_l_ch.head_blk_ctl->desc_phy_addr);

	/* Write DMA source address */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_SRC_ADDR_RX_L,
		WCN36XX_DXE_WQ_RX_L);

	/* Program preallocated destination address */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_DEST_ADDR_RX_L,
		wcn->dxe_rx_l_ch.head_blk_ctl->desc->phy_next_l);

	/* Enable default control registers */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_REG_CTL_RX_L,
		WCN36XX_DXE_CH_DEFAULT_CTL_RX_L);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);

	/****************************************/
	/* Init descriptors for RX HIGH channel */
	/****************************************/
	wcn36xx_dxe_init_descs(&wcn->dxe_rx_h_ch);

	/* For RX we need to preallocate buffers */
	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_h_ch);

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_H,
		wcn->dxe_rx_h_ch.head_blk_ctl->desc_phy_addr);

	/* Write DMA source address */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_SRC_ADDR_RX_H,
		WCN36XX_DXE_WQ_RX_H);

	/* Program preallocated destination address */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_DEST_ADDR_RX_H,
		wcn->dxe_rx_h_ch.head_blk_ctl->desc->phy_next_l);

	/* Enable default control registers */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_REG_CTL_RX_H,
		WCN36XX_DXE_CH_DEFAULT_CTL_RX_H);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);

	ret = wcn36xx_dxe_request_irqs(wcn);
	if (ret < 0)
		goto out_err;

	return 0;

out_err:
	return ret;
}
void wcn36xx_dxe_deinit(struct wcn36xx *wcn)
{
	free_irq(wcn->tx_irq, wcn);
	free_irq(wcn->rx_irq, wcn);

	if (wcn->tx_ack_skb) {
		ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
		wcn->tx_ack_skb = NULL;
	}

	wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch);
	wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch);
}