/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>

#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"
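
/* Encode a queue depth given in bytes into the compact 8-bit HTT tx queue
 * state entry: a 6-bit factor plus a 2-bit exponent, i.e. roughly
 * count = factor * 128 * 8^exp. 0xff denotes a depth too large to encode.
 */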
static u8 ath10k_htt_tx_txq_calc_size(size_t count)
{
	int exp;
	int factor;

	exp = 0;
	factor = count >> 7;

	while (factor >= 64 && exp < 4) {
		factor >>= 3;
		exp++;
	}

	if (exp == 4)
		return 0xff;

	if (count > 0)
		factor = max(1, factor);

	return SM(exp, HTT_TX_Q_STATE_ENTRY_EXP) |
	       SM(factor, HTT_TX_Q_STATE_ENTRY_FACTOR);
}
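
/* Recompute the queue depth entry for a single ieee80211 txq and update the
 * tx queue state map shared with the firmware. This is only meaningful in
 * push-pull mode, i.e. when the firmware fetches frames on its own accord.
 */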
static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
				       struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;
	struct ath10k_sta *arsta;
	struct ath10k_vif *arvif = (void *)txq->vif->drv_priv;
	unsigned long frame_cnt;
	unsigned long byte_cnt;
	int idx;
	u32 bit;
	u16 peer_id;
	u8 tid;
	u8 count;

	lockdep_assert_held(&ar->htt.tx_lock);

	if (!ar->htt.tx_q_state.enabled)
		return;

	if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
		return;

	if (txq->sta) {
		arsta = (void *)txq->sta->drv_priv;
		peer_id = arsta->peer_id;
	} else {
		peer_id = arvif->peer_id;
	}

	tid = txq->tid;
	bit = BIT(peer_id % 32);
	idx = peer_id / 32;

	ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt);
	count = ath10k_htt_tx_txq_calc_size(byte_cnt);

	if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
	    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
		ath10k_warn(ar, "refusing to update txq for peer_id %hu tid %hhu due to out of bounds\n",
			    peer_id, tid);
		return;
	}

	ar->htt.tx_q_state.vaddr->count[tid][peer_id] = count;
	ar->htt.tx_q_state.vaddr->map[tid][idx] &= ~bit;
	ar->htt.tx_q_state.vaddr->map[tid][idx] |= count ? bit : 0;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update peer_id %hu tid %hhu count %hhu\n",
		   peer_id, tid, count);
}
static void __ath10k_htt_tx_txq_sync(struct ath10k *ar)
{
	u32 seq;
	size_t size;

	lockdep_assert_held(&ar->htt.tx_lock);

	if (!ar->htt.tx_q_state.enabled)
		return;

	if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
		return;

	seq = le32_to_cpu(ar->htt.tx_q_state.vaddr->seq);
	seq++;
	ar->htt.tx_q_state.vaddr->seq = cpu_to_le32(seq);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update commit seq %u\n",
		   seq);

	size = sizeof(*ar->htt.tx_q_state.vaddr);
	dma_sync_single_for_device(ar->dev,
				   ar->htt.tx_q_state.paddr,
				   size,
				   DMA_TO_DEVICE);
}

void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;

	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_recalc(hw, txq);
	spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_txq_sync(struct ath10k *ar)
{
	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_sync(ar);
	spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;

	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_recalc(hw, txq);
	__ath10k_htt_tx_txq_sync(ar);
	spin_unlock_bh(&ar->htt.tx_lock);
}
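
/* Pending tx accounting: the number of in-flight MSDUs is capped at
 * max_num_pending_tx. The mac80211 tx queues are paused when the limit is
 * reached and woken again as soon as a slot frees up.
 */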
void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	htt->num_pending_tx--;
	if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
		ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
}

int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	if (htt->num_pending_tx >= htt->max_num_pending_tx)
		return -EBUSY;

	htt->num_pending_tx++;
	if (htt->num_pending_tx == htt->max_num_pending_tx)
		ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);

	return 0;
}

int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
				   bool is_presp)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	if (!is_mgmt || !ar->hw_params.max_probe_resp_desc_thres)
		return 0;

	if (is_presp &&
	    ar->hw_params.max_probe_resp_desc_thres < htt->num_pending_mgmt_tx)
		return -EBUSY;

	htt->num_pending_mgmt_tx++;

	return 0;
}

void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	if (!htt->ar->hw_params.max_probe_resp_desc_thres)
		return;

	htt->num_pending_mgmt_tx--;
}
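
/* An msdu_id is an IDR handle mapping a pending MSDU to its skb. It is
 * echoed back by the firmware in tx completions so the skb can be looked
 * up and unreferenced.
 */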
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
{
	struct ath10k *ar = htt->ar;
	int ret;

	spin_lock_bh(&htt->tx_lock);
	ret = idr_alloc(&htt->pending_tx, skb, 0,
			htt->max_num_pending_tx, GFP_ATOMIC);
	spin_unlock_bh(&htt->tx_lock);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);

	return ret;
}

void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);

	idr_remove(&htt->pending_tx, msdu_id);
}
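
/* The following helpers manage the DMA-coherent pool of per-MSDU HTT tx
 * buffers (HTC/HTT headers plus fragment lists). The _32/_64 variants
 * differ only in the descriptor layout used by 32-bit and 64-bit targets.
 */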
static void ath10k_htt_tx_free_cont_txbuf_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!htt->txbuf.vaddr_txbuff_32)
		return;

	size = htt->txbuf.size;
	dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_32,
			  htt->txbuf.paddr);
	htt->txbuf.vaddr_txbuff_32 = NULL;
}

static int ath10k_htt_tx_alloc_cont_txbuf_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	size = htt->max_num_pending_tx *
	       sizeof(struct ath10k_htt_txbuf_32);

	htt->txbuf.vaddr_txbuff_32 = dma_alloc_coherent(ar->dev, size,
							&htt->txbuf.paddr,
							GFP_KERNEL);
	if (!htt->txbuf.vaddr_txbuff_32)
		return -ENOMEM;

	htt->txbuf.size = size;

	return 0;
}

static void ath10k_htt_tx_free_cont_txbuf_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!htt->txbuf.vaddr_txbuff_64)
		return;

	size = htt->txbuf.size;
	dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_64,
			  htt->txbuf.paddr);
	htt->txbuf.vaddr_txbuff_64 = NULL;
}

static int ath10k_htt_tx_alloc_cont_txbuf_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	size = htt->max_num_pending_tx *
	       sizeof(struct ath10k_htt_txbuf_64);

	htt->txbuf.vaddr_txbuff_64 = dma_alloc_coherent(ar->dev, size,
							&htt->txbuf.paddr,
							GFP_KERNEL);
	if (!htt->txbuf.vaddr_txbuff_64)
		return -ENOMEM;

	htt->txbuf.size = size;

	return 0;
}

static void ath10k_htt_tx_free_cont_frag_desc_32(struct ath10k_htt *htt)
{
	size_t size;

	if (!htt->frag_desc.vaddr_desc_32)
		return;

	size = htt->max_num_pending_tx *
	       sizeof(struct htt_msdu_ext_desc);

	dma_free_coherent(htt->ar->dev,
			  size,
			  htt->frag_desc.vaddr_desc_32,
			  htt->frag_desc.paddr);

	htt->frag_desc.vaddr_desc_32 = NULL;
}

static int ath10k_htt_tx_alloc_cont_frag_desc_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	size = htt->max_num_pending_tx *
	       sizeof(struct htt_msdu_ext_desc);
	htt->frag_desc.vaddr_desc_32 = dma_alloc_coherent(ar->dev, size,
							  &htt->frag_desc.paddr,
							  GFP_KERNEL);
	if (!htt->frag_desc.vaddr_desc_32) {
		ath10k_err(ar, "failed to alloc fragment desc memory\n");
		return -ENOMEM;
	}
	htt->frag_desc.size = size;

	return 0;
}

static void ath10k_htt_tx_free_cont_frag_desc_64(struct ath10k_htt *htt)
{
	size_t size;

	if (!htt->frag_desc.vaddr_desc_64)
		return;

	size = htt->max_num_pending_tx *
	       sizeof(struct htt_msdu_ext_desc_64);

	dma_free_coherent(htt->ar->dev,
			  size,
			  htt->frag_desc.vaddr_desc_64,
			  htt->frag_desc.paddr);

	htt->frag_desc.vaddr_desc_64 = NULL;
}

static int ath10k_htt_tx_alloc_cont_frag_desc_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	size = htt->max_num_pending_tx *
	       sizeof(struct htt_msdu_ext_desc_64);
	htt->frag_desc.vaddr_desc_64 = dma_alloc_coherent(ar->dev, size,
							  &htt->frag_desc.paddr,
							  GFP_KERNEL);
	if (!htt->frag_desc.vaddr_desc_64) {
		ath10k_err(ar, "failed to alloc fragment desc memory\n");
		return -ENOMEM;
	}
	htt->frag_desc.size = size;

	return 0;
}
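
/* The tx queue state buffer is shared with the firmware when the peer flow
 * control feature is negotiated; it is allocated here and streamed to the
 * device via dma_map_single()/dma_sync_single_for_device().
 */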
static void ath10k_htt_tx_free_txq(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		      ar->running_fw->fw_file.fw_features))
		return;

	size = sizeof(*htt->tx_q_state.vaddr);

	dma_unmap_single(ar->dev, htt->tx_q_state.paddr, size, DMA_TO_DEVICE);
	kfree(htt->tx_q_state.vaddr);
}

static int ath10k_htt_tx_alloc_txq(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;
	int ret;

	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		      ar->running_fw->fw_file.fw_features))
		return 0;

	htt->tx_q_state.num_peers = HTT_TX_Q_STATE_NUM_PEERS;
	htt->tx_q_state.num_tids = HTT_TX_Q_STATE_NUM_TIDS;
	htt->tx_q_state.type = HTT_Q_DEPTH_TYPE_BYTES;

	size = sizeof(*htt->tx_q_state.vaddr);
	htt->tx_q_state.vaddr = kzalloc(size, GFP_KERNEL);
	if (!htt->tx_q_state.vaddr)
		return -ENOMEM;

	htt->tx_q_state.paddr = dma_map_single(ar->dev, htt->tx_q_state.vaddr,
					       size, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, htt->tx_q_state.paddr);
	if (ret) {
		ath10k_warn(ar, "failed to dma map tx_q_state: %d\n", ret);
		kfree(htt->tx_q_state.vaddr);
		return -EIO;
	}

	return 0;
}

static void ath10k_htt_tx_free_txdone_fifo(struct ath10k_htt *htt)
{
	WARN_ON(!kfifo_is_empty(&htt->txdone_fifo));
	kfifo_free(&htt->txdone_fifo);
}

static int ath10k_htt_tx_alloc_txdone_fifo(struct ath10k_htt *htt)
{
	size_t size;
	int ret;

	size = roundup_pow_of_two(htt->max_num_pending_tx);
	ret = kfifo_alloc(&htt->txdone_fifo, size, GFP_KERNEL);

	return ret;
}

static int ath10k_htt_tx_alloc_buf(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int ret;

	ret = ath10k_htt_alloc_txbuff(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc cont tx buffer: %d\n", ret);
		return ret;
	}

	ret = ath10k_htt_alloc_frag_desc(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret);
		goto free_txbuf;
	}

	ret = ath10k_htt_tx_alloc_txq(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc txq: %d\n", ret);
		goto free_frag_desc;
	}

	ret = ath10k_htt_tx_alloc_txdone_fifo(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc txdone fifo: %d\n", ret);
		goto free_txq;
	}

	return 0;

free_txq:
	ath10k_htt_tx_free_txq(htt);

free_frag_desc:
	ath10k_htt_free_frag_desc(htt);

free_txbuf:
	ath10k_htt_free_txbuff(htt);

	return ret;
}

int ath10k_htt_tx_start(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
		   htt->max_num_pending_tx);

	spin_lock_init(&htt->tx_lock);
	idr_init(&htt->pending_tx);

	if (htt->tx_mem_allocated)
		return 0;

	if (ar->dev_type == ATH10K_DEV_TYPE_HL)
		return 0;

	ret = ath10k_htt_tx_alloc_buf(htt);
	if (ret)
		goto free_idr_pending_tx;

	htt->tx_mem_allocated = true;

	return 0;

free_idr_pending_tx:
	idr_destroy(&htt->pending_tx);

	return ret;
}

static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
{
	struct ath10k *ar = ctx;
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {0};

	ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);

	tx_done.msdu_id = msdu_id;
	tx_done.status = HTT_TX_COMPL_STATE_DISCARD;

	ath10k_txrx_tx_unref(htt, &tx_done);

	return 0;
}

void ath10k_htt_tx_destroy(struct ath10k_htt *htt)
{
	if (!htt->tx_mem_allocated)
		return;

	ath10k_htt_free_txbuff(htt);
	ath10k_htt_tx_free_txq(htt);
	ath10k_htt_free_frag_desc(htt);
	ath10k_htt_tx_free_txdone_fifo(htt);
	htt->tx_mem_allocated = false;
}

void ath10k_htt_tx_stop(struct ath10k_htt *htt)
{
	idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
	idr_destroy(&htt->pending_tx);
}

void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
	ath10k_htt_tx_stop(htt);
	ath10k_htt_tx_destroy(htt);
}

void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_hif_tx_complete);
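
/* Request the HTT protocol version from the firmware. This is one of the
 * first HTT messages sent during setup; the reply is handled in the HTT
 * rx path.
 */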
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0;
	int ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->ver_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
{
	struct ath10k *ar = htt->ar;
	struct htt_stats_req *req;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0;
	int ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->stats_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;

	req = &cmd->stats_req;

	memset(req, 0, sizeof(*req));

	/* currently we support only max 8 bit masks so no need to worry
	 * about endian support
	 */
	req->upload_types[0] = mask;
	req->reset_types[0] = mask;
	req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
	req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
	req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send htt type stats request: %d",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
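
/* Tell the firmware where the continuous fragment descriptor bank lives
 * and, if peer flow control is in use, where the shared tx queue state
 * resides.
 */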
static int ath10k_htt_send_frag_desc_bank_cfg_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_frag_desc_bank_cfg32 *cfg;
	int ret, size;
	u8 info;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	if (!htt->frag_desc.paddr) {
		ath10k_warn(ar, "invalid frag desc memory\n");
		return -EINVAL;
	}

	size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg32);
	skb = ath10k_htc_alloc_skb(ar, size);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, size);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;

	info = 0;
	info |= SM(htt->tx_q_state.type,
		   HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);

	if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		     ar->running_fw->fw_file.fw_features))
		info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;

	cfg = &cmd->frag_desc_bank_cfg32;
	cfg->info = info;
	cfg->num_banks = 1;
	cfg->desc_size = sizeof(struct htt_msdu_ext_desc);
	cfg->bank_base_addrs[0] = __cpu_to_le32(htt->frag_desc.paddr);
	cfg->bank_id[0].bank_min_id = 0;
	cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
						    1);

	cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
	cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
	cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
	cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
	cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static int ath10k_htt_send_frag_desc_bank_cfg_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_frag_desc_bank_cfg64 *cfg;
	int ret, size;
	u8 info;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	if (!htt->frag_desc.paddr) {
		ath10k_warn(ar, "invalid frag desc memory\n");
		return -EINVAL;
	}

	size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg64);
	skb = ath10k_htc_alloc_skb(ar, size);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, size);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;

	info = 0;
	info |= SM(htt->tx_q_state.type,
		   HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);

	if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		     ar->running_fw->fw_file.fw_features))
		info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;

	cfg = &cmd->frag_desc_bank_cfg64;
	cfg->info = info;
	cfg->num_banks = 1;
	cfg->desc_size = sizeof(struct htt_msdu_ext_desc_64);
	cfg->bank_base_addrs[0] = __cpu_to_le64(htt->frag_desc.paddr);
	cfg->bank_id[0].bank_min_id = 0;
	cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
						    1);

	cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
	cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
	cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
	cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
	cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
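
/* Fill in the rx descriptor field offsets, expressed in 4-byte words, so
 * the firmware knows the layout of struct htt_rx_desc used by the host.
 */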
static void ath10k_htt_fill_rx_desc_offset_32(void *rx_ring)
{
	struct htt_rx_ring_setup_ring32 *ring =
			(struct htt_rx_ring_setup_ring32 *)rx_ring;

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
#undef desc_offset
}

static void ath10k_htt_fill_rx_desc_offset_64(void *rx_ring)
{
	struct htt_rx_ring_setup_ring64 *ring =
			(struct htt_rx_ring_setup_ring64 *)rx_ring;

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
#undef desc_offset
}
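
/* Configure the rx ring in the firmware: ring base and length, buffer
 * size, the set of descriptor parts to deliver and the initial fill index.
 */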
static int ath10k_htt_send_rx_ring_cfg_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring32 *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/*
	 * the HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup_32.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup_32.hdr.num_rings = 1;

	/* FIXME: do we need all of this? */
	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr =
		__cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

	ath10k_htt_fill_rx_desc_offset_32(ring);
	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static int ath10k_htt_send_rx_ring_cfg_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring64 *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/* HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_64.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup_64.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup_64.hdr.num_rings = 1;

	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr = __cpu_to_le64(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le64(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

	ath10k_htt_fill_rx_desc_offset_64(ring);
	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static int ath10k_htt_send_rx_ring_cfg_hl(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring32 *ring;
	const int num_rx_ring = 1;
	u16 flags;
	int len;
	int ret;

	/*
	 * the HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup_32.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup_32.hdr.num_rings = 1;

	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;

	memset(ring, 0, sizeof(*ring));
	ring->rx_ring_len = __cpu_to_le16(HTT_RX_RING_SIZE_MIN);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
				u8 max_subfrms_ampdu,
				u8 max_subfrms_amsdu)
{
	struct ath10k *ar = htt->ar;
	struct htt_aggr_conf *aggr_conf;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len;
	int ret;

	/* Firmware defaults are: amsdu = 3 and ampdu = 64 */

	if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
		return -EINVAL;

	if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
		return -EINVAL;

	len = sizeof(cmd->hdr);
	len += sizeof(cmd->aggr_conf);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

	aggr_conf = &cmd->aggr_conf;
	aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
	aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
		   aggr_conf->max_num_amsdu_subframes,
		   aggr_conf->max_num_ampdu_subframes);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
			     __le32 token,
			     __le16 fetch_seq_num,
			     struct htt_tx_fetch_record *records,
			     size_t num_records)
{
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	const u16 resp_id = 0;
	int len = 0;
	int ret;

	/* Response IDs are echo-ed back only for host driver convenience
	 * purposes. They aren't used for anything in the driver yet so use 0.
	 */

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->tx_fetch_resp);
	len += sizeof(cmd->tx_fetch_resp.records[0]) * num_records;

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FETCH_RESP;
	cmd->tx_fetch_resp.resp_id = cpu_to_le16(resp_id);
	cmd->tx_fetch_resp.fetch_seq_num = fetch_seq_num;
	cmd->tx_fetch_resp.num_records = cpu_to_le16(num_records);
	cmd->tx_fetch_resp.token = token;

	memcpy(cmd->tx_fetch_resp.records, records,
	       sizeof(records[0]) * num_records);

	ret = ath10k_htc_send(&ar->htc, ar->htt.eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to submit htc command: %d\n", ret);
		goto err_free_skb;
	}

	return 0;

err_free_skb:
	dev_kfree_skb_any(skb);

	return ret;
}

static u8 ath10k_htt_tx_get_vdev_id(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
	struct ath10k_vif *arvif;

	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
		return ar->scan.vdev_id;
	} else if (cb->vif) {
		arvif = (void *)cb->vif->drv_priv;
		return arvif->vdev_id;
	} else if (ar->monitor_started) {
		return ar->monitor_vdev_id;
	} else {
		return 0;
	}
}

static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);

	if (!is_eth && ieee80211_is_mgmt(hdr->frame_control))
		return HTT_DATA_TX_EXT_TID_MGMT;
	else if (cb->flags & ATH10K_SKB_F_QOS)
		return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	else
		return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
}
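
/* Transmit a management frame via the dedicated HTT_H2T_MSG_TYPE_MGMT_TX
 * command. The frame is DMA mapped and its first bytes are copied into the
 * command so the firmware can parse the 802.11 header.
 */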
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct sk_buff *txdesc = NULL;
	struct htt_cmd *cmd;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	int len = 0;
	int msdu_id = -1;
	int res;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->mgmt_tx);

	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	if (res < 0)
		return res;

	msdu_id = res;

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	}

	txdesc = ath10k_htc_alloc_skb(ar, len);
	if (!txdesc) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_txdesc;
	}

	skb_put(txdesc, len);
	cmd = (struct htt_cmd *)txdesc->data;
	memset(cmd, 0, len);

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
	cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
	cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
	cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
	cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
	memcpy(cmd->mgmt_tx.hdr, msdu->data,
	       min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	if (ar->dev_type != ATH10K_DEV_TYPE_HL)
		dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
	dev_kfree_skb_any(txdesc);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);

	return res;
}

#define HTT_TX_HL_NEEDED_HEADROOM \
	(unsigned int)(sizeof(struct htt_cmd_hdr) + \
	sizeof(struct htt_data_tx_desc) + \
	sizeof(struct ath10k_htc_hdr))

static int ath10k_htt_tx_hl(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
			    struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	int res, data_len;
	struct htt_cmd_hdr *cmd_hdr;
	struct htt_data_tx_desc *tx_desc;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct sk_buff *tmp_skb;
	bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
	u8 flags0 = 0;
	u16 flags1 = 0;

	data_len = msdu->len;

	switch (txmode) {
	case ATH10K_HW_TXRX_RAW:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		/* fall through */
	case ATH10K_HW_TXRX_ETHERNET:
		flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		break;
	case ATH10K_HW_TXRX_MGMT:
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		break;
	}

	if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
	}

	/* Prepend the HTT header and TX desc struct to the data message
	 * and realloc the skb if it does not have enough headroom.
	 */
	if (skb_headroom(msdu) < HTT_TX_HL_NEEDED_HEADROOM) {
		tmp_skb = msdu;

		ath10k_dbg(htt->ar, ATH10K_DBG_HTT,
			   "Not enough headroom in skb. Current headroom: %u, needed: %u. Reallocating...\n",
			   skb_headroom(msdu), HTT_TX_HL_NEEDED_HEADROOM);
		msdu = skb_realloc_headroom(msdu, HTT_TX_HL_NEEDED_HEADROOM);
		kfree_skb(tmp_skb);
		if (!msdu) {
			ath10k_warn(htt->ar, "htt hl tx: Unable to realloc skb!\n");
			res = -ENOMEM;
			goto out;
		}
	}

	skb_push(msdu, sizeof(*cmd_hdr));
	skb_push(msdu, sizeof(*tx_desc));
	cmd_hdr = (struct htt_cmd_hdr *)msdu->data;
	tx_desc = (struct htt_data_tx_desc *)(msdu->data + sizeof(*cmd_hdr));

	cmd_hdr->msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	tx_desc->flags0 = flags0;
	tx_desc->flags1 = __cpu_to_le16(flags1);
	tx_desc->len = __cpu_to_le16(data_len);
	tx_desc->id = 0;
	tx_desc->frags_paddr = 0; /* always zero */
	/* Initialize peer_id to INVALID_PEER because this is NOT
	 * the reinjection path
	 */
	tx_desc->peerid = __cpu_to_le32(HTT_INVALID_PEERID);

	res = ath10k_htc_send(&htt->ar->htc, htt->eid, msdu);

out:
	return res;
}
*htt
,
1281 enum ath10k_hw_txrx_mode txmode
,
1282 struct sk_buff
*msdu
)
1284 struct ath10k
*ar
= htt
->ar
;
1285 struct device
*dev
= ar
->dev
;
1286 struct ieee80211_hdr
*hdr
= (struct ieee80211_hdr
*)msdu
->data
;
1287 struct ieee80211_tx_info
*info
= IEEE80211_SKB_CB(msdu
);
1288 struct ath10k_skb_cb
*skb_cb
= ATH10K_SKB_CB(msdu
);
1289 struct ath10k_hif_sg_item sg_items
[2];
1290 struct ath10k_htt_txbuf_32
*txbuf
;
1291 struct htt_data_tx_desc_frag
*frags
;
1292 bool is_eth
= (txmode
== ATH10K_HW_TXRX_ETHERNET
);
1293 u8 vdev_id
= ath10k_htt_tx_get_vdev_id(ar
, msdu
);
1294 u8 tid
= ath10k_htt_tx_get_tid(msdu
, is_eth
);
1298 u16 msdu_id
, flags1
= 0;
1300 u32 frags_paddr
= 0;
1302 struct htt_msdu_ext_desc
*ext_desc
= NULL
;
1303 struct htt_msdu_ext_desc
*ext_desc_t
= NULL
;
1305 res
= ath10k_htt_tx_alloc_msdu_id(htt
, msdu
);
1311 prefetch_len
= min(htt
->prefetch_len
, msdu
->len
);
1312 prefetch_len
= roundup(prefetch_len
, 4);
1314 txbuf
= htt
->txbuf
.vaddr_txbuff_32
+ msdu_id
;
1315 txbuf_paddr
= htt
->txbuf
.paddr
+
1316 (sizeof(struct ath10k_htt_txbuf_32
) * msdu_id
);
1318 if ((ieee80211_is_action(hdr
->frame_control
) ||
1319 ieee80211_is_deauth(hdr
->frame_control
) ||
1320 ieee80211_is_disassoc(hdr
->frame_control
)) &&
1321 ieee80211_has_protected(hdr
->frame_control
)) {
1322 skb_put(msdu
, IEEE80211_CCMP_MIC_LEN
);
1323 } else if (!(skb_cb
->flags
& ATH10K_SKB_F_NO_HWCRYPT
) &&
1324 txmode
== ATH10K_HW_TXRX_RAW
&&
1325 ieee80211_has_protected(hdr
->frame_control
)) {
1326 skb_put(msdu
, IEEE80211_CCMP_MIC_LEN
);
1329 skb_cb
->paddr
= dma_map_single(dev
, msdu
->data
, msdu
->len
,
1331 res
= dma_mapping_error(dev
, skb_cb
->paddr
);
1334 goto err_free_msdu_id
;
1337 if (unlikely(info
->flags
& IEEE80211_TX_CTL_TX_OFFCHAN
))
1338 freq
= ar
->scan
.roc_freq
;
1341 case ATH10K_HW_TXRX_RAW
:
1342 case ATH10K_HW_TXRX_NATIVE_WIFI
:
1343 flags0
|= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT
;
1345 case ATH10K_HW_TXRX_ETHERNET
:
1346 if (ar
->hw_params
.continuous_frag_desc
) {
1347 ext_desc_t
= htt
->frag_desc
.vaddr_desc_32
;
1348 memset(&ext_desc_t
[msdu_id
], 0,
1349 sizeof(struct htt_msdu_ext_desc
));
1350 frags
= (struct htt_data_tx_desc_frag
*)
1351 &ext_desc_t
[msdu_id
].frags
;
1352 ext_desc
= &ext_desc_t
[msdu_id
];
1353 frags
[0].tword_addr
.paddr_lo
=
1354 __cpu_to_le32(skb_cb
->paddr
);
1355 frags
[0].tword_addr
.paddr_hi
= 0;
1356 frags
[0].tword_addr
.len_16
= __cpu_to_le16(msdu
->len
);
1358 frags_paddr
= htt
->frag_desc
.paddr
+
1359 (sizeof(struct htt_msdu_ext_desc
) * msdu_id
);
1361 frags
= txbuf
->frags
;
1362 frags
[0].dword_addr
.paddr
=
1363 __cpu_to_le32(skb_cb
->paddr
);
1364 frags
[0].dword_addr
.len
= __cpu_to_le32(msdu
->len
);
1365 frags
[1].dword_addr
.paddr
= 0;
1366 frags
[1].dword_addr
.len
= 0;
1368 frags_paddr
= txbuf_paddr
;
1370 flags0
|= SM(txmode
, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE
);
1372 case ATH10K_HW_TXRX_MGMT
:
1373 flags0
|= SM(ATH10K_HW_TXRX_MGMT
,
1374 HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE
);
1375 flags0
|= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT
;
1377 frags_paddr
= skb_cb
->paddr
;
1381 /* Normally all commands go through HTC which manages tx credits for
1382 * each endpoint and notifies when tx is completed.
1384 * HTT endpoint is creditless so there's no need to care about HTC
1385 * flags. In that case it is trivial to fill the HTC header here.
1387 * MSDU transmission is considered completed upon HTT event. This
1388 * implies no relevant resources can be freed until after the event is
1389 * received. That's why HTC tx completion handler itself is ignored by
1390 * setting NULL to transfer_context for all sg items.
1392 * There is simply no point in pushing HTT TX_FRM through HTC tx path
1393 * as it's a waste of resources. By bypassing HTC it is possible to
1394 * avoid extra memory allocations, compress data structures and thus
1395 * improve performance.
1398 txbuf
->htc_hdr
.eid
= htt
->eid
;
1399 txbuf
->htc_hdr
.len
= __cpu_to_le16(sizeof(txbuf
->cmd_hdr
) +
1400 sizeof(txbuf
->cmd_tx
) +
1402 txbuf
->htc_hdr
.flags
= 0;
1404 if (skb_cb
->flags
& ATH10K_SKB_F_NO_HWCRYPT
)
1405 flags0
|= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT
;
1407 flags1
|= SM((u16
)vdev_id
, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID
);
1408 flags1
|= SM((u16
)tid
, HTT_DATA_TX_DESC_FLAGS1_EXT_TID
);
1409 if (msdu
->ip_summed
== CHECKSUM_PARTIAL
&&
1410 !test_bit(ATH10K_FLAG_RAW_MODE
, &ar
->dev_flags
)) {
1411 flags1
|= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD
;
1412 flags1
|= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD
;
1413 if (ar
->hw_params
.continuous_frag_desc
)
1414 ext_desc
->flags
|= HTT_MSDU_CHECKSUM_ENABLE
;
1417 /* Prevent firmware from sending up tx inspection requests. There's
1418 * nothing ath10k can do with frames requested for inspection so force
1419 * it to simply rely a regular tx completion with discard status.
1421 flags1
|= HTT_DATA_TX_DESC_FLAGS1_POSTPONED
;
1423 txbuf
->cmd_hdr
.msg_type
= HTT_H2T_MSG_TYPE_TX_FRM
;
1424 txbuf
->cmd_tx
.flags0
= flags0
;
1425 txbuf
->cmd_tx
.flags1
= __cpu_to_le16(flags1
);
1426 txbuf
->cmd_tx
.len
= __cpu_to_le16(msdu
->len
);
1427 txbuf
->cmd_tx
.id
= __cpu_to_le16(msdu_id
);
1428 txbuf
->cmd_tx
.frags_paddr
= __cpu_to_le32(frags_paddr
);
1429 if (ath10k_mac_tx_frm_has_freq(ar
)) {
1430 txbuf
->cmd_tx
.offchan_tx
.peerid
=
1431 __cpu_to_le16(HTT_INVALID_PEERID
);
1432 txbuf
->cmd_tx
.offchan_tx
.freq
=
1433 __cpu_to_le16(freq
);
1435 txbuf
->cmd_tx
.peerid
=
1436 __cpu_to_le32(HTT_INVALID_PEERID
);
1439 trace_ath10k_htt_tx(ar
, msdu_id
, msdu
->len
, vdev_id
, tid
);
1440 ath10k_dbg(ar
, ATH10K_DBG_HTT
,
1441 "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %pad, msdu_paddr %pad vdev %hhu tid %hhu freq %hu\n",
1442 flags0
, flags1
, msdu
->len
, msdu_id
, &frags_paddr
,
1443 &skb_cb
->paddr
, vdev_id
, tid
, freq
);
1444 ath10k_dbg_dump(ar
, ATH10K_DBG_HTT_DUMP
, NULL
, "htt tx msdu: ",
1445 msdu
->data
, msdu
->len
);
1446 trace_ath10k_tx_hdr(ar
, msdu
->data
, msdu
->len
);
1447 trace_ath10k_tx_payload(ar
, msdu
->data
, msdu
->len
);
1449 sg_items
[0].transfer_id
= 0;
1450 sg_items
[0].transfer_context
= NULL
;
1451 sg_items
[0].vaddr
= &txbuf
->htc_hdr
;
1452 sg_items
[0].paddr
= txbuf_paddr
+
1453 sizeof(txbuf
->frags
);
1454 sg_items
[0].len
= sizeof(txbuf
->htc_hdr
) +
1455 sizeof(txbuf
->cmd_hdr
) +
1456 sizeof(txbuf
->cmd_tx
);
1458 sg_items
[1].transfer_id
= 0;
1459 sg_items
[1].transfer_context
= NULL
;
1460 sg_items
[1].vaddr
= msdu
->data
;
1461 sg_items
[1].paddr
= skb_cb
->paddr
;
1462 sg_items
[1].len
= prefetch_len
;
1464 res
= ath10k_hif_tx_sg(htt
->ar
,
1465 htt
->ar
->htc
.endpoint
[htt
->eid
].ul_pipe_id
,
1466 sg_items
, ARRAY_SIZE(sg_items
));
1468 goto err_unmap_msdu
;
1473 dma_unmap_single(dev
, skb_cb
->paddr
, msdu
->len
, DMA_TO_DEVICE
);
1475 ath10k_htt_tx_free_msdu_id(htt
, msdu_id
);

static int ath10k_htt_tx_64(struct ath10k_htt *htt,
			    enum ath10k_hw_txrx_mode txmode,
			    struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct ath10k_hif_sg_item sg_items[2];
	struct ath10k_htt_txbuf_64 *txbuf;
	struct htt_data_tx_desc_frag *frags;
	bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
	int prefetch_len;
	int res;
	u8 flags0 = 0;
	u16 msdu_id, flags1 = 0;
	u16 freq = 0;
	dma_addr_t frags_paddr = 0;
	dma_addr_t txbuf_paddr;
	struct htt_msdu_ext_desc_64 *ext_desc = NULL;
	struct htt_msdu_ext_desc_64 *ext_desc_t = NULL;

	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	if (res < 0)
		goto err;

	msdu_id = res;

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	txbuf = htt->txbuf.vaddr_txbuff_64 + msdu_id;
	txbuf_paddr = htt->txbuf.paddr +
		      (sizeof(struct ath10k_htt_txbuf_64) * msdu_id);

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	} else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
		   txmode == ATH10K_HW_TXRX_RAW &&
		   ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_msdu_id;
	}

	if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
		freq = ar->scan.roc_freq;

	switch (txmode) {
	case ATH10K_HW_TXRX_RAW:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		/* fall through */
	case ATH10K_HW_TXRX_ETHERNET:
		if (ar->hw_params.continuous_frag_desc) {
			ext_desc_t = htt->frag_desc.vaddr_desc_64;
			memset(&ext_desc_t[msdu_id], 0,
			       sizeof(struct htt_msdu_ext_desc_64));
			frags = (struct htt_data_tx_desc_frag *)
				&ext_desc_t[msdu_id].frags;
			ext_desc = &ext_desc_t[msdu_id];
			frags[0].tword_addr.paddr_lo =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].tword_addr.paddr_hi =
				__cpu_to_le16(upper_32_bits(skb_cb->paddr));
			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);

			frags_paddr = htt->frag_desc.paddr +
			   (sizeof(struct htt_msdu_ext_desc_64) * msdu_id);
		} else {
			frags = txbuf->frags;
			frags[0].tword_addr.paddr_lo =
						__cpu_to_le32(skb_cb->paddr);
			frags[0].tword_addr.paddr_hi =
				__cpu_to_le16(upper_32_bits(skb_cb->paddr));
			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
			frags[1].tword_addr.paddr_lo = 0;
			frags[1].tword_addr.paddr_hi = 0;
			frags[1].tword_addr.len_16 = 0;
		}
		flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		break;
	case ATH10K_HW_TXRX_MGMT:
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

		frags_paddr = skb_cb->paddr;
		break;
	}

	/* Normally all commands go through HTC which manages tx credits for
	 * each endpoint and notifies when tx is completed.
	 *
	 * HTT endpoint is creditless so there's no need to care about HTC
	 * flags. In that case it is trivial to fill the HTC header here.
	 *
	 * MSDU transmission is considered completed upon HTT event. This
	 * implies no relevant resources can be freed until after the event is
	 * received. That's why HTC tx completion handler itself is ignored by
	 * setting NULL to transfer_context for all sg items.
	 *
	 * There is simply no point in pushing HTT TX_FRM through HTC tx path
	 * as it's a waste of resources. By bypassing HTC it is possible to
	 * avoid extra memory allocations, compress data structures and thus
	 * improve performance.
	 */

	txbuf->htc_hdr.eid = htt->eid;
	txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
					   sizeof(txbuf->cmd_tx) +
					   prefetch_len);
	txbuf->htc_hdr.flags = 0;

	if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
		if (ar->hw_params.continuous_frag_desc) {
			memset(ext_desc->tso_flag, 0, sizeof(ext_desc->tso_flag));
			ext_desc->tso_flag[3] |=
				__cpu_to_le32(HTT_MSDU_CHECKSUM_ENABLE_64);
		}
	}

	/* Prevent firmware from sending up tx inspection requests. There's
	 * nothing ath10k can do with frames requested for inspection so force
	 * it to simply rely on a regular tx completion with discard status.
	 */
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

	txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	txbuf->cmd_tx.flags0 = flags0;
	txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
	txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
	txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);

	/* fill fragment descriptor */
	txbuf->cmd_tx.frags_paddr = __cpu_to_le64(frags_paddr);
	if (ath10k_mac_tx_frm_has_freq(ar)) {
		txbuf->cmd_tx.offchan_tx.peerid =
				__cpu_to_le16(HTT_INVALID_PEERID);
		txbuf->cmd_tx.offchan_tx.freq =
				__cpu_to_le16(freq);
	} else {
		txbuf->cmd_tx.peerid =
				__cpu_to_le32(HTT_INVALID_PEERID);
	}

	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %pad, msdu_paddr %pad vdev %hhu tid %hhu freq %hu\n",
		   flags0, flags1, msdu->len, msdu_id, &frags_paddr,
		   &skb_cb->paddr, vdev_id, tid, freq);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
			msdu->data, msdu->len);
	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
	trace_ath10k_tx_payload(ar, msdu->data, msdu->len);

	sg_items[0].transfer_id = 0;
	sg_items[0].transfer_context = NULL;
	sg_items[0].vaddr = &txbuf->htc_hdr;
	sg_items[0].paddr = txbuf_paddr +
			    sizeof(txbuf->frags);
	sg_items[0].len = sizeof(txbuf->htc_hdr) +
			  sizeof(txbuf->cmd_hdr) +
			  sizeof(txbuf->cmd_tx);

	sg_items[1].transfer_id = 0;
	sg_items[1].transfer_context = NULL;
	sg_items[1].vaddr = msdu->data;
	sg_items[1].paddr = skb_cb->paddr;
	sg_items[1].len = prefetch_len;

	res = ath10k_hif_tx_sg(htt->ar,
			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
			       sg_items, ARRAY_SIZE(sg_items));
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_msdu_id:
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
err:
	return res;
}

static const struct ath10k_htt_tx_ops htt_tx_ops_32 = {
	.htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_32,
	.htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
	.htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_32,
	.htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_32,
	.htt_tx = ath10k_htt_tx_32,
	.htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_32,
	.htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_32,
};

static const struct ath10k_htt_tx_ops htt_tx_ops_64 = {
	.htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_64,
	.htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_64,
	.htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_64,
	.htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_64,
	.htt_tx = ath10k_htt_tx_64,
	.htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_64,
	.htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_64,
};

static const struct ath10k_htt_tx_ops htt_tx_ops_hl = {
	.htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_hl,
	.htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
	.htt_tx = ath10k_htt_tx_hl,
};

void ath10k_htt_set_tx_ops(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;

	if (ar->dev_type == ATH10K_DEV_TYPE_HL)
		htt->tx_ops = &htt_tx_ops_hl;
	else if (ar->hw_params.target_64bit)
		htt->tx_ops = &htt_tx_ops_64;
	else
		htt->tx_ops = &htt_tx_ops_32;
}