// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 */

#include <linux/etherdevice.h>
#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"

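/* Encode a queue depth (in bytes) into the compact 8-bit HTT queue state
 * entry: a 2-bit exponent plus a 6-bit factor, so that approximately
 * count ~= factor << (7 + 3 * exp). Depths too large to represent
 * saturate to 0xff.
 */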
static u8 ath10k_htt_tx_txq_calc_size(size_t count)
{
	int exp;
	int factor;

	exp = 0;
	factor = count >> 7;

	while (factor >= 64 && exp < 4) {
		factor >>= 3;
		exp++;
	}

	if (exp == 4)
		return 0xff;

	if (count > 0)
		factor = max(1, factor);

	return SM(exp, HTT_TX_Q_STATE_ENTRY_EXP) |
	       SM(factor, HTT_TX_Q_STATE_ENTRY_FACTOR);
}

static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
				       struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;
	struct ath10k_sta *arsta;
	struct ath10k_vif *arvif = (void *)txq->vif->drv_priv;
	unsigned long frame_cnt;
	unsigned long byte_cnt;
	int idx;
	u32 bit;
	u16 peer_id;
	u8 tid;
	u8 count;

	lockdep_assert_held(&ar->htt.tx_lock);

	if (!ar->htt.tx_q_state.enabled)
		return;

	if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
		return;

	if (txq->sta) {
		arsta = (void *)txq->sta->drv_priv;
		peer_id = arsta->peer_id;
	} else {
		peer_id = arvif->peer_id;
	}

	tid = txq->tid;
	bit = BIT(peer_id % 32);
	idx = peer_id / 32;

	ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt);
	count = ath10k_htt_tx_txq_calc_size(byte_cnt);

	if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
	    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
		ath10k_warn(ar, "refusing to update txq for peer_id %hu tid %hhu due to out of bounds\n",
			    peer_id, tid);
		return;
	}

	ar->htt.tx_q_state.vaddr->count[tid][peer_id] = count;
	ar->htt.tx_q_state.vaddr->map[tid][idx] &= ~bit;
	ar->htt.tx_q_state.vaddr->map[tid][idx] |= count ? bit : 0;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update peer_id %hu tid %hhu count %hhu\n",
		   peer_id, tid, count);
}

static void __ath10k_htt_tx_txq_sync(struct ath10k *ar)
{
	u32 seq;
	size_t size;

	lockdep_assert_held(&ar->htt.tx_lock);

	if (!ar->htt.tx_q_state.enabled)
		return;

	if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
		return;

	seq = le32_to_cpu(ar->htt.tx_q_state.vaddr->seq);
	seq++;
	ar->htt.tx_q_state.vaddr->seq = cpu_to_le32(seq);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update commit seq %u\n",
		   seq);

	size = sizeof(*ar->htt.tx_q_state.vaddr);
	dma_sync_single_for_device(ar->dev,
				   ar->htt.tx_q_state.paddr,
				   size,
				   DMA_TO_DEVICE);
}

void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;

	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_recalc(hw, txq);
	spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_txq_sync(struct ath10k *ar)
{
	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_sync(ar);
	spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;

	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_recalc(hw, txq);
	__ath10k_htt_tx_txq_sync(ar);
	spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	htt->num_pending_tx--;
	if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
		ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
}

int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	if (htt->num_pending_tx >= htt->max_num_pending_tx)
		return -EBUSY;

	htt->num_pending_tx++;
	if (htt->num_pending_tx == htt->max_num_pending_tx)
		ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);

	return 0;
}

int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
				   bool is_presp)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	if (!is_mgmt || !ar->hw_params.max_probe_resp_desc_thres)
		return 0;

	if (is_presp &&
	    ar->hw_params.max_probe_resp_desc_thres < htt->num_pending_mgmt_tx)
		return -EBUSY;

	htt->num_pending_mgmt_tx++;

	return 0;
}

void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	if (!htt->ar->hw_params.max_probe_resp_desc_thres)
		return;

	htt->num_pending_mgmt_tx--;
}

int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
{
	struct ath10k *ar = htt->ar;
	int ret;

	spin_lock_bh(&htt->tx_lock);
	ret = idr_alloc(&htt->pending_tx, skb, 0,
			htt->max_num_pending_tx, GFP_ATOMIC);
	spin_unlock_bh(&htt->tx_lock);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);

	return ret;
}

void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);

	idr_remove(&htt->pending_tx, msdu_id);
}

static void ath10k_htt_tx_free_cont_txbuf_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!htt->txbuf.vaddr_txbuff_32)
		return;

	size = htt->txbuf.size;
	dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_32,
			  htt->txbuf.paddr);
	htt->txbuf.vaddr_txbuff_32 = NULL;
}

static int ath10k_htt_tx_alloc_cont_txbuf_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	size = htt->max_num_pending_tx *
			sizeof(struct ath10k_htt_txbuf_32);

	htt->txbuf.vaddr_txbuff_32 = dma_alloc_coherent(ar->dev, size,
							&htt->txbuf.paddr,
							GFP_KERNEL);
	if (!htt->txbuf.vaddr_txbuff_32)
		return -ENOMEM;

	htt->txbuf.size = size;

	return 0;
}

static void ath10k_htt_tx_free_cont_txbuf_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!htt->txbuf.vaddr_txbuff_64)
		return;

	size = htt->txbuf.size;
	dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_64,
			  htt->txbuf.paddr);
	htt->txbuf.vaddr_txbuff_64 = NULL;
}

static int ath10k_htt_tx_alloc_cont_txbuf_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	size = htt->max_num_pending_tx *
			sizeof(struct ath10k_htt_txbuf_64);

	htt->txbuf.vaddr_txbuff_64 = dma_alloc_coherent(ar->dev, size,
							&htt->txbuf.paddr,
							GFP_KERNEL);
	if (!htt->txbuf.vaddr_txbuff_64)
		return -ENOMEM;

	htt->txbuf.size = size;

	return 0;
}

static void ath10k_htt_tx_free_cont_frag_desc_32(struct ath10k_htt *htt)
{
	size_t size;

	if (!htt->frag_desc.vaddr_desc_32)
		return;

	size = htt->max_num_pending_tx *
			sizeof(struct htt_msdu_ext_desc);

	dma_free_coherent(htt->ar->dev,
			  size,
			  htt->frag_desc.vaddr_desc_32,
			  htt->frag_desc.paddr);

	htt->frag_desc.vaddr_desc_32 = NULL;
}

static int ath10k_htt_tx_alloc_cont_frag_desc_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	size = htt->max_num_pending_tx *
			sizeof(struct htt_msdu_ext_desc);
	htt->frag_desc.vaddr_desc_32 = dma_alloc_coherent(ar->dev, size,
							  &htt->frag_desc.paddr,
							  GFP_KERNEL);
	if (!htt->frag_desc.vaddr_desc_32) {
		ath10k_err(ar, "failed to alloc fragment desc memory\n");
		return -ENOMEM;
	}
	htt->frag_desc.size = size;

	return 0;
}

static void ath10k_htt_tx_free_cont_frag_desc_64(struct ath10k_htt *htt)
{
	size_t size;

	if (!htt->frag_desc.vaddr_desc_64)
		return;

	size = htt->max_num_pending_tx *
			sizeof(struct htt_msdu_ext_desc_64);

	dma_free_coherent(htt->ar->dev,
			  size,
			  htt->frag_desc.vaddr_desc_64,
			  htt->frag_desc.paddr);

	htt->frag_desc.vaddr_desc_64 = NULL;
}

static int ath10k_htt_tx_alloc_cont_frag_desc_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	size = htt->max_num_pending_tx *
			sizeof(struct htt_msdu_ext_desc_64);
	htt->frag_desc.vaddr_desc_64 = dma_alloc_coherent(ar->dev, size,
							  &htt->frag_desc.paddr,
							  GFP_KERNEL);
	if (!htt->frag_desc.vaddr_desc_64) {
		ath10k_err(ar, "failed to alloc fragment desc memory\n");
		return -ENOMEM;
	}
	htt->frag_desc.size = size;

	return 0;
}

static void ath10k_htt_tx_free_txq(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		      ar->running_fw->fw_file.fw_features))
		return;

	size = sizeof(*htt->tx_q_state.vaddr);

	dma_unmap_single(ar->dev, htt->tx_q_state.paddr, size, DMA_TO_DEVICE);
	kfree(htt->tx_q_state.vaddr);
}

static int ath10k_htt_tx_alloc_txq(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;
	int ret;

	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		      ar->running_fw->fw_file.fw_features))
		return 0;

	htt->tx_q_state.num_peers = HTT_TX_Q_STATE_NUM_PEERS;
	htt->tx_q_state.num_tids = HTT_TX_Q_STATE_NUM_TIDS;
	htt->tx_q_state.type = HTT_Q_DEPTH_TYPE_BYTES;

	size = sizeof(*htt->tx_q_state.vaddr);
	htt->tx_q_state.vaddr = kzalloc(size, GFP_KERNEL);
	if (!htt->tx_q_state.vaddr)
		return -ENOMEM;

	htt->tx_q_state.paddr = dma_map_single(ar->dev, htt->tx_q_state.vaddr,
					       size, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, htt->tx_q_state.paddr);
	if (ret) {
		ath10k_warn(ar, "failed to dma map tx_q_state: %d\n", ret);
		kfree(htt->tx_q_state.vaddr);
		return -EIO;
	}

	return 0;
}

static void ath10k_htt_tx_free_txdone_fifo(struct ath10k_htt *htt)
{
	WARN_ON(!kfifo_is_empty(&htt->txdone_fifo));
	kfifo_free(&htt->txdone_fifo);
}

static int ath10k_htt_tx_alloc_txdone_fifo(struct ath10k_htt *htt)
{
	size_t size;
	int ret;

	size = roundup_pow_of_two(htt->max_num_pending_tx);
	ret = kfifo_alloc(&htt->txdone_fifo, size, GFP_KERNEL);

	return ret;
}

static int ath10k_htt_tx_alloc_buf(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int ret;

	ret = ath10k_htt_alloc_txbuff(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc cont tx buffer: %d\n", ret);
		return ret;
	}

	ret = ath10k_htt_alloc_frag_desc(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret);
		goto free_txbuf;
	}

	ret = ath10k_htt_tx_alloc_txq(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc txq: %d\n", ret);
		goto free_frag_desc;
	}

	ret = ath10k_htt_tx_alloc_txdone_fifo(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc txdone fifo: %d\n", ret);
		goto free_txq;
	}

	return 0;

free_txq:
	ath10k_htt_tx_free_txq(htt);

free_frag_desc:
	ath10k_htt_free_frag_desc(htt);

free_txbuf:
	ath10k_htt_free_txbuff(htt);

	return ret;
}

int ath10k_htt_tx_start(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
		   htt->max_num_pending_tx);

	spin_lock_init(&htt->tx_lock);
	idr_init(&htt->pending_tx);

	if (htt->tx_mem_allocated)
		return 0;

	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		return 0;

	ret = ath10k_htt_tx_alloc_buf(htt);
	if (ret)
		goto free_idr_pending_tx;

	htt->tx_mem_allocated = true;

	return 0;

free_idr_pending_tx:
	idr_destroy(&htt->pending_tx);

	return ret;
}

static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
{
	struct ath10k *ar = ctx;
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {0};

	ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);

	tx_done.msdu_id = msdu_id;
	tx_done.status = HTT_TX_COMPL_STATE_DISCARD;

	ath10k_txrx_tx_unref(htt, &tx_done);

	return 0;
}

void ath10k_htt_tx_destroy(struct ath10k_htt *htt)
{
	if (!htt->tx_mem_allocated)
		return;

	ath10k_htt_free_txbuff(htt);
	ath10k_htt_tx_free_txq(htt);
	ath10k_htt_free_frag_desc(htt);
	ath10k_htt_tx_free_txdone_fifo(htt);
	htt->tx_mem_allocated = false;
}

void ath10k_htt_tx_stop(struct ath10k_htt *htt)
{
	idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
	idr_destroy(&htt->pending_tx);
}

void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
	ath10k_htt_tx_stop(htt);
	ath10k_htt_tx_destroy(htt);
}

void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_hif_tx_complete);

int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0;
	int ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->ver_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u32 mask, u32 reset_mask,
			     u64 cookie)
{
	struct ath10k *ar = htt->ar;
	struct htt_stats_req *req;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0, ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->stats_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;

	req = &cmd->stats_req;

	memset(req, 0, sizeof(*req));

	/* currently we support only max 24 bit masks so no need to worry
	 * about endian support
	 */
	memcpy(req->upload_types, &mask, 3);
	memcpy(req->reset_types, &reset_mask, 3);
	req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
	req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
	req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send htt type stats request: %d",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static int ath10k_htt_send_frag_desc_bank_cfg_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_frag_desc_bank_cfg32 *cfg;
	int ret, size;
	u8 info;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	if (!htt->frag_desc.paddr) {
		ath10k_warn(ar, "invalid frag desc memory\n");
		return -EINVAL;
	}

	size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg32);
	skb = ath10k_htc_alloc_skb(ar, size);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, size);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;

	info = 0;
	info |= SM(htt->tx_q_state.type,
		   HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);

	if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		     ar->running_fw->fw_file.fw_features))
		info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;

	cfg = &cmd->frag_desc_bank_cfg32;
	cfg->info = info;
	cfg->num_banks = 1;
	cfg->desc_size = sizeof(struct htt_msdu_ext_desc);
	cfg->bank_base_addrs[0] = __cpu_to_le32(htt->frag_desc.paddr);
	cfg->bank_id[0].bank_min_id = 0;
	cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
						    1);

	cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
	cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
	cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
	cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
	cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static int ath10k_htt_send_frag_desc_bank_cfg_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_frag_desc_bank_cfg64 *cfg;
	int ret, size;
	u8 info;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	if (!htt->frag_desc.paddr) {
		ath10k_warn(ar, "invalid frag desc memory\n");
		return -EINVAL;
	}

	size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg64);
	skb = ath10k_htc_alloc_skb(ar, size);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, size);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;

	info = 0;
	info |= SM(htt->tx_q_state.type,
		   HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);

	if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		     ar->running_fw->fw_file.fw_features))
		info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;

	cfg = &cmd->frag_desc_bank_cfg64;
	cfg->info = info;
	cfg->num_banks = 1;
	cfg->desc_size = sizeof(struct htt_msdu_ext_desc_64);
	cfg->bank_base_addrs[0] = __cpu_to_le64(htt->frag_desc.paddr);
	cfg->bank_id[0].bank_min_id = 0;
	cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
						    1);

	cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
	cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
	cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
	cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
	cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static void ath10k_htt_fill_rx_desc_offset_32(void *rx_ring)
{
	struct htt_rx_ring_setup_ring32 *ring =
			(struct htt_rx_ring_setup_ring32 *)rx_ring;

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
#undef desc_offset
}

static void ath10k_htt_fill_rx_desc_offset_64(void *rx_ring)
{
	struct htt_rx_ring_setup_ring64 *ring =
			(struct htt_rx_ring_setup_ring64 *)rx_ring;

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
#undef desc_offset
}

static int ath10k_htt_send_rx_ring_cfg_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring32 *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/*
	 * the HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup_32.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup_32.hdr.num_rings = 1;

	/* FIXME: do we need all of this? */
	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr =
		__cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

	ath10k_htt_fill_rx_desc_offset_32(ring);
	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static int ath10k_htt_send_rx_ring_cfg_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring64 *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/* HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_64.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup_64.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup_64.hdr.num_rings = 1;

	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr = __cpu_to_le64(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le64(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

	ath10k_htt_fill_rx_desc_offset_64(ring);
	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static int ath10k_htt_send_rx_ring_cfg_hl(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring32 *ring;
	const int num_rx_ring = 1;
	u16 flags;
	int len;
	int ret;

	/*
	 * the HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup_32.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup_32.hdr.num_rings = 1;

	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;

	memset(ring, 0, sizeof(*ring));
	ring->rx_ring_len = __cpu_to_le16(HTT_RX_RING_SIZE_MIN);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static int ath10k_htt_h2t_aggr_cfg_msg_32(struct ath10k_htt *htt,
					  u8 max_subfrms_ampdu,
					  u8 max_subfrms_amsdu)
{
	struct ath10k *ar = htt->ar;
	struct htt_aggr_conf *aggr_conf;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len;
	int ret;

	/* Firmware defaults are: amsdu = 3 and ampdu = 64 */

	if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
		return -EINVAL;

	if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
		return -EINVAL;

	len = sizeof(cmd->hdr);
	len += sizeof(cmd->aggr_conf);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

	aggr_conf = &cmd->aggr_conf;
	aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
	aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
		   aggr_conf->max_num_amsdu_subframes,
		   aggr_conf->max_num_ampdu_subframes);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static int ath10k_htt_h2t_aggr_cfg_msg_v2(struct ath10k_htt *htt,
					  u8 max_subfrms_ampdu,
					  u8 max_subfrms_amsdu)
{
	struct ath10k *ar = htt->ar;
	struct htt_aggr_conf_v2 *aggr_conf;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len;
	int ret;

	/* Firmware defaults are: amsdu = 3 and ampdu = 64 */

	if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
		return -EINVAL;

	if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
		return -EINVAL;

	len = sizeof(cmd->hdr);
	len += sizeof(cmd->aggr_conf_v2);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

	aggr_conf = &cmd->aggr_conf_v2;
	aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
	aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
		   aggr_conf->max_num_amsdu_subframes,
		   aggr_conf->max_num_ampdu_subframes);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
			     __le32 token,
			     __le16 fetch_seq_num,
			     struct htt_tx_fetch_record *records,
			     size_t num_records)
{
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	const u16 resp_id = 0;
	int len = 0;
	int ret;

	/* Response IDs are echo-ed back only for host driver convienence
	 * purposes. They aren't used for anything in the driver yet so use 0.
	 */

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->tx_fetch_resp);
	len += sizeof(cmd->tx_fetch_resp.records[0]) * num_records;

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FETCH_RESP;
	cmd->tx_fetch_resp.resp_id = cpu_to_le16(resp_id);
	cmd->tx_fetch_resp.fetch_seq_num = fetch_seq_num;
	cmd->tx_fetch_resp.num_records = cpu_to_le16(num_records);
	cmd->tx_fetch_resp.token = token;

	memcpy(cmd->tx_fetch_resp.records, records,
	       sizeof(records[0]) * num_records);

	ret = ath10k_htc_send(&ar->htc, ar->htt.eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to submit htc command: %d\n", ret);
		goto err_free_skb;
	}

	return 0;

err_free_skb:
	dev_kfree_skb_any(skb);

	return ret;
}

static u8 ath10k_htt_tx_get_vdev_id(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
	struct ath10k_vif *arvif;

	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
		return ar->scan.vdev_id;
	} else if (cb->vif) {
		arvif = (void *)cb->vif->drv_priv;
		return arvif->vdev_id;
	} else if (ar->monitor_started) {
		return ar->monitor_vdev_id;
	} else {
		return 0;
	}
}

static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);

	if (!is_eth && ieee80211_is_mgmt(hdr->frame_control))
		return HTT_DATA_TX_EXT_TID_MGMT;
	else if (cb->flags & ATH10K_SKB_F_QOS)
		return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	else
		return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
}

int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct sk_buff *txdesc = NULL;
	struct htt_cmd *cmd;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	int len = 0;
	int msdu_id = -1;
	int res;
	const u8 *peer_addr;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->mgmt_tx);

	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	if (res < 0)
		goto err;

	msdu_id = res;

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		peer_addr = hdr->addr1;
		if (is_multicast_ether_addr(peer_addr)) {
			skb_put(msdu, sizeof(struct ieee80211_mmie_16));
		} else {
			if (skb_cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP ||
			    skb_cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP_256)
				skb_put(msdu, IEEE80211_GCMP_MIC_LEN);
			else
				skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
		}
	}

	txdesc = ath10k_htc_alloc_skb(ar, len);
	if (!txdesc) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_txdesc;
	}

	skb_put(txdesc, len);
	cmd = (struct htt_cmd *)txdesc->data;
	memset(cmd, 0, len);

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
	cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
	cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
	cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
	cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
	memcpy(cmd->mgmt_tx.hdr, msdu->data,
	       min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
		dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
	dev_kfree_skb_any(txdesc);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err:
	return res;
}

#define HTT_TX_HL_NEEDED_HEADROOM \
	(unsigned int)(sizeof(struct htt_cmd_hdr) + \
	sizeof(struct htt_data_tx_desc) + \
	sizeof(struct ath10k_htc_hdr))

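/* High-latency (HL) tx builds the HTC header, HTT command header and tx
 * descriptor in the skb headroom itself and sends the whole frame through
 * HTC, instead of the scatter-gather DMA path used by the low-latency
 * variants further below.
 */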
static int ath10k_htt_tx_hl(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
			    struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	int res, data_len;
	struct htt_cmd_hdr *cmd_hdr;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct htt_data_tx_desc *tx_desc;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct sk_buff *tmp_skb;
	bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
	u8 flags0 = 0;
	u16 flags1 = 0;
	u16 msdu_id = 0;

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	}

	data_len = msdu->len;

	switch (txmode) {
	case ATH10K_HW_TXRX_RAW:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		/* fall through */
	case ATH10K_HW_TXRX_ETHERNET:
		flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		break;
	case ATH10K_HW_TXRX_MGMT:
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		break;
	}

	if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
	}

	/* Prepend the HTT header and TX desc struct to the data message
	 * and realloc the skb if it does not have enough headroom.
	 */
	if (skb_headroom(msdu) < HTT_TX_HL_NEEDED_HEADROOM) {
		tmp_skb = msdu;

		ath10k_dbg(htt->ar, ATH10K_DBG_HTT,
			   "Not enough headroom in skb. Current headroom: %u, needed: %u. Reallocating...\n",
			   skb_headroom(msdu), HTT_TX_HL_NEEDED_HEADROOM);
		msdu = skb_realloc_headroom(msdu, HTT_TX_HL_NEEDED_HEADROOM);
		kfree_skb(tmp_skb);
		if (!msdu) {
			ath10k_warn(htt->ar, "htt hl tx: Unable to realloc skb!\n");
			res = -ENOMEM;
			goto out;
		}
	}

	if (ar->bus_param.hl_msdu_ids) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
		res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
		if (res < 0) {
			ath10k_err(ar, "msdu_id allocation failed %d\n", res);
			goto out;
		}
		msdu_id = res;
	}

	/* As msdu is freed by mac80211 (in ieee80211_tx_status()) and by
	 * ath10k (in ath10k_htt_htc_tx_complete()) we have to increase
	 * reference by one to avoid a use-after-free case and a double
	 * free.
	 */
	skb_get(msdu);

	skb_push(msdu, sizeof(*cmd_hdr));
	skb_push(msdu, sizeof(*tx_desc));
	cmd_hdr = (struct htt_cmd_hdr *)msdu->data;
	tx_desc = (struct htt_data_tx_desc *)(msdu->data + sizeof(*cmd_hdr));

	cmd_hdr->msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	tx_desc->flags0 = flags0;
	tx_desc->flags1 = __cpu_to_le16(flags1);
	tx_desc->len = __cpu_to_le16(data_len);
	tx_desc->id = __cpu_to_le16(msdu_id);
	tx_desc->frags_paddr = 0; /* always zero */
	/* Initialize peer_id to INVALID_PEER because this is NOT
	 * Reinjection path
	 */
	tx_desc->peerid = __cpu_to_le32(HTT_INVALID_PEERID);

	res = ath10k_htc_send(&htt->ar->htc, htt->eid, msdu);

out:
	return res;
}

static int ath10k_htt_tx_32(struct ath10k_htt *htt,
			    enum ath10k_hw_txrx_mode txmode,
			    struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct ath10k_hif_sg_item sg_items[2];
	struct ath10k_htt_txbuf_32 *txbuf;
	struct htt_data_tx_desc_frag *frags;
	bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
	int prefetch_len;
	int res;
	u8 flags0 = 0;
	u16 msdu_id, flags1 = 0;
	u16 freq = 0;
	u32 frags_paddr = 0;
	u32 txbuf_paddr;
	struct htt_msdu_ext_desc *ext_desc = NULL;
	struct htt_msdu_ext_desc *ext_desc_t = NULL;

	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	if (res < 0)
		goto err;

	msdu_id = res;

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	txbuf = htt->txbuf.vaddr_txbuff_32 + msdu_id;
	txbuf_paddr = htt->txbuf.paddr +
		      (sizeof(struct ath10k_htt_txbuf_32) * msdu_id);

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	} else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
		   txmode == ATH10K_HW_TXRX_RAW &&
		   ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_msdu_id;
	}

	if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
		freq = ar->scan.roc_freq;

	switch (txmode) {
	case ATH10K_HW_TXRX_RAW:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		/* fall through */
	case ATH10K_HW_TXRX_ETHERNET:
		if (ar->hw_params.continuous_frag_desc) {
			ext_desc_t = htt->frag_desc.vaddr_desc_32;
			memset(&ext_desc_t[msdu_id], 0,
			       sizeof(struct htt_msdu_ext_desc));
			frags = (struct htt_data_tx_desc_frag *)
				&ext_desc_t[msdu_id].frags;
			ext_desc = &ext_desc_t[msdu_id];
			frags[0].tword_addr.paddr_lo =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].tword_addr.paddr_hi = 0;
			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);

			frags_paddr = htt->frag_desc.paddr +
				(sizeof(struct htt_msdu_ext_desc) * msdu_id);
		} else {
			frags = txbuf->frags;
			frags[0].dword_addr.paddr =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
			frags[1].dword_addr.paddr = 0;
			frags[1].dword_addr.len = 0;

			frags_paddr = txbuf_paddr;
		}
		flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		break;
	case ATH10K_HW_TXRX_MGMT:
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

		frags_paddr = skb_cb->paddr;
		break;
	}

	/* Normally all commands go through HTC which manages tx credits for
	 * each endpoint and notifies when tx is completed.
	 *
	 * HTT endpoint is creditless so there's no need to care about HTC
	 * flags. In that case it is trivial to fill the HTC header here.
	 *
	 * MSDU transmission is considered completed upon HTT event. This
	 * implies no relevant resources can be freed until after the event is
	 * received. That's why HTC tx completion handler itself is ignored by
	 * setting NULL to transfer_context for all sg items.
	 *
	 * There is simply no point in pushing HTT TX_FRM through HTC tx path
	 * as it's a waste of resources. By bypassing HTC it is possible to
	 * avoid extra memory allocations, compress data structures and thus
	 * improve performance.
	 */

	txbuf->htc_hdr.eid = htt->eid;
	txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
					   sizeof(txbuf->cmd_tx) +
					   prefetch_len);
	txbuf->htc_hdr.flags = 0;

	if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
		if (ar->hw_params.continuous_frag_desc)
			ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
	}

	/* Prevent firmware from sending up tx inspection requests. There's
	 * nothing ath10k can do with frames requested for inspection so force
	 * it to simply rely a regular tx completion with discard status.
	 */
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

	txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	txbuf->cmd_tx.flags0 = flags0;
	txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
	txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
	txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
	txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
	if (ath10k_mac_tx_frm_has_freq(ar)) {
		txbuf->cmd_tx.offchan_tx.peerid =
				__cpu_to_le16(HTT_INVALID_PEERID);
		txbuf->cmd_tx.offchan_tx.freq =
				__cpu_to_le16(freq);
	} else {
		txbuf->cmd_tx.peerid =
				__cpu_to_le32(HTT_INVALID_PEERID);
	}

	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %pad, msdu_paddr %pad vdev %hhu tid %hhu freq %hu\n",
		   flags0, flags1, msdu->len, msdu_id, &frags_paddr,
		   &skb_cb->paddr, vdev_id, tid, freq);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
			msdu->data, msdu->len);
	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
	trace_ath10k_tx_payload(ar, msdu->data, msdu->len);

	sg_items[0].transfer_id = 0;
	sg_items[0].transfer_context = NULL;
	sg_items[0].vaddr = &txbuf->htc_hdr;
	sg_items[0].paddr = txbuf_paddr +
			    sizeof(txbuf->frags);
	sg_items[0].len = sizeof(txbuf->htc_hdr) +
			  sizeof(txbuf->cmd_hdr) +
			  sizeof(txbuf->cmd_tx);

	sg_items[1].transfer_id = 0;
	sg_items[1].transfer_context = NULL;
	sg_items[1].vaddr = msdu->data;
	sg_items[1].paddr = skb_cb->paddr;
	sg_items[1].len = prefetch_len;

	res = ath10k_hif_tx_sg(htt->ar,
			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
			       sg_items, ARRAY_SIZE(sg_items));
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_msdu_id:
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
err:
	return res;
}

static int ath10k_htt_tx_64(struct ath10k_htt *htt,
			    enum ath10k_hw_txrx_mode txmode,
			    struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct ath10k_hif_sg_item sg_items[2];
	struct ath10k_htt_txbuf_64 *txbuf;
	struct htt_data_tx_desc_frag *frags;
	bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
	int prefetch_len;
	int res;
	u8 flags0 = 0;
	u16 msdu_id, flags1 = 0;
	u16 freq = 0;
	dma_addr_t frags_paddr = 0;
	dma_addr_t txbuf_paddr;
	struct htt_msdu_ext_desc_64 *ext_desc = NULL;
	struct htt_msdu_ext_desc_64 *ext_desc_t = NULL;

	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	if (res < 0)
		goto err;

	msdu_id = res;

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	txbuf = htt->txbuf.vaddr_txbuff_64 + msdu_id;
	txbuf_paddr = htt->txbuf.paddr +
		      (sizeof(struct ath10k_htt_txbuf_64) * msdu_id);

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	} else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
		   txmode == ATH10K_HW_TXRX_RAW &&
		   ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_msdu_id;
	}

	if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
		freq = ar->scan.roc_freq;

	switch (txmode) {
	case ATH10K_HW_TXRX_RAW:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		/* fall through */
	case ATH10K_HW_TXRX_ETHERNET:
		if (ar->hw_params.continuous_frag_desc) {
			ext_desc_t = htt->frag_desc.vaddr_desc_64;
			memset(&ext_desc_t[msdu_id], 0,
			       sizeof(struct htt_msdu_ext_desc_64));
			frags = (struct htt_data_tx_desc_frag *)
				&ext_desc_t[msdu_id].frags;
			ext_desc = &ext_desc_t[msdu_id];
			frags[0].tword_addr.paddr_lo =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].tword_addr.paddr_hi =
				__cpu_to_le16(upper_32_bits(skb_cb->paddr));
			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);

			frags_paddr = htt->frag_desc.paddr +
			   (sizeof(struct htt_msdu_ext_desc_64) * msdu_id);
		} else {
			frags = txbuf->frags;
			frags[0].tword_addr.paddr_lo =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].tword_addr.paddr_hi =
				__cpu_to_le16(upper_32_bits(skb_cb->paddr));
			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
			frags[1].tword_addr.paddr_lo = 0;
			frags[1].tword_addr.paddr_hi = 0;
			frags[1].tword_addr.len_16 = 0;
		}
		flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		break;
	case ATH10K_HW_TXRX_MGMT:
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

		frags_paddr = skb_cb->paddr;
		break;
	}

	/* Normally all commands go through HTC which manages tx credits for
	 * each endpoint and notifies when tx is completed.
	 *
	 * HTT endpoint is creditless so there's no need to care about HTC
	 * flags. In that case it is trivial to fill the HTC header here.
	 *
	 * MSDU transmission is considered completed upon HTT event. This
	 * implies no relevant resources can be freed until after the event is
	 * received. That's why HTC tx completion handler itself is ignored by
	 * setting NULL to transfer_context for all sg items.
	 *
	 * There is simply no point in pushing HTT TX_FRM through HTC tx path
	 * as it's a waste of resources. By bypassing HTC it is possible to
	 * avoid extra memory allocations, compress data structures and thus
	 * improve performance.
	 */

	txbuf->htc_hdr.eid = htt->eid;
	txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
					   sizeof(txbuf->cmd_tx) +
					   prefetch_len);
	txbuf->htc_hdr.flags = 0;

	if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
		if (ar->hw_params.continuous_frag_desc) {
			memset(ext_desc->tso_flag, 0, sizeof(ext_desc->tso_flag));
			ext_desc->tso_flag[3] |=
				__cpu_to_le32(HTT_MSDU_CHECKSUM_ENABLE_64);
		}
	}

	/* Prevent firmware from sending up tx inspection requests. There's
	 * nothing ath10k can do with frames requested for inspection so force
	 * it to simply rely a regular tx completion with discard status.
	 */
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

	txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	txbuf->cmd_tx.flags0 = flags0;
	txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
	txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
	txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);

	/* fill fragment descriptor */
	txbuf->cmd_tx.frags_paddr = __cpu_to_le64(frags_paddr);
	if (ath10k_mac_tx_frm_has_freq(ar)) {
		txbuf->cmd_tx.offchan_tx.peerid =
				__cpu_to_le16(HTT_INVALID_PEERID);
		txbuf->cmd_tx.offchan_tx.freq =
				__cpu_to_le16(freq);
	} else {
		txbuf->cmd_tx.peerid =
				__cpu_to_le32(HTT_INVALID_PEERID);
	}

	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %pad, msdu_paddr %pad vdev %hhu tid %hhu freq %hu\n",
		   flags0, flags1, msdu->len, msdu_id, &frags_paddr,
		   &skb_cb->paddr, vdev_id, tid, freq);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
			msdu->data, msdu->len);
	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
	trace_ath10k_tx_payload(ar, msdu->data, msdu->len);

	sg_items[0].transfer_id = 0;
	sg_items[0].transfer_context = NULL;
	sg_items[0].vaddr = &txbuf->htc_hdr;
	sg_items[0].paddr = txbuf_paddr +
			    sizeof(txbuf->frags);
	sg_items[0].len = sizeof(txbuf->htc_hdr) +
			  sizeof(txbuf->cmd_hdr) +
			  sizeof(txbuf->cmd_tx);

	sg_items[1].transfer_id = 0;
	sg_items[1].transfer_context = NULL;
	sg_items[1].vaddr = msdu->data;
	sg_items[1].paddr = skb_cb->paddr;
	sg_items[1].len = prefetch_len;

	res = ath10k_hif_tx_sg(htt->ar,
			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
			       sg_items, ARRAY_SIZE(sg_items));
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_msdu_id:
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
err:
	return res;
}

static const struct ath10k_htt_tx_ops htt_tx_ops_32 = {
	.htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_32,
	.htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
	.htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_32,
	.htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_32,
	.htt_tx = ath10k_htt_tx_32,
	.htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_32,
	.htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_32,
	.htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_32,
};

static const struct ath10k_htt_tx_ops htt_tx_ops_64 = {
	.htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_64,
	.htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_64,
	.htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_64,
	.htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_64,
	.htt_tx = ath10k_htt_tx_64,
	.htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_64,
	.htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_64,
	.htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_v2,
};

static const struct ath10k_htt_tx_ops htt_tx_ops_hl = {
	.htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_hl,
	.htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
	.htt_tx = ath10k_htt_tx_hl,
	.htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_32,
};

void ath10k_htt_set_tx_ops(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;

	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		htt->tx_ops = &htt_tx_ops_hl;
	else if (ar->hw_params.target_64bit)
		htt->tx_ops = &htt_tx_ops_64;
	else
		htt->tx_ops = &htt_tx_ops_32;
}