// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 */

#include <linux/etherdevice.h>
#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"

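/* Encode a queue depth in bytes into the compact exponent/factor format
 * used in the tx queue state records shared with firmware. The encoded
 * value approximates factor * 128 * 8^exp bytes; e.g. a backlog of
 * 100000 bytes becomes factor 12, exp 2 (~98304 bytes). Depths too
 * large for the encoding saturate to 0xff.
 */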
static u8 ath10k_htt_tx_txq_calc_size(size_t count)
{
	int exp;
	int factor;

	exp = 0;
	factor = count >> 7;

	while (factor >= 64 && exp < 4) {
		factor >>= 3;
		exp++;
	}

	if (exp == 4)
		return 0xff;

	if (factor >= 64)
		factor = 63;

	factor = max(1, factor);

	return SM(exp, HTT_TX_Q_STATE_ENTRY_EXP) |
	       SM(factor, HTT_TX_Q_STATE_ENTRY_FACTOR);
}

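/* Recompute the queue depth record for one ieee80211_txq and update the
 * matching bit in the per-TID peer bitmap. Only meaningful when the
 * firmware is in push-pull mode, where it fetches frames based on these
 * shared records.
 */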
static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
				       struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;
	struct ath10k_sta *arsta;
	struct ath10k_vif *arvif = (void *)txq->vif->drv_priv;
	unsigned long frame_cnt;
	unsigned long byte_cnt;
	int idx;
	u32 bit;
	u16 peer_id;
	u8 tid;
	u8 count;

	lockdep_assert_held(&ar->htt.tx_lock);

	if (!ar->htt.tx_q_state.enabled)
		return;

	if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
		return;

	if (txq->sta) {
		arsta = (void *)txq->sta->drv_priv;
		peer_id = arsta->peer_id;
	} else {
		peer_id = arvif->peer_id;
	}

	tid = txq->tid;
	bit = BIT(peer_id % 32);
	idx = peer_id / 32;

	ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt);
	count = ath10k_htt_tx_txq_calc_size(byte_cnt);

	if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
	    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
		ath10k_warn(ar, "refusing to update txq for peer_id %hu tid %hhu due to out of bounds\n",
			    peer_id, tid);
		return;
	}

	ar->htt.tx_q_state.vaddr->count[tid][peer_id] = count;
	ar->htt.tx_q_state.vaddr->map[tid][idx] &= ~bit;
	ar->htt.tx_q_state.vaddr->map[tid][idx] |= count ? bit : 0;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update peer_id %hu tid %hhu count %hhu\n",
		   peer_id, tid, count);
}

static void __ath10k_htt_tx_txq_sync(struct ath10k *ar)
{
	u32 seq;
	size_t size;

	lockdep_assert_held(&ar->htt.tx_lock);

	if (!ar->htt.tx_q_state.enabled)
		return;

	if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
		return;

	seq = le32_to_cpu(ar->htt.tx_q_state.vaddr->seq);
	seq++;
	ar->htt.tx_q_state.vaddr->seq = cpu_to_le32(seq);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update commit seq %u\n",
		   seq);

	size = sizeof(*ar->htt.tx_q_state.vaddr);
	dma_sync_single_for_device(ar->dev,
				   ar->htt.tx_q_state.paddr,
				   size,
				   DMA_TO_DEVICE);
}

void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;

	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_recalc(hw, txq);
	spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_txq_sync(struct ath10k *ar)
{
	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_sync(ar);
	spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;

	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_recalc(hw, txq);
	__ath10k_htt_tx_txq_sync(ar);
	spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	htt->num_pending_tx--;
	if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
		ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
}

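/* Account one more in-flight frame; once the limit is reached the
 * mac80211 queues are stopped until ath10k_htt_tx_dec_pending() drops
 * the count back below it.
 */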
int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	if (htt->num_pending_tx >= htt->max_num_pending_tx)
		return -EBUSY;

	htt->num_pending_tx++;
	if (htt->num_pending_tx == htt->max_num_pending_tx)
		ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);

	return 0;
}

int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
				   bool is_presp)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	if (!is_mgmt || !ar->hw_params.max_probe_resp_desc_thres)
		return 0;

	if (is_presp &&
	    ar->hw_params.max_probe_resp_desc_thres < htt->num_pending_mgmt_tx)
		return -EBUSY;

	htt->num_pending_mgmt_tx++;

	return 0;
}

void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	if (!htt->ar->hw_params.max_probe_resp_desc_thres)
		return;

	htt->num_pending_mgmt_tx--;
}

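/* Allocate an msdu_id from the pending_tx IDR and tie it to the skb.
 * Firmware echoes the id back in tx completions so the skb can be
 * looked up and unreferenced.
 */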
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
{
	struct ath10k *ar = htt->ar;
	int ret;

	spin_lock_bh(&htt->tx_lock);
	ret = idr_alloc(&htt->pending_tx, skb, 0,
			htt->max_num_pending_tx, GFP_ATOMIC);
	spin_unlock_bh(&htt->tx_lock);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);

	return ret;
}

void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);

	idr_remove(&htt->pending_tx, msdu_id);
}

static void ath10k_htt_tx_free_cont_txbuf_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!htt->txbuf.vaddr_txbuff_32)
		return;

	size = htt->txbuf.size;
	dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_32,
			  htt->txbuf.paddr);
	htt->txbuf.vaddr_txbuff_32 = NULL;
}

static int ath10k_htt_tx_alloc_cont_txbuf_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	size = htt->max_num_pending_tx *
	       sizeof(struct ath10k_htt_txbuf_32);

	htt->txbuf.vaddr_txbuff_32 = dma_alloc_coherent(ar->dev, size,
							&htt->txbuf.paddr,
							GFP_KERNEL);
	if (!htt->txbuf.vaddr_txbuff_32)
		return -ENOMEM;

	htt->txbuf.size = size;

	return 0;
}

static void ath10k_htt_tx_free_cont_txbuf_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!htt->txbuf.vaddr_txbuff_64)
		return;

	size = htt->txbuf.size;
	dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_64,
			  htt->txbuf.paddr);
	htt->txbuf.vaddr_txbuff_64 = NULL;
}

static int ath10k_htt_tx_alloc_cont_txbuf_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	size = htt->max_num_pending_tx *
	       sizeof(struct ath10k_htt_txbuf_64);

	htt->txbuf.vaddr_txbuff_64 = dma_alloc_coherent(ar->dev, size,
							&htt->txbuf.paddr,
							GFP_KERNEL);
	if (!htt->txbuf.vaddr_txbuff_64)
		return -ENOMEM;

	htt->txbuf.size = size;

	return 0;
}

static void ath10k_htt_tx_free_cont_frag_desc_32(struct ath10k_htt *htt)
{
	size_t size;

	if (!htt->frag_desc.vaddr_desc_32)
		return;

	size = htt->max_num_pending_tx *
	       sizeof(struct htt_msdu_ext_desc);

	dma_free_coherent(htt->ar->dev,
			  size,
			  htt->frag_desc.vaddr_desc_32,
			  htt->frag_desc.paddr);

	htt->frag_desc.vaddr_desc_32 = NULL;
}

static int ath10k_htt_tx_alloc_cont_frag_desc_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	size = htt->max_num_pending_tx *
	       sizeof(struct htt_msdu_ext_desc);
	htt->frag_desc.vaddr_desc_32 = dma_alloc_coherent(ar->dev, size,
							  &htt->frag_desc.paddr,
							  GFP_KERNEL);
	if (!htt->frag_desc.vaddr_desc_32) {
		ath10k_err(ar, "failed to alloc fragment desc memory\n");
		return -ENOMEM;
	}
	htt->frag_desc.size = size;

	return 0;
}

static void ath10k_htt_tx_free_cont_frag_desc_64(struct ath10k_htt *htt)
{
	size_t size;

	if (!htt->frag_desc.vaddr_desc_64)
		return;

	size = htt->max_num_pending_tx *
	       sizeof(struct htt_msdu_ext_desc_64);

	dma_free_coherent(htt->ar->dev,
			  size,
			  htt->frag_desc.vaddr_desc_64,
			  htt->frag_desc.paddr);

	htt->frag_desc.vaddr_desc_64 = NULL;
}

static int ath10k_htt_tx_alloc_cont_frag_desc_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	size = htt->max_num_pending_tx *
	       sizeof(struct htt_msdu_ext_desc_64);
	htt->frag_desc.vaddr_desc_64 = dma_alloc_coherent(ar->dev, size,
							  &htt->frag_desc.paddr,
							  GFP_KERNEL);
	if (!htt->frag_desc.vaddr_desc_64) {
		ath10k_err(ar, "failed to alloc fragment desc memory\n");
		return -ENOMEM;
	}
	htt->frag_desc.size = size;

	return 0;
}

static void ath10k_htt_tx_free_txq(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		      ar->running_fw->fw_file.fw_features))
		return;

	size = sizeof(*htt->tx_q_state.vaddr);

	dma_unmap_single(ar->dev, htt->tx_q_state.paddr, size, DMA_TO_DEVICE);
	kfree(htt->tx_q_state.vaddr);
}

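/* Allocate and DMA-map the queue state area shared with firmware for
 * peer flow control; a no-op unless the running firmware advertises
 * ATH10K_FW_FEATURE_PEER_FLOW_CONTROL.
 */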
static int ath10k_htt_tx_alloc_txq(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;
	int ret;

	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		      ar->running_fw->fw_file.fw_features))
		return 0;

	htt->tx_q_state.num_peers = HTT_TX_Q_STATE_NUM_PEERS;
	htt->tx_q_state.num_tids = HTT_TX_Q_STATE_NUM_TIDS;
	htt->tx_q_state.type = HTT_Q_DEPTH_TYPE_BYTES;

	size = sizeof(*htt->tx_q_state.vaddr);
	htt->tx_q_state.vaddr = kzalloc(size, GFP_KERNEL);
	if (!htt->tx_q_state.vaddr)
		return -ENOMEM;

	htt->tx_q_state.paddr = dma_map_single(ar->dev, htt->tx_q_state.vaddr,
					       size, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, htt->tx_q_state.paddr);
	if (ret) {
		ath10k_warn(ar, "failed to dma map tx_q_state: %d\n", ret);
		kfree(htt->tx_q_state.vaddr);
		return -EIO;
	}

	return 0;
}

static void ath10k_htt_tx_free_txdone_fifo(struct ath10k_htt *htt)
{
	WARN_ON(!kfifo_is_empty(&htt->txdone_fifo));
	kfifo_free(&htt->txdone_fifo);
}

static int ath10k_htt_tx_alloc_txdone_fifo(struct ath10k_htt *htt)
{
	size_t size;
	int ret;

	size = roundup_pow_of_two(htt->max_num_pending_tx);
	ret = kfifo_alloc(&htt->txdone_fifo, size, GFP_KERNEL);

	return ret;
}

static int ath10k_htt_tx_alloc_buf(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int ret;

	ret = ath10k_htt_alloc_txbuff(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc cont tx buffer: %d\n", ret);
		return ret;
	}

	ret = ath10k_htt_alloc_frag_desc(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret);
		goto free_txbuf;
	}

	ret = ath10k_htt_tx_alloc_txq(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc txq: %d\n", ret);
		goto free_frag_desc;
	}

	ret = ath10k_htt_tx_alloc_txdone_fifo(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc txdone fifo: %d\n", ret);
		goto free_txq;
	}

	return 0;

free_txq:
	ath10k_htt_tx_free_txq(htt);

free_frag_desc:
	ath10k_htt_free_frag_desc(htt);

free_txbuf:
	ath10k_htt_free_txbuff(htt);

	return ret;
}

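/* Prepare the tx side of the HTT layer. High-latency (HL) devices do
 * all tx through HTC, so only the lock and the msdu_id IDR are set up
 * for them and no DMA tx memory is allocated.
 */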
int ath10k_htt_tx_start(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
		   htt->max_num_pending_tx);

	spin_lock_init(&htt->tx_lock);
	idr_init(&htt->pending_tx);

	if (htt->tx_mem_allocated)
		return 0;

	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		return 0;

	ret = ath10k_htt_tx_alloc_buf(htt);
	if (ret)
		goto free_idr_pending_tx;

	htt->tx_mem_allocated = true;

	return 0;

free_idr_pending_tx:
	idr_destroy(&htt->pending_tx);

	return ret;
}

static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
{
	struct ath10k *ar = ctx;
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {0};

	ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);

	tx_done.msdu_id = msdu_id;
	tx_done.status = HTT_TX_COMPL_STATE_DISCARD;

	ath10k_txrx_tx_unref(htt, &tx_done);

	return 0;
}

void ath10k_htt_tx_destroy(struct ath10k_htt *htt)
{
	if (!htt->tx_mem_allocated)
		return;

	ath10k_htt_free_txbuff(htt);
	ath10k_htt_tx_free_txq(htt);
	ath10k_htt_free_frag_desc(htt);
	ath10k_htt_tx_free_txdone_fifo(htt);
	htt->tx_mem_allocated = false;
}

static void ath10k_htt_flush_tx_queue(struct ath10k_htt *htt)
{
	ath10k_htc_stop_hl(htt->ar);
	idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
}

void ath10k_htt_tx_stop(struct ath10k_htt *htt)
{
	ath10k_htt_flush_tx_queue(htt);
	idr_destroy(&htt->pending_tx);
}

void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
	ath10k_htt_tx_stop(htt);
	ath10k_htt_tx_destroy(htt);
}

void ath10k_htt_op_ep_tx_credits(struct ath10k *ar)
{
	queue_work(ar->workqueue, &ar->bundle_tx_work);
}

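/* HTC tx completion handler for HTT frames. When firmware tx
 * completions are disabled the data tx descriptor has to be parsed
 * here, before the skb is freed, and an ACK completion synthesized for
 * frames that did not request an explicit completion.
 */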
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {0};
	struct htt_cmd_hdr *htt_hdr;
	struct htt_data_tx_desc *desc_hdr = NULL;
	u16 flags1 = 0;
	u16 msdu_id = 0;
	u8 msg_type = 0;

	if (htt->disable_tx_comp) {
		htt_hdr = (struct htt_cmd_hdr *)skb->data;
		msg_type = htt_hdr->msg_type;

		if (msg_type == HTT_H2T_MSG_TYPE_TX_FRM) {
			desc_hdr = (struct htt_data_tx_desc *)
				(skb->data + sizeof(*htt_hdr));
			flags1 = __le16_to_cpu(desc_hdr->flags1);
			/* read the msdu id before the skb is freed below */
			msdu_id = __le16_to_cpu(desc_hdr->id);
		}
	}

	dev_kfree_skb_any(skb);

	if ((!htt->disable_tx_comp) || (msg_type != HTT_H2T_MSG_TYPE_TX_FRM))
		return;

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx complete msdu id:%u ,flags1:%x\n",
		   msdu_id, flags1);

	if (flags1 & HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE)
		return;

	tx_done.status = HTT_TX_COMPL_STATE_ACK;
	tx_done.msdu_id = msdu_id;
	ath10k_txrx_tx_unref(&ar->htt, &tx_done);
}

void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_hif_tx_complete);

int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0;
	int ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->ver_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u32 mask, u32 reset_mask,
			     u64 cookie)
{
	struct ath10k *ar = htt->ar;
	struct htt_stats_req *req;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0, ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->stats_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;

	req = &cmd->stats_req;

	memset(req, 0, sizeof(*req));

	/* currently we support only max 24 bit masks so no need to worry
	 * about endian support
	 */
	memcpy(req->upload_types, &mask, 3);
	memcpy(req->reset_types, &reset_mask, 3);
	req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
	req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
	req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send htt type stats request: %d",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

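/* Tell firmware where the continuous fragment descriptor bank lives
 * and, when peer flow control is enabled, where to find the shared tx
 * queue state.
 */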
static int ath10k_htt_send_frag_desc_bank_cfg_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_frag_desc_bank_cfg32 *cfg;
	int ret, size;
	u8 info;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	if (!htt->frag_desc.paddr) {
		ath10k_warn(ar, "invalid frag desc memory\n");
		return -EINVAL;
	}

	size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg32);
	skb = ath10k_htc_alloc_skb(ar, size);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, size);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;

	info = 0;
	info |= SM(htt->tx_q_state.type,
		   HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);

	if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		     ar->running_fw->fw_file.fw_features))
		info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;

	cfg = &cmd->frag_desc_bank_cfg32;
	cfg->info = info;
	cfg->num_banks = 1;
	cfg->desc_size = sizeof(struct htt_msdu_ext_desc);
	cfg->bank_base_addrs[0] = __cpu_to_le32(htt->frag_desc.paddr);
	cfg->bank_id[0].bank_min_id = 0;
	cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
						    1);

	cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
	cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
	cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
	cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
	cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static int ath10k_htt_send_frag_desc_bank_cfg_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_frag_desc_bank_cfg64 *cfg;
	int ret, size;
	u8 info;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	if (!htt->frag_desc.paddr) {
		ath10k_warn(ar, "invalid frag desc memory\n");
		return -EINVAL;
	}

	size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg64);
	skb = ath10k_htc_alloc_skb(ar, size);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, size);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;

	info = 0;
	info |= SM(htt->tx_q_state.type,
		   HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);

	if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		     ar->running_fw->fw_file.fw_features))
		info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;

	cfg = &cmd->frag_desc_bank_cfg64;
	cfg->info = info;
	cfg->num_banks = 1;
	cfg->desc_size = sizeof(struct htt_msdu_ext_desc_64);
	cfg->bank_base_addrs[0] = __cpu_to_le64(htt->frag_desc.paddr);
	cfg->bank_id[0].bank_min_id = 0;
	cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
						    1);

	cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
	cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
	cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
	cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
	cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

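/* Report the layout of struct htt_rx_desc to firmware as 4-byte word
 * offsets so it places rx metadata where the host expects it.
 */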
static void ath10k_htt_fill_rx_desc_offset_32(void *rx_ring)
{
	struct htt_rx_ring_setup_ring32 *ring =
			(struct htt_rx_ring_setup_ring32 *)rx_ring;

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
#undef desc_offset
}

static void ath10k_htt_fill_rx_desc_offset_64(void *rx_ring)
{
	struct htt_rx_ring_setup_ring64 *ring =
			(struct htt_rx_ring_setup_ring64 *)rx_ring;

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
#undef desc_offset
}

static int ath10k_htt_send_rx_ring_cfg_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring32 *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/*
	 * the HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup_32.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup_32.hdr.num_rings = 1;

	/* FIXME: do we need all of this? */
	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr =
		__cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

	ath10k_htt_fill_rx_desc_offset_32(ring);
	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static int ath10k_htt_send_rx_ring_cfg_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring64 *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/* HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_64.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup_64.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup_64.hdr.num_rings = 1;

	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr = __cpu_to_le64(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le64(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

	ath10k_htt_fill_rx_desc_offset_64(ring);
	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static int ath10k_htt_send_rx_ring_cfg_hl(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring32 *ring;
	const int num_rx_ring = 1;
	u16 flags;
	int len;
	int ret;

	/*
	 * the HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup_32.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup_32.hdr.num_rings = 1;

	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;

	memset(ring, 0, sizeof(*ring));
	ring->rx_ring_len = __cpu_to_le16(HTT_RX_RING_SIZE_MIN);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static int ath10k_htt_h2t_aggr_cfg_msg_32(struct ath10k_htt *htt,
					  u8 max_subfrms_ampdu,
					  u8 max_subfrms_amsdu)
{
	struct ath10k *ar = htt->ar;
	struct htt_aggr_conf *aggr_conf;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len;
	int ret;

	/* Firmware defaults are: amsdu = 3 and ampdu = 64 */

	if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
		return -EINVAL;

	if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
		return -EINVAL;

	len = sizeof(cmd->hdr);
	len += sizeof(cmd->aggr_conf);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

	aggr_conf = &cmd->aggr_conf;
	aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
	aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
		   aggr_conf->max_num_amsdu_subframes,
		   aggr_conf->max_num_ampdu_subframes);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static int ath10k_htt_h2t_aggr_cfg_msg_v2(struct ath10k_htt *htt,
					  u8 max_subfrms_ampdu,
					  u8 max_subfrms_amsdu)
{
	struct ath10k *ar = htt->ar;
	struct htt_aggr_conf_v2 *aggr_conf;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len;
	int ret;

	/* Firmware defaults are: amsdu = 3 and ampdu = 64 */

	if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
		return -EINVAL;

	if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
		return -EINVAL;

	len = sizeof(cmd->hdr);
	len += sizeof(cmd->aggr_conf_v2);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

	aggr_conf = &cmd->aggr_conf_v2;
	aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
	aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
		   aggr_conf->max_num_amsdu_subframes,
		   aggr_conf->max_num_ampdu_subframes);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

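/* Reply to an HTT tx-fetch indication with the records the host agrees
 * to release; part of the push-pull tx flow control handshake.
 */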
int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
			     __le32 token,
			     __le16 fetch_seq_num,
			     struct htt_tx_fetch_record *records,
			     size_t num_records)
{
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	const u16 resp_id = 0;
	int len = 0;
	int ret;

	/* Response IDs are echo-ed back only for host driver convenience
	 * purposes. They aren't used for anything in the driver yet so use 0.
	 */

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->tx_fetch_resp);
	len += sizeof(cmd->tx_fetch_resp.records[0]) * num_records;

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FETCH_RESP;
	cmd->tx_fetch_resp.resp_id = cpu_to_le16(resp_id);
	cmd->tx_fetch_resp.fetch_seq_num = fetch_seq_num;
	cmd->tx_fetch_resp.num_records = cpu_to_le16(num_records);
	cmd->tx_fetch_resp.token = token;

	memcpy(cmd->tx_fetch_resp.records, records,
	       sizeof(records[0]) * num_records);

	ret = ath10k_htc_send(&ar->htc, ar->htt.eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to submit htc command: %d\n", ret);
		goto err_free_skb;
	}

	return 0;

err_free_skb:
	dev_kfree_skb_any(skb);

	return ret;
}

static u8 ath10k_htt_tx_get_vdev_id(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
	struct ath10k_vif *arvif;

	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
		return ar->scan.vdev_id;
	} else if (cb->vif) {
		arvif = (void *)cb->vif->drv_priv;
		return arvif->vdev_id;
	} else if (ar->monitor_started) {
		return ar->monitor_vdev_id;
	} else {
		return 0;
	}
}

static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);

	if (!is_eth && ieee80211_is_mgmt(hdr->frame_control))
		return HTT_DATA_TX_EXT_TID_MGMT;
	else if (cb->flags & ATH10K_SKB_F_QOS)
		return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	else
		return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
}

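/* Send a management frame via the dedicated HTT_H2T_MSG_TYPE_MGMT_TX
 * command. For protected robust management frames, tail room for the
 * MIC is reserved up front so firmware can encrypt the frame in place.
 */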
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct sk_buff *txdesc = NULL;
	struct htt_cmd *cmd;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	int len = 0;
	int msdu_id = -1;
	int res;
	const u8 *peer_addr;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->mgmt_tx);

	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	if (res < 0)
		goto err;

	msdu_id = res;

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		peer_addr = hdr->addr1;
		if (is_multicast_ether_addr(peer_addr)) {
			skb_put(msdu, sizeof(struct ieee80211_mmie_16));
		} else {
			if (skb_cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP ||
			    skb_cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP_256)
				skb_put(msdu, IEEE80211_GCMP_MIC_LEN);
			else
				skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
		}
	}

	txdesc = ath10k_htc_alloc_skb(ar, len);
	if (!txdesc) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_txdesc;
	}

	skb_put(txdesc, len);
	cmd = (struct htt_cmd *)txdesc->data;
	memset(cmd, 0, len);

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
	cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
	cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
	cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
	cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
	memcpy(cmd->mgmt_tx.hdr, msdu->data,
	       min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
		dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
	dev_kfree_skb_any(txdesc);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err:
	return res;
}

#define HTT_TX_HL_NEEDED_HEADROOM \
	(unsigned int)(sizeof(struct htt_cmd_hdr) + \
	sizeof(struct htt_data_tx_desc) + \
	sizeof(struct ath10k_htc_hdr))

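/* High-latency (SDIO/USB) tx path: the HTT command header and data tx
 * descriptor are prepended to the msdu itself and the whole frame goes
 * out through the regular HTC send path instead of scatter-gather DMA.
 */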
static int ath10k_htt_tx_hl(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
			    struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	int res, data_len;
	struct htt_cmd_hdr *cmd_hdr;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct htt_data_tx_desc *tx_desc;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct sk_buff *tmp_skb;
	bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
	u8 flags0 = 0;
	u16 flags1 = 0;
	u16 msdu_id = 0;

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	}

	data_len = msdu->len;

	switch (txmode) {
	case ATH10K_HW_TXRX_RAW:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		fallthrough;
	case ATH10K_HW_TXRX_ETHERNET:
		flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		break;
	case ATH10K_HW_TXRX_MGMT:
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

		if (htt->disable_tx_comp)
			flags1 |= HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE;
		break;
	}

	if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
	}

	/* Prepend the HTT header and TX desc struct to the data message
	 * and realloc the skb if it does not have enough headroom.
	 */
	if (skb_headroom(msdu) < HTT_TX_HL_NEEDED_HEADROOM) {
		tmp_skb = msdu;

		ath10k_dbg(htt->ar, ATH10K_DBG_HTT,
			   "Not enough headroom in skb. Current headroom: %u, needed: %u. Reallocating...\n",
			   skb_headroom(msdu), HTT_TX_HL_NEEDED_HEADROOM);
		msdu = skb_realloc_headroom(msdu, HTT_TX_HL_NEEDED_HEADROOM);
		kfree_skb(tmp_skb);
		if (!msdu) {
			ath10k_warn(htt->ar, "htt hl tx: Unable to realloc skb!\n");
			res = -ENOMEM;
			goto out;
		}
	}

	if (ar->bus_param.hl_msdu_ids) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
		res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
		if (res < 0) {
			ath10k_err(ar, "msdu_id allocation failed %d\n", res);
			goto out;
		}
		msdu_id = res;
	}

	/* As msdu is freed by mac80211 (in ieee80211_tx_status()) and by
	 * ath10k (in ath10k_htt_htc_tx_complete()) we have to increase
	 * reference by one to avoid a use-after-free case and a double
	 * free.
	 */
	skb_get(msdu);

	skb_push(msdu, sizeof(*cmd_hdr));
	skb_push(msdu, sizeof(*tx_desc));
	cmd_hdr = (struct htt_cmd_hdr *)msdu->data;
	tx_desc = (struct htt_data_tx_desc *)(msdu->data + sizeof(*cmd_hdr));

	cmd_hdr->msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	tx_desc->flags0 = flags0;
	tx_desc->flags1 = __cpu_to_le16(flags1);
	tx_desc->len = __cpu_to_le16(data_len);
	tx_desc->id = __cpu_to_le16(msdu_id);
	tx_desc->frags_paddr = 0; /* always zero */
	/* Initialize peer_id to INVALID_PEER because this is NOT
	 * Reinjection path
	 */
	tx_desc->peerid = __cpu_to_le32(HTT_INVALID_PEERID);

	res = ath10k_htc_send_hl(&htt->ar->htc, htt->eid, msdu);

out:
	return res;
}

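/* Low-latency data tx for 32-bit targets: the per-msdu_id txbuf (HTC
 * header, HTT command and fragment list) and the msdu payload are
 * handed to the HIF layer as a two element scatter-gather list,
 * bypassing the HTC tx path entirely.
 */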
static int ath10k_htt_tx_32(struct ath10k_htt *htt,
			    enum ath10k_hw_txrx_mode txmode,
			    struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct ath10k_hif_sg_item sg_items[2];
	struct ath10k_htt_txbuf_32 *txbuf;
	struct htt_data_tx_desc_frag *frags;
	bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
	int prefetch_len;
	int res;
	u8 flags0 = 0;
	u16 msdu_id, flags1 = 0;
	u16 freq = 0;
	u32 frags_paddr = 0;
	u32 txbuf_paddr;
	struct htt_msdu_ext_desc *ext_desc = NULL;
	struct htt_msdu_ext_desc *ext_desc_t = NULL;

	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	if (res < 0)
		goto err;

	msdu_id = res;

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	txbuf = htt->txbuf.vaddr_txbuff_32 + msdu_id;
	txbuf_paddr = htt->txbuf.paddr +
		      (sizeof(struct ath10k_htt_txbuf_32) * msdu_id);

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	} else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
		   txmode == ATH10K_HW_TXRX_RAW &&
		   ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_msdu_id;
	}

	if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
		freq = ar->scan.roc_freq;

	switch (txmode) {
	case ATH10K_HW_TXRX_RAW:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		fallthrough;
	case ATH10K_HW_TXRX_ETHERNET:
		if (ar->hw_params.continuous_frag_desc) {
			ext_desc_t = htt->frag_desc.vaddr_desc_32;
			memset(&ext_desc_t[msdu_id], 0,
			       sizeof(struct htt_msdu_ext_desc));
			frags = (struct htt_data_tx_desc_frag *)
				&ext_desc_t[msdu_id].frags;
			ext_desc = &ext_desc_t[msdu_id];
			frags[0].tword_addr.paddr_lo =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].tword_addr.paddr_hi = 0;
			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);

			frags_paddr = htt->frag_desc.paddr +
				(sizeof(struct htt_msdu_ext_desc) * msdu_id);
		} else {
			frags = txbuf->frags;
			frags[0].dword_addr.paddr =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
			frags[1].dword_addr.paddr = 0;
			frags[1].dword_addr.len = 0;

			frags_paddr = txbuf_paddr;
		}
		flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		break;
	case ATH10K_HW_TXRX_MGMT:
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

		frags_paddr = skb_cb->paddr;
		break;
	}

	/* Normally all commands go through HTC which manages tx credits for
	 * each endpoint and notifies when tx is completed.
	 *
	 * HTT endpoint is creditless so there's no need to care about HTC
	 * flags. In that case it is trivial to fill the HTC header here.
	 *
	 * MSDU transmission is considered completed upon HTT event. This
	 * implies no relevant resources can be freed until after the event is
	 * received. That's why HTC tx completion handler itself is ignored by
	 * setting NULL to transfer_context for all sg items.
	 *
	 * There is simply no point in pushing HTT TX_FRM through HTC tx path
	 * as it's a waste of resources. By bypassing HTC it is possible to
	 * avoid extra memory allocations, compress data structures and thus
	 * improve performance.
	 */

	txbuf->htc_hdr.eid = htt->eid;
	txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
					   sizeof(txbuf->cmd_tx) +
					   prefetch_len);
	txbuf->htc_hdr.flags = 0;

	if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
		if (ar->hw_params.continuous_frag_desc)
			ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
	}

	/* Prevent firmware from sending up tx inspection requests. There's
	 * nothing ath10k can do with frames requested for inspection so force
	 * it to simply rely a regular tx completion with discard status.
	 */
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

	txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	txbuf->cmd_tx.flags0 = flags0;
	txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
	txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
	txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
	txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
	if (ath10k_mac_tx_frm_has_freq(ar)) {
		txbuf->cmd_tx.offchan_tx.peerid =
				__cpu_to_le16(HTT_INVALID_PEERID);
		txbuf->cmd_tx.offchan_tx.freq =
				__cpu_to_le16(freq);
	} else {
		txbuf->cmd_tx.peerid =
				__cpu_to_le32(HTT_INVALID_PEERID);
	}

	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %pad, msdu_paddr %pad vdev %hhu tid %hhu freq %hu\n",
		   flags0, flags1, msdu->len, msdu_id, &frags_paddr,
		   &skb_cb->paddr, vdev_id, tid, freq);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
			msdu->data, msdu->len);
	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
	trace_ath10k_tx_payload(ar, msdu->data, msdu->len);

	sg_items[0].transfer_id = 0;
	sg_items[0].transfer_context = NULL;
	sg_items[0].vaddr = &txbuf->htc_hdr;
	sg_items[0].paddr = txbuf_paddr +
			    sizeof(txbuf->frags);
	sg_items[0].len = sizeof(txbuf->htc_hdr) +
			  sizeof(txbuf->cmd_hdr) +
			  sizeof(txbuf->cmd_tx);

	sg_items[1].transfer_id = 0;
	sg_items[1].transfer_context = NULL;
	sg_items[1].vaddr = msdu->data;
	sg_items[1].paddr = skb_cb->paddr;
	sg_items[1].len = prefetch_len;

	res = ath10k_hif_tx_sg(htt->ar,
			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
			       sg_items, ARRAY_SIZE(sg_items));
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err:
	return res;
}

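/* 64-bit target variant of the data tx path: same flow as the 32-bit
 * version but with 64-bit fragment descriptors and DMA addresses.
 */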
static int ath10k_htt_tx_64(struct ath10k_htt *htt,
			    enum ath10k_hw_txrx_mode txmode,
			    struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct ath10k_hif_sg_item sg_items[2];
	struct ath10k_htt_txbuf_64 *txbuf;
	struct htt_data_tx_desc_frag *frags;
	bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
	int prefetch_len;
	int res;
	u8 flags0 = 0;
	u16 msdu_id, flags1 = 0;
	u16 freq = 0;
	dma_addr_t frags_paddr = 0;
	dma_addr_t txbuf_paddr;
	struct htt_msdu_ext_desc_64 *ext_desc = NULL;
	struct htt_msdu_ext_desc_64 *ext_desc_t = NULL;

	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	if (res < 0)
		goto err;

	msdu_id = res;

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	txbuf = htt->txbuf.vaddr_txbuff_64 + msdu_id;
	txbuf_paddr = htt->txbuf.paddr +
		      (sizeof(struct ath10k_htt_txbuf_64) * msdu_id);

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	} else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
		   txmode == ATH10K_HW_TXRX_RAW &&
		   ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_msdu_id;
	}

	if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
		freq = ar->scan.roc_freq;

	switch (txmode) {
	case ATH10K_HW_TXRX_RAW:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		fallthrough;
	case ATH10K_HW_TXRX_ETHERNET:
		if (ar->hw_params.continuous_frag_desc) {
			ext_desc_t = htt->frag_desc.vaddr_desc_64;
			memset(&ext_desc_t[msdu_id], 0,
			       sizeof(struct htt_msdu_ext_desc_64));
			frags = (struct htt_data_tx_desc_frag *)
				&ext_desc_t[msdu_id].frags;
			ext_desc = &ext_desc_t[msdu_id];
			frags[0].tword_addr.paddr_lo =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].tword_addr.paddr_hi =
				__cpu_to_le16(upper_32_bits(skb_cb->paddr));
			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);

			frags_paddr = htt->frag_desc.paddr +
			   (sizeof(struct htt_msdu_ext_desc_64) * msdu_id);
		} else {
			frags = txbuf->frags;
			frags[0].tword_addr.paddr_lo =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].tword_addr.paddr_hi =
				__cpu_to_le16(upper_32_bits(skb_cb->paddr));
			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
			frags[1].tword_addr.paddr_lo = 0;
			frags[1].tword_addr.paddr_hi = 0;
			frags[1].tword_addr.len_16 = 0;
		}
		flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		break;
	case ATH10K_HW_TXRX_MGMT:
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

		frags_paddr = skb_cb->paddr;
		break;
	}

	/* Normally all commands go through HTC which manages tx credits for
	 * each endpoint and notifies when tx is completed.
	 *
	 * HTT endpoint is creditless so there's no need to care about HTC
	 * flags. In that case it is trivial to fill the HTC header here.
	 *
	 * MSDU transmission is considered completed upon HTT event. This
	 * implies no relevant resources can be freed until after the event is
	 * received. That's why HTC tx completion handler itself is ignored by
	 * setting NULL to transfer_context for all sg items.
	 *
	 * There is simply no point in pushing HTT TX_FRM through HTC tx path
	 * as it's a waste of resources. By bypassing HTC it is possible to
	 * avoid extra memory allocations, compress data structures and thus
	 * improve performance.
	 */

	txbuf->htc_hdr.eid = htt->eid;
	txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
					   sizeof(txbuf->cmd_tx) +
					   prefetch_len);
	txbuf->htc_hdr.flags = 0;

	if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
		if (ar->hw_params.continuous_frag_desc) {
			memset(ext_desc->tso_flag, 0, sizeof(ext_desc->tso_flag));
			ext_desc->tso_flag[3] |=
				__cpu_to_le32(HTT_MSDU_CHECKSUM_ENABLE_64);
		}
	}

	/* Prevent firmware from sending up tx inspection requests. There's
	 * nothing ath10k can do with frames requested for inspection so force
	 * it to simply rely a regular tx completion with discard status.
	 */
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

	txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	txbuf->cmd_tx.flags0 = flags0;
	txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
	txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
	txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);

	/* fill fragment descriptor */
	txbuf->cmd_tx.frags_paddr = __cpu_to_le64(frags_paddr);
	if (ath10k_mac_tx_frm_has_freq(ar)) {
		txbuf->cmd_tx.offchan_tx.peerid =
				__cpu_to_le16(HTT_INVALID_PEERID);
		txbuf->cmd_tx.offchan_tx.freq =
				__cpu_to_le16(freq);
	} else {
		txbuf->cmd_tx.peerid =
				__cpu_to_le32(HTT_INVALID_PEERID);
	}

	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %pad, msdu_paddr %pad vdev %hhu tid %hhu freq %hu\n",
		   flags0, flags1, msdu->len, msdu_id, &frags_paddr,
		   &skb_cb->paddr, vdev_id, tid, freq);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
			msdu->data, msdu->len);
	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
	trace_ath10k_tx_payload(ar, msdu->data, msdu->len);

	sg_items[0].transfer_id = 0;
	sg_items[0].transfer_context = NULL;
	sg_items[0].vaddr = &txbuf->htc_hdr;
	sg_items[0].paddr = txbuf_paddr +
			    sizeof(txbuf->frags);
	sg_items[0].len = sizeof(txbuf->htc_hdr) +
			  sizeof(txbuf->cmd_hdr) +
			  sizeof(txbuf->cmd_tx);

	sg_items[1].transfer_id = 0;
	sg_items[1].transfer_context = NULL;
	sg_items[1].vaddr = msdu->data;
	sg_items[1].paddr = skb_cb->paddr;
	sg_items[1].len = prefetch_len;

	res = ath10k_hif_tx_sg(htt->ar,
			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
			       sg_items, ARRAY_SIZE(sg_items));
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err:
	return res;
}

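/* Per-target op tables; ath10k_htt_set_tx_ops() selects one based on
 * bus type and target word size.
 */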
static const struct ath10k_htt_tx_ops htt_tx_ops_32 = {
	.htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_32,
	.htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
	.htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_32,
	.htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_32,
	.htt_tx = ath10k_htt_tx_32,
	.htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_32,
	.htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_32,
	.htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_32,
};

static const struct ath10k_htt_tx_ops htt_tx_ops_64 = {
	.htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_64,
	.htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_64,
	.htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_64,
	.htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_64,
	.htt_tx = ath10k_htt_tx_64,
	.htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_64,
	.htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_64,
	.htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_v2,
};

static const struct ath10k_htt_tx_ops htt_tx_ops_hl = {
	.htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_hl,
	.htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
	.htt_tx = ath10k_htt_tx_hl,
	.htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_32,
	.htt_flush_tx = ath10k_htt_flush_tx_queue,
};

void ath10k_htt_set_tx_ops(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;

	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		htt->tx_ops = &htt_tx_ops_hl;
	else if (ar->hw_params.target_64bit)
		htt->tx_ops = &htt_tx_ops_64;
	else
		htt->tx_ops = &htt_tx_ops_32;
}