// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include "core.h"
#include "dp_tx.h"
#include "debug.h"
#include "debugfs_sta.h"
#include "hw.h"
#include "peer.h"
#include "mac.h"
static enum hal_tcl_encap_type
ath11k_dp_tx_get_encap_type(struct ath11k_vif *arvif, struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath11k_base *ab = arvif->ar->ab;

	if (test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags))
		return HAL_TCL_ENCAP_TYPE_RAW;

	if (tx_info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
		return HAL_TCL_ENCAP_TYPE_ETHERNET;

	return HAL_TCL_ENCAP_TYPE_NATIVE_WIFI;
}
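/* Convert a QoS data frame to the native-wifi encapsulation the hardware
 * expects: remove the QoS control field and clear the QoS-data subtype bit.
 */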
static void ath11k_dp_tx_encap_nwifi(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	u8 *qos_ctl;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return;

	qos_ctl = ieee80211_get_qos_ctl(hdr);
	memmove(skb->data + IEEE80211_QOS_CTL_LEN,
		skb->data, (void *)qos_ctl - (void *)skb->data);
	skb_pull(skb, IEEE80211_QOS_CTL_LEN);

	hdr = (void *)skb->data;
	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
}
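/* Pick the hardware TID for an skb: HW-encapsulated and QoS frames use the
 * skb priority, while non-QoS data is queued on the dedicated non-QoS TID.
 */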
static u8 ath11k_dp_tx_get_tid(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct ath11k_skb_cb *cb = ATH11K_SKB_CB(skb);

	if (cb->flags & ATH11K_SKB_HW_80211_ENCAP)
		return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	else if (!ieee80211_is_data_qos(hdr->frame_control))
		return HAL_DESC_REO_NON_QOS_TID;
	else
		return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
}
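/* Translate an IEEE 802.11 cipher suite selector into the HAL encrypt type
 * programmed into the TCL descriptor; unknown ciphers fall back to open.
 */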
enum hal_encrypt_type ath11k_dp_tx_get_encrypt_type(u32 cipher)
{
	switch (cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		return HAL_ENCRYPT_TYPE_WEP_40;
	case WLAN_CIPHER_SUITE_WEP104:
		return HAL_ENCRYPT_TYPE_WEP_104;
	case WLAN_CIPHER_SUITE_TKIP:
		return HAL_ENCRYPT_TYPE_TKIP_MIC;
	case WLAN_CIPHER_SUITE_CCMP:
		return HAL_ENCRYPT_TYPE_CCMP_128;
	case WLAN_CIPHER_SUITE_CCMP_256:
		return HAL_ENCRYPT_TYPE_CCMP_256;
	case WLAN_CIPHER_SUITE_GCMP:
		return HAL_ENCRYPT_TYPE_GCMP_128;
	case WLAN_CIPHER_SUITE_GCMP_256:
		return HAL_ENCRYPT_TYPE_AES_GCMP_256;
	default:
		return HAL_ENCRYPT_TYPE_OPEN;
	}
}
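/* Transmit path entry point: select a TCL ring, allocate an idr cookie used
 * to find the skb again at completion time, fill in the hal_tx_info (encap,
 * encrypt, checksum offload, TID), DMA-map the frame and enqueue a TCL
 * command descriptor. When a ring is full and the hardware supports it, the
 * remaining TCL rings are tried before giving up.
 */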
int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
		 struct ath11k_sta *arsta, struct sk_buff *skb)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_dp *dp = &ab->dp;
	struct hal_tx_info ti = {0};
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
	struct hal_srng *tcl_ring;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct dp_tx_ring *tx_ring;
	void *hal_tcl_desc;
	u8 pool_id;
	u8 hal_ring_id;
	int ret;
	u32 ring_selector = 0;
	u8 ring_map = 0;
	bool tcl_ring_retry;

	if (unlikely(test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)))
		return -ESHUTDOWN;

	if (unlikely(!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
		     !ieee80211_is_data(hdr->frame_control)))
		return -ENOTSUPP;

	pool_id = skb_get_queue_mapping(skb) & (ATH11K_HW_MAX_QUEUES - 1);

	ring_selector = ab->hw_params.hw_ops->get_ring_selector(skb);

tcl_ring_sel:
	tcl_ring_retry = false;

	ti.ring_id = ring_selector % ab->hw_params.max_tx_ring;
	ti.rbm_id = ab->hw_params.hal_params->tcl2wbm_rbm_map[ti.ring_id].rbm_id;

	ring_map |= BIT(ti.ring_id);

	tx_ring = &dp->tx_ring[ti.ring_id];

	spin_lock_bh(&tx_ring->tx_idr_lock);
	ret = idr_alloc(&tx_ring->txbuf_idr, skb, 0,
			DP_TX_IDR_SIZE - 1, GFP_ATOMIC);
	spin_unlock_bh(&tx_ring->tx_idr_lock);

	if (unlikely(ret < 0)) {
		if (ring_map == (BIT(ab->hw_params.max_tx_ring) - 1) ||
		    !ab->hw_params.tcl_ring_retry) {
			atomic_inc(&ab->soc_stats.tx_err.misc_fail);
			return -ENOSPC;
		}

		/* Check if the next ring is available */
		ring_selector++;
		goto tcl_ring_sel;
	}

	ti.desc_id = FIELD_PREP(DP_TX_DESC_ID_MAC_ID, ar->pdev_idx) |
		     FIELD_PREP(DP_TX_DESC_ID_MSDU_ID, ret) |
		     FIELD_PREP(DP_TX_DESC_ID_POOL_ID, pool_id);
	ti.encap_type = ath11k_dp_tx_get_encap_type(arvif, skb);

	if (ieee80211_has_a4(hdr->frame_control) &&
	    is_multicast_ether_addr(hdr->addr3) && arsta &&
	    arsta->use_4addr_set) {
		ti.meta_data_flags = arsta->tcl_metadata;
		ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TO_FW, 1);
	} else {
		ti.meta_data_flags = arvif->tcl_metadata;
	}

	if (unlikely(ti.encap_type == HAL_TCL_ENCAP_TYPE_RAW)) {
		if (skb_cb->flags & ATH11K_SKB_CIPHER_SET) {
			ti.encrypt_type =
				ath11k_dp_tx_get_encrypt_type(skb_cb->cipher);

			if (ieee80211_has_protected(hdr->frame_control))
				skb_put(skb, IEEE80211_CCMP_MIC_LEN);
		} else {
			ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
		}
	}

	ti.addr_search_flags = arvif->hal_addr_search_flags;
	ti.search_type = arvif->search_type;
	ti.type = HAL_TCL_DESC_TYPE_BUFFER;
	ti.pkt_offset = 0;
	ti.lmac_id = ar->lmac_id;
	ti.bss_ast_hash = arvif->ast_hash;
	ti.bss_ast_idx = arvif->ast_idx;
	ti.dscp_tid_tbl_idx = 0;

	if (likely(skb->ip_summed == CHECKSUM_PARTIAL &&
		   ti.encap_type != HAL_TCL_ENCAP_TYPE_RAW)) {
		ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_IP4_CKSUM_EN, 1) |
			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP4_CKSUM_EN, 1) |
			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP6_CKSUM_EN, 1) |
			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TCP4_CKSUM_EN, 1) |
			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TCP6_CKSUM_EN, 1);
	}

	if (ieee80211_vif_is_mesh(arvif->vif))
		ti.enable_mesh = true;

	ti.flags1 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_TID_OVERWRITE, 1);

	ti.tid = ath11k_dp_tx_get_tid(skb);

	switch (ti.encap_type) {
	case HAL_TCL_ENCAP_TYPE_NATIVE_WIFI:
		ath11k_dp_tx_encap_nwifi(skb);
		break;
	case HAL_TCL_ENCAP_TYPE_RAW:
		if (!test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags)) {
			ret = -EINVAL;
			goto fail_remove_idr;
		}
		break;
	case HAL_TCL_ENCAP_TYPE_ETHERNET:
		/* no need to encap */
		break;
	case HAL_TCL_ENCAP_TYPE_802_3:
	default:
		/* TODO: Take care of other encap modes as well */
		ret = -EINVAL;
		atomic_inc(&ab->soc_stats.tx_err.misc_fail);
		goto fail_remove_idr;
	}

	ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(ab->dev, ti.paddr))) {
		atomic_inc(&ab->soc_stats.tx_err.misc_fail);
		ath11k_warn(ab, "failed to DMA map data Tx buffer\n");
		ret = -ENOMEM;
		goto fail_remove_idr;
	}

	ti.data_len = skb->len;
	skb_cb->paddr = ti.paddr;
	skb_cb->vif = arvif->vif;
	skb_cb->ar = ar;

	hal_ring_id = tx_ring->tcl_data_ring.ring_id;
	tcl_ring = &ab->hal.srng_list[hal_ring_id];

	spin_lock_bh(&tcl_ring->lock);

	ath11k_hal_srng_access_begin(ab, tcl_ring);

	hal_tcl_desc = (void *)ath11k_hal_srng_src_get_next_entry(ab, tcl_ring);
	if (unlikely(!hal_tcl_desc)) {
		/* NOTE: It is highly unlikely we'll be running out of tcl_ring
		 * desc because the desc is directly enqueued onto hw queue.
		 */
		ath11k_hal_srng_access_end(ab, tcl_ring);
		ab->soc_stats.tx_err.desc_na[ti.ring_id]++;
		spin_unlock_bh(&tcl_ring->lock);
		ret = -ENOMEM;

		/* Checking for available tcl descriptors in another ring in
		 * case of failure due to full tcl ring now, is better than
		 * checking this ring earlier for each pkt tx.
		 * Restart ring selection if some rings are not checked yet.
		 */
		if (unlikely(ring_map != (BIT(ab->hw_params.max_tx_ring)) - 1) &&
		    ab->hw_params.tcl_ring_retry && ab->hw_params.max_tx_ring > 1) {
			tcl_ring_retry = true;
			ring_selector++;
		}

		goto fail_unmap_dma;
	}

	ath11k_hal_tx_cmd_desc_setup(ab, hal_tcl_desc +
					 sizeof(struct hal_tlv_hdr), &ti);

	ath11k_hal_srng_access_end(ab, tcl_ring);

	ath11k_dp_shadow_start_timer(ab, tcl_ring, &dp->tx_ring_timer[ti.ring_id]);

	spin_unlock_bh(&tcl_ring->lock);

	ath11k_dbg_dump(ab, ATH11K_DBG_DP_TX, NULL, "dp tx msdu: ",
			skb->data, skb->len);

	atomic_inc(&ar->dp.num_tx_pending);

	return 0;

fail_unmap_dma:
	dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);

fail_remove_idr:
	spin_lock_bh(&tx_ring->tx_idr_lock);
	idr_remove(&tx_ring->txbuf_idr,
		   FIELD_GET(DP_TX_DESC_ID_MSDU_ID, ti.desc_id));
	spin_unlock_bh(&tx_ring->tx_idr_lock);

	if (tcl_ring_retry)
		goto tcl_ring_sel;

	return ret;
}
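/* Free a tx buffer without reporting any status to mac80211: drop the idr
 * mapping, undo the DMA mapping, free the skb and wake waiters once the
 * pending-tx count reaches zero.
 */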
static void ath11k_dp_tx_free_txbuf(struct ath11k_base *ab, u8 mac_id,
				    int msdu_id,
				    struct dp_tx_ring *tx_ring)
{
	struct ath11k *ar;
	struct sk_buff *msdu;
	struct ath11k_skb_cb *skb_cb;

	spin_lock(&tx_ring->tx_idr_lock);
	msdu = idr_remove(&tx_ring->txbuf_idr, msdu_id);
	spin_unlock(&tx_ring->tx_idr_lock);

	if (unlikely(!msdu)) {
		ath11k_warn(ab, "tx completion for unknown msdu_id %d\n",
			    msdu_id);
		return;
	}

	skb_cb = ATH11K_SKB_CB(msdu);

	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
	dev_kfree_skb_any(msdu);

	ar = ab->pdevs[mac_id].ar;
	if (atomic_dec_and_test(&ar->dp.num_tx_pending))
		wake_up(&ar->dp.tx_empty_waitq);
}
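/* Completion handler for frames released by the firmware via HTT: rebuild
 * the ack status (including the RSSI-to-dBm adjustment on targets without
 * hardware conversion) and hand the skb back through
 * ieee80211_tx_status_ext().
 */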
static void
ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab,
				 struct dp_tx_ring *tx_ring,
				 struct ath11k_dp_htt_wbm_tx_status *ts)
{
	struct ieee80211_tx_status status = { 0 };
	struct sk_buff *msdu;
	struct ieee80211_tx_info *info;
	struct ath11k_skb_cb *skb_cb;
	struct ath11k *ar;
	struct ath11k_peer *peer;

	spin_lock(&tx_ring->tx_idr_lock);
	msdu = idr_remove(&tx_ring->txbuf_idr, ts->msdu_id);
	spin_unlock(&tx_ring->tx_idr_lock);

	if (unlikely(!msdu)) {
		ath11k_warn(ab, "htt tx completion for unknown msdu_id %d\n",
			    ts->msdu_id);
		return;
	}

	skb_cb = ATH11K_SKB_CB(msdu);
	info = IEEE80211_SKB_CB(msdu);

	ar = skb_cb->ar;

	if (atomic_dec_and_test(&ar->dp.num_tx_pending))
		wake_up(&ar->dp.tx_empty_waitq);

	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);

	if (!skb_cb->vif) {
		ieee80211_free_txskb(ar->hw, msdu);
		return;
	}

	memset(&info->status, 0, sizeof(info->status));

	if (ts->acked) {
		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
			info->flags |= IEEE80211_TX_STAT_ACK;
			info->status.ack_signal = ts->ack_rssi;

			if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
				      ab->wmi_ab.svc_map))
				info->status.ack_signal += ATH11K_DEFAULT_NOISE_FLOOR;

			info->status.flags |=
				IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
		} else {
			info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
		}
	}

	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find_by_id(ab, ts->peer_id);
	if (!peer || !peer->sta) {
		ath11k_dbg(ab, ATH11K_DBG_DATA,
			   "dp_tx: failed to find the peer with peer_id %d\n",
			   ts->peer_id);
		spin_unlock_bh(&ab->base_lock);
		ieee80211_free_txskb(ar->hw, msdu);
		return;
	}
	spin_unlock_bh(&ab->base_lock);

	status.sta = peer->sta;
	status.info = info;
	status.skb = msdu;

	ieee80211_tx_status_ext(ar->hw, &status);
}
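/* Demultiplex an HTT WBM completion by status: OK/DROP/TTL are reported to
 * mac80211, reinjected and inspected frames are freed silently, and MEC
 * notify events are ignored unless WDS offload is in use.
 */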
static void
ath11k_dp_tx_process_htt_tx_complete(struct ath11k_base *ab,
				     void *desc, u8 mac_id,
				     u32 msdu_id, struct dp_tx_ring *tx_ring)
{
	struct htt_tx_wbm_completion *status_desc;
	struct ath11k_dp_htt_wbm_tx_status ts = {0};
	enum hal_wbm_htt_tx_comp_status wbm_status;

	status_desc = desc + HTT_TX_WBM_COMP_STATUS_OFFSET;

	wbm_status = FIELD_GET(HTT_TX_WBM_COMP_INFO0_STATUS,
			       status_desc->info0);

	switch (wbm_status) {
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK:
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
		ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK);
		ts.msdu_id = msdu_id;
		ts.ack_rssi = FIELD_GET(HTT_TX_WBM_COMP_INFO1_ACK_RSSI,
					status_desc->info1);

		if (FIELD_GET(HTT_TX_WBM_COMP_INFO2_VALID, status_desc->info2))
			ts.peer_id = FIELD_GET(HTT_TX_WBM_COMP_INFO2_SW_PEER_ID,
					       status_desc->info2);
		else
			ts.peer_id = HTT_INVALID_PEER_ID;

		ath11k_dp_tx_htt_tx_complete_buf(ab, tx_ring, &ts);

		break;
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
		ath11k_dp_tx_free_txbuf(ab, mac_id, msdu_id, tx_ring);
		break;
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY:
		/* This event is to be handled only when the driver decides to
		 * use WDS offload functionality.
		 */
		break;
	default:
		ath11k_warn(ab, "Unknown htt tx status %d\n", wbm_status);
		break;
	}
}
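/* Accumulate per-peer retry and failure counters used by the extended tx
 * stats exposed through debugfs.
 */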
static void ath11k_dp_tx_cache_peer_stats(struct ath11k *ar,
					  struct sk_buff *msdu,
					  struct hal_tx_status *ts)
{
	struct ath11k_per_peer_tx_stats *peer_stats = &ar->cached_stats;

	if (ts->try_cnt > 1) {
		peer_stats->retry_pkts += ts->try_cnt - 1;
		peer_stats->retry_bytes += (ts->try_cnt - 1) * msdu->len;

		if (ts->status != HAL_WBM_TQM_REL_REASON_FRAME_ACKED) {
			peer_stats->failed_pkts += 1;
			peer_stats->failed_bytes += msdu->len;
		}
	}
}
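/* Derive the last tx rate (legacy/HT/VHT/HE, bandwidth, guard interval and
 * OFDMA RU allocation) from the hal_tx_status rate words, cache it in the
 * station entry and, when extended tx stats are enabled, push the cached
 * per-peer counters into debugfs.
 */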
void ath11k_dp_tx_update_txcompl(struct ath11k *ar, struct hal_tx_status *ts)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_per_peer_tx_stats *peer_stats = &ar->cached_stats;
	enum hal_tx_rate_stats_pkt_type pkt_type;
	enum hal_tx_rate_stats_sgi sgi;
	enum hal_tx_rate_stats_bw bw;
	struct ath11k_peer *peer;
	struct ath11k_sta *arsta;
	struct ieee80211_sta *sta;
	u16 rate, ru_tones;
	u8 mcs, rate_idx = 0, ofdma;
	int ret;

	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find_by_id(ab, ts->peer_id);
	if (!peer || !peer->sta) {
		ath11k_dbg(ab, ATH11K_DBG_DP_TX,
			   "failed to find the peer by id %u\n", ts->peer_id);
		goto err_out;
	}

	sta = peer->sta;
	arsta = ath11k_sta_to_arsta(sta);

	memset(&arsta->txrate, 0, sizeof(arsta->txrate));
	pkt_type = FIELD_GET(HAL_TX_RATE_STATS_INFO0_PKT_TYPE,
			     ts->rate_stats);
	mcs = FIELD_GET(HAL_TX_RATE_STATS_INFO0_MCS,
			ts->rate_stats);
	sgi = FIELD_GET(HAL_TX_RATE_STATS_INFO0_SGI,
			ts->rate_stats);
	bw = FIELD_GET(HAL_TX_RATE_STATS_INFO0_BW, ts->rate_stats);
	ru_tones = FIELD_GET(HAL_TX_RATE_STATS_INFO0_TONES_IN_RU, ts->rate_stats);
	ofdma = FIELD_GET(HAL_TX_RATE_STATS_INFO0_OFDMA_TX, ts->rate_stats);

	/* This is to prefer choose the real NSS value arsta->last_txrate.nss,
	 * if it is invalid, then choose the NSS value while assoc.
	 */
	if (arsta->last_txrate.nss)
		arsta->txrate.nss = arsta->last_txrate.nss;
	else
		arsta->txrate.nss = arsta->peer_nss;

	if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11A ||
	    pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11B) {
		ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
							    pkt_type,
							    &rate_idx,
							    &rate);
		if (ret < 0)
			goto err_out;
		arsta->txrate.legacy = rate;
	} else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11N) {
		if (mcs > 7) {
			ath11k_warn(ab, "Invalid HT mcs index %d\n", mcs);
			goto err_out;
		}

		if (arsta->txrate.nss != 0)
			arsta->txrate.mcs = mcs + 8 * (arsta->txrate.nss - 1);
		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
	} else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AC) {
		if (mcs > 9) {
			ath11k_warn(ab, "Invalid VHT mcs index %d\n", mcs);
			goto err_out;
		}

		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
	} else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) {
		if (mcs > 11) {
			ath11k_warn(ab, "Invalid HE mcs index %d\n", mcs);
			goto err_out;
		}

		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
		arsta->txrate.he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
	}

	arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
	if (ofdma && pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) {
		arsta->txrate.bw = RATE_INFO_BW_HE_RU;
		arsta->txrate.he_ru_alloc =
			ath11k_mac_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones);
	}

	if (ath11k_debugfs_is_extd_tx_stats_enabled(ar))
		ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx);

err_out:
	spin_unlock_bh(&ab->base_lock);
}
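/* Report a TQM-released msdu to mac80211: unmap the frame, translate the
 * release reason into ack/no-ack flags, update the cached PPDU stats where
 * needed and attach the last known tx rate to the status.
 */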
static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
				       struct sk_buff *msdu,
				       struct hal_tx_status *ts)
{
	struct ieee80211_tx_status status = { 0 };
	struct ieee80211_rate_status status_rate = { 0 };
	struct ath11k_base *ab = ar->ab;
	struct ieee80211_tx_info *info;
	struct ath11k_skb_cb *skb_cb;
	struct ath11k_peer *peer;
	struct ath11k_sta *arsta;
	struct rate_info rate;

	if (WARN_ON_ONCE(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)) {
		/* Must not happen */
		return;
	}

	skb_cb = ATH11K_SKB_CB(msdu);

	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);

	if (unlikely(!rcu_access_pointer(ab->pdevs_active[ar->pdev_idx]))) {
		ieee80211_free_txskb(ar->hw, msdu);
		return;
	}

	if (unlikely(!skb_cb->vif)) {
		ieee80211_free_txskb(ar->hw, msdu);
		return;
	}

	info = IEEE80211_SKB_CB(msdu);
	memset(&info->status, 0, sizeof(info->status));

	/* skip tx rate update from ieee80211_status*/
	info->status.rates[0].idx = -1;

	if (ts->status == HAL_WBM_TQM_REL_REASON_FRAME_ACKED &&
	    !(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		info->flags |= IEEE80211_TX_STAT_ACK;
		info->status.ack_signal = ts->ack_rssi;

		if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
			      ab->wmi_ab.svc_map))
			info->status.ack_signal += ATH11K_DEFAULT_NOISE_FLOOR;

		info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
	}

	if (ts->status == HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX &&
	    (info->flags & IEEE80211_TX_CTL_NO_ACK))
		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;

	if (unlikely(ath11k_debugfs_is_extd_tx_stats_enabled(ar)) ||
	    ab->hw_params.single_pdev_only) {
		if (ts->flags & HAL_TX_STATUS_FLAGS_FIRST_MSDU) {
			if (ar->last_ppdu_id == 0) {
				ar->last_ppdu_id = ts->ppdu_id;
			} else if (ar->last_ppdu_id == ts->ppdu_id ||
				   ar->cached_ppdu_id == ar->last_ppdu_id) {
				ar->cached_ppdu_id = ar->last_ppdu_id;
				ar->cached_stats.is_ampdu = true;
				ath11k_dp_tx_update_txcompl(ar, ts);
				memset(&ar->cached_stats, 0,
				       sizeof(struct ath11k_per_peer_tx_stats));
			} else {
				ar->cached_stats.is_ampdu = false;
				ath11k_dp_tx_update_txcompl(ar, ts);
				memset(&ar->cached_stats, 0,
				       sizeof(struct ath11k_per_peer_tx_stats));
			}
			ar->last_ppdu_id = ts->ppdu_id;
		}

		ath11k_dp_tx_cache_peer_stats(ar, msdu, ts);
	}

	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find_by_id(ab, ts->peer_id);
	if (!peer || !peer->sta) {
		ath11k_dbg(ab, ATH11K_DBG_DATA,
			   "dp_tx: failed to find the peer with peer_id %d\n",
			   ts->peer_id);
		spin_unlock_bh(&ab->base_lock);
		ieee80211_free_txskb(ar->hw, msdu);
		return;
	}
	arsta = ath11k_sta_to_arsta(peer->sta);
	status.sta = peer->sta;
	status.skb = msdu;
	status.info = info;
	rate = arsta->last_txrate;

	status_rate.rate_idx = rate;
	status_rate.try_count = 1;

	status.rates = &status_rate;
	status.n_rates = 1;

	spin_unlock_bh(&ab->base_lock);

	ieee80211_tx_status_ext(ar->hw, &status);
}
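/* Unpack a hal_wbm_release_ring descriptor into hal_tx_status. Only TQM
 * releases carry the full status; firmware releases are handled separately
 * through the HTT completion path.
 */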
static inline void ath11k_dp_tx_status_parse(struct ath11k_base *ab,
					     struct hal_wbm_release_ring *desc,
					     struct hal_tx_status *ts)
{
	ts->buf_rel_source =
		FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE, desc->info0);
	if (unlikely(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
		     ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM))
		return;

	if (unlikely(ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW))
		return;

	ts->status = FIELD_GET(HAL_WBM_RELEASE_INFO0_TQM_RELEASE_REASON,
			       desc->info0);
	ts->ppdu_id = FIELD_GET(HAL_WBM_RELEASE_INFO1_TQM_STATUS_NUMBER,
				desc->info1);
	ts->try_cnt = FIELD_GET(HAL_WBM_RELEASE_INFO1_TRANSMIT_COUNT,
				desc->info1);
	ts->ack_rssi = FIELD_GET(HAL_WBM_RELEASE_INFO2_ACK_FRAME_RSSI,
				 desc->info2);
	if (desc->info2 & HAL_WBM_RELEASE_INFO2_FIRST_MSDU)
		ts->flags |= HAL_TX_STATUS_FLAGS_FIRST_MSDU;
	ts->peer_id = FIELD_GET(HAL_WBM_RELEASE_INFO3_PEER_ID, desc->info3);
	ts->tid = FIELD_GET(HAL_WBM_RELEASE_INFO3_TID, desc->info3);
	if (desc->rate_stats.info0 & HAL_TX_RATE_STATS_INFO0_VALID)
		ts->rate_stats = desc->rate_stats.info0;
	else
		ts->rate_stats = 0;
}
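/* Tx completion handler: drain the WBM completion ring into the local
 * tx_status FIFO while holding the srng lock, then parse and complete the
 * buffered descriptors without holding the hardware ring lock.
 */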
void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)
{
	struct ath11k *ar;
	struct ath11k_dp *dp = &ab->dp;
	int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
	struct hal_srng *status_ring = &ab->hal.srng_list[hal_ring_id];
	struct sk_buff *msdu;
	struct hal_tx_status ts = { 0 };
	struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
	u32 *desc;
	u32 msdu_id;
	u8 mac_id;

	spin_lock_bh(&status_ring->lock);

	ath11k_hal_srng_access_begin(ab, status_ring);

	while ((ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) !=
		tx_ring->tx_status_tail) &&
	       (desc = ath11k_hal_srng_dst_get_next_entry(ab, status_ring))) {
		memcpy(&tx_ring->tx_status[tx_ring->tx_status_head],
		       desc, sizeof(struct hal_wbm_release_ring));
		tx_ring->tx_status_head =
			ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head);
	}

	if (unlikely((ath11k_hal_srng_dst_peek(ab, status_ring) != NULL) &&
		     (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) ==
		      tx_ring->tx_status_tail))) {
		/* TODO: Process pending tx_status messages when kfifo_is_full() */
		ath11k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n");
	}

	ath11k_hal_srng_access_end(ab, status_ring);

	spin_unlock_bh(&status_ring->lock);

	while (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_tail) != tx_ring->tx_status_head) {
		struct hal_wbm_release_ring *tx_status;
		u32 desc_id;

		tx_ring->tx_status_tail =
			ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_tail);
		tx_status = &tx_ring->tx_status[tx_ring->tx_status_tail];
		ath11k_dp_tx_status_parse(ab, tx_status, &ts);

		desc_id = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
				    tx_status->buf_addr_info.info1);
		mac_id = FIELD_GET(DP_TX_DESC_ID_MAC_ID, desc_id);
		msdu_id = FIELD_GET(DP_TX_DESC_ID_MSDU_ID, desc_id);

		if (unlikely(ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)) {
			ath11k_dp_tx_process_htt_tx_complete(ab,
							     (void *)tx_status,
							     mac_id, msdu_id,
							     tx_ring);
			continue;
		}

		spin_lock(&tx_ring->tx_idr_lock);
		msdu = idr_remove(&tx_ring->txbuf_idr, msdu_id);
		if (unlikely(!msdu)) {
			ath11k_warn(ab, "tx completion for unknown msdu_id %d\n",
				    msdu_id);
			spin_unlock(&tx_ring->tx_idr_lock);
			continue;
		}

		spin_unlock(&tx_ring->tx_idr_lock);

		ar = ab->pdevs[mac_id].ar;

		if (atomic_dec_and_test(&ar->dp.num_tx_pending))
			wake_up(&ar->dp.tx_empty_waitq);

		ath11k_dp_tx_complete_msdu(ar, msdu, &ts);
	}
}
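/* Send a REO command for the given rx TID. When a callback is supplied, the
 * command is tracked on dp->reo_cmd_list so the eventual status event can be
 * matched back to it; for instance, rx TID teardown passes a handler that
 * releases the TID queue once the command completes.
 */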
int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid,
			      enum hal_reo_cmd_type type,
			      struct ath11k_hal_reo_cmd *cmd,
			      void (*cb)(struct ath11k_dp *, void *,
					 enum hal_reo_cmd_status))
{
	struct ath11k_dp *dp = &ab->dp;
	struct dp_reo_cmd *dp_cmd;
	struct hal_srng *cmd_ring;
	int cmd_num;

	if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
		return -ESHUTDOWN;

	cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
	cmd_num = ath11k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);

	/* cmd_num should start from 1, during failure return the error code */
	if (cmd_num < 0)
		return cmd_num;

	/* reo cmd ring descriptors has cmd_num starting from 1 */
	if (cmd_num == 0)
		return -EINVAL;

	if (!cb)
		return 0;

	/* Can this be optimized so that we keep the pending command list only
	 * for tid delete command to free up the resource on the command status
	 * indication?
	 */
	dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC);
	if (!dp_cmd)
		return -ENOMEM;

	memcpy(&dp_cmd->data, rx_tid, sizeof(struct dp_rx_tid));
	dp_cmd->cmd_num = cmd_num;
	dp_cmd->handler = cb;

	spin_lock_bh(&dp->reo_cmd_lock);
	list_add_tail(&dp_cmd->list, &dp->reo_cmd_list);
	spin_unlock_bh(&dp->reo_cmd_lock);

	return 0;
}
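/* Map a HAL ring id/type pair onto the HTT ring id and ring direction that
 * the firmware expects in srng setup and rx filter configuration messages.
 */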
static int
ath11k_dp_tx_get_ring_id_type(struct ath11k_base *ab,
			      int mac_id, u32 ring_id,
			      enum hal_ring_type ring_type,
			      enum htt_srng_ring_type *htt_ring_type,
			      enum htt_srng_ring_id *htt_ring_id)
{
	int lmac_ring_id_offset = 0;
	int ret = 0;

	switch (ring_type) {
	case HAL_RXDMA_BUF:
		lmac_ring_id_offset = mac_id * HAL_SRNG_RINGS_PER_LMAC;

		/* for QCA6390, host fills rx buffer to fw and fw fills to
		 * rxbuf ring for each rxdma
		 */
		if (!ab->hw_params.rx_mac_buf_ring) {
			if (!(ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF +
					  lmac_ring_id_offset) ||
			      ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_BUF +
					  lmac_ring_id_offset))) {
				ret = -EINVAL;
			}
			*htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
			*htt_ring_type = HTT_SW_TO_HW_RING;
		} else {
			if (ring_id == HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF) {
				*htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
				*htt_ring_type = HTT_SW_TO_SW_RING;
			} else {
				*htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
				*htt_ring_type = HTT_SW_TO_HW_RING;
			}
		}
		break;
	case HAL_RXDMA_DST:
		*htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
		*htt_ring_type = HTT_HW_TO_SW_RING;
		break;
	case HAL_RXDMA_MONITOR_BUF:
		*htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
		*htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case HAL_RXDMA_MONITOR_STATUS:
		*htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
		*htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case HAL_RXDMA_MONITOR_DST:
		*htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
		*htt_ring_type = HTT_HW_TO_SW_RING;
		break;
	case HAL_RXDMA_MONITOR_DESC:
		*htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
		*htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	default:
		ath11k_warn(ab, "Unsupported ring type in DP :%d\n", ring_type);
		ret = -EINVAL;
	}
	return ret;
}
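/* Describe a host SRNG to the firmware: sends an HTT sring setup message
 * carrying the ring base/head/tail addresses, entry size, MSI parameters and
 * interrupt thresholds read back from the HAL.
 */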
int ath11k_dp_tx_htt_srng_setup(struct ath11k_base *ab, u32 ring_id,
				int mac_id, enum hal_ring_type ring_type)
{
	struct htt_srng_setup_cmd *cmd;
	struct hal_srng *srng = &ab->hal.srng_list[ring_id];
	struct hal_srng_params params;
	struct sk_buff *skb;
	u32 ring_entry_sz;
	int len = sizeof(*cmd);
	dma_addr_t hp_addr, tp_addr;
	enum htt_srng_ring_type htt_ring_type;
	enum htt_srng_ring_id htt_ring_id;
	int ret;

	skb = ath11k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	memset(&params, 0, sizeof(params));
	ath11k_hal_srng_get_params(ab, srng, &params);

	hp_addr = ath11k_hal_srng_get_hp_addr(ab, srng);
	tp_addr = ath11k_hal_srng_get_tp_addr(ab, srng);

	ret = ath11k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
					    ring_type, &htt_ring_type,
					    &htt_ring_id);
	if (ret)
		goto err_free;

	skb_put(skb, len);
	cmd = (struct htt_srng_setup_cmd *)skb->data;
	cmd->info0 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE,
				HTT_H2T_MSG_TYPE_SRING_SETUP);
	if (htt_ring_type == HTT_SW_TO_HW_RING ||
	    htt_ring_type == HTT_HW_TO_SW_RING)
		cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID,
					 DP_SW2HW_MACID(mac_id));
	else
		cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID,
					 mac_id);
	cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE,
				 htt_ring_type);
	cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_ID, htt_ring_id);

	cmd->ring_base_addr_lo = params.ring_base_paddr &
				 HAL_ADDR_LSB_REG_MASK;

	cmd->ring_base_addr_hi = (u64)params.ring_base_paddr >>
				 HAL_ADDR_MSB_REG_SHIFT;

	ret = ath11k_hal_srng_get_entrysize(ab, ring_type);
	if (ret < 0)
		goto err_free;

	ring_entry_sz = ret;

	ring_entry_sz >>= 2;
	cmd->info1 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE,
				ring_entry_sz);
	cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE,
				 params.num_entries * ring_entry_sz);
	cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP,
				 !!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP));
	cmd->info1 |= FIELD_PREP(
			HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP,
			!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP));
	cmd->info1 |= FIELD_PREP(
			HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP,
			!!(params.flags & HAL_SRNG_FLAGS_RING_PTR_SWAP));
	if (htt_ring_type == HTT_SW_TO_HW_RING)
		cmd->info1 |= HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS;

	cmd->ring_head_off32_remote_addr_lo = hp_addr & HAL_ADDR_LSB_REG_MASK;
	cmd->ring_head_off32_remote_addr_hi = (u64)hp_addr >>
					      HAL_ADDR_MSB_REG_SHIFT;

	cmd->ring_tail_off32_remote_addr_lo = tp_addr & HAL_ADDR_LSB_REG_MASK;
	cmd->ring_tail_off32_remote_addr_hi = (u64)tp_addr >>
					      HAL_ADDR_MSB_REG_SHIFT;

	cmd->ring_msi_addr_lo = lower_32_bits(params.msi_addr);
	cmd->ring_msi_addr_hi = upper_32_bits(params.msi_addr);
	cmd->msi_data = params.msi_data;

	cmd->intr_info = FIELD_PREP(
			HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH,
			params.intr_batch_cntr_thres_entries * ring_entry_sz);
	cmd->intr_info |= FIELD_PREP(
			HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH,
			params.intr_timer_thres_us >> 3);

	if (params.flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
		cmd->info2 = FIELD_PREP(
				HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH,
				params.low_threshold);
	}

	ath11k_dbg(ab, ATH11K_DBG_DP_TX,
		   "htt srng setup msi_addr_lo 0x%x msi_addr_hi 0x%x msi_data 0x%x ring_id %d ring_type %d intr_info 0x%x flags 0x%x\n",
		   cmd->ring_msi_addr_lo, cmd->ring_msi_addr_hi,
		   cmd->msi_data, ring_id, ring_type, cmd->intr_info, cmd->info2);

	ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
	if (ret)
		goto err_free;

	return 0;

err_free:
	dev_kfree_skb_any(skb);

	return ret;
}
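/* Negotiate the HTT interface version with the firmware and reject targets
 * whose major version differs from what the driver supports.
 */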
#define HTT_TARGET_VERSION_TIMEOUT_HZ (3 * HZ)

int ath11k_dp_tx_htt_h2t_ver_req_msg(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct sk_buff *skb;
	struct htt_ver_req_cmd *cmd;
	int len = sizeof(*cmd);
	int ret;

	init_completion(&dp->htt_tgt_version_received);

	skb = ath11k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_ver_req_cmd *)skb->data;
	cmd->ver_reg_info = FIELD_PREP(HTT_VER_REQ_INFO_MSG_ID,
				       HTT_H2T_MSG_TYPE_VERSION_REQ);

	ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	ret = wait_for_completion_timeout(&dp->htt_tgt_version_received,
					  HTT_TARGET_VERSION_TIMEOUT_HZ);
	if (ret == 0) {
		ath11k_warn(ab, "htt target version request timed out\n");
		return -ETIMEDOUT;
	}

	if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
		ath11k_err(ab, "unsupported htt major version %d supported version is %d\n",
			   dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR);
		return -ENOTSUPP;
	}

	return 0;
}
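/* Enable PPDU stats reporting: one HTT PPDU stats config message is sent per
 * rxdma-capable pdev, carrying the requested TLV bitmask and a pdev mask
 * derived from pdev_idx.
 */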
int ath11k_dp_tx_htt_h2t_ppdu_stats_req(struct ath11k *ar, u32 mask)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_dp *dp = &ab->dp;
	struct sk_buff *skb;
	struct htt_ppdu_stats_cfg_cmd *cmd;
	int len = sizeof(*cmd);
	u8 pdev_mask;
	int ret;
	int i;

	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
		skb = ath11k_htc_alloc_skb(ab, len);
		if (!skb)
			return -ENOMEM;

		skb_put(skb, len);
		cmd = (struct htt_ppdu_stats_cfg_cmd *)skb->data;
		cmd->msg = FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE,
				      HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);

		pdev_mask = 1 << (ar->pdev_idx + i);
		cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_mask);
		cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK, mask);

		ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
		if (ret) {
			dev_kfree_skb_any(skb);
			return ret;
		}
	}

	return 0;
}
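/* Program an rx ring selection filter, telling the firmware which packet
 * types and TLVs to deliver on the given ring; used mainly by the monitor
 * mode configuration below.
 */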
int ath11k_dp_tx_htt_rx_filter_setup(struct ath11k_base *ab, u32 ring_id,
				     int mac_id, enum hal_ring_type ring_type,
				     int rx_buf_size,
				     struct htt_rx_ring_tlv_filter *tlv_filter)
{
	struct htt_rx_ring_selection_cfg_cmd *cmd;
	struct hal_srng *srng = &ab->hal.srng_list[ring_id];
	struct hal_srng_params params;
	struct sk_buff *skb;
	int len = sizeof(*cmd);
	enum htt_srng_ring_type htt_ring_type;
	enum htt_srng_ring_id htt_ring_id;
	int ret;

	skb = ath11k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	memset(&params, 0, sizeof(params));
	ath11k_hal_srng_get_params(ab, srng, &params);

	ret = ath11k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
					    ring_type, &htt_ring_type,
					    &htt_ring_id);
	if (ret)
		goto err_free;

	skb_put(skb, len);
	cmd = (struct htt_rx_ring_selection_cfg_cmd *)skb->data;
	cmd->info0 = FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE,
				HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
	if (htt_ring_type == HTT_SW_TO_HW_RING ||
	    htt_ring_type == HTT_HW_TO_SW_RING)
		cmd->info0 |=
			FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID,
				   DP_SW2HW_MACID(mac_id));
	else
		cmd->info0 |=
			FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID,
				   mac_id);
	cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID,
				 htt_ring_id);
	cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS,
				 !!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP));
	cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS,
				 !!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP));

	cmd->info1 = FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE,
				rx_buf_size);
	cmd->pkt_type_en_flags0 = tlv_filter->pkt_filter_flags0;
	cmd->pkt_type_en_flags1 = tlv_filter->pkt_filter_flags1;
	cmd->pkt_type_en_flags2 = tlv_filter->pkt_filter_flags2;
	cmd->pkt_type_en_flags3 = tlv_filter->pkt_filter_flags3;
	cmd->rx_filter_tlv = tlv_filter->rx_filter;

	ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
	if (ret)
		goto err_free;

	return 0;

err_free:
	dev_kfree_skb_any(skb);

	return ret;
}
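/* Request extended target statistics of the given type. The 64-bit cookie is
 * echoed back in the stats event so responses can be matched to requests.
 */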
int
ath11k_dp_tx_htt_h2t_ext_stats_req(struct ath11k *ar, u8 type,
				   struct htt_ext_stats_cfg_params *cfg_params,
				   u64 cookie)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_dp *dp = &ab->dp;
	struct sk_buff *skb;
	struct htt_ext_stats_cfg_cmd *cmd;
	u32 pdev_id;
	int len = sizeof(*cmd);
	int ret;

	skb = ath11k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_ext_stats_cfg_cmd *)skb->data;
	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG;

	if (ab->hw_params.single_pdev_only)
		pdev_id = ath11k_mac_get_target_pdev_id(ar);
	else
		pdev_id = ar->pdev->pdev_id;

	cmd->hdr.pdev_mask = 1 << pdev_id;

	cmd->hdr.stats_type = type;
	cmd->cfg_param0 = cfg_params->cfg0;
	cmd->cfg_param1 = cfg_params->cfg1;
	cmd->cfg_param2 = cfg_params->cfg2;
	cmd->cfg_param3 = cfg_params->cfg3;
	cmd->cookie_lsb = lower_32_bits(cookie);
	cmd->cookie_msb = upper_32_bits(cookie);

	ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
	if (ret) {
		ath11k_warn(ab, "failed to send htt type stats request: %d",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
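/* Configure (or reset) the monitor mode rx filters. Depending on hw_params
 * this covers the full-monitor HTT setup, the monitor buffer or per-pdev mac
 * buffer rings, and the monitor status rings; on targets without rxdma1 the
 * reap timer is kicked to drain the status ring.
 */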
int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	struct htt_rx_ring_tlv_filter tlv_filter = {0};
	int ret = 0, ring_id = 0, i;

	if (ab->hw_params.full_monitor_mode) {
		ret = ath11k_dp_tx_htt_rx_full_mon_setup(ab,
							 dp->mac_id, !reset);
		if (ret < 0) {
			ath11k_err(ab, "failed to setup full monitor %d\n", ret);
			return ret;
		}
	}

	ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;

	if (!reset) {
		tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING;
		tlv_filter.pkt_filter_flags0 =
					HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 |
					HTT_RX_MON_MO_MGMT_FILTER_FLAGS0;
		tlv_filter.pkt_filter_flags1 =
					HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 |
					HTT_RX_MON_MO_MGMT_FILTER_FLAGS1;
		tlv_filter.pkt_filter_flags2 =
					HTT_RX_MON_FP_CTRL_FILTER_FLASG2 |
					HTT_RX_MON_MO_CTRL_FILTER_FLASG2;
		tlv_filter.pkt_filter_flags3 =
					HTT_RX_MON_FP_CTRL_FILTER_FLASG3 |
					HTT_RX_MON_MO_CTRL_FILTER_FLASG3 |
					HTT_RX_MON_FP_DATA_FILTER_FLASG3 |
					HTT_RX_MON_MO_DATA_FILTER_FLASG3;
	}

	if (ab->hw_params.rxdma1_enable) {
		ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, dp->mac_id,
						       HAL_RXDMA_MONITOR_BUF,
						       DP_RXDMA_REFILL_RING_SIZE,
						       &tlv_filter);
	} else if (!reset) {
		/* set in monitor mode only */
		for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
			ring_id = dp->rx_mac_buf_ring[i].ring_id;
			ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
							       dp->mac_id + i,
							       HAL_RXDMA_BUF,
							       1024,
							       &tlv_filter);
		}
	}

	if (ret)
		return ret;

	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
		ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
		if (!reset) {
			tlv_filter.rx_filter =
					HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING;
		} else {
			tlv_filter = ath11k_mac_mon_status_filter_default;

			if (ath11k_debugfs_is_extd_rx_stats_enabled(ar))
				tlv_filter.rx_filter = ath11k_debugfs_rx_filter(ar);
		}

		ret = ath11k_dp_tx_htt_rx_filter_setup(ab, ring_id,
						       dp->mac_id + i,
						       HAL_RXDMA_MONITOR_STATUS,
						       DP_RXDMA_REFILL_RING_SIZE,
						       &tlv_filter);
	}

	if (!ar->ab->hw_params.rxdma1_enable)
		mod_timer(&ar->ab->mon_reap_timer, jiffies +
			  msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));

	return ret;
}
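/* Enable or disable full monitor mode, selecting the SW release ring and,
 * when enabling, delivery of PPDUs with both zero and non-zero MPDU counts.
 */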
int ath11k_dp_tx_htt_rx_full_mon_setup(struct ath11k_base *ab, int mac_id,
				       bool config)
{
	struct htt_rx_full_monitor_mode_cfg_cmd *cmd;
	struct sk_buff *skb;
	int ret, len = sizeof(*cmd);

	skb = ath11k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_rx_full_monitor_mode_cfg_cmd *)skb->data;
	memset(cmd, 0, sizeof(*cmd));
	cmd->info0 = FIELD_PREP(HTT_RX_FULL_MON_MODE_CFG_CMD_INFO0_MSG_TYPE,
				HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE);

	cmd->info0 |= FIELD_PREP(HTT_RX_FULL_MON_MODE_CFG_CMD_INFO0_PDEV_ID, mac_id);

	cmd->cfg = HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_ENABLE |
		   FIELD_PREP(HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_RELEASE_RING,
			      HTT_RX_MON_RING_SW);
	if (config) {
		cmd->cfg |= HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_ZERO_MPDUS_END |
			    HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_NON_ZERO_MPDUS_END;
	}

	ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
	if (ret)
		goto err_free;

	return 0;

err_free:
	dev_kfree_skb_any(skb);

	return ret;
}