// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 */

#include "core.h"
#include "dp_tx.h"
#include "debug.h"
#include "debugfs_sta.h"
#include "hw.h"
#include "peer.h"
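
/* Choose the TCL encapsulation type for an outgoing frame: raw when the
 * device runs in raw mode, Ethernet when mac80211 offloads the 802.11
 * encapsulation to hardware, native wifi otherwise.
 */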
static enum hal_tcl_encap_type
ath11k_dp_tx_get_encap_type(struct ath11k_vif *arvif, struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath11k_base *ab = arvif->ar->ab;

	if (test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags))
		return HAL_TCL_ENCAP_TYPE_RAW;

	if (tx_info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
		return HAL_TCL_ENCAP_TYPE_ETHERNET;

	return HAL_TCL_ENCAP_TYPE_NATIVE_WIFI;
}
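
/* Convert an 802.11 QoS data frame to the native wifi format expected by
 * TCL: shift the header over the QoS control field and clear the QoS-data
 * subtype bit.
 */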
static void ath11k_dp_tx_encap_nwifi(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	u8 *qos_ctl;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return;

	qos_ctl = ieee80211_get_qos_ctl(hdr);
	memmove(skb->data + IEEE80211_QOS_CTL_LEN,
		skb->data, (void *)qos_ctl - (void *)skb->data);
	skb_pull(skb, IEEE80211_QOS_CTL_LEN);

	hdr = (void *)skb->data;
	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
}
static u8 ath11k_dp_tx_get_tid(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct ath11k_skb_cb *cb = ATH11K_SKB_CB(skb);

	if (cb->flags & ATH11K_SKB_HW_80211_ENCAP)
		return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	else if (!ieee80211_is_data_qos(hdr->frame_control))
		return HAL_DESC_REO_NON_QOS_TID;
	else
		return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
}
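
/* Map a cfg80211 cipher suite to the HAL encrypt type carried in the TCL
 * descriptor; unknown ciphers fall back to open.
 */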
enum hal_encrypt_type ath11k_dp_tx_get_encrypt_type(u32 cipher)
{
	switch (cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		return HAL_ENCRYPT_TYPE_WEP_40;
	case WLAN_CIPHER_SUITE_WEP104:
		return HAL_ENCRYPT_TYPE_WEP_104;
	case WLAN_CIPHER_SUITE_TKIP:
		return HAL_ENCRYPT_TYPE_TKIP_MIC;
	case WLAN_CIPHER_SUITE_CCMP:
		return HAL_ENCRYPT_TYPE_CCMP_128;
	case WLAN_CIPHER_SUITE_CCMP_256:
		return HAL_ENCRYPT_TYPE_CCMP_256;
	case WLAN_CIPHER_SUITE_GCMP:
		return HAL_ENCRYPT_TYPE_GCMP_128;
	case WLAN_CIPHER_SUITE_GCMP_256:
		return HAL_ENCRYPT_TYPE_AES_GCMP_256;
	default:
		return HAL_ENCRYPT_TYPE_OPEN;
	}
}
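
/* Enqueue an MSDU to hardware through one of the TCL rings. The ring is
 * chosen from the current CPU; if it is full the remaining rings are tried
 * before the packet is dropped. The skb is stashed in the per-ring IDR so
 * the completion handler can look it up again from the descriptor cookie.
 */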
int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
		 struct sk_buff *skb)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_dp *dp = &ab->dp;
	struct hal_tx_info ti = {0};
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
	struct hal_srng *tcl_ring;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct dp_tx_ring *tx_ring;
	void *hal_tcl_desc;
	u8 pool_id;
	u8 hal_ring_id;
	int ret;
	u8 ring_selector = 0, ring_map = 0;
	bool tcl_ring_retry;

	if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
		return -ESHUTDOWN;

	if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
	    !ieee80211_is_data(hdr->frame_control))
		return -ENOTSUPP;

	pool_id = skb_get_queue_mapping(skb) & (ATH11K_HW_MAX_QUEUES - 1);

	/* Let the default ring selection be based on current processor
	 * number, where one of the 3 tcl rings are selected based on
	 * the smp_processor_id(). In case that ring
	 * is full/busy, we resort to other available rings.
	 * If all rings are full, we drop the packet.
	 * //TODO Add throttling logic when all rings are full
	 */
	ring_selector = smp_processor_id();

tcl_ring_sel:
	tcl_ring_retry = false;
	/* For some chip, it can only use tcl0 to tx */
	if (ar->ab->hw_params.tcl_0_only)
		ring_selector = 0;

	ti.ring_id = ring_selector % DP_TCL_NUM_RING_MAX;

	ring_map |= BIT(ti.ring_id);

	tx_ring = &dp->tx_ring[ti.ring_id];

	spin_lock_bh(&tx_ring->tx_idr_lock);
	ret = idr_alloc(&tx_ring->txbuf_idr, skb, 0,
			DP_TX_IDR_SIZE - 1, GFP_ATOMIC);
	spin_unlock_bh(&tx_ring->tx_idr_lock);

	if (ret < 0) {
		if (ring_map == (BIT(DP_TCL_NUM_RING_MAX) - 1)) {
			atomic_inc(&ab->soc_stats.tx_err.misc_fail);
			return -ENOSPC;
		}

		/* Check if the next ring is available */
		ring_selector++;
		goto tcl_ring_sel;
	}

	ti.desc_id = FIELD_PREP(DP_TX_DESC_ID_MAC_ID, ar->pdev_idx) |
		     FIELD_PREP(DP_TX_DESC_ID_MSDU_ID, ret) |
		     FIELD_PREP(DP_TX_DESC_ID_POOL_ID, pool_id);
	ti.encap_type = ath11k_dp_tx_get_encap_type(arvif, skb);
	ti.meta_data_flags = arvif->tcl_metadata;

	if (ti.encap_type == HAL_TCL_ENCAP_TYPE_RAW) {
		if (skb_cb->flags & ATH11K_SKB_CIPHER_SET) {
			ti.encrypt_type =
				ath11k_dp_tx_get_encrypt_type(skb_cb->cipher);

			if (ieee80211_has_protected(hdr->frame_control))
				skb_put(skb, IEEE80211_CCMP_MIC_LEN);
		} else {
			ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
		}
	}

	ti.addr_search_flags = arvif->hal_addr_search_flags;
	ti.search_type = arvif->search_type;
	ti.type = HAL_TCL_DESC_TYPE_BUFFER;
	ti.pkt_offset = 0;
	ti.lmac_id = ar->lmac_id;
	ti.bss_ast_hash = arvif->ast_hash;
	ti.dscp_tid_tbl_idx = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    ti.encap_type != HAL_TCL_ENCAP_TYPE_RAW) {
		ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_IP4_CKSUM_EN, 1) |
			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP4_CKSUM_EN, 1) |
			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP6_CKSUM_EN, 1) |
			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TCP4_CKSUM_EN, 1) |
			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TCP6_CKSUM_EN, 1);
	}

	if (ieee80211_vif_is_mesh(arvif->vif))
		ti.flags1 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_MESH_ENABLE, 1);

	ti.flags1 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_TID_OVERWRITE, 1);

	ti.tid = ath11k_dp_tx_get_tid(skb);

	switch (ti.encap_type) {
	case HAL_TCL_ENCAP_TYPE_NATIVE_WIFI:
		ath11k_dp_tx_encap_nwifi(skb);
		break;
	case HAL_TCL_ENCAP_TYPE_RAW:
		if (!test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags)) {
			ret = -EINVAL;
			goto fail_remove_idr;
		}
		break;
	case HAL_TCL_ENCAP_TYPE_ETHERNET:
		/* no need to encap */
		break;
	case HAL_TCL_ENCAP_TYPE_802_3:
	default:
		/* TODO: Take care of other encap modes as well */
		ret = -EINVAL;
		atomic_inc(&ab->soc_stats.tx_err.misc_fail);
		goto fail_remove_idr;
	}

	ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(ab->dev, ti.paddr)) {
		atomic_inc(&ab->soc_stats.tx_err.misc_fail);
		ath11k_warn(ab, "failed to DMA map data Tx buffer\n");
		ret = -ENOMEM;
		goto fail_remove_idr;
	}

	ti.data_len = skb->len;
	skb_cb->paddr = ti.paddr;
	skb_cb->vif = arvif->vif;
	skb_cb->ar = ar;

	hal_ring_id = tx_ring->tcl_data_ring.ring_id;
	tcl_ring = &ab->hal.srng_list[hal_ring_id];

	spin_lock_bh(&tcl_ring->lock);

	ath11k_hal_srng_access_begin(ab, tcl_ring);

	hal_tcl_desc = (void *)ath11k_hal_srng_src_get_next_entry(ab, tcl_ring);
	if (!hal_tcl_desc) {
		/* NOTE: It is highly unlikely we'll be running out of tcl_ring
		 * desc because the desc is directly enqueued onto hw queue.
		 */
		ath11k_hal_srng_access_end(ab, tcl_ring);
		ab->soc_stats.tx_err.desc_na[ti.ring_id]++;
		spin_unlock_bh(&tcl_ring->lock);
		ret = -ENOMEM;

		/* Checking for available tcl descriptors in another ring in
		 * case of failure due to full tcl ring now, is better than
		 * checking this ring earlier for each pkt tx.
		 * Restart ring selection if some rings are not checked yet.
		 */
		if (ring_map != (BIT(DP_TCL_NUM_RING_MAX) - 1) &&
		    !ar->ab->hw_params.tcl_0_only) {
			tcl_ring_retry = true;
			ring_selector++;
		}

		goto fail_unmap_dma;
	}

	ath11k_hal_tx_cmd_desc_setup(ab, hal_tcl_desc +
					 sizeof(struct hal_tlv_hdr), &ti);

	ath11k_hal_srng_access_end(ab, tcl_ring);

	ath11k_dp_shadow_start_timer(ab, tcl_ring, &dp->tx_ring_timer[ti.ring_id]);

	spin_unlock_bh(&tcl_ring->lock);

	ath11k_dbg_dump(ab, ATH11K_DBG_DP_TX, NULL, "dp tx msdu: ",
			skb->data, skb->len);

	atomic_inc(&ar->dp.num_tx_pending);

	return 0;

fail_unmap_dma:
	dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);

fail_remove_idr:
	spin_lock_bh(&tx_ring->tx_idr_lock);
	idr_remove(&tx_ring->txbuf_idr,
		   FIELD_GET(DP_TX_DESC_ID_MSDU_ID, ti.desc_id));
	spin_unlock_bh(&tx_ring->tx_idr_lock);

	if (tcl_ring_retry)
		goto tcl_ring_sel;

	return ret;
}
static void ath11k_dp_tx_free_txbuf(struct ath11k_base *ab, u8 mac_id,
				    int msdu_id,
				    struct dp_tx_ring *tx_ring)
{
	struct ath11k *ar;
	struct sk_buff *msdu;
	struct ath11k_skb_cb *skb_cb;

	spin_lock_bh(&tx_ring->tx_idr_lock);
	msdu = idr_find(&tx_ring->txbuf_idr, msdu_id);
	if (!msdu) {
		ath11k_warn(ab, "tx completion for unknown msdu_id %d\n",
			    msdu_id);
		spin_unlock_bh(&tx_ring->tx_idr_lock);
		return;
	}

	skb_cb = ATH11K_SKB_CB(msdu);

	idr_remove(&tx_ring->txbuf_idr, msdu_id);
	spin_unlock_bh(&tx_ring->tx_idr_lock);

	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
	dev_kfree_skb_any(msdu);

	ar = ab->pdevs[mac_id].ar;
	if (atomic_dec_and_test(&ar->dp.num_tx_pending))
		wake_up(&ar->dp.tx_empty_waitq);
}
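
/* Complete an MSDU whose status was delivered by firmware over HTT: unmap
 * the buffer, translate the HTT status into ACK flags and hand the skb
 * back to mac80211.
 */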
static void
ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab,
				 struct dp_tx_ring *tx_ring,
				 struct ath11k_dp_htt_wbm_tx_status *ts)
{
	struct sk_buff *msdu;
	struct ieee80211_tx_info *info;
	struct ath11k_skb_cb *skb_cb;
	struct ath11k *ar;

	spin_lock_bh(&tx_ring->tx_idr_lock);
	msdu = idr_find(&tx_ring->txbuf_idr, ts->msdu_id);
	if (!msdu) {
		ath11k_warn(ab, "htt tx completion for unknown msdu_id %d\n",
			    ts->msdu_id);
		spin_unlock_bh(&tx_ring->tx_idr_lock);
		return;
	}

	skb_cb = ATH11K_SKB_CB(msdu);
	info = IEEE80211_SKB_CB(msdu);

	ar = skb_cb->ar;

	idr_remove(&tx_ring->txbuf_idr, ts->msdu_id);
	spin_unlock_bh(&tx_ring->tx_idr_lock);

	if (atomic_dec_and_test(&ar->dp.num_tx_pending))
		wake_up(&ar->dp.tx_empty_waitq);

	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);

	memset(&info->status, 0, sizeof(info->status));

	if (ts->acked) {
		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
			info->flags |= IEEE80211_TX_STAT_ACK;
			info->status.ack_signal = ATH11K_DEFAULT_NOISE_FLOOR +
						  ts->ack_rssi;
			info->status.is_valid_ack_signal = true;
		} else {
			info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
		}
	}

	ieee80211_tx_status(ar->hw, msdu);
}
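
/* Dispatch a WBM release descriptor that came from firmware: OK/DROP/TTL
 * statuses are reported to mac80211, reinjected and inspected frames are
 * freed silently.
 */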
static void
ath11k_dp_tx_process_htt_tx_complete(struct ath11k_base *ab,
				     void *desc, u8 mac_id,
				     u32 msdu_id, struct dp_tx_ring *tx_ring)
{
	struct htt_tx_wbm_completion *status_desc;
	struct ath11k_dp_htt_wbm_tx_status ts = {0};
	enum hal_wbm_htt_tx_comp_status wbm_status;

	status_desc = desc + HTT_TX_WBM_COMP_STATUS_OFFSET;

	wbm_status = FIELD_GET(HTT_TX_WBM_COMP_INFO0_STATUS,
			       status_desc->info0);

	switch (wbm_status) {
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK:
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
		ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK);
		ts.msdu_id = msdu_id;
		ts.ack_rssi = FIELD_GET(HTT_TX_WBM_COMP_INFO1_ACK_RSSI,
					status_desc->info1);
		ath11k_dp_tx_htt_tx_complete_buf(ab, tx_ring, &ts);
		break;
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
		ath11k_dp_tx_free_txbuf(ab, mac_id, msdu_id, tx_ring);
		break;
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY:
		/* This event is to be handled only when the driver decides to
		 * use WDS offload functionality.
		 */
		break;
	default:
		ath11k_warn(ab, "Unknown htt tx status %d\n", wbm_status);
		break;
	}
}
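
/* Accumulate per-peer retry and failure counters from a tx status; the
 * cached values are consumed by the extended tx stats reporting.
 */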
static void ath11k_dp_tx_cache_peer_stats(struct ath11k *ar,
					  struct sk_buff *msdu,
					  struct hal_tx_status *ts)
{
	struct ath11k_per_peer_tx_stats *peer_stats = &ar->cached_stats;

	if (ts->try_cnt > 1) {
		peer_stats->retry_pkts += ts->try_cnt - 1;
		peer_stats->retry_bytes += (ts->try_cnt - 1) * msdu->len;

		if (ts->status != HAL_WBM_TQM_REL_REASON_FRAME_ACKED) {
			peer_stats->failed_pkts += 1;
			peer_stats->failed_bytes += msdu->len;
		}
	}
}
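
/* Report a TQM-released MSDU to mac80211, translating the WBM release
 * reason into ACK/NOACK status and feeding the extended tx statistics
 * when enabled.
 */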
static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
				       struct sk_buff *msdu,
				       struct hal_tx_status *ts)
{
	struct ath11k_base *ab = ar->ab;
	struct ieee80211_tx_info *info;
	struct ath11k_skb_cb *skb_cb;

	if (WARN_ON_ONCE(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)) {
		/* Must not happen */
		return;
	}

	skb_cb = ATH11K_SKB_CB(msdu);

	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);

	rcu_read_lock();

	if (!rcu_dereference(ab->pdevs_active[ar->pdev_idx])) {
		dev_kfree_skb_any(msdu);
		goto exit;
	}

	if (!skb_cb->vif) {
		dev_kfree_skb_any(msdu);
		goto exit;
	}

	info = IEEE80211_SKB_CB(msdu);
	memset(&info->status, 0, sizeof(info->status));

	/* skip tx rate update from ieee80211_status*/
	info->status.rates[0].idx = -1;

	if (ts->status == HAL_WBM_TQM_REL_REASON_FRAME_ACKED &&
	    !(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		info->flags |= IEEE80211_TX_STAT_ACK;
		info->status.ack_signal = ATH11K_DEFAULT_NOISE_FLOOR +
					  ts->ack_rssi;
		info->status.is_valid_ack_signal = true;
	}

	if (ts->status == HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX &&
	    (info->flags & IEEE80211_TX_CTL_NO_ACK))
		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;

	if (ath11k_debugfs_is_extd_tx_stats_enabled(ar)) {
		if (ts->flags & HAL_TX_STATUS_FLAGS_FIRST_MSDU) {
			if (ar->last_ppdu_id == 0) {
				ar->last_ppdu_id = ts->ppdu_id;
			} else if (ar->last_ppdu_id == ts->ppdu_id ||
				   ar->cached_ppdu_id == ar->last_ppdu_id) {
				ar->cached_ppdu_id = ar->last_ppdu_id;
				ar->cached_stats.is_ampdu = true;
				ath11k_debugfs_sta_update_txcompl(ar, msdu, ts);
				memset(&ar->cached_stats, 0,
				       sizeof(struct ath11k_per_peer_tx_stats));
			} else {
				ar->cached_stats.is_ampdu = false;
				ath11k_debugfs_sta_update_txcompl(ar, msdu, ts);
				memset(&ar->cached_stats, 0,
				       sizeof(struct ath11k_per_peer_tx_stats));
			}
			ar->last_ppdu_id = ts->ppdu_id;
		}

		ath11k_dp_tx_cache_peer_stats(ar, msdu, ts);
	}

	/* NOTE: Tx rate status reporting. Tx completion status does not have
	 * necessary information (for example nss) to build the tx rate.
	 * Might end up reporting it out-of-band from HTT stats.
	 */

	ieee80211_tx_status(ar->hw, msdu);

exit:
	rcu_read_unlock();
}
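
/* Unpack a hal_wbm_release_ring descriptor into hal_tx_status; only
 * TQM-released descriptors carry the full set of status fields.
 */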
static inline void ath11k_dp_tx_status_parse(struct ath11k_base *ab,
					     struct hal_wbm_release_ring *desc,
					     struct hal_tx_status *ts)
{
	ts->buf_rel_source =
		FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE, desc->info0);
	if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
	    ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)
		return;

	if (ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)
		return;

	ts->status = FIELD_GET(HAL_WBM_RELEASE_INFO0_TQM_RELEASE_REASON,
			       desc->info0);
	ts->ppdu_id = FIELD_GET(HAL_WBM_RELEASE_INFO1_TQM_STATUS_NUMBER,
				desc->info1);
	ts->try_cnt = FIELD_GET(HAL_WBM_RELEASE_INFO1_TRANSMIT_COUNT,
				desc->info1);
	ts->ack_rssi = FIELD_GET(HAL_WBM_RELEASE_INFO2_ACK_FRAME_RSSI,
				 desc->info2);
	if (desc->info2 & HAL_WBM_RELEASE_INFO2_FIRST_MSDU)
		ts->flags |= HAL_TX_STATUS_FLAGS_FIRST_MSDU;
	ts->peer_id = FIELD_GET(HAL_WBM_RELEASE_INFO3_PEER_ID, desc->info3);
	ts->tid = FIELD_GET(HAL_WBM_RELEASE_INFO3_TID, desc->info3);
	if (desc->rate_stats.info0 & HAL_TX_RATE_STATS_INFO0_VALID)
		ts->rate_stats = desc->rate_stats.info0;
	else
		ts->rate_stats = 0;
}
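
/* Tx completion handler: buffer the WBM status ring entries locally under
 * the srng lock, then process the buffered descriptors outside of it. The
 * sw cookie in each descriptor encodes the mac id and msdu id.
 */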
void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)
{
	struct ath11k *ar;
	struct ath11k_dp *dp = &ab->dp;
	int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
	struct hal_srng *status_ring = &ab->hal.srng_list[hal_ring_id];
	struct sk_buff *msdu;
	struct hal_tx_status ts = { 0 };
	struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
	u32 *desc;
	u32 msdu_id;
	u8 mac_id;

	spin_lock_bh(&status_ring->lock);

	ath11k_hal_srng_access_begin(ab, status_ring);

	while ((ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) !=
		tx_ring->tx_status_tail) &&
	       (desc = ath11k_hal_srng_dst_get_next_entry(ab, status_ring))) {
		memcpy(&tx_ring->tx_status[tx_ring->tx_status_head],
		       desc, sizeof(struct hal_wbm_release_ring));
		tx_ring->tx_status_head =
			ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head);
	}

	if ((ath11k_hal_srng_dst_peek(ab, status_ring) != NULL) &&
	    (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) == tx_ring->tx_status_tail)) {
		/* TODO: Process pending tx_status messages when kfifo_is_full() */
		ath11k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n");
	}

	ath11k_hal_srng_access_end(ab, status_ring);

	spin_unlock_bh(&status_ring->lock);

	while (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_tail) != tx_ring->tx_status_head) {
		struct hal_wbm_release_ring *tx_status;
		u32 desc_id;

		tx_ring->tx_status_tail =
			ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_tail);
		tx_status = &tx_ring->tx_status[tx_ring->tx_status_tail];
		ath11k_dp_tx_status_parse(ab, tx_status, &ts);

		desc_id = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
				    tx_status->buf_addr_info.info1);
		mac_id = FIELD_GET(DP_TX_DESC_ID_MAC_ID, desc_id);
		msdu_id = FIELD_GET(DP_TX_DESC_ID_MSDU_ID, desc_id);

		if (ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW) {
			ath11k_dp_tx_process_htt_tx_complete(ab,
							     (void *)tx_status,
							     mac_id, msdu_id,
							     tx_ring);
			continue;
		}

		spin_lock_bh(&tx_ring->tx_idr_lock);
		msdu = idr_find(&tx_ring->txbuf_idr, msdu_id);
		if (!msdu) {
			ath11k_warn(ab, "tx completion for unknown msdu_id %d\n",
				    msdu_id);
			spin_unlock_bh(&tx_ring->tx_idr_lock);
			continue;
		}

		idr_remove(&tx_ring->txbuf_idr, msdu_id);
		spin_unlock_bh(&tx_ring->tx_idr_lock);

		ar = ab->pdevs[mac_id].ar;

		if (atomic_dec_and_test(&ar->dp.num_tx_pending))
			wake_up(&ar->dp.tx_empty_waitq);

		ath11k_dp_tx_complete_msdu(ar, msdu, &ts);
	}
}
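
/* Issue a command on the REO command ring. When a callback is supplied,
 * the command is queued on dp->reo_cmd_list so the completion status can
 * be matched by cmd_num later.
 */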
int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid,
			      enum hal_reo_cmd_type type,
			      struct ath11k_hal_reo_cmd *cmd,
			      void (*cb)(struct ath11k_dp *, void *,
					 enum hal_reo_cmd_status))
{
	struct ath11k_dp *dp = &ab->dp;
	struct dp_reo_cmd *dp_cmd;
	struct hal_srng *cmd_ring;
	int cmd_num;

	cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
	cmd_num = ath11k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);

	/* cmd_num should start from 1, during failure return the error code */
	if (cmd_num < 0)
		return cmd_num;

	/* reo cmd ring descriptors have cmd_num starting from 1 */
	if (cmd_num == 0)
		return -EINVAL;

	if (!cb)
		return 0;

	/* Can this be optimized so that we keep the pending command list only
	 * for tid delete command to free up the resource on the command status
	 * indication?
	 */
	dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC);
	if (!dp_cmd)
		return -ENOMEM;

	memcpy(&dp_cmd->data, rx_tid, sizeof(struct dp_rx_tid));
	dp_cmd->cmd_num = cmd_num;
	dp_cmd->handler = cb;

	spin_lock_bh(&dp->reo_cmd_lock);
	list_add_tail(&dp_cmd->list, &dp->reo_cmd_list);
	spin_unlock_bh(&dp->reo_cmd_lock);

	return 0;
}
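
/* Translate a HAL ring id/type pair into the HTT ring id/type that
 * firmware expects in the SRNG setup message.
 */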
static int
ath11k_dp_tx_get_ring_id_type(struct ath11k_base *ab,
			      int mac_id, u32 ring_id,
			      enum hal_ring_type ring_type,
			      enum htt_srng_ring_type *htt_ring_type,
			      enum htt_srng_ring_id *htt_ring_id)
{
	int lmac_ring_id_offset = 0;
	int ret = 0;

	switch (ring_type) {
	case HAL_RXDMA_BUF:
		lmac_ring_id_offset = mac_id * HAL_SRNG_RINGS_PER_LMAC;

		/* for QCA6390, host fills rx buffer to fw and fw fills to
		 * rxbuf ring for each rxdma
		 */
		if (!ab->hw_params.rx_mac_buf_ring) {
			if (!(ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF +
					  lmac_ring_id_offset) ||
			      ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_BUF +
					  lmac_ring_id_offset))) {
				ret = -EINVAL;
			}
			*htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
			*htt_ring_type = HTT_SW_TO_HW_RING;
		} else {
			if (ring_id == HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF) {
				*htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
				*htt_ring_type = HTT_SW_TO_SW_RING;
			} else {
				*htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
				*htt_ring_type = HTT_SW_TO_HW_RING;
			}
		}
		break;
	case HAL_RXDMA_DST:
		*htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
		*htt_ring_type = HTT_HW_TO_SW_RING;
		break;
	case HAL_RXDMA_MONITOR_BUF:
		*htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
		*htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case HAL_RXDMA_MONITOR_STATUS:
		*htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
		*htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case HAL_RXDMA_MONITOR_DST:
		*htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
		*htt_ring_type = HTT_HW_TO_SW_RING;
		break;
	case HAL_RXDMA_MONITOR_DESC:
		*htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
		*htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	default:
		ath11k_warn(ab, "Unsupported ring type in DP: %d\n", ring_type);
		ret = -EINVAL;
	}

	return ret;
}
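
/* Describe a host SRNG to firmware via an HTT_H2T_MSG_TYPE_SRING_SETUP
 * message: ring base, head/tail pointer addresses, entry size and the
 * MSI/interrupt moderation parameters.
 */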
int ath11k_dp_tx_htt_srng_setup(struct ath11k_base *ab, u32 ring_id,
				int mac_id, enum hal_ring_type ring_type)
{
	struct htt_srng_setup_cmd *cmd;
	struct hal_srng *srng = &ab->hal.srng_list[ring_id];
	struct hal_srng_params params;
	struct sk_buff *skb;
	u32 ring_entry_sz;
	int len = sizeof(*cmd);
	dma_addr_t hp_addr, tp_addr;
	enum htt_srng_ring_type htt_ring_type;
	enum htt_srng_ring_id htt_ring_id;
	int ret;

	skb = ath11k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	memset(&params, 0, sizeof(params));
	ath11k_hal_srng_get_params(ab, srng, &params);

	hp_addr = ath11k_hal_srng_get_hp_addr(ab, srng);
	tp_addr = ath11k_hal_srng_get_tp_addr(ab, srng);

	ret = ath11k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
					    ring_type, &htt_ring_type,
					    &htt_ring_id);
	if (ret)
		goto err_free;

	skb_put(skb, len);
	cmd = (struct htt_srng_setup_cmd *)skb->data;
	cmd->info0 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE,
				HTT_H2T_MSG_TYPE_SRING_SETUP);
	if (htt_ring_type == HTT_SW_TO_HW_RING ||
	    htt_ring_type == HTT_HW_TO_SW_RING)
		cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID,
					 DP_SW2HW_MACID(mac_id));
	else
		cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID,
					 mac_id);
	cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE,
				 htt_ring_type);
	cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_ID, htt_ring_id);

	cmd->ring_base_addr_lo = params.ring_base_paddr &
				 HAL_ADDR_LSB_REG_MASK;

	cmd->ring_base_addr_hi = (u64)params.ring_base_paddr >>
				 HAL_ADDR_MSB_REG_SHIFT;

	ret = ath11k_hal_srng_get_entrysize(ab, ring_type);
	if (ret < 0)
		goto err_free;

	ring_entry_sz = ret;

	ring_entry_sz >>= 2;
	cmd->info1 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE,
				ring_entry_sz);
	cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE,
				 params.num_entries * ring_entry_sz);
	cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP,
				 !!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP));
	cmd->info1 |= FIELD_PREP(
			HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP,
			!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP));
	cmd->info1 |= FIELD_PREP(
			HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP,
			!!(params.flags & HAL_SRNG_FLAGS_RING_PTR_SWAP));
	if (htt_ring_type == HTT_SW_TO_HW_RING)
		cmd->info1 |= HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS;

	cmd->ring_head_off32_remote_addr_lo = hp_addr & HAL_ADDR_LSB_REG_MASK;
	cmd->ring_head_off32_remote_addr_hi = (u64)hp_addr >>
					      HAL_ADDR_MSB_REG_SHIFT;

	cmd->ring_tail_off32_remote_addr_lo = tp_addr & HAL_ADDR_LSB_REG_MASK;
	cmd->ring_tail_off32_remote_addr_hi = (u64)tp_addr >>
					      HAL_ADDR_MSB_REG_SHIFT;

	cmd->ring_msi_addr_lo = params.msi_addr & 0xffffffff;
	cmd->ring_msi_addr_hi = ((uint64_t)(params.msi_addr) >> 32) & 0xffffffff;
	cmd->msi_data = params.msi_data;

	cmd->intr_info = FIELD_PREP(
			HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH,
			params.intr_batch_cntr_thres_entries * ring_entry_sz);
	cmd->intr_info |= FIELD_PREP(
			HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH,
			params.intr_timer_thres_us >> 3);

	cmd->info2 = 0;
	if (params.flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
		cmd->info2 = FIELD_PREP(
				HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH,
				params.low_threshold);
	}

	ath11k_dbg(ab, ATH11K_DBG_HAL,
		   "%s msi_addr_lo:0x%x, msi_addr_hi:0x%x, msi_data:0x%x\n",
		   __func__, cmd->ring_msi_addr_lo, cmd->ring_msi_addr_hi,
		   cmd->msi_data);

	ath11k_dbg(ab, ATH11K_DBG_HAL,
		   "ring_id:%d, ring_type:%d, intr_info:0x%x, flags:0x%x\n",
		   ring_id, ring_type, cmd->intr_info, cmd->info2);

	ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
	if (ret)
		goto err_free;

	return 0;

err_free:
	dev_kfree_skb_any(skb);

	return ret;
}

#define HTT_TARGET_VERSION_TIMEOUT_HZ (3 * HZ)
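
/* Request the HTT version from firmware and wait, up to the timeout above,
 * for the response; only a matching major version is accepted.
 */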
int ath11k_dp_tx_htt_h2t_ver_req_msg(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct sk_buff *skb;
	struct htt_ver_req_cmd *cmd;
	int len = sizeof(*cmd);
	int ret;

	init_completion(&dp->htt_tgt_version_received);

	skb = ath11k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_ver_req_cmd *)skb->data;
	cmd->ver_reg_info = FIELD_PREP(HTT_VER_REQ_INFO_MSG_ID,
				       HTT_H2T_MSG_TYPE_VERSION_REQ);

	ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	ret = wait_for_completion_timeout(&dp->htt_tgt_version_received,
					  HTT_TARGET_VERSION_TIMEOUT_HZ);
	if (ret == 0) {
		ath11k_warn(ab, "htt target version request timed out\n");
		return -ETIMEDOUT;
	}

	if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
		ath11k_err(ab, "unsupported htt major version %d supported version is %d\n",
			   dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR);
		return -ENOTSUPP;
	}

	return 0;
}
int ath11k_dp_tx_htt_h2t_ppdu_stats_req(struct ath11k *ar, u32 mask)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_dp *dp = &ab->dp;
	struct sk_buff *skb;
	struct htt_ppdu_stats_cfg_cmd *cmd;
	int len = sizeof(*cmd);
	u8 pdev_mask;
	int ret;
	int i;

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		skb = ath11k_htc_alloc_skb(ab, len);
		if (!skb)
			return -ENOMEM;

		skb_put(skb, len);
		cmd = (struct htt_ppdu_stats_cfg_cmd *)skb->data;
		cmd->msg = FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE,
				      HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);

		pdev_mask = 1 << (i + 1);
		cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_mask);
		cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK, mask);

		ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
		if (ret) {
			dev_kfree_skb_any(skb);
			return ret;
		}
	}

	return 0;
}
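
/* Program the rx ring selection filter (packet type and TLV flags) for a
 * ring via the HTT rx ring selection cfg message.
 */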
int ath11k_dp_tx_htt_rx_filter_setup(struct ath11k_base *ab, u32 ring_id,
				     int mac_id, enum hal_ring_type ring_type,
				     int rx_buf_size,
				     struct htt_rx_ring_tlv_filter *tlv_filter)
{
	struct htt_rx_ring_selection_cfg_cmd *cmd;
	struct hal_srng *srng = &ab->hal.srng_list[ring_id];
	struct hal_srng_params params;
	struct sk_buff *skb;
	int len = sizeof(*cmd);
	enum htt_srng_ring_type htt_ring_type;
	enum htt_srng_ring_id htt_ring_id;
	int ret;

	skb = ath11k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	memset(&params, 0, sizeof(params));
	ath11k_hal_srng_get_params(ab, srng, &params);

	ret = ath11k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
					    ring_type, &htt_ring_type,
					    &htt_ring_id);
	if (ret)
		goto err_free;

	skb_put(skb, len);
	cmd = (struct htt_rx_ring_selection_cfg_cmd *)skb->data;
	cmd->info0 = FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE,
				HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
	if (htt_ring_type == HTT_SW_TO_HW_RING ||
	    htt_ring_type == HTT_HW_TO_SW_RING)
		cmd->info0 |=
			FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID,
				   DP_SW2HW_MACID(mac_id));
	else
		cmd->info0 |=
			FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID,
				   mac_id);
	cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID,
				 htt_ring_id);
	cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS,
				 !!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP));
	cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS,
				 !!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP));

	cmd->info1 = FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE,
				rx_buf_size);
	cmd->pkt_type_en_flags0 = tlv_filter->pkt_filter_flags0;
	cmd->pkt_type_en_flags1 = tlv_filter->pkt_filter_flags1;
	cmd->pkt_type_en_flags2 = tlv_filter->pkt_filter_flags2;
	cmd->pkt_type_en_flags3 = tlv_filter->pkt_filter_flags3;
	cmd->rx_filter_tlv = tlv_filter->rx_filter;

	ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
	if (ret)
		goto err_free;

	return 0;

err_free:
	dev_kfree_skb_any(skb);

	return ret;
}
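
/* Request an extended stats report of the given type from firmware; the
 * 64-bit cookie is echoed back in the response for matching.
 */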
int
ath11k_dp_tx_htt_h2t_ext_stats_req(struct ath11k *ar, u8 type,
				   struct htt_ext_stats_cfg_params *cfg_params,
				   u64 cookie)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_dp *dp = &ab->dp;
	struct sk_buff *skb;
	struct htt_ext_stats_cfg_cmd *cmd;
	int len = sizeof(*cmd);
	int ret;

	skb = ath11k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_ext_stats_cfg_cmd *)skb->data;
	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG;

	cmd->hdr.pdev_mask = 1 << ar->pdev->pdev_id;

	cmd->hdr.stats_type = type;
	cmd->cfg_param0 = cfg_params->cfg0;
	cmd->cfg_param1 = cfg_params->cfg1;
	cmd->cfg_param2 = cfg_params->cfg2;
	cmd->cfg_param3 = cfg_params->cfg3;
	cmd->cookie_lsb = lower_32_bits(cookie);
	cmd->cookie_msb = upper_32_bits(cookie);

	ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
	if (ret) {
		ath11k_warn(ab, "failed to send htt type stats request: %d",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
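
/* Enable or reset the monitor mode rx filters. On chips without rxdma1
 * the per-mac buf rings are programmed instead and the reap timer is
 * kicked to drain the monitor destination ring.
 */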
int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	struct htt_rx_ring_tlv_filter tlv_filter = {0};
	int ret = 0, ring_id = 0, i;

	ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;

	if (!reset) {
		tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING;
		tlv_filter.pkt_filter_flags0 =
					HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 |
					HTT_RX_MON_MO_MGMT_FILTER_FLAGS0;
		tlv_filter.pkt_filter_flags1 =
					HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 |
					HTT_RX_MON_MO_MGMT_FILTER_FLAGS1;
		tlv_filter.pkt_filter_flags2 =
					HTT_RX_MON_FP_CTRL_FILTER_FLASG2 |
					HTT_RX_MON_MO_CTRL_FILTER_FLASG2;
		tlv_filter.pkt_filter_flags3 =
					HTT_RX_MON_FP_CTRL_FILTER_FLASG3 |
					HTT_RX_MON_MO_CTRL_FILTER_FLASG3 |
					HTT_RX_MON_FP_DATA_FILTER_FLASG3 |
					HTT_RX_MON_MO_DATA_FILTER_FLASG3;
	}

	if (ab->hw_params.rxdma1_enable) {
		ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, dp->mac_id,
						       HAL_RXDMA_MONITOR_BUF,
						       DP_RXDMA_REFILL_RING_SIZE,
						       &tlv_filter);
	} else if (!reset) {
		/* set in monitor mode only */
		for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
			ring_id = dp->rx_mac_buf_ring[i].ring_id;
			ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
							       dp->mac_id + i,
							       HAL_RXDMA_BUF,
							       1024,
							       &tlv_filter);
		}
	}

	if (ret)
		return ret;

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
		if (!reset)
			tlv_filter.rx_filter =
					HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING;
		else
			tlv_filter = ath11k_mac_mon_status_filter_default;

		ret = ath11k_dp_tx_htt_rx_filter_setup(ab, ring_id,
						       dp->mac_id + i,
						       HAL_RXDMA_MONITOR_STATUS,
						       DP_RXDMA_REFILL_RING_SIZE,
						       &tlv_filter);
	}

	if (!ar->ab->hw_params.rxdma1_enable)
		mod_timer(&ar->ab->mon_reap_timer, jiffies +
			  msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));

	return ret;
}