// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 */
11 /* NOTE: Any of the mapped ring id value must not exceed DP_TCL_NUM_RING_MAX */
13 ath11k_txq_tcl_ring_map
[ATH11K_HW_MAX_QUEUES
] = { 0x0, 0x1, 0x2, 0x2 };
15 static enum hal_tcl_encap_type
16 ath11k_dp_tx_get_encap_type(struct ath11k_vif
*arvif
, struct sk_buff
*skb
)
18 /* TODO: Determine encap type based on vif_type and configuration */
19 return HAL_TCL_ENCAP_TYPE_NATIVE_WIFI
;
22 static void ath11k_dp_tx_encap_nwifi(struct sk_buff
*skb
)
24 struct ieee80211_hdr
*hdr
= (void *)skb
->data
;
27 if (!ieee80211_is_data_qos(hdr
->frame_control
))
30 qos_ctl
= ieee80211_get_qos_ctl(hdr
);
31 memmove(skb
->data
+ IEEE80211_QOS_CTL_LEN
,
32 skb
->data
, (void *)qos_ctl
- (void *)skb
->data
);
33 skb_pull(skb
, IEEE80211_QOS_CTL_LEN
);
35 hdr
= (void *)skb
->data
;
36 hdr
->frame_control
&= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA
);
39 static u8
ath11k_dp_tx_get_tid(struct sk_buff
*skb
)
41 struct ieee80211_hdr
*hdr
= (void *)skb
->data
;
43 if (!ieee80211_is_data_qos(hdr
->frame_control
))
44 return HAL_DESC_REO_NON_QOS_TID
;
46 return skb
->priority
& IEEE80211_QOS_CTL_TID_MASK
;
49 static enum hal_encrypt_type
ath11k_dp_tx_get_encrypt_type(u32 cipher
)
52 case WLAN_CIPHER_SUITE_WEP40
:
53 return HAL_ENCRYPT_TYPE_WEP_40
;
54 case WLAN_CIPHER_SUITE_WEP104
:
55 return HAL_ENCRYPT_TYPE_WEP_104
;
56 case WLAN_CIPHER_SUITE_TKIP
:
57 return HAL_ENCRYPT_TYPE_TKIP_MIC
;
58 case WLAN_CIPHER_SUITE_CCMP
:
59 return HAL_ENCRYPT_TYPE_CCMP_128
;
60 case WLAN_CIPHER_SUITE_CCMP_256
:
61 return HAL_ENCRYPT_TYPE_CCMP_256
;
62 case WLAN_CIPHER_SUITE_GCMP
:
63 return HAL_ENCRYPT_TYPE_GCMP_128
;
64 case WLAN_CIPHER_SUITE_GCMP_256
:
65 return HAL_ENCRYPT_TYPE_AES_GCMP_256
;
67 return HAL_ENCRYPT_TYPE_OPEN
;
71 int ath11k_dp_tx(struct ath11k
*ar
, struct ath11k_vif
*arvif
,
74 struct ath11k_base
*ab
= ar
->ab
;
75 struct ath11k_dp
*dp
= &ab
->dp
;
76 struct hal_tx_info ti
= {0};
77 struct ieee80211_tx_info
*info
= IEEE80211_SKB_CB(skb
);
78 struct ath11k_skb_cb
*skb_cb
= ATH11K_SKB_CB(skb
);
79 struct hal_srng
*tcl_ring
;
80 struct ieee80211_hdr
*hdr
= (void *)skb
->data
;
81 struct dp_tx_ring
*tx_ring
;
87 if (test_bit(ATH11K_FLAG_CRASH_FLUSH
, &ar
->ab
->dev_flags
))
90 if (!ieee80211_is_data(hdr
->frame_control
))
93 pool_id
= skb_get_queue_mapping(skb
) & (ATH11K_HW_MAX_QUEUES
- 1);
94 ti
.ring_id
= ath11k_txq_tcl_ring_map
[pool_id
];
96 tx_ring
= &dp
->tx_ring
[ti
.ring_id
];
98 spin_lock_bh(&tx_ring
->tx_idr_lock
);
99 ret
= idr_alloc(&tx_ring
->txbuf_idr
, skb
, 0,
100 DP_TX_IDR_SIZE
- 1, GFP_ATOMIC
);
101 spin_unlock_bh(&tx_ring
->tx_idr_lock
);
106 ti
.desc_id
= FIELD_PREP(DP_TX_DESC_ID_MAC_ID
, ar
->pdev_idx
) |
107 FIELD_PREP(DP_TX_DESC_ID_MSDU_ID
, ret
) |
108 FIELD_PREP(DP_TX_DESC_ID_POOL_ID
, pool_id
);
109 ti
.encap_type
= ath11k_dp_tx_get_encap_type(arvif
, skb
);
110 ti
.meta_data_flags
= arvif
->tcl_metadata
;
112 if (info
->control
.hw_key
)
114 ath11k_dp_tx_get_encrypt_type(info
->control
.hw_key
->cipher
);
116 ti
.encrypt_type
= HAL_ENCRYPT_TYPE_OPEN
;
118 ti
.addr_search_flags
= arvif
->hal_addr_search_flags
;
119 ti
.search_type
= arvif
->search_type
;
120 ti
.type
= HAL_TCL_DESC_TYPE_BUFFER
;
122 ti
.lmac_id
= ar
->lmac_id
;
123 ti
.bss_ast_hash
= arvif
->ast_hash
;
124 ti
.dscp_tid_tbl_idx
= 0;
126 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
127 ti
.flags0
|= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_IP4_CKSUM_EN
, 1) |
128 FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP4_CKSUM_EN
, 1) |
129 FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP6_CKSUM_EN
, 1) |
130 FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TCP4_CKSUM_EN
, 1) |
131 FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TCP6_CKSUM_EN
, 1);
134 if (ieee80211_vif_is_mesh(arvif
->vif
))
135 ti
.flags1
|= FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_MESH_ENABLE
, 1);
137 ti
.flags1
|= FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_TID_OVERWRITE
, 1);
139 ti
.tid
= ath11k_dp_tx_get_tid(skb
);
141 switch (ti
.encap_type
) {
142 case HAL_TCL_ENCAP_TYPE_NATIVE_WIFI
:
143 ath11k_dp_tx_encap_nwifi(skb
);
145 case HAL_TCL_ENCAP_TYPE_RAW
:
146 /* TODO: for CHECKSUM_PARTIAL case in raw mode, HW checksum offload
147 * is not applicable, hence manual checksum calculation using
148 * skb_checksum_help() is needed
150 case HAL_TCL_ENCAP_TYPE_ETHERNET
:
151 case HAL_TCL_ENCAP_TYPE_802_3
:
152 /* TODO: Take care of other encap modes as well */
154 goto fail_remove_idr
;
157 ti
.paddr
= dma_map_single(ab
->dev
, skb
->data
, skb
->len
, DMA_TO_DEVICE
);
158 if (dma_mapping_error(ab
->dev
, ti
.paddr
)) {
159 ath11k_warn(ab
, "failed to DMA map data Tx buffer\n");
161 goto fail_remove_idr
;
164 ti
.data_len
= skb
->len
;
165 skb_cb
->paddr
= ti
.paddr
;
166 skb_cb
->vif
= arvif
->vif
;
169 hal_ring_id
= tx_ring
->tcl_data_ring
.ring_id
;
170 tcl_ring
= &ab
->hal
.srng_list
[hal_ring_id
];
172 spin_lock_bh(&tcl_ring
->lock
);
174 ath11k_hal_srng_access_begin(ab
, tcl_ring
);
176 hal_tcl_desc
= (void *)ath11k_hal_srng_src_get_next_entry(ab
, tcl_ring
);
178 /* NOTE: It is highly unlikely we'll be running out of tcl_ring
179 * desc because the desc is directly enqueued onto hw queue.
180 * So add tx packet throttling logic in future if required.
182 ath11k_hal_srng_access_end(ab
, tcl_ring
);
183 spin_unlock_bh(&tcl_ring
->lock
);
188 ath11k_hal_tx_cmd_desc_setup(ab
, hal_tcl_desc
+
189 sizeof(struct hal_tlv_hdr
), &ti
);
191 ath11k_hal_srng_access_end(ab
, tcl_ring
);
193 spin_unlock_bh(&tcl_ring
->lock
);
195 atomic_inc(&ar
->dp
.num_tx_pending
);
200 dma_unmap_single(ab
->dev
, ti
.paddr
, ti
.data_len
, DMA_TO_DEVICE
);
203 spin_lock_bh(&tx_ring
->tx_idr_lock
);
204 idr_remove(&tx_ring
->txbuf_idr
,
205 FIELD_GET(DP_TX_DESC_ID_MSDU_ID
, ti
.desc_id
));
206 spin_unlock_bh(&tx_ring
->tx_idr_lock
);
211 static void ath11k_dp_tx_free_txbuf(struct ath11k_base
*ab
, u8 mac_id
,
213 struct dp_tx_ring
*tx_ring
)
216 struct sk_buff
*msdu
;
217 struct ath11k_skb_cb
*skb_cb
;
219 spin_lock_bh(&tx_ring
->tx_idr_lock
);
220 msdu
= idr_find(&tx_ring
->txbuf_idr
, msdu_id
);
222 ath11k_warn(ab
, "tx completion for unknown msdu_id %d\n",
224 spin_unlock_bh(&tx_ring
->tx_idr_lock
);
228 skb_cb
= ATH11K_SKB_CB(msdu
);
230 idr_remove(&tx_ring
->txbuf_idr
, msdu_id
);
231 spin_unlock_bh(&tx_ring
->tx_idr_lock
);
233 dma_unmap_single(ab
->dev
, skb_cb
->paddr
, msdu
->len
, DMA_TO_DEVICE
);
234 dev_kfree_skb_any(msdu
);
236 ar
= ab
->pdevs
[mac_id
].ar
;
237 if (atomic_dec_and_test(&ar
->dp
.num_tx_pending
))
238 wake_up(&ar
->dp
.tx_empty_waitq
);
242 ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base
*ab
,
243 struct dp_tx_ring
*tx_ring
,
244 struct ath11k_dp_htt_wbm_tx_status
*ts
)
246 struct sk_buff
*msdu
;
247 struct ieee80211_tx_info
*info
;
248 struct ath11k_skb_cb
*skb_cb
;
251 spin_lock_bh(&tx_ring
->tx_idr_lock
);
252 msdu
= idr_find(&tx_ring
->txbuf_idr
, ts
->msdu_id
);
254 ath11k_warn(ab
, "htt tx completion for unknown msdu_id %d\n",
256 spin_unlock_bh(&tx_ring
->tx_idr_lock
);
260 skb_cb
= ATH11K_SKB_CB(msdu
);
261 info
= IEEE80211_SKB_CB(msdu
);
265 idr_remove(&tx_ring
->txbuf_idr
, ts
->msdu_id
);
266 spin_unlock_bh(&tx_ring
->tx_idr_lock
);
268 if (atomic_dec_and_test(&ar
->dp
.num_tx_pending
))
269 wake_up(&ar
->dp
.tx_empty_waitq
);
271 dma_unmap_single(ab
->dev
, skb_cb
->paddr
, msdu
->len
, DMA_TO_DEVICE
);
273 memset(&info
->status
, 0, sizeof(info
->status
));
276 if (!(info
->flags
& IEEE80211_TX_CTL_NO_ACK
)) {
277 info
->flags
|= IEEE80211_TX_STAT_ACK
;
278 info
->status
.ack_signal
= ATH11K_DEFAULT_NOISE_FLOOR
+
280 info
->status
.is_valid_ack_signal
= true;
282 info
->flags
|= IEEE80211_TX_STAT_NOACK_TRANSMITTED
;
286 ieee80211_tx_status(ar
->hw
, msdu
);
290 ath11k_dp_tx_process_htt_tx_complete(struct ath11k_base
*ab
,
291 void *desc
, u8 mac_id
,
292 u32 msdu_id
, struct dp_tx_ring
*tx_ring
)
294 struct htt_tx_wbm_completion
*status_desc
;
295 struct ath11k_dp_htt_wbm_tx_status ts
= {0};
296 enum hal_wbm_htt_tx_comp_status wbm_status
;
298 status_desc
= desc
+ HTT_TX_WBM_COMP_STATUS_OFFSET
;
300 wbm_status
= FIELD_GET(HTT_TX_WBM_COMP_INFO0_STATUS
,
303 switch (wbm_status
) {
304 case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK
:
305 case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP
:
306 case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL
:
307 ts
.acked
= (wbm_status
== HAL_WBM_REL_HTT_TX_COMP_STATUS_OK
);
308 ts
.msdu_id
= msdu_id
;
309 ts
.ack_rssi
= FIELD_GET(HTT_TX_WBM_COMP_INFO1_ACK_RSSI
,
311 ath11k_dp_tx_htt_tx_complete_buf(ab
, tx_ring
, &ts
);
313 case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ
:
314 case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT
:
315 ath11k_dp_tx_free_txbuf(ab
, mac_id
, msdu_id
, tx_ring
);
317 case HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY
:
318 /* This event is to be handled only when the driver decides to
319 * use WDS offload functionality.
323 ath11k_warn(ab
, "Unknown htt tx status %d\n", wbm_status
);
328 static void ath11k_dp_tx_cache_peer_stats(struct ath11k
*ar
,
329 struct sk_buff
*msdu
,
330 struct hal_tx_status
*ts
)
332 struct ath11k_per_peer_tx_stats
*peer_stats
= &ar
->cached_stats
;
334 if (ts
->try_cnt
> 1) {
335 peer_stats
->retry_pkts
+= ts
->try_cnt
- 1;
336 peer_stats
->retry_bytes
+= (ts
->try_cnt
- 1) * msdu
->len
;
338 if (ts
->status
!= HAL_WBM_TQM_REL_REASON_FRAME_ACKED
) {
339 peer_stats
->failed_pkts
+= 1;
340 peer_stats
->failed_bytes
+= msdu
->len
;
345 static void ath11k_dp_tx_complete_msdu(struct ath11k
*ar
,
346 struct sk_buff
*msdu
,
347 struct hal_tx_status
*ts
)
349 struct ath11k_base
*ab
= ar
->ab
;
350 struct ieee80211_tx_info
*info
;
351 struct ath11k_skb_cb
*skb_cb
;
353 if (WARN_ON_ONCE(ts
->buf_rel_source
!= HAL_WBM_REL_SRC_MODULE_TQM
)) {
354 /* Must not happen */
358 skb_cb
= ATH11K_SKB_CB(msdu
);
360 dma_unmap_single(ab
->dev
, skb_cb
->paddr
, msdu
->len
, DMA_TO_DEVICE
);
364 if (!rcu_dereference(ab
->pdevs_active
[ar
->pdev_idx
])) {
365 dev_kfree_skb_any(msdu
);
370 dev_kfree_skb_any(msdu
);
374 info
= IEEE80211_SKB_CB(msdu
);
375 memset(&info
->status
, 0, sizeof(info
->status
));
377 /* skip tx rate update from ieee80211_status*/
378 info
->status
.rates
[0].idx
= -1;
380 if (ts
->status
== HAL_WBM_TQM_REL_REASON_FRAME_ACKED
&&
381 !(info
->flags
& IEEE80211_TX_CTL_NO_ACK
)) {
382 info
->flags
|= IEEE80211_TX_STAT_ACK
;
383 info
->status
.ack_signal
= ATH11K_DEFAULT_NOISE_FLOOR
+
385 info
->status
.is_valid_ack_signal
= true;
388 if (ts
->status
== HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX
&&
389 (info
->flags
& IEEE80211_TX_CTL_NO_ACK
))
390 info
->flags
|= IEEE80211_TX_STAT_NOACK_TRANSMITTED
;
392 if (ath11k_debug_is_extd_tx_stats_enabled(ar
)) {
393 if (ts
->flags
& HAL_TX_STATUS_FLAGS_FIRST_MSDU
) {
394 if (ar
->last_ppdu_id
== 0) {
395 ar
->last_ppdu_id
= ts
->ppdu_id
;
396 } else if (ar
->last_ppdu_id
== ts
->ppdu_id
||
397 ar
->cached_ppdu_id
== ar
->last_ppdu_id
) {
398 ar
->cached_ppdu_id
= ar
->last_ppdu_id
;
399 ar
->cached_stats
.is_ampdu
= true;
400 ath11k_update_per_peer_stats_from_txcompl(ar
, msdu
, ts
);
401 memset(&ar
->cached_stats
, 0,
402 sizeof(struct ath11k_per_peer_tx_stats
));
404 ar
->cached_stats
.is_ampdu
= false;
405 ath11k_update_per_peer_stats_from_txcompl(ar
, msdu
, ts
);
406 memset(&ar
->cached_stats
, 0,
407 sizeof(struct ath11k_per_peer_tx_stats
));
409 ar
->last_ppdu_id
= ts
->ppdu_id
;
412 ath11k_dp_tx_cache_peer_stats(ar
, msdu
, ts
);
415 /* NOTE: Tx rate status reporting. Tx completion status does not have
416 * necessary information (for example nss) to build the tx rate.
417 * Might end up reporting it out-of-band from HTT stats.
420 ieee80211_tx_status(ar
->hw
, msdu
);
426 static inline void ath11k_dp_tx_status_parse(struct ath11k_base
*ab
,
427 struct hal_wbm_release_ring
*desc
,
428 struct hal_tx_status
*ts
)
431 FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE
, desc
->info0
);
432 if (ts
->buf_rel_source
!= HAL_WBM_REL_SRC_MODULE_FW
&&
433 ts
->buf_rel_source
!= HAL_WBM_REL_SRC_MODULE_TQM
)
436 if (ts
->buf_rel_source
== HAL_WBM_REL_SRC_MODULE_FW
)
439 ts
->status
= FIELD_GET(HAL_WBM_RELEASE_INFO0_TQM_RELEASE_REASON
,
441 ts
->ppdu_id
= FIELD_GET(HAL_WBM_RELEASE_INFO1_TQM_STATUS_NUMBER
,
443 ts
->try_cnt
= FIELD_GET(HAL_WBM_RELEASE_INFO1_TRANSMIT_COUNT
,
445 ts
->ack_rssi
= FIELD_GET(HAL_WBM_RELEASE_INFO2_ACK_FRAME_RSSI
,
447 if (desc
->info2
& HAL_WBM_RELEASE_INFO2_FIRST_MSDU
)
448 ts
->flags
|= HAL_TX_STATUS_FLAGS_FIRST_MSDU
;
449 ts
->peer_id
= FIELD_GET(HAL_WBM_RELEASE_INFO3_PEER_ID
, desc
->info3
);
450 ts
->tid
= FIELD_GET(HAL_WBM_RELEASE_INFO3_TID
, desc
->info3
);
451 if (desc
->rate_stats
.info0
& HAL_TX_RATE_STATS_INFO0_VALID
)
452 ts
->rate_stats
= desc
->rate_stats
.info0
;
457 void ath11k_dp_tx_completion_handler(struct ath11k_base
*ab
, int ring_id
)
460 struct ath11k_dp
*dp
= &ab
->dp
;
461 int hal_ring_id
= dp
->tx_ring
[ring_id
].tcl_comp_ring
.ring_id
;
462 struct hal_srng
*status_ring
= &ab
->hal
.srng_list
[hal_ring_id
];
463 struct sk_buff
*msdu
;
464 struct hal_tx_status ts
= { 0 };
465 struct dp_tx_ring
*tx_ring
= &dp
->tx_ring
[ring_id
];
470 ath11k_hal_srng_access_begin(ab
, status_ring
);
472 while ((ATH11K_TX_COMPL_NEXT(tx_ring
->tx_status_head
) !=
473 tx_ring
->tx_status_tail
) &&
474 (desc
= ath11k_hal_srng_dst_get_next_entry(ab
, status_ring
))) {
475 memcpy(&tx_ring
->tx_status
[tx_ring
->tx_status_head
],
476 desc
, sizeof(struct hal_wbm_release_ring
));
477 tx_ring
->tx_status_head
=
478 ATH11K_TX_COMPL_NEXT(tx_ring
->tx_status_head
);
481 if ((ath11k_hal_srng_dst_peek(ab
, status_ring
) != NULL
) &&
482 (ATH11K_TX_COMPL_NEXT(tx_ring
->tx_status_head
) == tx_ring
->tx_status_tail
)) {
483 /* TODO: Process pending tx_status messages when kfifo_is_full() */
484 ath11k_warn(ab
, "Unable to process some of the tx_status ring desc because status_fifo is full\n");
487 ath11k_hal_srng_access_end(ab
, status_ring
);
489 while (ATH11K_TX_COMPL_NEXT(tx_ring
->tx_status_tail
) != tx_ring
->tx_status_head
) {
490 struct hal_wbm_release_ring
*tx_status
;
493 tx_ring
->tx_status_tail
=
494 ATH11K_TX_COMPL_NEXT(tx_ring
->tx_status_tail
);
495 tx_status
= &tx_ring
->tx_status
[tx_ring
->tx_status_tail
];
496 ath11k_dp_tx_status_parse(ab
, tx_status
, &ts
);
498 desc_id
= FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE
,
499 tx_status
->buf_addr_info
.info1
);
500 mac_id
= FIELD_GET(DP_TX_DESC_ID_MAC_ID
, desc_id
);
501 msdu_id
= FIELD_GET(DP_TX_DESC_ID_MSDU_ID
, desc_id
);
503 if (ts
.buf_rel_source
== HAL_WBM_REL_SRC_MODULE_FW
) {
504 ath11k_dp_tx_process_htt_tx_complete(ab
,
511 spin_lock_bh(&tx_ring
->tx_idr_lock
);
512 msdu
= idr_find(&tx_ring
->txbuf_idr
, msdu_id
);
514 ath11k_warn(ab
, "tx completion for unknown msdu_id %d\n",
516 spin_unlock_bh(&tx_ring
->tx_idr_lock
);
519 idr_remove(&tx_ring
->txbuf_idr
, msdu_id
);
520 spin_unlock_bh(&tx_ring
->tx_idr_lock
);
522 ar
= ab
->pdevs
[mac_id
].ar
;
524 if (atomic_dec_and_test(&ar
->dp
.num_tx_pending
))
525 wake_up(&ar
->dp
.tx_empty_waitq
);
527 ath11k_dp_tx_complete_msdu(ar
, msdu
, &ts
);
531 int ath11k_dp_tx_send_reo_cmd(struct ath11k_base
*ab
, struct dp_rx_tid
*rx_tid
,
532 enum hal_reo_cmd_type type
,
533 struct ath11k_hal_reo_cmd
*cmd
,
534 void (*cb
)(struct ath11k_dp
*, void *,
535 enum hal_reo_cmd_status
))
537 struct ath11k_dp
*dp
= &ab
->dp
;
538 struct dp_reo_cmd
*dp_cmd
;
539 struct hal_srng
*cmd_ring
;
542 cmd_ring
= &ab
->hal
.srng_list
[dp
->reo_cmd_ring
.ring_id
];
543 cmd_num
= ath11k_hal_reo_cmd_send(ab
, cmd_ring
, type
, cmd
);
545 /* reo cmd ring descriptors has cmd_num starting from 1 */
552 /* Can this be optimized so that we keep the pending command list only
553 * for tid delete command to free up the resoruce on the command status
556 dp_cmd
= kzalloc(sizeof(*dp_cmd
), GFP_ATOMIC
);
561 memcpy(&dp_cmd
->data
, rx_tid
, sizeof(struct dp_rx_tid
));
562 dp_cmd
->cmd_num
= cmd_num
;
563 dp_cmd
->handler
= cb
;
565 spin_lock_bh(&dp
->reo_cmd_lock
);
566 list_add_tail(&dp_cmd
->list
, &dp
->reo_cmd_list
);
567 spin_unlock_bh(&dp
->reo_cmd_lock
);
573 ath11k_dp_tx_get_ring_id_type(struct ath11k_base
*ab
,
574 int mac_id
, u32 ring_id
,
575 enum hal_ring_type ring_type
,
576 enum htt_srng_ring_type
*htt_ring_type
,
577 enum htt_srng_ring_id
*htt_ring_id
)
579 int lmac_ring_id_offset
= 0;
584 lmac_ring_id_offset
= mac_id
* HAL_SRNG_RINGS_PER_LMAC
;
585 if (!(ring_id
== (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF
+
586 lmac_ring_id_offset
) ||
587 ring_id
== (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_BUF
+
588 lmac_ring_id_offset
))) {
591 *htt_ring_id
= HTT_RXDMA_HOST_BUF_RING
;
592 *htt_ring_type
= HTT_SW_TO_HW_RING
;
595 *htt_ring_id
= HTT_RXDMA_NON_MONITOR_DEST_RING
;
596 *htt_ring_type
= HTT_HW_TO_SW_RING
;
598 case HAL_RXDMA_MONITOR_BUF
:
599 *htt_ring_id
= HTT_RXDMA_MONITOR_BUF_RING
;
600 *htt_ring_type
= HTT_SW_TO_HW_RING
;
602 case HAL_RXDMA_MONITOR_STATUS
:
603 *htt_ring_id
= HTT_RXDMA_MONITOR_STATUS_RING
;
604 *htt_ring_type
= HTT_SW_TO_HW_RING
;
606 case HAL_RXDMA_MONITOR_DST
:
607 *htt_ring_id
= HTT_RXDMA_MONITOR_DEST_RING
;
608 *htt_ring_type
= HTT_HW_TO_SW_RING
;
610 case HAL_RXDMA_MONITOR_DESC
:
611 *htt_ring_id
= HTT_RXDMA_MONITOR_DESC_RING
;
612 *htt_ring_type
= HTT_SW_TO_HW_RING
;
615 ath11k_warn(ab
, "Unsupported ring type in DP :%d\n", ring_type
);
621 int ath11k_dp_tx_htt_srng_setup(struct ath11k_base
*ab
, u32 ring_id
,
622 int mac_id
, enum hal_ring_type ring_type
)
624 struct htt_srng_setup_cmd
*cmd
;
625 struct hal_srng
*srng
= &ab
->hal
.srng_list
[ring_id
];
626 struct hal_srng_params params
;
629 int len
= sizeof(*cmd
);
630 dma_addr_t hp_addr
, tp_addr
;
631 enum htt_srng_ring_type htt_ring_type
;
632 enum htt_srng_ring_id htt_ring_id
;
635 skb
= ath11k_htc_alloc_skb(ab
, len
);
639 memset(¶ms
, 0, sizeof(params
));
640 ath11k_hal_srng_get_params(ab
, srng
, ¶ms
);
642 hp_addr
= ath11k_hal_srng_get_hp_addr(ab
, srng
);
643 tp_addr
= ath11k_hal_srng_get_tp_addr(ab
, srng
);
645 ret
= ath11k_dp_tx_get_ring_id_type(ab
, mac_id
, ring_id
,
646 ring_type
, &htt_ring_type
,
652 cmd
= (struct htt_srng_setup_cmd
*)skb
->data
;
653 cmd
->info0
= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE
,
654 HTT_H2T_MSG_TYPE_SRING_SETUP
);
655 if (htt_ring_type
== HTT_SW_TO_HW_RING
||
656 htt_ring_type
== HTT_HW_TO_SW_RING
)
657 cmd
->info0
|= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID
,
658 DP_SW2HW_MACID(mac_id
));
660 cmd
->info0
|= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID
,
662 cmd
->info0
|= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE
,
664 cmd
->info0
|= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_ID
, htt_ring_id
);
666 cmd
->ring_base_addr_lo
= params
.ring_base_paddr
&
667 HAL_ADDR_LSB_REG_MASK
;
669 cmd
->ring_base_addr_hi
= (u64
)params
.ring_base_paddr
>>
670 HAL_ADDR_MSB_REG_SHIFT
;
672 ret
= ath11k_hal_srng_get_entrysize(ring_type
);
679 cmd
->info1
= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE
,
681 cmd
->info1
|= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE
,
682 params
.num_entries
* ring_entry_sz
);
683 cmd
->info1
|= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP
,
684 !!(params
.flags
& HAL_SRNG_FLAGS_MSI_SWAP
));
685 cmd
->info1
|= FIELD_PREP(
686 HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP
,
687 !!(params
.flags
& HAL_SRNG_FLAGS_DATA_TLV_SWAP
));
688 cmd
->info1
|= FIELD_PREP(
689 HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP
,
690 !!(params
.flags
& HAL_SRNG_FLAGS_RING_PTR_SWAP
));
691 if (htt_ring_type
== HTT_SW_TO_HW_RING
)
692 cmd
->info1
|= HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS
;
694 cmd
->ring_head_off32_remote_addr_lo
= hp_addr
& HAL_ADDR_LSB_REG_MASK
;
695 cmd
->ring_head_off32_remote_addr_hi
= (u64
)hp_addr
>>
696 HAL_ADDR_MSB_REG_SHIFT
;
698 cmd
->ring_tail_off32_remote_addr_lo
= tp_addr
& HAL_ADDR_LSB_REG_MASK
;
699 cmd
->ring_tail_off32_remote_addr_hi
= (u64
)tp_addr
>>
700 HAL_ADDR_MSB_REG_SHIFT
;
702 cmd
->ring_msi_addr_lo
= 0;
703 cmd
->ring_msi_addr_hi
= 0;
706 cmd
->intr_info
= FIELD_PREP(
707 HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH
,
708 params
.intr_batch_cntr_thres_entries
* ring_entry_sz
);
709 cmd
->intr_info
|= FIELD_PREP(
710 HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH
,
711 params
.intr_timer_thres_us
>> 3);
714 if (params
.flags
& HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN
) {
715 cmd
->info2
= FIELD_PREP(
716 HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH
,
717 params
.low_threshold
);
720 ret
= ath11k_htc_send(&ab
->htc
, ab
->dp
.eid
, skb
);
727 dev_kfree_skb_any(skb
);
732 #define HTT_TARGET_VERSION_TIMEOUT_HZ (3 * HZ)
734 int ath11k_dp_tx_htt_h2t_ver_req_msg(struct ath11k_base
*ab
)
736 struct ath11k_dp
*dp
= &ab
->dp
;
738 struct htt_ver_req_cmd
*cmd
;
739 int len
= sizeof(*cmd
);
742 init_completion(&dp
->htt_tgt_version_received
);
744 skb
= ath11k_htc_alloc_skb(ab
, len
);
749 cmd
= (struct htt_ver_req_cmd
*)skb
->data
;
750 cmd
->ver_reg_info
= FIELD_PREP(HTT_VER_REQ_INFO_MSG_ID
,
751 HTT_H2T_MSG_TYPE_VERSION_REQ
);
753 ret
= ath11k_htc_send(&ab
->htc
, dp
->eid
, skb
);
755 dev_kfree_skb_any(skb
);
759 ret
= wait_for_completion_timeout(&dp
->htt_tgt_version_received
,
760 HTT_TARGET_VERSION_TIMEOUT_HZ
);
762 ath11k_warn(ab
, "htt target version request timed out\n");
766 if (dp
->htt_tgt_ver_major
!= HTT_TARGET_VERSION_MAJOR
) {
767 ath11k_err(ab
, "unsupported htt major version %d supported version is %d\n",
768 dp
->htt_tgt_ver_major
, HTT_TARGET_VERSION_MAJOR
);
775 int ath11k_dp_tx_htt_h2t_ppdu_stats_req(struct ath11k
*ar
, u32 mask
)
777 struct ath11k_base
*ab
= ar
->ab
;
778 struct ath11k_dp
*dp
= &ab
->dp
;
780 struct htt_ppdu_stats_cfg_cmd
*cmd
;
781 int len
= sizeof(*cmd
);
785 skb
= ath11k_htc_alloc_skb(ab
, len
);
790 cmd
= (struct htt_ppdu_stats_cfg_cmd
*)skb
->data
;
791 cmd
->msg
= FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE
,
792 HTT_H2T_MSG_TYPE_PPDU_STATS_CFG
);
794 pdev_mask
= 1 << (ar
->pdev_idx
);
795 cmd
->msg
|= FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID
, pdev_mask
);
796 cmd
->msg
|= FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK
, mask
);
798 ret
= ath11k_htc_send(&ab
->htc
, dp
->eid
, skb
);
800 dev_kfree_skb_any(skb
);
807 int ath11k_dp_tx_htt_rx_filter_setup(struct ath11k_base
*ab
, u32 ring_id
,
808 int mac_id
, enum hal_ring_type ring_type
,
810 struct htt_rx_ring_tlv_filter
*tlv_filter
)
812 struct htt_rx_ring_selection_cfg_cmd
*cmd
;
813 struct hal_srng
*srng
= &ab
->hal
.srng_list
[ring_id
];
814 struct hal_srng_params params
;
816 int len
= sizeof(*cmd
);
817 enum htt_srng_ring_type htt_ring_type
;
818 enum htt_srng_ring_id htt_ring_id
;
821 skb
= ath11k_htc_alloc_skb(ab
, len
);
825 memset(¶ms
, 0, sizeof(params
));
826 ath11k_hal_srng_get_params(ab
, srng
, ¶ms
);
828 ret
= ath11k_dp_tx_get_ring_id_type(ab
, mac_id
, ring_id
,
829 ring_type
, &htt_ring_type
,
835 cmd
= (struct htt_rx_ring_selection_cfg_cmd
*)skb
->data
;
836 cmd
->info0
= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE
,
837 HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG
);
838 if (htt_ring_type
== HTT_SW_TO_HW_RING
||
839 htt_ring_type
== HTT_HW_TO_SW_RING
)
841 FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID
,
842 DP_SW2HW_MACID(mac_id
));
845 FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID
,
847 cmd
->info0
|= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID
,
849 cmd
->info0
|= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS
,
850 !!(params
.flags
& HAL_SRNG_FLAGS_MSI_SWAP
));
851 cmd
->info0
|= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS
,
852 !!(params
.flags
& HAL_SRNG_FLAGS_DATA_TLV_SWAP
));
854 cmd
->info1
= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE
,
856 cmd
->pkt_type_en_flags0
= tlv_filter
->pkt_filter_flags0
;
857 cmd
->pkt_type_en_flags1
= tlv_filter
->pkt_filter_flags1
;
858 cmd
->pkt_type_en_flags2
= tlv_filter
->pkt_filter_flags2
;
859 cmd
->pkt_type_en_flags3
= tlv_filter
->pkt_filter_flags3
;
860 cmd
->rx_filter_tlv
= tlv_filter
->rx_filter
;
862 ret
= ath11k_htc_send(&ab
->htc
, ab
->dp
.eid
, skb
);
869 dev_kfree_skb_any(skb
);
875 ath11k_dp_tx_htt_h2t_ext_stats_req(struct ath11k
*ar
, u8 type
,
876 struct htt_ext_stats_cfg_params
*cfg_params
,
879 struct ath11k_base
*ab
= ar
->ab
;
880 struct ath11k_dp
*dp
= &ab
->dp
;
882 struct htt_ext_stats_cfg_cmd
*cmd
;
883 int len
= sizeof(*cmd
);
886 skb
= ath11k_htc_alloc_skb(ab
, len
);
892 cmd
= (struct htt_ext_stats_cfg_cmd
*)skb
->data
;
893 memset(cmd
, 0, sizeof(*cmd
));
894 cmd
->hdr
.msg_type
= HTT_H2T_MSG_TYPE_EXT_STATS_CFG
;
896 cmd
->hdr
.pdev_mask
= 1 << ar
->pdev
->pdev_id
;
898 cmd
->hdr
.stats_type
= type
;
899 cmd
->cfg_param0
= cfg_params
->cfg0
;
900 cmd
->cfg_param1
= cfg_params
->cfg1
;
901 cmd
->cfg_param2
= cfg_params
->cfg2
;
902 cmd
->cfg_param3
= cfg_params
->cfg3
;
903 cmd
->cookie_lsb
= lower_32_bits(cookie
);
904 cmd
->cookie_msb
= upper_32_bits(cookie
);
906 ret
= ath11k_htc_send(&ab
->htc
, dp
->eid
, skb
);
908 ath11k_warn(ab
, "failed to send htt type stats request: %d",
910 dev_kfree_skb_any(skb
);
917 int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k
*ar
, bool reset
)
919 struct ath11k_pdev_dp
*dp
= &ar
->dp
;
920 struct htt_rx_ring_tlv_filter tlv_filter
= {0};
921 int ret
= 0, ring_id
= 0;
923 ring_id
= dp
->rxdma_mon_buf_ring
.refill_buf_ring
.ring_id
;
926 tlv_filter
.rx_filter
= HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING
;
927 tlv_filter
.pkt_filter_flags0
=
928 HTT_RX_MON_FP_MGMT_FILTER_FLAGS0
|
929 HTT_RX_MON_MO_MGMT_FILTER_FLAGS0
;
930 tlv_filter
.pkt_filter_flags1
=
931 HTT_RX_MON_FP_MGMT_FILTER_FLAGS1
|
932 HTT_RX_MON_MO_MGMT_FILTER_FLAGS1
;
933 tlv_filter
.pkt_filter_flags2
=
934 HTT_RX_MON_FP_CTRL_FILTER_FLASG2
|
935 HTT_RX_MON_MO_CTRL_FILTER_FLASG2
;
936 tlv_filter
.pkt_filter_flags3
=
937 HTT_RX_MON_FP_CTRL_FILTER_FLASG3
|
938 HTT_RX_MON_MO_CTRL_FILTER_FLASG3
|
939 HTT_RX_MON_FP_DATA_FILTER_FLASG3
|
940 HTT_RX_MON_MO_DATA_FILTER_FLASG3
;
943 ret
= ath11k_dp_tx_htt_rx_filter_setup(ar
->ab
, ring_id
, dp
->mac_id
,
944 HAL_RXDMA_MONITOR_BUF
,
945 DP_RXDMA_REFILL_RING_SIZE
,
950 ring_id
= dp
->rx_mon_status_refill_ring
.refill_buf_ring
.ring_id
;
952 tlv_filter
.rx_filter
=
953 HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING
;
955 tlv_filter
= ath11k_mac_mon_status_filter_default
;
957 ret
= ath11k_dp_tx_htt_rx_filter_setup(ar
->ab
, ring_id
, dp
->mac_id
,
958 HAL_RXDMA_MONITOR_STATUS
,
959 DP_RXDMA_REFILL_RING_SIZE
,