// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include "core.h"
#include "dp_tx.h"
#include "debug.h"
#include "hw.h"

static enum hal_tcl_encap_type
ath12k_dp_tx_get_encap_type(struct ath12k_link_vif *arvif, struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath12k_base *ab = arvif->ar->ab;

	if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
		return HAL_TCL_ENCAP_TYPE_RAW;

	if (tx_info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
		return HAL_TCL_ENCAP_TYPE_ETHERNET;

	return HAL_TCL_ENCAP_TYPE_NATIVE_WIFI;
}

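/* Note on the precedence above: RAW mode is a device-wide flag and wins over
 * the per-frame HW_80211_ENCAP hint; everything else defaults to native wifi,
 * for which ath12k_dp_tx_encap_nwifi() below strips the QoS control field
 * before the frame is handed to hardware.
 */
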
static void ath12k_dp_tx_encap_nwifi(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	u8 *qos_ctl;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return;

	qos_ctl = ieee80211_get_qos_ctl(hdr);
	memmove(skb->data + IEEE80211_QOS_CTL_LEN,
		skb->data, (void *)qos_ctl - (void *)skb->data);
	skb_pull(skb, IEEE80211_QOS_CTL_LEN);

	hdr = (void *)skb->data;
	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
}

static u8 ath12k_dp_tx_get_tid(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct ath12k_skb_cb *cb = ATH12K_SKB_CB(skb);

	if (cb->flags & ATH12K_SKB_HW_80211_ENCAP)
		return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	else if (!ieee80211_is_data_qos(hdr->frame_control))
		return HAL_DESC_REO_NON_QOS_TID;
	else
		return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
}

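/* Worked example for the mapping above: IEEE80211_QOS_CTL_TID_MASK is 0xf,
 * so a QoS frame queued with skb->priority == 5 (802.1D video class) goes
 * out on TID 5, while non-QoS data is steered to the dedicated
 * HAL_DESC_REO_NON_QOS_TID queue.
 */
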
enum hal_encrypt_type ath12k_dp_tx_get_encrypt_type(u32 cipher)
{
	switch (cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		return HAL_ENCRYPT_TYPE_WEP_40;
	case WLAN_CIPHER_SUITE_WEP104:
		return HAL_ENCRYPT_TYPE_WEP_104;
	case WLAN_CIPHER_SUITE_TKIP:
		return HAL_ENCRYPT_TYPE_TKIP_MIC;
	case WLAN_CIPHER_SUITE_CCMP:
		return HAL_ENCRYPT_TYPE_CCMP_128;
	case WLAN_CIPHER_SUITE_CCMP_256:
		return HAL_ENCRYPT_TYPE_CCMP_256;
	case WLAN_CIPHER_SUITE_GCMP:
		return HAL_ENCRYPT_TYPE_GCMP_128;
	case WLAN_CIPHER_SUITE_GCMP_256:
		return HAL_ENCRYPT_TYPE_AES_GCMP_256;
	default:
		return HAL_ENCRYPT_TYPE_OPEN;
	}
}

static void ath12k_dp_tx_release_txbuf(struct ath12k_dp *dp,
				       struct ath12k_tx_desc_info *tx_desc,
				       u8 pool_id)
{
	spin_lock_bh(&dp->tx_desc_lock[pool_id]);
	list_move_tail(&tx_desc->list, &dp->tx_desc_free_list[pool_id]);
	spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
}

static struct ath12k_tx_desc_info *ath12k_dp_tx_assign_buffer(struct ath12k_dp *dp,
							      u8 pool_id)
{
	struct ath12k_tx_desc_info *desc;

	spin_lock_bh(&dp->tx_desc_lock[pool_id]);
	desc = list_first_entry_or_null(&dp->tx_desc_free_list[pool_id],
					struct ath12k_tx_desc_info,
					list);
	if (!desc) {
		spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
		ath12k_warn(dp->ab, "failed to allocate data Tx buffer\n");
		return NULL;
	}

	list_move_tail(&desc->list, &dp->tx_desc_used_list[pool_id]);
	spin_unlock_bh(&dp->tx_desc_lock[pool_id]);

	return desc;
}

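/* Minimal usage sketch for the pair of helpers above: a descriptor moves from
 * the per-pool free list to the used list on assignment and back on release,
 * always under that pool's spinlock, so pools (picked from the skb queue
 * mapping) do not contend with each other:
 *
 *	tx_desc = ath12k_dp_tx_assign_buffer(dp, pool_id);
 *	if (!tx_desc)
 *		return -ENOMEM;
 *	...
 *	ath12k_dp_tx_release_txbuf(dp, tx_desc, pool_id);
 */
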
static void ath12k_hal_tx_cmd_ext_desc_setup(struct ath12k_base *ab,
					     struct hal_tx_msdu_ext_desc *tcl_ext_cmd,
					     struct hal_tx_info *ti)
{
	tcl_ext_cmd->info0 = le32_encode_bits(ti->paddr,
					      HAL_TX_MSDU_EXT_INFO0_BUF_PTR_LO);
	tcl_ext_cmd->info1 = le32_encode_bits(0x0,
					      HAL_TX_MSDU_EXT_INFO1_BUF_PTR_HI) |
			     le32_encode_bits(ti->data_len,
					      HAL_TX_MSDU_EXT_INFO1_BUF_LEN);

	/* use |= here: a plain assignment would clobber the buffer pointer
	 * and length bits encoded into info1 just above
	 */
	tcl_ext_cmd->info1 |= le32_encode_bits(1, HAL_TX_MSDU_EXT_INFO1_EXTN_OVERRIDE) |
			      le32_encode_bits(ti->encap_type,
					       HAL_TX_MSDU_EXT_INFO1_ENCAP_TYPE) |
			      le32_encode_bits(ti->encrypt_type,
					       HAL_TX_MSDU_EXT_INFO1_ENCRYPT_TYPE);
}

#define HTT_META_DATA_ALIGNMENT 0x8

static void *ath12k_dp_metadata_align_skb(struct sk_buff *skb, u8 tail_len)
{
	struct sk_buff *tail;
	void *metadata;

	if (unlikely(skb_cow_data(skb, tail_len, &tail) < 0))
		return NULL;

	metadata = pskb_put(skb, tail, tail_len);
	memset(metadata, 0, tail_len);
	return metadata;
}

/* Prepare HTT metadata when utilized with an ext MSDU */
static int ath12k_dp_prepare_htt_metadata(struct sk_buff *skb)
{
	struct hal_tx_msdu_metadata *desc_ext;
	u8 htt_desc_size;
	/* size rounded up to a multiple of 8 bytes */
	u8 htt_desc_size_aligned;

	htt_desc_size = sizeof(struct hal_tx_msdu_metadata);
	htt_desc_size_aligned = ALIGN(htt_desc_size, HTT_META_DATA_ALIGNMENT);

	desc_ext = ath12k_dp_metadata_align_skb(skb, htt_desc_size_aligned);
	if (!desc_ext)
		return -ENOMEM;

	desc_ext->info0 = le32_encode_bits(1, HAL_TX_MSDU_METADATA_INFO0_ENCRYPT_FLAG) |
			  le32_encode_bits(0, HAL_TX_MSDU_METADATA_INFO0_ENCRYPT_TYPE) |
			  le32_encode_bits(1,
					   HAL_TX_MSDU_METADATA_INFO0_HOST_TX_DESC_POOL);

	return 0;
}

static void ath12k_dp_tx_move_payload(struct sk_buff *skb,
				      unsigned long delta,
				      bool head)
{
	unsigned long len = skb->len;

	if (head) {
		skb_push(skb, delta);
		memmove(skb->data, skb->data + delta, len);
		skb_trim(skb, len);
	} else {
		skb_put(skb, delta);
		memmove(skb->data + delta, skb->data, len);
		skb_pull(skb, delta);
	}
}

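/* Worked example for the helper above: with delta == 4 and head == true,
 * skb_push() grows the buffer by 4 bytes at the front, memmove() slides the
 * original payload down into that space and skb_trim() drops the now-stale
 * tail bytes, so skb->data ends up 4 bytes lower; head == false performs the
 * mirror-image move through the tailroom.
 */
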
static int ath12k_dp_tx_align_payload(struct ath12k_base *ab,
				      struct sk_buff **pskb)
{
	u32 iova_mask = ab->hw_params->iova_mask;
	unsigned long offset, delta1, delta2;
	struct sk_buff *skb2, *skb = *pskb;
	unsigned int headroom = skb_headroom(skb);
	int tailroom = skb_tailroom(skb);
	int ret = 0;

	offset = (unsigned long)skb->data & iova_mask;
	delta1 = offset;
	delta2 = iova_mask - offset + 1;

	if (headroom >= delta1) {
		ath12k_dp_tx_move_payload(skb, delta1, true);
	} else if (tailroom >= delta2) {
		ath12k_dp_tx_move_payload(skb, delta2, false);
	} else {
		skb2 = skb_realloc_headroom(skb, iova_mask);
		if (!skb2) {
			ret = -ENOMEM;
			goto out;
		}

		dev_kfree_skb_any(skb);

		offset = (unsigned long)skb2->data & iova_mask;
		if (offset)
			ath12k_dp_tx_move_payload(skb2, offset, true);
		*pskb = skb2;
	}

out:
	return ret;
}

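/* Alignment arithmetic above, by example: if iova_mask were 0x7 and
 * skb->data ended in ...5, then offset = 5, delta1 = 5 (bytes to shift the
 * payload down through the headroom to the previous aligned address) and
 * delta2 = 0x7 - 5 + 1 = 3 (bytes to shift it up through the tailroom to the
 * next aligned address); only when neither room suffices is the buffer
 * reallocated with skb_realloc_headroom().
 */
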
int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif,
		 struct sk_buff *skb)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_dp *dp = &ab->dp;
	struct hal_tx_info ti = {0};
	struct ath12k_tx_desc_info *tx_desc;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
	struct hal_tcl_data_cmd *hal_tcl_desc;
	struct hal_tx_msdu_ext_desc *msg;
	struct sk_buff *skb_ext_desc;
	struct hal_srng *tcl_ring;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct ath12k_vif *ahvif = arvif->ahvif;
	struct dp_tx_ring *tx_ring;
	u8 pool_id;
	u8 hal_ring_id;
	int ret;
	u8 ring_selector, ring_map = 0;
	bool tcl_ring_retry;
	bool msdu_ext_desc = false;
	bool add_htt_metadata = false;
	u32 iova_mask = ab->hw_params->iova_mask;

	if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
		return -ESHUTDOWN;

	if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
	    !ieee80211_is_data(hdr->frame_control))
		return -EOPNOTSUPP;

	pool_id = skb_get_queue_mapping(skb) & (ATH12K_HW_MAX_QUEUES - 1);

	/* Let the default ring selection be based on current processor
	 * number, where one of the 3 tcl rings is selected based on
	 * the smp_processor_id(). In case that ring
	 * is full/busy, we resort to other available rings.
	 * If all rings are full, we drop the packet.
	 * TODO: Add throttling logic when all rings are full
	 */
	ring_selector = ab->hw_params->hw_ops->get_ring_selector(skb);

tcl_ring_sel:
	tcl_ring_retry = false;
	ti.ring_id = ring_selector % ab->hw_params->max_tx_ring;

	ring_map |= BIT(ti.ring_id);
	ti.rbm_id = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map[ti.ring_id].rbm_id;

	tx_ring = &dp->tx_ring[ti.ring_id];

	tx_desc = ath12k_dp_tx_assign_buffer(dp, pool_id);
	if (!tx_desc)
		return -ENOMEM;

	ti.bank_id = arvif->bank_id;
	ti.meta_data_flags = arvif->tcl_metadata;

	if (ahvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW &&
	    test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags)) {
		if (skb_cb->flags & ATH12K_SKB_CIPHER_SET) {
			ti.encrypt_type =
				ath12k_dp_tx_get_encrypt_type(skb_cb->cipher);

			if (ieee80211_has_protected(hdr->frame_control))
				skb_put(skb, IEEE80211_CCMP_MIC_LEN);
		} else {
			ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
		}

		msdu_ext_desc = true;
	}

	ti.encap_type = ath12k_dp_tx_get_encap_type(arvif, skb);
	ti.addr_search_flags = arvif->hal_addr_search_flags;
	ti.search_type = arvif->search_type;
	ti.type = HAL_TCL_DESC_TYPE_BUFFER;
	ti.pkt_offset = 0;
	ti.lmac_id = ar->lmac_id;
	ti.vdev_id = arvif->vdev_id;
	ti.bss_ast_hash = arvif->ast_hash;
	ti.bss_ast_idx = arvif->ast_idx;
	ti.dscp_tid_tbl_idx = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    ti.encap_type != HAL_TCL_ENCAP_TYPE_RAW) {
		ti.flags0 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_IP4_CKSUM_EN) |
			     u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_UDP4_CKSUM_EN) |
			     u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_UDP6_CKSUM_EN) |
			     u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TCP4_CKSUM_EN) |
			     u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TCP6_CKSUM_EN);
	}

	ti.flags1 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO3_TID_OVERWRITE);

	ti.tid = ath12k_dp_tx_get_tid(skb);

	switch (ti.encap_type) {
	case HAL_TCL_ENCAP_TYPE_NATIVE_WIFI:
		ath12k_dp_tx_encap_nwifi(skb);
		break;
	case HAL_TCL_ENCAP_TYPE_RAW:
		if (!test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags)) {
			ret = -EINVAL;
			goto fail_remove_tx_buf;
		}
		break;
	case HAL_TCL_ENCAP_TYPE_ETHERNET:
		/* no need to encap */
		break;
	case HAL_TCL_ENCAP_TYPE_802_3:
	default:
		/* TODO: Take care of other encap modes as well */
		ret = -EINVAL;
		atomic_inc(&ab->soc_stats.tx_err.misc_fail);
		goto fail_remove_tx_buf;
	}

	if (iova_mask &&
	    (unsigned long)skb->data & iova_mask) {
		ret = ath12k_dp_tx_align_payload(ab, &skb);
		if (ret) {
			ath12k_warn(ab, "failed to align TX buffer %d\n", ret);
			/* don't bail out, give original buffer
			 * a chance even unaligned.
			 */
			goto map;
		}

		/* hdr is pointing to a wrong place after alignment,
		 * so refresh it for later use.
		 */
		hdr = (void *)skb->data;
	}
map:
	ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(ab->dev, ti.paddr)) {
		atomic_inc(&ab->soc_stats.tx_err.misc_fail);
		ath12k_warn(ab, "failed to DMA map data Tx buffer\n");
		ret = -ENOMEM;
		goto fail_remove_tx_buf;
	}

	if (!test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags) &&
	    !(skb_cb->flags & ATH12K_SKB_HW_80211_ENCAP) &&
	    !(skb_cb->flags & ATH12K_SKB_CIPHER_SET) &&
	    ieee80211_has_protected(hdr->frame_control)) {
		/* Add metadata for sw encrypted vlan group traffic */
		add_htt_metadata = true;
		msdu_ext_desc = true;
		ti.flags0 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TO_FW);
		ti.encap_type = HAL_TCL_ENCAP_TYPE_RAW;
		ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
	}

	tx_desc->skb = skb;
	tx_desc->mac_id = ar->pdev_idx;
	ti.desc_id = tx_desc->desc_id;
	ti.data_len = skb->len;
	skb_cb->paddr = ti.paddr;
	skb_cb->vif = ahvif->vif;
	skb_cb->ar = ar;

	if (msdu_ext_desc) {
		skb_ext_desc = dev_alloc_skb(sizeof(struct hal_tx_msdu_ext_desc));
		if (!skb_ext_desc) {
			ret = -ENOMEM;
			goto fail_unmap_dma;
		}

		skb_put(skb_ext_desc, sizeof(struct hal_tx_msdu_ext_desc));
		memset(skb_ext_desc->data, 0, skb_ext_desc->len);

		msg = (struct hal_tx_msdu_ext_desc *)skb_ext_desc->data;
		ath12k_hal_tx_cmd_ext_desc_setup(ab, msg, &ti);

		if (add_htt_metadata) {
			ret = ath12k_dp_prepare_htt_metadata(skb_ext_desc);
			if (ret < 0) {
				ath12k_dbg(ab, ATH12K_DBG_DP_TX,
					   "Failed to add HTT meta data, dropping packet\n");
				kfree_skb(skb_ext_desc);
				goto fail_unmap_dma;
			}
		}

		ti.paddr = dma_map_single(ab->dev, skb_ext_desc->data,
					  skb_ext_desc->len, DMA_TO_DEVICE);
		ret = dma_mapping_error(ab->dev, ti.paddr);
		if (ret) {
			kfree_skb(skb_ext_desc);
			goto fail_unmap_dma;
		}

		ti.data_len = skb_ext_desc->len;
		ti.type = HAL_TCL_DESC_TYPE_EXT_DESC;

		skb_cb->paddr_ext_desc = ti.paddr;
	}

	hal_ring_id = tx_ring->tcl_data_ring.ring_id;
	tcl_ring = &ab->hal.srng_list[hal_ring_id];

	spin_lock_bh(&tcl_ring->lock);

	ath12k_hal_srng_access_begin(ab, tcl_ring);

	hal_tcl_desc = ath12k_hal_srng_src_get_next_entry(ab, tcl_ring);
	if (!hal_tcl_desc) {
		/* NOTE: It is highly unlikely we'll be running out of tcl_ring
		 * desc because the desc is directly enqueued onto hw queue.
		 */
		ath12k_hal_srng_access_end(ab, tcl_ring);
		ab->soc_stats.tx_err.desc_na[ti.ring_id]++;
		spin_unlock_bh(&tcl_ring->lock);
		ret = -ENOMEM;

		/* Checking for available tcl descriptors in another ring in
		 * case of failure due to a full tcl ring now is better than
		 * checking this ring earlier for each pkt tx.
		 * Restart ring selection if some rings are not checked yet.
		 */
		if (ring_map != (BIT(ab->hw_params->max_tx_ring) - 1) &&
		    ab->hw_params->tcl_ring_retry) {
			tcl_ring_retry = true;
			ring_selector++;
		}

		goto fail_unmap_dma;
	}

	ath12k_hal_tx_cmd_desc_setup(ab, hal_tcl_desc, &ti);

	ath12k_hal_srng_access_end(ab, tcl_ring);

	spin_unlock_bh(&tcl_ring->lock);

	ath12k_dbg_dump(ab, ATH12K_DBG_DP_TX, NULL, "dp tx msdu: ",
			skb->data, skb->len);

	atomic_inc(&ar->dp.num_tx_pending);

	return 0;

fail_unmap_dma:
	dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);

	if (skb_cb->paddr_ext_desc)
		dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
				 sizeof(struct hal_tx_msdu_ext_desc),
				 DMA_TO_DEVICE);

fail_remove_tx_buf:
	ath12k_dp_tx_release_txbuf(dp, tx_desc, pool_id);
	if (tcl_ring_retry)
		goto tcl_ring_sel;

	return ret;
}

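/* Flow summary for ath12k_dp_tx() above (informational): assign a host Tx
 * descriptor, optionally realign the payload for the IOVA constraint,
 * DMA-map the payload (plus an MSDU extension descriptor for raw/sw-crypto
 * frames), post one TCL command to the selected ring and bump
 * num_tx_pending; the two failure labels unwind the DMA mapping and the
 * descriptor in reverse order, retrying another TCL ring when this one was
 * full and the hw supports tcl_ring_retry.
 */
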
static void ath12k_dp_tx_free_txbuf(struct ath12k_base *ab,
				    struct sk_buff *msdu, u8 mac_id,
				    struct dp_tx_ring *tx_ring)
{
	struct ath12k *ar;
	struct ath12k_skb_cb *skb_cb;
	u8 pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);

	skb_cb = ATH12K_SKB_CB(msdu);
	ar = ab->pdevs[pdev_id].ar;

	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
	if (skb_cb->paddr_ext_desc)
		dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
				 sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);

	ieee80211_free_txskb(ar->ah->hw, msdu);

	if (atomic_dec_and_test(&ar->dp.num_tx_pending))
		wake_up(&ar->dp.tx_empty_waitq);
}

static void
ath12k_dp_tx_htt_tx_complete_buf(struct ath12k_base *ab,
				 struct sk_buff *msdu,
				 struct dp_tx_ring *tx_ring,
				 struct ath12k_dp_htt_wbm_tx_status *ts)
{
	struct ieee80211_tx_info *info;
	struct ath12k_skb_cb *skb_cb;
	struct ath12k *ar;

	skb_cb = ATH12K_SKB_CB(msdu);
	info = IEEE80211_SKB_CB(msdu);

	ar = skb_cb->ar;

	if (atomic_dec_and_test(&ar->dp.num_tx_pending))
		wake_up(&ar->dp.tx_empty_waitq);

	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
	if (skb_cb->paddr_ext_desc)
		dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
				 sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);

	memset(&info->status, 0, sizeof(info->status));

	if (ts->acked) {
		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
			info->flags |= IEEE80211_TX_STAT_ACK;
			info->status.ack_signal = ts->ack_rssi;

			if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
				      ab->wmi_ab.svc_map))
				info->status.ack_signal += ATH12K_DEFAULT_NOISE_FLOOR;

			info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
		} else {
			info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
		}
	}

	ieee80211_tx_status_skb(ath12k_ar_to_hw(ar), msdu);
}

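/* Note on the ack_signal adjustment above: firmware lacking the
 * WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT service reports RSSI relative
 * to the noise floor, so ATH12K_DEFAULT_NOISE_FLOOR (presumably -95 dBm, as
 * used elsewhere in the driver) is added to convert it to dBm.
 */
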
static void
ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab,
				     void *desc, u8 mac_id,
				     struct sk_buff *msdu,
				     struct dp_tx_ring *tx_ring)
{
	struct htt_tx_wbm_completion *status_desc;
	struct ath12k_dp_htt_wbm_tx_status ts = {0};
	enum hal_wbm_htt_tx_comp_status wbm_status;

	status_desc = desc;

	wbm_status = le32_get_bits(status_desc->info0,
				   HTT_TX_WBM_COMP_INFO0_STATUS);

	switch (wbm_status) {
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK:
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
		ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK);
		ts.ack_rssi = le32_get_bits(status_desc->info2,
					    HTT_TX_WBM_COMP_INFO2_ACK_RSSI);
		ath12k_dp_tx_htt_tx_complete_buf(ab, msdu, tx_ring, &ts);
		break;
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
		ath12k_dp_tx_free_txbuf(ab, msdu, mac_id, tx_ring);
		break;
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY:
		/* This event is to be handled only when the driver decides to
		 * use WDS offload functionality.
		 */
		break;
	default:
		ath12k_warn(ab, "Unknown htt tx status %d\n", wbm_status);
		break;
	}
}

static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
				       struct sk_buff *msdu,
				       struct hal_tx_status *ts)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_hw *ah = ar->ah;
	struct ieee80211_tx_info *info;
	struct ath12k_skb_cb *skb_cb;

	if (WARN_ON_ONCE(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)) {
		/* Must not happen */
		return;
	}

	skb_cb = ATH12K_SKB_CB(msdu);

	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
	if (skb_cb->paddr_ext_desc)
		dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
				 sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);

	rcu_read_lock();

	if (!rcu_dereference(ab->pdevs_active[ar->pdev_idx])) {
		ieee80211_free_txskb(ah->hw, msdu);
		goto exit;
	}

	if (!skb_cb->vif) {
		ieee80211_free_txskb(ah->hw, msdu);
		goto exit;
	}

	info = IEEE80211_SKB_CB(msdu);
	memset(&info->status, 0, sizeof(info->status));

	/* skip tx rate update from ieee80211_status */
	info->status.rates[0].idx = -1;

	switch (ts->status) {
	case HAL_WBM_TQM_REL_REASON_FRAME_ACKED:
		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
			info->flags |= IEEE80211_TX_STAT_ACK;
			info->status.ack_signal = ts->ack_rssi;

			if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
				      ab->wmi_ab.svc_map))
				info->status.ack_signal += ATH12K_DEFAULT_NOISE_FLOOR;

			info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
		}
		break;
	case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX:
		if (info->flags & IEEE80211_TX_CTL_NO_ACK) {
			info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
			break;
		}
		fallthrough;
	case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_MPDU:
	case HAL_WBM_TQM_REL_REASON_DROP_THRESHOLD:
	case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_AGED_FRAMES:
		/* The failure status is due to internal firmware tx failure
		 * hence drop the frame; do not update the status of frame to
		 * the upper layer
		 */
		ieee80211_free_txskb(ah->hw, msdu);
		goto exit;
	default:
		ath12k_dbg(ab, ATH12K_DBG_DP_TX, "tx frame is not acked status %d\n",
			   ts->status);
		break;
	}

	/* NOTE: Tx rate status reporting. Tx completion status does not have
	 * necessary information (for example nss) to build the tx rate.
	 * Might end up reporting it out-of-band from HTT stats.
	 */

	ieee80211_tx_status_skb(ath12k_ar_to_hw(ar), msdu);

exit:
	rcu_read_unlock();
}

static void ath12k_dp_tx_status_parse(struct ath12k_base *ab,
				      struct hal_wbm_completion_ring_tx *desc,
				      struct hal_tx_status *ts)
{
	ts->buf_rel_source =
		le32_get_bits(desc->info0, HAL_WBM_COMPL_TX_INFO0_REL_SRC_MODULE);
	if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
	    ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)
		return;

	if (ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)
		return;

	ts->status = le32_get_bits(desc->info0,
				   HAL_WBM_COMPL_TX_INFO0_TQM_RELEASE_REASON);

	ts->ppdu_id = le32_get_bits(desc->info1,
				    HAL_WBM_COMPL_TX_INFO1_TQM_STATUS_NUMBER);
	if (le32_to_cpu(desc->rate_stats.info0) & HAL_TX_RATE_STATS_INFO0_VALID)
		ts->rate_stats = le32_to_cpu(desc->rate_stats.info0);
	else
		ts->rate_stats = 0;
}

void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id)
{
	struct ath12k *ar;
	struct ath12k_dp *dp = &ab->dp;
	int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
	struct hal_srng *status_ring = &ab->hal.srng_list[hal_ring_id];
	struct ath12k_tx_desc_info *tx_desc = NULL;
	struct sk_buff *msdu;
	struct hal_tx_status ts = { 0 };
	struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
	struct hal_wbm_release_ring *desc;
	u8 mac_id, pdev_id;
	u64 desc_va;

	spin_lock_bh(&status_ring->lock);

	ath12k_hal_srng_access_begin(ab, status_ring);

	while (ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_head) != tx_ring->tx_status_tail) {
		desc = ath12k_hal_srng_dst_get_next_entry(ab, status_ring);
		if (!desc)
			break;

		memcpy(&tx_ring->tx_status[tx_ring->tx_status_head],
		       desc, sizeof(*desc));
		tx_ring->tx_status_head =
			ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_head);
	}

	if (ath12k_hal_srng_dst_peek(ab, status_ring) &&
	    (ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_head) == tx_ring->tx_status_tail)) {
		/* TODO: Process pending tx_status messages when kfifo_is_full() */
		ath12k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n");
	}

	ath12k_hal_srng_access_end(ab, status_ring);

	spin_unlock_bh(&status_ring->lock);

	while (ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_tail) != tx_ring->tx_status_head) {
		struct hal_wbm_completion_ring_tx *tx_status;
		u32 desc_id;

		tx_ring->tx_status_tail =
			ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_tail);
		tx_status = &tx_ring->tx_status[tx_ring->tx_status_tail];
		ath12k_dp_tx_status_parse(ab, tx_status, &ts);

		if (le32_get_bits(tx_status->info0, HAL_WBM_COMPL_TX_INFO0_CC_DONE)) {
			/* HW done cookie conversion */
			desc_va = ((u64)le32_to_cpu(tx_status->buf_va_hi) << 32 |
				   le32_to_cpu(tx_status->buf_va_lo));
			tx_desc = (struct ath12k_tx_desc_info *)((unsigned long)desc_va);
		} else {
			/* SW does cookie conversion to VA */
			desc_id = le32_get_bits(tx_status->buf_va_hi,
						BUFFER_ADDR_INFO1_SW_COOKIE);

			tx_desc = ath12k_dp_get_tx_desc(ab, desc_id);
		}
		if (!tx_desc) {
			ath12k_warn(ab, "unable to retrieve tx_desc!");
			continue;
		}

		msdu = tx_desc->skb;
		mac_id = tx_desc->mac_id;

		/* Release descriptor as soon as extracting necessary info
		 * to reduce contention
		 */
		ath12k_dp_tx_release_txbuf(dp, tx_desc, tx_desc->pool_id);
		if (ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW) {
			ath12k_dp_tx_process_htt_tx_complete(ab,
							     (void *)tx_status,
							     mac_id, msdu,
							     tx_ring);
			continue;
		}

		pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
		ar = ab->pdevs[pdev_id].ar;

		if (atomic_dec_and_test(&ar->dp.num_tx_pending))
			wake_up(&ar->dp.tx_empty_waitq);

		ath12k_dp_tx_complete_msdu(ar, msdu, &ts);
	}
}

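/* Cookie conversion note for the loop above: when hardware has already done
 * the conversion (CC_DONE set), buf_va_hi/buf_va_lo carry the 64-bit virtual
 * address of the ath12k_tx_desc_info itself; otherwise the software cookie
 * is extracted from buf_va_hi and resolved with ath12k_dp_get_tx_desc().
 */
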
static int
ath12k_dp_tx_get_ring_id_type(struct ath12k_base *ab,
			      int mac_id, u32 ring_id,
			      enum hal_ring_type ring_type,
			      enum htt_srng_ring_type *htt_ring_type,
			      enum htt_srng_ring_id *htt_ring_id)
{
	int ret = 0;

	switch (ring_type) {
	case HAL_RXDMA_BUF:
		/* for some targets, host fills rx buffer to fw and fw fills to
		 * rxbuf ring for each rxdma
		 */
		if (!ab->hw_params->rx_mac_buf_ring) {
			if (!(ring_id == HAL_SRNG_SW2RXDMA_BUF0 ||
			      ring_id == HAL_SRNG_SW2RXDMA_BUF1)) {
				ret = -EINVAL;
			}
			*htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
			*htt_ring_type = HTT_SW_TO_HW_RING;
		} else {
			if (ring_id == HAL_SRNG_SW2RXDMA_BUF0) {
				*htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
				*htt_ring_type = HTT_SW_TO_SW_RING;
			} else {
				*htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
				*htt_ring_type = HTT_SW_TO_HW_RING;
			}
		}
		break;
	case HAL_RXDMA_DST:
		*htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
		*htt_ring_type = HTT_HW_TO_SW_RING;
		break;
	case HAL_RXDMA_MONITOR_BUF:
		*htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
		*htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case HAL_RXDMA_MONITOR_STATUS:
		*htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
		*htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case HAL_RXDMA_MONITOR_DST:
		*htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
		*htt_ring_type = HTT_HW_TO_SW_RING;
		break;
	case HAL_RXDMA_MONITOR_DESC:
		*htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
		*htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	default:
		ath12k_warn(ab, "Unsupported ring type in DP :%d\n", ring_type);
		ret = -EINVAL;
	}
	return ret;
}

int ath12k_dp_tx_htt_srng_setup(struct ath12k_base *ab, u32 ring_id,
				int mac_id, enum hal_ring_type ring_type)
{
	struct htt_srng_setup_cmd *cmd;
	struct hal_srng *srng = &ab->hal.srng_list[ring_id];
	struct hal_srng_params params;
	struct sk_buff *skb;
	u32 ring_entry_sz;
	int len = sizeof(*cmd);
	dma_addr_t hp_addr, tp_addr;
	enum htt_srng_ring_type htt_ring_type;
	enum htt_srng_ring_id htt_ring_id;
	int ret;

	skb = ath12k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	memset(&params, 0, sizeof(params));
	ath12k_hal_srng_get_params(ab, srng, &params);

	hp_addr = ath12k_hal_srng_get_hp_addr(ab, srng);
	tp_addr = ath12k_hal_srng_get_tp_addr(ab, srng);

	ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
					    ring_type, &htt_ring_type,
					    &htt_ring_id);
	if (ret)
		goto err_free;

	skb_put(skb, len);
	cmd = (struct htt_srng_setup_cmd *)skb->data;
	cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_SRING_SETUP,
				      HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE);
	if (htt_ring_type == HTT_SW_TO_HW_RING ||
	    htt_ring_type == HTT_HW_TO_SW_RING)
		cmd->info0 |= le32_encode_bits(DP_SW2HW_MACID(mac_id),
					       HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID);
	else
		cmd->info0 |= le32_encode_bits(mac_id,
					       HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID);
	cmd->info0 |= le32_encode_bits(htt_ring_type,
				       HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE);
	cmd->info0 |= le32_encode_bits(htt_ring_id,
				       HTT_SRNG_SETUP_CMD_INFO0_RING_ID);

	cmd->ring_base_addr_lo = cpu_to_le32(params.ring_base_paddr &
					     HAL_ADDR_LSB_REG_MASK);

	cmd->ring_base_addr_hi = cpu_to_le32((u64)params.ring_base_paddr >>
					     HAL_ADDR_MSB_REG_SHIFT);

	ret = ath12k_hal_srng_get_entrysize(ab, ring_type);
	if (ret < 0)
		goto err_free;

	ring_entry_sz = ret;

	ring_entry_sz >>= 2;
	cmd->info1 = le32_encode_bits(ring_entry_sz,
				      HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE);
	cmd->info1 |= le32_encode_bits(params.num_entries * ring_entry_sz,
				       HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE);
	cmd->info1 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP),
				       HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP);
	cmd->info1 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
				       HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP);
	cmd->info1 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_RING_PTR_SWAP),
				       HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP);
	if (htt_ring_type == HTT_SW_TO_HW_RING)
		cmd->info1 |= cpu_to_le32(HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS);

	cmd->ring_head_off32_remote_addr_lo = cpu_to_le32(lower_32_bits(hp_addr));
	cmd->ring_head_off32_remote_addr_hi = cpu_to_le32(upper_32_bits(hp_addr));

	cmd->ring_tail_off32_remote_addr_lo = cpu_to_le32(lower_32_bits(tp_addr));
	cmd->ring_tail_off32_remote_addr_hi = cpu_to_le32(upper_32_bits(tp_addr));

	cmd->ring_msi_addr_lo = cpu_to_le32(lower_32_bits(params.msi_addr));
	cmd->ring_msi_addr_hi = cpu_to_le32(upper_32_bits(params.msi_addr));
	cmd->msi_data = cpu_to_le32(params.msi_data);

	cmd->intr_info =
		le32_encode_bits(params.intr_batch_cntr_thres_entries * ring_entry_sz,
				 HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH);
	cmd->intr_info |=
		le32_encode_bits(params.intr_timer_thres_us >> 3,
				 HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH);

	cmd->info2 = 0;
	if (params.flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
		cmd->info2 = le32_encode_bits(params.low_threshold,
					      HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH);
	}

	ath12k_dbg(ab, ATH12K_DBG_HAL,
		   "%s msi_addr_lo:0x%x, msi_addr_hi:0x%x, msi_data:0x%x\n",
		   __func__, cmd->ring_msi_addr_lo, cmd->ring_msi_addr_hi,
		   cmd->msi_data);

	ath12k_dbg(ab, ATH12K_DBG_HAL,
		   "ring_id:%d, ring_type:%d, intr_info:0x%x, flags:0x%x\n",
		   ring_id, ring_type, cmd->intr_info, cmd->info2);

	ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb);
	if (ret)
		goto err_free;

	return 0;

err_free:
	dev_kfree_skb_any(skb);

	return ret;
}

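/* Unit conversions in the setup above (informational): the HAL entry size is
 * returned in bytes and shifted right by 2 into 32-bit words before being
 * encoded, and intr_timer_thres_us >> 3 presumably converts the microsecond
 * threshold into the 8 us granularity the HTT interface expects.
 */
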
#define HTT_TARGET_VERSION_TIMEOUT_HZ (3 * HZ)

int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct sk_buff *skb;
	struct htt_ver_req_cmd *cmd;
	int len = sizeof(*cmd);
	int ret;

	init_completion(&dp->htt_tgt_version_received);

	skb = ath12k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_ver_req_cmd *)skb->data;
	cmd->ver_reg_info = le32_encode_bits(HTT_H2T_MSG_TYPE_VERSION_REQ,
					     HTT_VER_REQ_INFO_MSG_ID);

	ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	ret = wait_for_completion_timeout(&dp->htt_tgt_version_received,
					  HTT_TARGET_VERSION_TIMEOUT_HZ);
	if (ret == 0) {
		ath12k_warn(ab, "htt target version request timed out\n");
		return -ETIMEDOUT;
	}

	if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
		ath12k_err(ab, "unsupported htt major version %d supported version is %d\n",
			   dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR);
		return -EOPNOTSUPP;
	}

	return 0;
}

int ath12k_dp_tx_htt_h2t_ppdu_stats_req(struct ath12k *ar, u32 mask)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_dp *dp = &ab->dp;
	struct sk_buff *skb;
	struct htt_ppdu_stats_cfg_cmd *cmd;
	int len = sizeof(*cmd);
	u8 pdev_mask;
	int ret;
	int i;

	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
		skb = ath12k_htc_alloc_skb(ab, len);
		if (!skb)
			return -ENOMEM;

		skb_put(skb, len);
		cmd = (struct htt_ppdu_stats_cfg_cmd *)skb->data;
		cmd->msg = le32_encode_bits(HTT_H2T_MSG_TYPE_PPDU_STATS_CFG,
					    HTT_PPDU_STATS_CFG_MSG_TYPE);

		pdev_mask = 1 << (i + 1);
		cmd->msg |= le32_encode_bits(pdev_mask, HTT_PPDU_STATS_CFG_PDEV_ID);
		cmd->msg |= le32_encode_bits(mask, HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK);

		ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
		if (ret) {
			dev_kfree_skb_any(skb);
			return ret;
		}
	}

	return 0;
}

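/* pdev_mask example for the loop above: target pdev ids appear to be
 * 1-based, so iteration i = 0 yields pdev_mask = 0x2; the same
 * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG command is repeated once per rxdma unit
 * of the pdev.
 */
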
int ath12k_dp_tx_htt_rx_filter_setup(struct ath12k_base *ab, u32 ring_id,
				     int mac_id, enum hal_ring_type ring_type,
				     int rx_buf_size,
				     struct htt_rx_ring_tlv_filter *tlv_filter)
{
	struct htt_rx_ring_selection_cfg_cmd *cmd;
	struct hal_srng *srng = &ab->hal.srng_list[ring_id];
	struct hal_srng_params params;
	struct sk_buff *skb;
	int len = sizeof(*cmd);
	enum htt_srng_ring_type htt_ring_type;
	enum htt_srng_ring_id htt_ring_id;
	int ret;

	skb = ath12k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	memset(&params, 0, sizeof(params));
	ath12k_hal_srng_get_params(ab, srng, &params);

	ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
					    ring_type, &htt_ring_type,
					    &htt_ring_id);
	if (ret)
		goto err_free;

	skb_put(skb, len);
	cmd = (struct htt_rx_ring_selection_cfg_cmd *)skb->data;
	cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG,
				      HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE);
	if (htt_ring_type == HTT_SW_TO_HW_RING ||
	    htt_ring_type == HTT_HW_TO_SW_RING)
		cmd->info0 |=
			le32_encode_bits(DP_SW2HW_MACID(mac_id),
					 HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
	else
		cmd->info0 |=
			le32_encode_bits(mac_id,
					 HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
	cmd->info0 |= le32_encode_bits(htt_ring_id,
				       HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID);
	cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP),
				       HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS);
	cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
				       HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS);
	cmd->info0 |= le32_encode_bits(tlv_filter->offset_valid,
				       HTT_RX_RING_SELECTION_CFG_CMD_OFFSET_VALID);
	cmd->info1 = le32_encode_bits(rx_buf_size,
				      HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE);
	cmd->pkt_type_en_flags0 = cpu_to_le32(tlv_filter->pkt_filter_flags0);
	cmd->pkt_type_en_flags1 = cpu_to_le32(tlv_filter->pkt_filter_flags1);
	cmd->pkt_type_en_flags2 = cpu_to_le32(tlv_filter->pkt_filter_flags2);
	cmd->pkt_type_en_flags3 = cpu_to_le32(tlv_filter->pkt_filter_flags3);
	cmd->rx_filter_tlv = cpu_to_le32(tlv_filter->rx_filter);

	if (tlv_filter->offset_valid) {
		cmd->rx_packet_offset =
			le32_encode_bits(tlv_filter->rx_packet_offset,
					 HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET);

		cmd->rx_packet_offset |=
			le32_encode_bits(tlv_filter->rx_header_offset,
					 HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET);

		cmd->rx_mpdu_offset =
			le32_encode_bits(tlv_filter->rx_mpdu_end_offset,
					 HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET);

		cmd->rx_mpdu_offset |=
			le32_encode_bits(tlv_filter->rx_mpdu_start_offset,
					 HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET);

		cmd->rx_msdu_offset =
			le32_encode_bits(tlv_filter->rx_msdu_end_offset,
					 HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET);

		cmd->rx_msdu_offset |=
			le32_encode_bits(tlv_filter->rx_msdu_start_offset,
					 HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET);

		cmd->rx_attn_offset =
			le32_encode_bits(tlv_filter->rx_attn_offset,
					 HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET);
	}

	if (tlv_filter->rx_mpdu_start_wmask > 0 &&
	    tlv_filter->rx_msdu_end_wmask > 0) {
		cmd->info2 |=
			le32_encode_bits(true,
					 HTT_RX_RING_SELECTION_CFG_WORD_MASK_COMPACT_SET);
		cmd->rx_mpdu_start_end_mask =
			le32_encode_bits(tlv_filter->rx_mpdu_start_wmask,
					 HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_MASK);
		/* mpdu_end is not used by any hardware so far; assign it via
		 * hal ops in the future if a chip starts using it
		 */
		cmd->rx_mpdu_start_end_mask |=
			le32_encode_bits(tlv_filter->rx_mpdu_end_wmask,
					 HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_MASK);
		cmd->rx_msdu_end_word_mask =
			le32_encode_bits(tlv_filter->rx_msdu_end_wmask,
					 HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_MASK);
	}

	ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb);
	if (ret)
		goto err_free;

	return 0;

err_free:
	dev_kfree_skb_any(skb);

	return ret;
}

int ath12k_dp_tx_htt_h2t_ext_stats_req(struct ath12k *ar, u8 type,
				       struct htt_ext_stats_cfg_params *cfg_params,
				       u64 cookie)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_dp *dp = &ab->dp;
	struct sk_buff *skb;
	struct htt_ext_stats_cfg_cmd *cmd;
	int len = sizeof(*cmd);
	int ret;
	u32 pdev_id;

	skb = ath12k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_ext_stats_cfg_cmd *)skb->data;
	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG;

	pdev_id = ath12k_mac_get_target_pdev_id(ar);
	cmd->hdr.pdev_mask = 1 << pdev_id;

	cmd->hdr.stats_type = type;
	cmd->cfg_param0 = cpu_to_le32(cfg_params->cfg0);
	cmd->cfg_param1 = cpu_to_le32(cfg_params->cfg1);
	cmd->cfg_param2 = cpu_to_le32(cfg_params->cfg2);
	cmd->cfg_param3 = cpu_to_le32(cfg_params->cfg3);
	cmd->cookie_lsb = cpu_to_le32(lower_32_bits(cookie));
	cmd->cookie_msb = cpu_to_le32(upper_32_bits(cookie));

	ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
	if (ret) {
		ath12k_warn(ab, "failed to send htt type stats request: %d",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath12k_dp_tx_htt_monitor_mode_ring_config(struct ath12k *ar, bool reset)
{
	struct ath12k_base *ab = ar->ab;
	int ret;

	ret = ath12k_dp_tx_htt_rx_monitor_mode_ring_config(ar, reset);
	if (ret) {
		ath12k_err(ab, "failed to setup rx monitor filter %d\n", ret);
		return ret;
	}

	return 0;
}

int ath12k_dp_tx_htt_rx_monitor_mode_ring_config(struct ath12k *ar, bool reset)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_dp *dp = &ab->dp;
	struct htt_rx_ring_tlv_filter tlv_filter = {0};
	int ret, ring_id;

	ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
	tlv_filter.offset_valid = false;

	if (!reset) {
		tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING;
		tlv_filter.pkt_filter_flags0 =
					HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 |
					HTT_RX_MON_MO_MGMT_FILTER_FLAGS0;
		tlv_filter.pkt_filter_flags1 =
					HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 |
					HTT_RX_MON_MO_MGMT_FILTER_FLAGS1;
		tlv_filter.pkt_filter_flags2 =
					HTT_RX_MON_FP_CTRL_FILTER_FLASG2 |
					HTT_RX_MON_MO_CTRL_FILTER_FLASG2;
		tlv_filter.pkt_filter_flags3 =
					HTT_RX_MON_FP_CTRL_FILTER_FLASG3 |
					HTT_RX_MON_MO_CTRL_FILTER_FLASG3 |
					HTT_RX_MON_FP_DATA_FILTER_FLASG3 |
					HTT_RX_MON_MO_DATA_FILTER_FLASG3;
	}

	if (ab->hw_params->rxdma1_enable) {
		ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, 0,
						       HAL_RXDMA_MONITOR_BUF,
						       DP_RXDMA_REFILL_RING_SIZE,
						       &tlv_filter);
		if (ret) {
			ath12k_err(ab,
				   "failed to setup filter for monitor buf %d\n", ret);
			return ret;
		}
	}

	return 0;
}

int ath12k_dp_tx_htt_tx_filter_setup(struct ath12k_base *ab, u32 ring_id,
				     int mac_id, enum hal_ring_type ring_type,
				     int tx_buf_size,
				     struct htt_tx_ring_tlv_filter *htt_tlv_filter)
{
	struct htt_tx_ring_selection_cfg_cmd *cmd;
	struct hal_srng *srng = &ab->hal.srng_list[ring_id];
	struct hal_srng_params params;
	struct sk_buff *skb;
	int len = sizeof(*cmd);
	enum htt_srng_ring_type htt_ring_type;
	enum htt_srng_ring_id htt_ring_id;
	int ret;

	skb = ath12k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	memset(&params, 0, sizeof(params));
	ath12k_hal_srng_get_params(ab, srng, &params);

	ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
					    ring_type, &htt_ring_type,
					    &htt_ring_id);
	if (ret)
		goto err_free;

	skb_put(skb, len);
	cmd = (struct htt_tx_ring_selection_cfg_cmd *)skb->data;
	cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_TX_MONITOR_CFG,
				      HTT_TX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE);
	if (htt_ring_type == HTT_SW_TO_HW_RING ||
	    htt_ring_type == HTT_HW_TO_SW_RING)
		cmd->info0 |=
			le32_encode_bits(DP_SW2HW_MACID(mac_id),
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
	else
		cmd->info0 |=
			le32_encode_bits(mac_id,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
	cmd->info0 |= le32_encode_bits(htt_ring_id,
				       HTT_TX_RING_SELECTION_CFG_CMD_INFO0_RING_ID);
	cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP),
				       HTT_TX_RING_SELECTION_CFG_CMD_INFO0_SS);
	cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
				       HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PS);

	cmd->info1 |=
		le32_encode_bits(tx_buf_size,
				 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_RING_BUFF_SIZE);

	if (htt_tlv_filter->tx_mon_mgmt_filter) {
		cmd->info1 |=
			le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_MGMT,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE);
		cmd->info1 |=
			le32_encode_bits(htt_tlv_filter->tx_mon_pkt_dma_len,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT);
		cmd->info2 |=
			le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_MGMT,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG);
	}

	if (htt_tlv_filter->tx_mon_data_filter) {
		cmd->info1 |=
			le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_CTRL,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE);
		cmd->info1 |=
			le32_encode_bits(htt_tlv_filter->tx_mon_pkt_dma_len,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL);
		cmd->info2 |=
			le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_CTRL,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG);
	}

	if (htt_tlv_filter->tx_mon_ctrl_filter) {
		cmd->info1 |=
			le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_DATA,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE);
		cmd->info1 |=
			le32_encode_bits(htt_tlv_filter->tx_mon_pkt_dma_len,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA);
		cmd->info2 |=
			le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_DATA,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG);
	}

	cmd->tlv_filter_mask_in0 =
		cpu_to_le32(htt_tlv_filter->tx_mon_downstream_tlv_flags);
	cmd->tlv_filter_mask_in1 =
		cpu_to_le32(htt_tlv_filter->tx_mon_upstream_tlv_flags0);
	cmd->tlv_filter_mask_in2 =
		cpu_to_le32(htt_tlv_filter->tx_mon_upstream_tlv_flags1);
	cmd->tlv_filter_mask_in3 =
		cpu_to_le32(htt_tlv_filter->tx_mon_upstream_tlv_flags2);

	ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb);
	if (ret)
		goto err_free;

	return 0;

err_free:
	dev_kfree_skb_any(skb);

	return ret;
}