// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/ieee80211.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <crypto/hash.h>

#include "debugfs_htt_stats.h"
#include "debugfs_sta.h"

#define ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)
static inline
u8 *ath11k_dp_rx_h_80211_hdr(struct ath11k_base *ab, struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_hdr_status(desc);
}

static inline
enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct ath11k_base *ab,
							 struct hal_rx_desc *desc)
{
	if (!ab->hw_params.hw_ops->rx_desc_encrypt_valid(desc))
		return HAL_ENCRYPT_TYPE_OPEN;

	return ab->hw_params.hw_ops->rx_desc_get_encrypt_type(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_decap_type(struct ath11k_base *ab,
						       struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_decap_type(desc);
}

static inline
bool ath11k_dp_rx_h_msdu_start_ldpc_support(struct ath11k_base *ab,
					    struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_ldpc_support(desc);
}

static inline
u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct ath11k_base *ab,
					      struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mesh_ctl(desc);
}

static inline
bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct ath11k_base *ab,
					      struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
}

static inline bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct ath11k_base *ab,
						       struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_fc_valid(desc);
}

static inline bool ath11k_dp_rx_h_mpdu_start_more_frags(struct ath11k_base *ab,
							 struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz);
	return ieee80211_has_morefrags(hdr->frame_control);
}
static inline u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct ath11k_base *ab,
						    struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz);
	return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
}

static inline u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct ath11k_base *ab,
						   struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_start_seq_no(desc);
}

static inline void *ath11k_dp_rx_get_attention(struct ath11k_base *ab,
					       struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_attention(desc);
}

static inline bool ath11k_dp_rx_h_attn_msdu_done(struct rx_attention *attn)
{
	return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE,
			   __le32_to_cpu(attn->info2));
}

static inline bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct rx_attention *attn)
{
	return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL,
			   __le32_to_cpu(attn->info1));
}

static inline bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct rx_attention *attn)
{
	return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL,
			   __le32_to_cpu(attn->info1));
}

static inline bool ath11k_dp_rx_h_attn_is_decrypted(struct rx_attention *attn)
{
	return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE,
			  __le32_to_cpu(attn->info2)) ==
		RX_DESC_DECRYPT_STATUS_CODE_OK);
}

static u32 ath11k_dp_rx_h_attn_mpdu_err(struct rx_attention *attn)
{
	u32 info = __le32_to_cpu(attn->info1);
	u32 errmap = 0;

	if (info & RX_ATTENTION_INFO1_FCS_ERR)
		errmap |= DP_RX_MPDU_ERR_FCS;

	if (info & RX_ATTENTION_INFO1_DECRYPT_ERR)
		errmap |= DP_RX_MPDU_ERR_DECRYPT;

	if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR)
		errmap |= DP_RX_MPDU_ERR_TKIP_MIC;

	if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR)
		errmap |= DP_RX_MPDU_ERR_AMSDU_ERR;

	if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR)
		errmap |= DP_RX_MPDU_ERR_OVERFLOW;

	if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR)
		errmap |= DP_RX_MPDU_ERR_MSDU_LEN;

	if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR)
		errmap |= DP_RX_MPDU_ERR_MPDU_LEN;

	return errmap;
}
static bool ath11k_dp_rx_h_attn_msdu_len_err(struct ath11k_base *ab,
					     struct hal_rx_desc *desc)
{
	struct rx_attention *rx_attention;
	u32 errmap;

	rx_attention = ath11k_dp_rx_get_attention(ab, desc);
	errmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);

	return errmap & DP_RX_MPDU_ERR_MSDU_LEN;
}

static inline u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct ath11k_base *ab,
						     struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_len(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_sgi(struct ath11k_base *ab,
					       struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_sgi(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct ath11k_base *ab,
						    struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_rate_mcs(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct ath11k_base *ab,
						 struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_rx_bw(desc);
}

static inline u32 ath11k_dp_rx_h_msdu_start_freq(struct ath11k_base *ab,
						 struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_freq(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct ath11k_base *ab,
						    struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_pkt_type(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_nss(struct ath11k_base *ab,
					       struct hal_rx_desc *desc)
{
	return hweight8(ab->hw_params.hw_ops->rx_desc_get_msdu_nss(desc));
}
static inline u8 ath11k_dp_rx_h_mpdu_start_tid(struct ath11k_base *ab,
					       struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_tid(desc);
}

static inline u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct ath11k_base *ab,
						    struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_peer_id(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_end_l3pad(struct ath11k_base *ab,
					       struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_l3_pad_bytes(desc);
}

static inline bool ath11k_dp_rx_h_msdu_end_first_msdu(struct ath11k_base *ab,
						      struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_first_msdu(desc);
}

static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct ath11k_base *ab,
					      struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_last_msdu(desc);
}

static void ath11k_dp_rx_desc_end_tlv_copy(struct ath11k_base *ab,
					   struct hal_rx_desc *fdesc,
					   struct hal_rx_desc *ldesc)
{
	ab->hw_params.hw_ops->rx_desc_copy_attn_end_tlv(fdesc, ldesc);
}

static inline u32 ath11k_dp_rxdesc_get_mpdulen_err(struct rx_attention *attn)
{
	return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR,
			 __le32_to_cpu(attn->info1));
}

static inline u8 *ath11k_dp_rxdesc_get_80211hdr(struct ath11k_base *ab,
						struct hal_rx_desc *rx_desc)
{
	u8 *rx_pkt_hdr;

	rx_pkt_hdr = ab->hw_params.hw_ops->rx_desc_get_msdu_payload(rx_desc);

	return rx_pkt_hdr;
}

static inline bool ath11k_dp_rxdesc_mpdu_valid(struct ath11k_base *ab,
					       struct hal_rx_desc *rx_desc)
{
	u32 tlv_tag;

	tlv_tag = ab->hw_params.hw_ops->rx_desc_get_mpdu_start_tag(rx_desc);

	return tlv_tag == HAL_RX_MPDU_START;
}

static inline u32 ath11k_dp_rxdesc_get_ppduid(struct ath11k_base *ab,
					      struct hal_rx_desc *rx_desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_ppdu_id(rx_desc);
}
static inline void ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base *ab,
						 struct hal_rx_desc *desc,
						 u16 len)
{
	ab->hw_params.hw_ops->rx_desc_set_msdu_len(desc, len);
}

static bool ath11k_dp_rx_h_attn_is_mcbc(struct ath11k_base *ab,
					struct hal_rx_desc *desc)
{
	struct rx_attention *attn = ath11k_dp_rx_get_attention(ab, desc);

	return ath11k_dp_rx_h_msdu_end_first_msdu(ab, desc) &&
		(!!FIELD_GET(RX_ATTENTION_INFO1_MCAST_BCAST,
			     __le32_to_cpu(attn->info1)));
}

static bool ath11k_dp_rxdesc_mac_addr2_valid(struct ath11k_base *ab,
					     struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_mac_addr2_valid(desc);
}

static u8 *ath11k_dp_rxdesc_mpdu_start_addr2(struct ath11k_base *ab,
					     struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_mpdu_start_addr2(desc);
}
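
/* Timer callback used when rxdma1 is not available: reap the monitor rings
 * for each rxdma pdev and re-arm the reap timer.
 */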
static void ath11k_dp_service_mon_ring(struct timer_list *t)
{
	struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer);
	int i;

	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++)
		ath11k_dp_rx_process_mon_rings(ab, i, NULL, DP_MON_SERVICE_BUDGET);

	mod_timer(&ab->mon_reap_timer, jiffies +
		  msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
}

static int ath11k_dp_purge_mon_ring(struct ath11k_base *ab)
{
	int i, reaped = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS);

	do {
		for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++)
			reaped += ath11k_dp_rx_process_mon_rings(ab, i,
								 NULL,
								 DP_MON_SERVICE_BUDGET);

		/* nothing more to reap */
		if (reaped < DP_MON_SERVICE_BUDGET)
			return 0;

	} while (time_before(jiffies, timeout));

	ath11k_warn(ab, "dp mon ring purge timeout");

	return -ETIMEDOUT;
}
/* Returns number of Rx buffers replenished */
int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
			       struct dp_rxdma_ring *rx_ring,
			       int req_entries,
			       enum hal_rx_buf_return_buf_manager mgr)
{
	struct hal_srng *srng;
	u32 *desc;
	struct sk_buff *skb;
	int num_free;
	int num_remain;
	int buf_id;
	u32 cookie;
	dma_addr_t paddr;

	req_entries = min(req_entries, rx_ring->bufs_max);

	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
	if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
		req_entries = num_free;

	req_entries = min(num_free, req_entries);
	num_remain = req_entries;

	while (num_remain > 0) {
		skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
				    DP_RX_BUFFER_ALIGN_SIZE);
		if (!skb)
			break;

		if (!IS_ALIGNED((unsigned long)skb->data,
				DP_RX_BUFFER_ALIGN_SIZE)) {
			skb_pull(skb,
				 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
				 skb->data);
		}

		paddr = dma_map_single(ab->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(ab->dev, paddr))
			goto fail_free_skb;

		spin_lock_bh(&rx_ring->idr_lock);
		buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 1,
				   (rx_ring->bufs_max * 3) + 1, GFP_ATOMIC);
		spin_unlock_bh(&rx_ring->idr_lock);
		if (buf_id <= 0)
			goto fail_dma_unmap;

		desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
		if (!desc)
			goto fail_idr_remove;

		ATH11K_SKB_RXCB(skb)->paddr = paddr;

		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

		num_remain--;

		ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;

fail_idr_remove:
	spin_lock_bh(&rx_ring->idr_lock);
	idr_remove(&rx_ring->bufs_idr, buf_id);
	spin_unlock_bh(&rx_ring->idr_lock);
fail_dma_unmap:
	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
			 DMA_FROM_DEVICE);
fail_free_skb:
	dev_kfree_skb_any(skb);

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;
}
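
/* Release all skbs still tracked in a RXDMA ring's buffer IDR, unmapping
 * their DMA addresses before freeing them.
 */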
static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar,
					 struct dp_rxdma_ring *rx_ring)
{
	int buf_id;
	struct sk_buff *skb;

	spin_lock_bh(&rx_ring->idr_lock);
	idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
		idr_remove(&rx_ring->bufs_idr, buf_id);
		/* TODO: Understand where internal driver does this dma_unmap
		 * of rxdma_buffer.
		 */
		dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}

	idr_destroy(&rx_ring->bufs_idr);
	spin_unlock_bh(&rx_ring->idr_lock);

	return 0;
}

static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
	int i;

	ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);

	rx_ring = &dp->rxdma_mon_buf_ring;
	ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);

	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
		rx_ring = &dp->rx_mon_status_refill_ring[i];
		ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
	}

	return 0;
}
static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar,
					  struct dp_rxdma_ring *rx_ring,
					  u32 ringtype)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	int num_entries;

	num_entries = rx_ring->refill_buf_ring.size /
		ath11k_hal_srng_get_entrysize(ar->ab, ringtype);

	rx_ring->bufs_max = num_entries;
	ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries,
				   ar->ab->hw_params.hal_params->rx_buf_rbm);
	return 0;
}

static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
	int i;

	ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF);

	if (ar->ab->hw_params.rxdma1_enable) {
		rx_ring = &dp->rxdma_mon_buf_ring;
		ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF);
	}

	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
		rx_ring = &dp->rx_mon_status_refill_ring[i];
		ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS);
	}

	return 0;
}
static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	int i;

	ath11k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);

	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
		if (ab->hw_params.rx_mac_buf_ring)
			ath11k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);

		ath11k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);
		ath11k_dp_srng_cleanup(ab,
				       &dp->rx_mon_status_refill_ring[i].refill_buf_ring);
	}

	ath11k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
}

void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;

	for (i = 0; i < DP_REO_DST_RING_MAX; i++)
		ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
}

int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int ret;
	int i;

	for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
		ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
					   HAL_REO_DST, i, 0,
					   DP_REO_DST_RING_SIZE);
		if (ret) {
			ath11k_warn(ab, "failed to setup reo_dst_ring\n");
			goto err_reo_cleanup;
		}
	}

	return 0;

err_reo_cleanup:
	ath11k_dp_pdev_reo_cleanup(ab);

	return ret;
}
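
/* Allocate the per-pdev rx SRNGs: refill, per-MAC buffer, error destination
 * and monitor status rings, plus the monitor buf/dst/desc rings when
 * rxdma1 is enabled.
 */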
static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	struct dp_srng *srng = NULL;
	int i;
	int ret;

	ret = ath11k_dp_srng_setup(ar->ab,
				   &dp->rx_refill_buf_ring.refill_buf_ring,
				   HAL_RXDMA_BUF, 0,
				   dp->mac_id, DP_RXDMA_BUF_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n");
		return ret;
	}

	if (ar->ab->hw_params.rx_mac_buf_ring) {
		for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
			ret = ath11k_dp_srng_setup(ar->ab,
						   &dp->rx_mac_buf_ring[i],
						   HAL_RXDMA_BUF, 1,
						   dp->mac_id + i, 1024);
			if (ret) {
				ath11k_warn(ar->ab, "failed to setup rx_mac_buf_ring %d\n",
					    i);
				return ret;
			}
		}
	}

	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
		ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring[i],
					   HAL_RXDMA_DST, 0, dp->mac_id + i,
					   DP_RXDMA_ERR_DST_RING_SIZE);
		if (ret) {
			ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring %d\n", i);
			return ret;
		}
	}

	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
		srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
		ret = ath11k_dp_srng_setup(ar->ab,
					   srng,
					   HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id + i,
					   DP_RXDMA_MON_STATUS_RING_SIZE);
		if (ret) {
			ath11k_warn(ar->ab,
				    "failed to setup rx_mon_status_refill_ring %d\n", i);
			return ret;
		}
	}

	/* if rxdma1_enable is false, then it doesn't need
	 * to setup rxdam_mon_buf_ring, rxdma_mon_dst_ring
	 * and rxdma_mon_desc_ring.
	 * init reap timer for QCA6390.
	 */
	if (!ar->ab->hw_params.rxdma1_enable) {
		//init mon status buffer reap timer
		timer_setup(&ar->ab->mon_reap_timer,
			    ath11k_dp_service_mon_ring, 0);
		return 0;
	}

	ret = ath11k_dp_srng_setup(ar->ab,
				   &dp->rxdma_mon_buf_ring.refill_buf_ring,
				   HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_BUF_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_BUF\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring,
				   HAL_RXDMA_MONITOR_DST, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_DST_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_DST\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring,
				   HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_DESC_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_DESC\n");
		return ret;
	}

	return 0;
}
void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct dp_reo_cmd *cmd, *tmp;
	struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache;
	struct dp_rx_tid *rx_tid;

	spin_lock_bh(&dp->reo_cmd_lock);
	list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
		list_del(&cmd->list);
		rx_tid = &cmd->data;
		if (rx_tid->vaddr) {
			dma_unmap_single(ab->dev, rx_tid->paddr,
					 rx_tid->size, DMA_BIDIRECTIONAL);
			kfree(rx_tid->vaddr);
			rx_tid->vaddr = NULL;
		}
		kfree(cmd);
	}

	list_for_each_entry_safe(cmd_cache, tmp_cache,
				 &dp->reo_cmd_cache_flush_list, list) {
		list_del(&cmd_cache->list);
		dp->reo_cmd_cache_flush_count--;
		rx_tid = &cmd_cache->data;
		if (rx_tid->vaddr) {
			dma_unmap_single(ab->dev, rx_tid->paddr,
					 rx_tid->size, DMA_BIDIRECTIONAL);
			kfree(rx_tid->vaddr);
			rx_tid->vaddr = NULL;
		}
		kfree(cmd_cache);
	}
	spin_unlock_bh(&dp->reo_cmd_lock);
}
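
/* Completion callback for the flush-cache REO command: free the rx tid
 * queue memory once hardware is done with it.
 */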
static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx,
				   enum hal_reo_cmd_status status)
{
	struct dp_rx_tid *rx_tid = ctx;

	if (status != HAL_REO_CMD_SUCCESS)
		ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
			    rx_tid->tid, status);

	dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);
	rx_tid->vaddr = NULL;
}

static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab,
				      struct dp_rx_tid *rx_tid)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	unsigned long tot_desc_sz, desc_sz;
	int ret;

	tot_desc_sz = rx_tid->size;
	desc_sz = ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);

	while (tot_desc_sz > desc_sz) {
		tot_desc_sz -= desc_sz;
		cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
		ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
						HAL_REO_CMD_FLUSH_CACHE, &cmd,
						NULL);
		if (ret)
			ath11k_warn(ab,
				    "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
				    rx_tid->tid, ret);
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
	ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
					HAL_REO_CMD_FLUSH_CACHE,
					&cmd, ath11k_dp_reo_cmd_free);
	if (ret) {
		ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
			   rx_tid->tid, ret);
		dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
				 DMA_BIDIRECTIONAL);
		kfree(rx_tid->vaddr);
		rx_tid->vaddr = NULL;
	}
}
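
/* Completion callback for the rx queue delete command: queue the tid
 * descriptor for a cache flush and free aged entries.
 */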
static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
				      enum hal_reo_cmd_status status)
{
	struct ath11k_base *ab = dp->ab;
	struct dp_rx_tid *rx_tid = ctx;
	struct dp_reo_cache_flush_elem *elem, *tmp;

	if (status == HAL_REO_CMD_DRAIN) {
		goto free_desc;
	} else if (status != HAL_REO_CMD_SUCCESS) {
		/* Shouldn't happen! Cleanup in case of other failure? */
		ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
			    rx_tid->tid, status);
		return;
	}

	elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
	if (!elem)
		goto free_desc;

	elem->ts = jiffies;
	memcpy(&elem->data, rx_tid, sizeof(*rx_tid));

	spin_lock_bh(&dp->reo_cmd_lock);
	list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
	dp->reo_cmd_cache_flush_count++;

	/* Flush and invalidate aged REO desc from HW cache */
	list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
				 list) {
		if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD ||
		    time_after(jiffies, elem->ts +
			       msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) {
			list_del(&elem->list);
			dp->reo_cmd_cache_flush_count--;
			spin_unlock_bh(&dp->reo_cmd_lock);

			ath11k_dp_reo_cache_flush(ab, &elem->data);
			kfree(elem);
			spin_lock_bh(&dp->reo_cmd_lock);
		}
	}
	spin_unlock_bh(&dp->reo_cmd_lock);

	return;
free_desc:
	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);
	rx_tid->vaddr = NULL;
}
void ath11k_peer_rx_tid_delete(struct ath11k *ar,
			       struct ath11k_peer *peer, u8 tid)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	int ret;

	if (!rx_tid->active)
		return;

	rx_tid->active = false;

	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
					ath11k_dp_rx_tid_del_func);
	if (ret) {
		if (ret != -ESHUTDOWN)
			ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
				   tid, ret);
		dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
				 DMA_BIDIRECTIONAL);
		kfree(rx_tid->vaddr);
		rx_tid->vaddr = NULL;
	}
}
static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
					 u32 *link_desc,
					 enum hal_wbm_rel_bm_act action)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng;
	u32 *desc;
	int ret = 0;

	srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOBUFS;
		goto exit;
	}

	ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc,
					 action);

exit:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return ret;
}

static void ath11k_dp_rx_frags_cleanup(struct dp_rx_tid *rx_tid, bool rel_link_desc)
{
	struct ath11k_base *ab = rx_tid->ab;

	lockdep_assert_held(&ab->base_lock);

	if (rx_tid->dst_ring_desc) {
		if (rel_link_desc)
			ath11k_dp_rx_link_desc_return(ab, (u32 *)rx_tid->dst_ring_desc,
						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
		kfree(rx_tid->dst_ring_desc);
		rx_tid->dst_ring_desc = NULL;
	}

	rx_tid->last_frag_no = 0;
	rx_tid->rx_frag_bitmap = 0;
	__skb_queue_purge(&rx_tid->rx_frags);
}
void ath11k_peer_frags_flush(struct ath11k *ar, struct ath11k_peer *peer)
{
	struct dp_rx_tid *rx_tid;
	int i;

	lockdep_assert_held(&ar->ab->base_lock);

	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
		rx_tid = &peer->rx_tid[i];

		spin_unlock_bh(&ar->ab->base_lock);
		del_timer_sync(&rx_tid->frag_timer);
		spin_lock_bh(&ar->ab->base_lock);

		ath11k_dp_rx_frags_cleanup(rx_tid, true);
	}
}

void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
{
	struct dp_rx_tid *rx_tid;
	int i;

	lockdep_assert_held(&ar->ab->base_lock);

	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
		rx_tid = &peer->rx_tid[i];

		ath11k_peer_rx_tid_delete(ar, peer, i);
		ath11k_dp_rx_frags_cleanup(rx_tid, true);

		spin_unlock_bh(&ar->ab->base_lock);
		del_timer_sync(&rx_tid->frag_timer);
		spin_lock_bh(&ar->ab->base_lock);
	}
}
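
/* Update an existing REO rx queue with a new BA window size and,
 * optionally, a new starting sequence number.
 */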
static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar,
					 struct ath11k_peer *peer,
					 struct dp_rx_tid *rx_tid,
					 u32 ba_win_sz, u16 ssn,
					 bool update_ssn)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	int ret;

	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
	cmd.ba_window_size = ba_win_sz;

	if (update_ssn) {
		cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
		cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn);
	}

	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
					NULL);
	if (ret) {
		ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
			    rx_tid->tid, ret);
		return ret;
	}

	rx_tid->ba_win_sz = ba_win_sz;

	return 0;
}

static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
				      const u8 *peer_mac, int vdev_id, u8 tid)
{
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to free up rx tid mem\n");
		goto unlock_exit;
	}

	rx_tid = &peer->rx_tid[tid];
	if (!rx_tid->active)
		goto unlock_exit;

	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);
	rx_tid->vaddr = NULL;

	rx_tid->active = false;

unlock_exit:
	spin_unlock_bh(&ab->base_lock);
}
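
/* Allocate and DMA-map the REO queue descriptor for a peer/tid and point
 * firmware at it via WMI; if the queue is already active only the existing
 * queue is updated.
 */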
int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
			     u8 tid, u32 ba_win_sz, u16 ssn,
			     enum hal_pn_type pn_type)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;
	u32 hw_desc_sz;
	u32 *addr_aligned;
	void *vaddr;
	dma_addr_t paddr;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer %pM to set up rx tid\n",
			    peer_mac);
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	rx_tid = &peer->rx_tid[tid];
	/* Update the tid queue if it is already setup */
	if (rx_tid->active) {
		paddr = rx_tid->paddr;
		ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid,
						    ba_win_sz, ssn, true);
		spin_unlock_bh(&ab->base_lock);
		if (ret) {
			ath11k_warn(ab, "failed to update reo for peer %pM rx tid %d\n: %d",
				    peer_mac, tid, ret);
			return ret;
		}

		ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
							     peer_mac, paddr,
							     tid, 1, ba_win_sz);
		if (ret)
			ath11k_warn(ab, "failed to send wmi rx reorder queue for peer %pM tid %d: %d\n",
				    peer_mac, tid, ret);
		return ret;
	}

	rx_tid->ba_win_sz = ba_win_sz;

	/* TODO: Optimize the memory allocation for qos tid based on
	 * the actual BA window size in REO tid update path.
	 */
	if (tid == HAL_DESC_REO_NON_QOS_TID)
		hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid);
	else
		hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);

	vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
	if (!vaddr) {
		spin_unlock_bh(&ab->base_lock);
		return -ENOMEM;
	}

	addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);

	ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
				   ssn, pn_type);

	paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
			       DMA_BIDIRECTIONAL);

	ret = dma_mapping_error(ab->dev, paddr);
	if (ret) {
		spin_unlock_bh(&ab->base_lock);
		ath11k_warn(ab, "failed to setup dma map for peer %pM rx tid %d: %d\n",
			    peer_mac, tid, ret);
		goto err_mem_free;
	}

	rx_tid->vaddr = vaddr;
	rx_tid->paddr = paddr;
	rx_tid->size = hw_desc_sz;
	rx_tid->active = true;

	spin_unlock_bh(&ab->base_lock);

	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
						     paddr, tid, 1, ba_win_sz);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup rx reorder queue for peer %pM tid %d: %d\n",
			    peer_mac, tid, ret);
		ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid);
	}

	return ret;

err_mem_free:
	kfree(rx_tid->vaddr);
	rx_tid->vaddr = NULL;

	return ret;
}
int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
			     struct ieee80211_ampdu_params *params)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_sta *arsta = ath11k_sta_to_arsta(params->sta);
	int vdev_id = arsta->arvif->vdev_id;
	int ret;

	ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id,
				       params->tid, params->buf_size,
				       params->ssn, arsta->pn_type);
	if (ret)
		ath11k_warn(ab, "failed to setup rx tid %d\n", ret);

	return ret;
}

int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
			    struct ieee80211_ampdu_params *params)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct ath11k_sta *arsta = ath11k_sta_to_arsta(params->sta);
	int vdev_id = arsta->arvif->vdev_id;
	dma_addr_t paddr;
	bool active;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, params->sta->addr);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	paddr = peer->rx_tid[params->tid].paddr;
	active = peer->rx_tid[params->tid].active;

	if (!active) {
		spin_unlock_bh(&ab->base_lock);
		return 0;
	}

	ret = ath11k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
	spin_unlock_bh(&ab->base_lock);
	if (ret) {
		ath11k_warn(ab, "failed to update reo for rx tid %d: %d\n",
			    params->tid, ret);
		return ret;
	}

	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
						     params->sta->addr, paddr,
						     params->tid, 1, 1);
	if (ret)
		ath11k_warn(ab, "failed to send wmi to delete rx tid %d\n",
			    ret);

	return ret;
}
*arvif
,
1163 const u8
*peer_addr
,
1164 enum set_key_cmd key_cmd
,
1165 struct ieee80211_key_conf
*key
)
1167 struct ath11k
*ar
= arvif
->ar
;
1168 struct ath11k_base
*ab
= ar
->ab
;
1169 struct ath11k_hal_reo_cmd cmd
= {0};
1170 struct ath11k_peer
*peer
;
1171 struct dp_rx_tid
*rx_tid
;
1175 /* NOTE: Enable PN/TSC replay check offload only for unicast frames.
1176 * We use mac80211 PN/TSC replay check functionality for bcast/mcast
1179 if (!(key
->flags
& IEEE80211_KEY_FLAG_PAIRWISE
))
1182 cmd
.flag
|= HAL_REO_CMD_FLG_NEED_STATUS
;
1183 cmd
.upd0
|= HAL_REO_CMD_UPD0_PN
|
1184 HAL_REO_CMD_UPD0_PN_SIZE
|
1185 HAL_REO_CMD_UPD0_PN_VALID
|
1186 HAL_REO_CMD_UPD0_PN_CHECK
|
1187 HAL_REO_CMD_UPD0_SVLD
;
1189 switch (key
->cipher
) {
1190 case WLAN_CIPHER_SUITE_TKIP
:
1191 case WLAN_CIPHER_SUITE_CCMP
:
1192 case WLAN_CIPHER_SUITE_CCMP_256
:
1193 case WLAN_CIPHER_SUITE_GCMP
:
1194 case WLAN_CIPHER_SUITE_GCMP_256
:
1195 if (key_cmd
== SET_KEY
) {
1196 cmd
.upd1
|= HAL_REO_CMD_UPD1_PN_CHECK
;
1204 spin_lock_bh(&ab
->base_lock
);
1206 peer
= ath11k_peer_find(ab
, arvif
->vdev_id
, peer_addr
);
1208 ath11k_warn(ab
, "failed to find the peer to configure pn replay detection\n");
1209 spin_unlock_bh(&ab
->base_lock
);
1213 for (tid
= 0; tid
<= IEEE80211_NUM_TIDS
; tid
++) {
1214 rx_tid
= &peer
->rx_tid
[tid
];
1215 if (!rx_tid
->active
)
1217 cmd
.addr_lo
= lower_32_bits(rx_tid
->paddr
);
1218 cmd
.addr_hi
= upper_32_bits(rx_tid
->paddr
);
1219 ret
= ath11k_dp_tx_send_reo_cmd(ab
, rx_tid
,
1220 HAL_REO_CMD_UPDATE_RX_QUEUE
,
1223 ath11k_warn(ab
, "failed to configure rx tid %d queue for pn replay detection %d\n",
1229 spin_unlock_bh(&ab
->base_lock
);
static inline int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
					     u16 peer_id)
{
	int i;

	for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
		if (ppdu_stats->user_stats[i].is_valid_peer_id) {
			if (peer_id == ppdu_stats->user_stats[i].peer_id)
				return i;
		} else {
			return i;
		}
	}

	return -EINVAL;
}

static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab,
					   u16 tag, u16 len, const void *ptr,
					   void *data)
{
	struct htt_ppdu_stats_info *ppdu_info;
	struct htt_ppdu_user_stats *user_stats;
	int cur_user;
	u16 peer_id;

	ppdu_info = data;

	switch (tag) {
	case HTT_PPDU_STATS_TAG_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_common)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}
		memcpy((void *)&ppdu_info->ppdu_stats.common, ptr,
		       sizeof(struct htt_ppdu_stats_common));
		break;
	case HTT_PPDU_STATS_TAG_USR_RATE:
		if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->rate, ptr,
		       sizeof(struct htt_ppdu_stats_user_rate));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->cmpltn_cmn, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
		if (len <
		    sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id =
		((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->ack_ba, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
		user_stats->tlv_flags |= BIT(tag);
		break;
	}
	return 0;
}
int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
			   int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
				       const void *ptr, void *data),
			   void *data)
{
	const struct htt_tlv *tlv;
	const void *begin = ptr;
	u16 tlv_tag, tlv_len;
	int ret = -EINVAL;

	while (len > 0) {
		if (len < sizeof(*tlv)) {
			ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}
		tlv = (struct htt_tlv *)ptr;
		tlv_tag = FIELD_GET(HTT_TLV_TAG, tlv->header);
		tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		if (tlv_len > len) {
			ath11k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}
		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
		if (ret == -ENOMEM)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}
	return 0;
}
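
/* Translate one user's PPDU completion TLVs into mac80211 rate info and
 * per-peer tx statistics.
 */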
static void
ath11k_update_per_peer_tx_stats(struct ath11k *ar,
				struct htt_ppdu_stats *ppdu_stats, u8 user)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct ieee80211_sta *sta;
	struct ath11k_sta *arsta;
	struct htt_ppdu_stats_user_rate *user_rate;
	struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
	struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
	struct htt_ppdu_stats_common *common = &ppdu_stats->common;
	int ret;
	u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
	u32 succ_bytes = 0;
	u16 rate = 0, succ_pkts = 0;
	u32 tx_duration = 0;
	u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
	bool is_ampdu = false;

	if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
		return;

	if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
		is_ampdu =
			HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);

	if (usr_stats->tlv_flags &
	    BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
		succ_bytes = usr_stats->ack_ba.success_bytes;
		succ_pkts = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M,
				      usr_stats->ack_ba.info);
		tid = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM,
				usr_stats->ack_ba.info);
	}

	if (common->fes_duration_us)
		tx_duration = common->fes_duration_us;

	user_rate = &usr_stats->rate;
	flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
	bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
	nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
	mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
	sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
	dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);

	/* Note: If host configured fixed rates and in some other special
	 * cases, the broadcast/management frames are sent in different rates.
	 * Firmware rate's control to be skipped for this?
	 */

	if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH11K_HE_MCS_MAX) {
		ath11k_warn(ab, "Invalid HE mcs %d peer stats", mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH11K_VHT_MCS_MAX) {
		ath11k_warn(ab, "Invalid VHT mcs %d peer stats", mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH11K_HT_MCS_MAX || nss < 1)) {
		ath11k_warn(ab, "Invalid HT mcs %d nss %d peer stats",
			    mcs, nss);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
		ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
							    flags,
							    &rate_idx,
							    &rate);
		if (ret < 0)
			return;
	}

	rcu_read_lock();
	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id);

	if (!peer || !peer->sta) {
		spin_unlock_bh(&ab->base_lock);
		rcu_read_unlock();
		return;
	}

	sta = peer->sta;
	arsta = ath11k_sta_to_arsta(sta);

	memset(&arsta->txrate, 0, sizeof(arsta->txrate));

	switch (flags) {
	case WMI_RATE_PREAMBLE_OFDM:
		arsta->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_CCK:
		arsta->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_HT:
		arsta->txrate.mcs = mcs + 8 * (nss - 1);
		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_VHT:
		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_HE:
		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
		arsta->txrate.he_dcm = dcm;
		arsta->txrate.he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
		arsta->txrate.he_ru_alloc = ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc
						((user_rate->ru_end -
						 user_rate->ru_start) + 1);
		break;
	}

	arsta->txrate.nss = nss;

	arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
	arsta->tx_duration += tx_duration;
	memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));

	/* PPDU stats reported for mgmt packet doesn't have valid tx bytes.
	 * So skip peer stats update for mgmt packets.
	 */
	if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
		memset(peer_stats, 0, sizeof(*peer_stats));
		peer_stats->succ_pkts = succ_pkts;
		peer_stats->succ_bytes = succ_bytes;
		peer_stats->is_ampdu = is_ampdu;
		peer_stats->duration = tx_duration;
		peer_stats->ba_fails =
			HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
			HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);

		if (ath11k_debugfs_is_extd_tx_stats_enabled(ar))
			ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx);
	}

	spin_unlock_bh(&ab->base_lock);
	rcu_read_unlock();
}
static void ath11k_htt_update_ppdu_stats(struct ath11k *ar,
					 struct htt_ppdu_stats *ppdu_stats)
{
	u8 user;

	for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
		ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user);
}

static
struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
							u32 ppdu_id)
{
	struct htt_ppdu_stats_info *ppdu_info;

	lockdep_assert_held(&ar->data_lock);

	if (!list_empty(&ar->ppdu_stats_info)) {
		list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
			if (ppdu_info->ppdu_id == ppdu_id)
				return ppdu_info;
		}

		if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
			ppdu_info = list_first_entry(&ar->ppdu_stats_info,
						     typeof(*ppdu_info), list);
			list_del(&ppdu_info->list);
			ar->ppdu_stat_list_depth--;
			ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
			kfree(ppdu_info);
		}
	}

	ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
	if (!ppdu_info)
		return NULL;

	list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
	ar->ppdu_stat_list_depth++;

	return ppdu_info;
}
static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab,
				      struct sk_buff *skb)
{
	struct ath11k_htt_ppdu_stats_msg *msg;
	struct htt_ppdu_stats_info *ppdu_info;
	struct ath11k *ar;
	int ret;
	u8 pdev_id;
	u32 ppdu_id, len;

	msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data;
	len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->info);
	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info);
	ppdu_id = msg->ppdu_id;

	rcu_read_lock();
	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
	if (!ar) {
		ret = -EINVAL;
		goto out;
	}

	if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar))
		trace_ath11k_htt_ppdu_stats(ar, skb->data, len);

	spin_lock_bh(&ar->data_lock);
	ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id);
	if (!ppdu_info) {
		ret = -EINVAL;
		goto out_unlock_data;
	}

	ppdu_info->ppdu_id = ppdu_id;
	ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len,
				     ath11k_htt_tlv_ppdu_stats_parse,
				     (void *)ppdu_info);
	if (ret) {
		ath11k_warn(ab, "Failed to parse tlv %d\n", ret);
		goto out_unlock_data;
	}

out_unlock_data:
	spin_unlock_bh(&ar->data_lock);

out:
	rcu_read_unlock();

	return ret;
}
static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data;
	struct ath_pktlog_hdr *hdr = (struct ath_pktlog_hdr *)data;
	struct ath11k *ar;
	u8 pdev_id;

	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr);

	rcu_read_lock();

	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
	if (!ar) {
		ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id);
		goto out;
	}

	trace_ath11k_htt_pktlog(ar, data->payload, hdr->size,
				ar->ab->pktlog_defs_checksum);

out:
	rcu_read_unlock();
}
static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,
						  struct sk_buff *skb)
{
	u32 *data = (u32 *)skb->data;
	u8 pdev_id, ring_type, ring_id, pdev_idx;
	u16 hp, tp;
	u32 backpressure_time;
	struct ath11k_bp_stats *bp_stats;

	pdev_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_PDEV_ID_M, *data);
	ring_type = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_TYPE_M, *data);
	ring_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_ID_M, *data);
	++data;

	hp = FIELD_GET(HTT_BACKPRESSURE_EVENT_HP_M, *data);
	tp = FIELD_GET(HTT_BACKPRESSURE_EVENT_TP_M, *data);
	++data;

	backpressure_time = *data;

	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "backpressure event, pdev %d, ring type %d,ring id %d, hp %d tp %d, backpressure time %d\n",
		   pdev_id, ring_type, ring_id, hp, tp, backpressure_time);

	if (ring_type == HTT_BACKPRESSURE_UMAC_RING_TYPE) {
		if (ring_id >= HTT_SW_UMAC_RING_IDX_MAX)
			return;

		bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[ring_id];
	} else if (ring_type == HTT_BACKPRESSURE_LMAC_RING_TYPE) {
		pdev_idx = DP_HW2SW_MACID(pdev_id);

		if (ring_id >= HTT_SW_LMAC_RING_IDX_MAX || pdev_idx >= MAX_RADIOS)
			return;

		bp_stats = &ab->soc_stats.bp_stats.lmac_ring_bp_stats[ring_id][pdev_idx];
	} else {
		ath11k_warn(ab, "unknown ring type received in htt bp event %d\n",
			    ring_type);
		return;
	}

	spin_lock_bh(&ab->base_lock);
	bp_stats->hp = hp;
	bp_stats->tp = tp;
	bp_stats->count++;
	bp_stats->jiffies = jiffies;
	spin_unlock_bh(&ab->base_lock);
}
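
/* Dispatch HTT target-to-host messages: version, peer map/unmap, PPDU
 * stats, pktlog and backpressure events.
 */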
void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
				       struct sk_buff *skb)
{
	struct ath11k_dp *dp = &ab->dp;
	struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
	enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)resp);
	u16 peer_id;
	u8 vdev_id;
	u8 mac_addr[ETH_ALEN];
	u16 peer_mac_h16;
	u16 ast_hash;
	u16 hw_peer_id;

	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type);

	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF:
		dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR,
						  resp->version_msg.version);
		dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR,
						  resp->version_msg.version);
		complete(&dp->htt_tgt_version_received);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP:
		vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
				    resp->peer_map_ev.info);
		peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
				    resp->peer_map_ev.info);
		peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
					 resp->peer_map_ev.info1);
		ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
				       peer_mac_h16, mac_addr);
		ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP2:
		vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
				    resp->peer_map_ev.info);
		peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
				    resp->peer_map_ev.info);
		peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
					 resp->peer_map_ev.info1);
		ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
				       peer_mac_h16, mac_addr);
		ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL,
				     resp->peer_map_ev.info2);
		hw_peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID,
				       resp->peer_map_ev.info1);
		ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
				      hw_peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
	case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
		peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID,
				    resp->peer_unmap_ev.info);
		ath11k_peer_unmap_event(ab, peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
		ath11k_htt_pull_ppdu_stats(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
		ath11k_debugfs_htt_ext_stats_handler(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG:
		ath11k_htt_pktlog(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
		ath11k_htt_backpressure_event_handler(ab, skb);
		break;
	default:
		ath11k_warn(ab, "htt event %d not handled\n", type);
		break;
	}

	dev_kfree_skb_any(skb);
}
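
/* Stitch an MSDU that spans multiple rx buffers back into the first skb,
 * trimming the per-buffer HAL descriptors along the way.
 */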
static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar,
				      struct sk_buff_head *msdu_list,
				      struct sk_buff *first, struct sk_buff *last,
				      u8 l3pad_bytes, int msdu_len)
{
	struct ath11k_base *ab = ar->ab;
	struct sk_buff *skb;
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
	int buf_first_hdr_len, buf_first_len;
	struct hal_rx_desc *ldesc;
	int space_extra, rem_len, buf_len;
	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;

	/* As the msdu is spread across multiple rx buffers,
	 * find the offset to the start of msdu for computing
	 * the length of the msdu in the first buffer.
	 */
	buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes;
	buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;

	if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
		skb_put(first, buf_first_hdr_len + msdu_len);
		skb_pull(first, buf_first_hdr_len);
		return 0;
	}

	ldesc = (struct hal_rx_desc *)last->data;
	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ab, ldesc);
	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ab, ldesc);

	/* MSDU spans over multiple buffers because the length of the MSDU
	 * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data
	 * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
	 */
	skb_put(first, DP_RX_BUFFER_SIZE);
	skb_pull(first, buf_first_hdr_len);

	/* When an MSDU spread over multiple buffers attention, MSDU_END and
	 * MPDU_END tlvs are valid only in the last buffer. Copy those tlvs.
	 */
	ath11k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc);

	space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
	if (space_extra > 0 &&
	    (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
		/* Free up all buffers of the MSDU */
		while ((skb = __skb_dequeue(msdu_list)) != NULL) {
			rxcb = ATH11K_SKB_RXCB(skb);
			if (!rxcb->is_continuation) {
				dev_kfree_skb_any(skb);
				break;
			}
			dev_kfree_skb_any(skb);
		}
		return -ENOMEM;
	}

	rem_len = msdu_len - buf_first_len;
	while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
		rxcb = ATH11K_SKB_RXCB(skb);
		if (rxcb->is_continuation)
			buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
		else
			buf_len = rem_len;

		if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) {
			WARN_ON_ONCE(1);
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		skb_put(skb, buf_len + hal_rx_desc_sz);
		skb_pull(skb, hal_rx_desc_sz);
		skb_copy_from_linear_data(skb, skb_put(first, buf_len),
					  buf_len);
		dev_kfree_skb_any(skb);

		rem_len -= buf_len;
		if (!rxcb->is_continuation)
			break;
	}

	return 0;
}
static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
						      struct sk_buff *first)
{
	struct sk_buff *skb;
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);

	if (!rxcb->is_continuation)
		return first;

	skb_queue_walk(msdu_list, skb) {
		rxcb = ATH11K_SKB_RXCB(skb);
		if (!rxcb->is_continuation)
			return skb;
	}

	return NULL;
}

static void ath11k_dp_rx_h_csum_offload(struct ath11k *ar, struct sk_buff *msdu)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	struct rx_attention *rx_attention;
	bool ip_csum_fail, l4_csum_fail;

	rx_attention = ath11k_dp_rx_get_attention(ar->ab, rxcb->rx_desc);
	ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rx_attention);
	l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rx_attention);

	msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
			  CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
}
int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar, enum hal_encrypt_type enctype)
{
	switch (enctype) {
	case HAL_ENCRYPT_TYPE_OPEN:
	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
	case HAL_ENCRYPT_TYPE_TKIP_MIC:
		return 0;
	case HAL_ENCRYPT_TYPE_CCMP_128:
		return IEEE80211_CCMP_MIC_LEN;
	case HAL_ENCRYPT_TYPE_CCMP_256:
		return IEEE80211_CCMP_256_MIC_LEN;
	case HAL_ENCRYPT_TYPE_GCMP_128:
	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
		return IEEE80211_GCMP_MIC_LEN;
	case HAL_ENCRYPT_TYPE_WEP_40:
	case HAL_ENCRYPT_TYPE_WEP_104:
	case HAL_ENCRYPT_TYPE_WEP_128:
	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
	case HAL_ENCRYPT_TYPE_WAPI:
		break;
	}

	ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
	return 0;
}

static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar,
					 enum hal_encrypt_type enctype)
{
	switch (enctype) {
	case HAL_ENCRYPT_TYPE_OPEN:
		return 0;
	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
	case HAL_ENCRYPT_TYPE_TKIP_MIC:
		return IEEE80211_TKIP_IV_LEN;
	case HAL_ENCRYPT_TYPE_CCMP_128:
		return IEEE80211_CCMP_HDR_LEN;
	case HAL_ENCRYPT_TYPE_CCMP_256:
		return IEEE80211_CCMP_256_HDR_LEN;
	case HAL_ENCRYPT_TYPE_GCMP_128:
	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
		return IEEE80211_GCMP_HDR_LEN;
	case HAL_ENCRYPT_TYPE_WEP_40:
	case HAL_ENCRYPT_TYPE_WEP_104:
	case HAL_ENCRYPT_TYPE_WEP_128:
	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
	case HAL_ENCRYPT_TYPE_WAPI:
		break;
	}

	ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
	return 0;
}

static int ath11k_dp_rx_crypto_icv_len(struct ath11k *ar,
				       enum hal_encrypt_type enctype)
{
	switch (enctype) {
	case HAL_ENCRYPT_TYPE_OPEN:
	case HAL_ENCRYPT_TYPE_CCMP_128:
	case HAL_ENCRYPT_TYPE_CCMP_256:
	case HAL_ENCRYPT_TYPE_GCMP_128:
	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
		return 0;
	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
	case HAL_ENCRYPT_TYPE_TKIP_MIC:
		return IEEE80211_TKIP_ICV_LEN;
	case HAL_ENCRYPT_TYPE_WEP_40:
	case HAL_ENCRYPT_TYPE_WEP_104:
	case HAL_ENCRYPT_TYPE_WEP_128:
	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
	case HAL_ENCRYPT_TYPE_WAPI:
		break;
	}

	ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
	return 0;
}
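
/* Convert a native-wifi decapped MSDU back into an 802.11 frame, restoring
 * the original header, QoS control field and crypto parameters.
 */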
static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar,
					 struct sk_buff *msdu,
					 u8 *first_hdr,
					 enum hal_encrypt_type enctype,
					 struct ieee80211_rx_status *status)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN];
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	u16 qos_ctl = 0;
	u8 *qos;

	/* copy SA & DA and pull decapped header */
	hdr = (struct ieee80211_hdr *)msdu->data;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
	skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control));

	if (rxcb->is_first_msdu) {
		/* original 802.11 header is valid for the first msdu
		 * hence we can reuse the same header
		 */
		hdr = (struct ieee80211_hdr *)first_hdr;
		hdr_len = ieee80211_hdrlen(hdr->frame_control);

		/* Each A-MSDU subframe will be reported as a separate MSDU,
		 * so strip the A-MSDU bit from QoS Ctl.
		 */
		if (ieee80211_is_data_qos(hdr->frame_control)) {
			qos = ieee80211_get_qos_ctl(hdr);
			qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
		}
	} else {
		/* Rebuild qos header if this is a middle/last msdu */
		hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA);

		/* Reset the order bit as the HT_Control header is stripped */
		hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER));

		qos_ctl = rxcb->tid;

		if (ath11k_dp_rx_h_msdu_start_mesh_ctl_present(ar->ab, rxcb->rx_desc))
			qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;

		/* TODO Add other QoS ctl fields when required */

		/* copy decap header before overwriting for reuse below */
		memcpy(decap_hdr, (uint8_t *)hdr, hdr_len);
	}

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath11k_dp_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + hdr_len,
		       ath11k_dp_rx_crypto_param_len(ar, enctype));
	}

	if (!rxcb->is_first_msdu) {
		memcpy(skb_push(msdu,
				IEEE80211_QOS_CTL_LEN), &qos_ctl,
		       IEEE80211_QOS_CTL_LEN);
		memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len);
		return;
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}
static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu,
				       enum hal_encrypt_type enctype,
				       struct ieee80211_rx_status *status,
				       bool decrypted)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	size_t crypto_len;

	if (!rxcb->is_first_msdu ||
	    !(rxcb->is_first_msdu && rxcb->is_last_msdu)) {
		WARN_ON_ONCE(1);
		return;
	}

	skb_trim(msdu, msdu->len - FCS_LEN);

	if (!decrypted)
		return;

	hdr = (void *)msdu->data;

	/* Tail */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		skb_trim(msdu, msdu->len -
			 ath11k_dp_rx_crypto_mic_len(ar, enctype));

		skb_trim(msdu, msdu->len -
			 ath11k_dp_rx_crypto_icv_len(ar, enctype));
	} else {
		/* MIC */
		if (status->flag & RX_FLAG_MIC_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath11k_dp_rx_crypto_mic_len(ar, enctype));

		/* ICV */
		if (status->flag & RX_FLAG_ICV_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath11k_dp_rx_crypto_icv_len(ar, enctype));
	}

	/* MMIC */
	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
		skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);

	/* Head */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);

		memmove((void *)msdu->data + crypto_len,
			(void *)msdu->data, hdr_len);
		skb_pull(msdu, crypto_len);
	}
}
static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar,
					 struct sk_buff *msdu,
					 enum hal_encrypt_type enctype)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	struct ieee80211_hdr *hdr;
	size_t hdr_len, crypto_len;
	void *rfc1042;
	bool is_amsdu;

	is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu);
	hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(ar->ab, rxcb->rx_desc);
	rfc1042 = hdr;

	if (rxcb->is_first_msdu) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);

		rfc1042 += hdr_len + crypto_len;
	}

	if (is_amsdu)
		rfc1042 += sizeof(struct ath11k_dp_amsdu_subframe_hdr);

	return rfc1042;
}
static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar,
				       struct sk_buff *msdu,
				       u8 *first_hdr,
				       enum hal_encrypt_type enctype,
				       struct ieee80211_rx_status *status)
{
	struct ieee80211_hdr *hdr;
	struct ethhdr *eth;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	void *rfc1042;

	rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype);
	if (WARN_ON_ONCE(!rfc1042))
		return;

	/* pull decapped header and copy SA & DA */
	eth = (struct ethhdr *)msdu->data;
	ether_addr_copy(da, eth->h_dest);
	ether_addr_copy(sa, eth->h_source);
	skb_pull(msdu, sizeof(struct ethhdr));

	/* push rfc1042/llc/snap */
	memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042,
	       sizeof(struct ath11k_dp_rfc1042_hdr));

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath11k_dp_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + hdr_len,
		       ath11k_dp_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}
static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,
				   struct hal_rx_desc *rx_desc,
				   enum hal_encrypt_type enctype,
				   struct ieee80211_rx_status *status,
				   bool decrypted)
{
	u8 *first_hdr;
	u8 decap;
	struct ethhdr *ehdr;

	first_hdr = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc);
	decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc);

	switch (decap) {
	case DP_RX_DECAP_TYPE_NATIVE_WIFI:
		ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr,
					     enctype, status);
		break;
	case DP_RX_DECAP_TYPE_RAW:
		ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status,
					   decrypted);
		break;
	case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
		ehdr = (struct ethhdr *)msdu->data;

		/* mac80211 allows fast path only for authorized STA */
		if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) {
			ATH11K_SKB_RXCB(msdu)->is_eapol = true;
			ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
						   enctype, status);
			break;
		}

		/* PN for mcast packets will be validated in mac80211;
		 * remove eth header and add 802.11 header.
		 */
		if (ATH11K_SKB_RXCB(msdu)->is_mcbc && decrypted)
			ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
						   enctype, status);
		break;
	case DP_RX_DECAP_TYPE_8023:
		/* TODO: Handle undecap for these formats */
		break;
	}
}
static struct ath11k_peer *
ath11k_dp_rx_h_find_peer(struct ath11k_base *ab, struct sk_buff *msdu)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	struct hal_rx_desc *rx_desc = rxcb->rx_desc;
	struct ath11k_peer *peer = NULL;

	lockdep_assert_held(&ab->base_lock);

	if (rxcb->peer_id)
		peer = ath11k_peer_find_by_id(ab, rxcb->peer_id);

	if (peer)
		return peer;

	if (!rx_desc || !(ath11k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)))
		return NULL;

	peer = ath11k_peer_find_by_addr(ab,
					ath11k_dp_rxdesc_mpdu_start_addr2(ab, rx_desc));

	return peer;
}

static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,
				struct sk_buff *msdu,
				struct hal_rx_desc *rx_desc,
				struct ieee80211_rx_status *rx_status)
{
	bool fill_crypto_hdr;
	enum hal_encrypt_type enctype;
	bool is_decrypted = false;
	struct ath11k_skb_rxcb *rxcb;
	struct ieee80211_hdr *hdr;
	struct ath11k_peer *peer;
	struct rx_attention *rx_attention;
	u32 err_bitmap;

	/* PN for multicast packets will be checked in mac80211 */
	rxcb = ATH11K_SKB_RXCB(msdu);
	fill_crypto_hdr = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc);
	rxcb->is_mcbc = fill_crypto_hdr;

	if (rxcb->is_mcbc) {
		rxcb->peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);
		rxcb->seq_no = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);
	}

	spin_lock_bh(&ar->ab->base_lock);
	peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu);
	if (peer) {
		if (rxcb->is_mcbc)
			enctype = peer->sec_type_grp;
		else
			enctype = peer->sec_type;
	} else {
		enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc);
	}
	spin_unlock_bh(&ar->ab->base_lock);

	rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc);
	err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);
	if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap)
		is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention);

	/* Clear per-MPDU flags while leaving per-PPDU flags intact */
	rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
			     RX_FLAG_MMIC_ERROR |
			     RX_FLAG_DECRYPTED |
			     RX_FLAG_IV_STRIPPED |
			     RX_FLAG_MMIC_STRIPPED);

	if (err_bitmap & DP_RX_MPDU_ERR_FCS)
		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
	if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC)
		rx_status->flag |= RX_FLAG_MMIC_ERROR;

	if (is_decrypted) {
		rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;

		if (fill_crypto_hdr)
			rx_status->flag |= RX_FLAG_MIC_STRIPPED |
					   RX_FLAG_ICV_STRIPPED;
		else
			rx_status->flag |= RX_FLAG_IV_STRIPPED |
					   RX_FLAG_PN_VALIDATED;
	}

	ath11k_dp_rx_h_csum_offload(ar, msdu);
	ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
			       enctype, rx_status, is_decrypted);

	if (!is_decrypted || fill_crypto_hdr)
		return;

	if (ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc) !=
	    DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
		hdr = (void *)msdu->data;
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	}
}

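/* Note on the flag handling above: for multicast/broadcast frames
 * (fill_crypto_hdr) the hardware does not validate the PN, so the IV is
 * left in place and mac80211 performs the replay check; only MIC/ICV are
 * reported as stripped. For unicast decrypted frames the IV is stripped
 * and RX_FLAG_PN_VALIDATED tells mac80211 the check already happened.
 */
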
static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
				struct ieee80211_rx_status *rx_status)
{
	struct ieee80211_supported_band *sband;
	enum rx_msdu_start_pkt_type pkt_type;
	u8 bw;
	u8 rate_mcs, nss;
	u8 sgi;
	bool is_cck, is_ldpc;

	pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(ar->ab, rx_desc);
	bw = ath11k_dp_rx_h_msdu_start_rx_bw(ar->ab, rx_desc);
	rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(ar->ab, rx_desc);
	nss = ath11k_dp_rx_h_msdu_start_nss(ar->ab, rx_desc);
	sgi = ath11k_dp_rx_h_msdu_start_sgi(ar->ab, rx_desc);

	switch (pkt_type) {
	case RX_MSDU_START_PKT_TYPE_11A:
	case RX_MSDU_START_PKT_TYPE_11B:
		is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
		sband = &ar->mac.sbands[rx_status->band];
		rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs,
								is_cck);
		break;
	case RX_MSDU_START_PKT_TYPE_11N:
		rx_status->encoding = RX_ENC_HT;
		if (rate_mcs > ATH11K_HT_MCS_MAX) {
			ath11k_warn(ar->ab,
				    "Received with invalid mcs in HT mode %d\n",
				    rate_mcs);
			break;
		}
		rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
		if (sgi)
			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
		break;
	case RX_MSDU_START_PKT_TYPE_11AC:
		rx_status->encoding = RX_ENC_VHT;
		rx_status->rate_idx = rate_mcs;
		if (rate_mcs > ATH11K_VHT_MCS_MAX) {
			ath11k_warn(ar->ab,
				    "Received with invalid mcs in VHT mode %d\n",
				    rate_mcs);
			break;
		}
		rx_status->nss = nss;
		if (sgi)
			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
		is_ldpc = ath11k_dp_rx_h_msdu_start_ldpc_support(ar->ab, rx_desc);
		if (is_ldpc)
			rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
		break;
	case RX_MSDU_START_PKT_TYPE_11AX:
		rx_status->rate_idx = rate_mcs;
		if (rate_mcs > ATH11K_HE_MCS_MAX) {
			ath11k_warn(ar->ab,
				    "Received with invalid mcs in HE mode %d\n",
				    rate_mcs);
			break;
		}
		rx_status->encoding = RX_ENC_HE;
		rx_status->nss = nss;
		rx_status->he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
		rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
		break;
	}
}

static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
				struct ieee80211_rx_status *rx_status)
{
	u8 channel_num;
	u32 center_freq, meta_data;
	struct ieee80211_channel *channel;

	rx_status->freq = 0;
	rx_status->rate_idx = 0;
	rx_status->nss = 0;
	rx_status->encoding = RX_ENC_LEGACY;
	rx_status->bw = RATE_INFO_BW_20;

	rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;

	meta_data = ath11k_dp_rx_h_msdu_start_freq(ar->ab, rx_desc);
	channel_num = meta_data;
	center_freq = meta_data >> 16;

	if (center_freq >= ATH11K_MIN_6G_FREQ &&
	    center_freq <= ATH11K_MAX_6G_FREQ) {
		rx_status->band = NL80211_BAND_6GHZ;
		rx_status->freq = center_freq;
	} else if (channel_num >= 1 && channel_num <= 14) {
		rx_status->band = NL80211_BAND_2GHZ;
	} else if (channel_num >= 36 && channel_num <= 177) {
		rx_status->band = NL80211_BAND_5GHZ;
	} else {
		spin_lock_bh(&ar->data_lock);
		channel = ar->rx_channel;
		if (channel) {
			rx_status->band = channel->band;
			channel_num =
				ieee80211_frequency_to_channel(channel->center_freq);
		}
		spin_unlock_bh(&ar->data_lock);
		ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ",
				rx_desc, sizeof(struct hal_rx_desc));
	}

	if (rx_status->band != NL80211_BAND_6GHZ)
		rx_status->freq = ieee80211_channel_to_frequency(channel_num,
								 rx_status->band);

	ath11k_dp_rx_h_rate(ar, rx_desc, rx_status);
}

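/* The freq metadata from the MSDU start TLV packs two values: bits [15:0]
 * carry the primary channel number and bits [31:16] the center frequency
 * in MHz, which is used directly for 6 GHz where channel numbers alone
 * are ambiguous.
 */
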
2436 static void ath11k_dp_rx_deliver_msdu(struct ath11k
*ar
, struct napi_struct
*napi
,
2437 struct sk_buff
*msdu
,
2438 struct ieee80211_rx_status
*status
)
2440 static const struct ieee80211_radiotap_he known
= {
2441 .data1
= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN
|
2442 IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN
),
2443 .data2
= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN
),
2445 struct ieee80211_rx_status
*rx_status
;
2446 struct ieee80211_radiotap_he
*he
= NULL
;
2447 struct ieee80211_sta
*pubsta
= NULL
;
2448 struct ath11k_peer
*peer
;
2449 struct ath11k_skb_rxcb
*rxcb
= ATH11K_SKB_RXCB(msdu
);
2450 u8 decap
= DP_RX_DECAP_TYPE_RAW
;
2451 bool is_mcbc
= rxcb
->is_mcbc
;
2452 bool is_eapol
= rxcb
->is_eapol
;
2454 if (status
->encoding
== RX_ENC_HE
&&
2455 !(status
->flag
& RX_FLAG_RADIOTAP_HE
) &&
2456 !(status
->flag
& RX_FLAG_SKIP_MONITOR
)) {
2457 he
= skb_push(msdu
, sizeof(known
));
2458 memcpy(he
, &known
, sizeof(known
));
2459 status
->flag
|= RX_FLAG_RADIOTAP_HE
;
2462 if (!(status
->flag
& RX_FLAG_ONLY_MONITOR
))
2463 decap
= ath11k_dp_rx_h_msdu_start_decap_type(ar
->ab
, rxcb
->rx_desc
);
2465 spin_lock_bh(&ar
->ab
->base_lock
);
2466 peer
= ath11k_dp_rx_h_find_peer(ar
->ab
, msdu
);
2467 if (peer
&& peer
->sta
)
2469 spin_unlock_bh(&ar
->ab
->base_lock
);
2471 ath11k_dbg(ar
->ab
, ATH11K_DBG_DATA
,
2472 "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
2475 peer
? peer
->addr
: NULL
,
2477 is_mcbc
? "mcast" : "ucast",
2479 (status
->encoding
== RX_ENC_LEGACY
) ? "legacy" : "",
2480 (status
->encoding
== RX_ENC_HT
) ? "ht" : "",
2481 (status
->encoding
== RX_ENC_VHT
) ? "vht" : "",
2482 (status
->encoding
== RX_ENC_HE
) ? "he" : "",
2483 (status
->bw
== RATE_INFO_BW_40
) ? "40" : "",
2484 (status
->bw
== RATE_INFO_BW_80
) ? "80" : "",
2485 (status
->bw
== RATE_INFO_BW_160
) ? "160" : "",
2486 status
->enc_flags
& RX_ENC_FLAG_SHORT_GI
? "sgi " : "",
2490 status
->band
, status
->flag
,
2491 !!(status
->flag
& RX_FLAG_FAILED_FCS_CRC
),
2492 !!(status
->flag
& RX_FLAG_MMIC_ERROR
),
2493 !!(status
->flag
& RX_FLAG_AMSDU_MORE
));
2495 ath11k_dbg_dump(ar
->ab
, ATH11K_DBG_DP_RX
, NULL
, "dp rx msdu: ",
2496 msdu
->data
, msdu
->len
);
2498 rx_status
= IEEE80211_SKB_RXCB(msdu
);
2499 *rx_status
= *status
;
2501 /* TODO: trace rx packet */
2503 /* PN for multicast packets are not validate in HW,
2504 * so skip 802.3 rx path
2505 * Also, fast_rx expects the STA to be authorized, hence
2506 * eapol packets are sent in slow path.
2508 if (decap
== DP_RX_DECAP_TYPE_ETHERNET2_DIX
&& !is_eapol
&&
2509 !(is_mcbc
&& rx_status
->flag
& RX_FLAG_DECRYPTED
))
2510 rx_status
->flag
|= RX_FLAG_8023
;
2512 ieee80211_rx_napi(ar
->hw
, pubsta
, msdu
, napi
);
2515 static int ath11k_dp_rx_process_msdu(struct ath11k
*ar
,
2516 struct sk_buff
*msdu
,
2517 struct sk_buff_head
*msdu_list
,
2518 struct ieee80211_rx_status
*rx_status
)
2520 struct ath11k_base
*ab
= ar
->ab
;
2521 struct hal_rx_desc
*rx_desc
, *lrx_desc
;
2522 struct rx_attention
*rx_attention
;
2523 struct ath11k_skb_rxcb
*rxcb
;
2524 struct sk_buff
*last_buf
;
2529 u32 hal_rx_desc_sz
= ar
->ab
->hw_params
.hal_desc_sz
;
2531 last_buf
= ath11k_dp_rx_get_msdu_last_buf(msdu_list
, msdu
);
2534 "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n");
2539 rx_desc
= (struct hal_rx_desc
*)msdu
->data
;
2540 if (ath11k_dp_rx_h_attn_msdu_len_err(ab
, rx_desc
)) {
2541 ath11k_warn(ar
->ab
, "msdu len not valid\n");
2546 lrx_desc
= (struct hal_rx_desc
*)last_buf
->data
;
2547 rx_attention
= ath11k_dp_rx_get_attention(ab
, lrx_desc
);
2548 if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention
)) {
2549 ath11k_warn(ab
, "msdu_done bit in attention is not set\n");
2554 rxcb
= ATH11K_SKB_RXCB(msdu
);
2555 rxcb
->rx_desc
= rx_desc
;
2556 msdu_len
= ath11k_dp_rx_h_msdu_start_msdu_len(ab
, rx_desc
);
2557 l3_pad_bytes
= ath11k_dp_rx_h_msdu_end_l3pad(ab
, lrx_desc
);
2559 if (rxcb
->is_frag
) {
2560 skb_pull(msdu
, hal_rx_desc_sz
);
2561 } else if (!rxcb
->is_continuation
) {
2562 if ((msdu_len
+ hal_rx_desc_sz
) > DP_RX_BUFFER_SIZE
) {
2563 hdr_status
= ath11k_dp_rx_h_80211_hdr(ab
, rx_desc
);
2565 ath11k_warn(ab
, "invalid msdu len %u\n", msdu_len
);
2566 ath11k_dbg_dump(ab
, ATH11K_DBG_DATA
, NULL
, "", hdr_status
,
2567 sizeof(struct ieee80211_hdr
));
2568 ath11k_dbg_dump(ab
, ATH11K_DBG_DATA
, NULL
, "", rx_desc
,
2569 sizeof(struct hal_rx_desc
));
2572 skb_put(msdu
, hal_rx_desc_sz
+ l3_pad_bytes
+ msdu_len
);
2573 skb_pull(msdu
, hal_rx_desc_sz
+ l3_pad_bytes
);
2575 ret
= ath11k_dp_rx_msdu_coalesce(ar
, msdu_list
,
2577 l3_pad_bytes
, msdu_len
);
2580 "failed to coalesce msdu rx buffer%d\n", ret
);
2585 ath11k_dp_rx_h_ppdu(ar
, rx_desc
, rx_status
);
2586 ath11k_dp_rx_h_mpdu(ar
, msdu
, rx_desc
, rx_status
);
2588 rx_status
->flag
|= RX_FLAG_SKIP_MONITOR
| RX_FLAG_DUP_VALIDATED
;
static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab,
						   struct napi_struct *napi,
						   struct sk_buff_head *msdu_list,
						   int mac_id)
{
	struct sk_buff *msdu;
	struct ath11k *ar;
	struct ieee80211_rx_status rx_status = {0};
	int ret;

	if (skb_queue_empty(msdu_list))
		return;

	if (unlikely(!rcu_access_pointer(ab->pdevs_active[mac_id]))) {
		__skb_queue_purge(msdu_list);
		return;
	}

	ar = ab->pdevs[mac_id].ar;
	if (unlikely(test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags))) {
		__skb_queue_purge(msdu_list);
		return;
	}

	while ((msdu = __skb_dequeue(msdu_list))) {
		ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status);
		if (unlikely(ret)) {
			ath11k_dbg(ab, ATH11K_DBG_DATA,
				   "Unable to process msdu %d", ret);
			dev_kfree_skb_any(msdu);
			continue;
		}

		ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status);
	}
}

2633 int ath11k_dp_process_rx(struct ath11k_base
*ab
, int ring_id
,
2634 struct napi_struct
*napi
, int budget
)
2636 struct ath11k_dp
*dp
= &ab
->dp
;
2637 struct dp_rxdma_ring
*rx_ring
;
2638 int num_buffs_reaped
[MAX_RADIOS
] = {0};
2639 struct sk_buff_head msdu_list
[MAX_RADIOS
];
2640 struct ath11k_skb_rxcb
*rxcb
;
2641 int total_msdu_reaped
= 0;
2642 struct hal_srng
*srng
;
2643 struct sk_buff
*msdu
;
2647 struct hal_reo_dest_ring
*desc
;
2648 enum hal_reo_dest_ring_push_reason push_reason
;
2652 for (i
= 0; i
< MAX_RADIOS
; i
++)
2653 __skb_queue_head_init(&msdu_list
[i
]);
2655 srng
= &ab
->hal
.srng_list
[dp
->reo_dst_ring
[ring_id
].ring_id
];
2657 spin_lock_bh(&srng
->lock
);
2660 ath11k_hal_srng_access_begin(ab
, srng
);
2662 while (likely(desc
=
2663 (struct hal_reo_dest_ring
*)ath11k_hal_srng_dst_get_next_entry(ab
,
2665 cookie
= FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE
,
2666 desc
->buf_addr_info
.info1
);
2667 buf_id
= FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID
,
2669 mac_id
= FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID
, cookie
);
2671 if (unlikely(buf_id
== 0))
2674 ar
= ab
->pdevs
[mac_id
].ar
;
2675 rx_ring
= &ar
->dp
.rx_refill_buf_ring
;
2676 spin_lock_bh(&rx_ring
->idr_lock
);
2677 msdu
= idr_find(&rx_ring
->bufs_idr
, buf_id
);
2678 if (unlikely(!msdu
)) {
2679 ath11k_warn(ab
, "frame rx with invalid buf_id %d\n",
2681 spin_unlock_bh(&rx_ring
->idr_lock
);
2685 idr_remove(&rx_ring
->bufs_idr
, buf_id
);
2686 spin_unlock_bh(&rx_ring
->idr_lock
);
2688 rxcb
= ATH11K_SKB_RXCB(msdu
);
2689 dma_unmap_single(ab
->dev
, rxcb
->paddr
,
2690 msdu
->len
+ skb_tailroom(msdu
),
2693 num_buffs_reaped
[mac_id
]++;
2695 push_reason
= FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON
,
2697 if (unlikely(push_reason
!=
2698 HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION
)) {
2699 dev_kfree_skb_any(msdu
);
2700 ab
->soc_stats
.hal_reo_error
[ring_id
]++;
2704 rxcb
->is_first_msdu
= !!(desc
->rx_msdu_info
.info0
&
2705 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU
);
2706 rxcb
->is_last_msdu
= !!(desc
->rx_msdu_info
.info0
&
2707 RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU
);
2708 rxcb
->is_continuation
= !!(desc
->rx_msdu_info
.info0
&
2709 RX_MSDU_DESC_INFO0_MSDU_CONTINUATION
);
2710 rxcb
->peer_id
= FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID
,
2711 desc
->rx_mpdu_info
.meta_data
);
2712 rxcb
->seq_no
= FIELD_GET(RX_MPDU_DESC_INFO0_SEQ_NUM
,
2713 desc
->rx_mpdu_info
.info0
);
2714 rxcb
->tid
= FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM
,
2717 rxcb
->mac_id
= mac_id
;
2718 __skb_queue_tail(&msdu_list
[mac_id
], msdu
);
2720 if (rxcb
->is_continuation
) {
2723 total_msdu_reaped
++;
2727 if (total_msdu_reaped
>= budget
)
2731 /* Hw might have updated the head pointer after we cached it.
2732 * In this case, even though there are entries in the ring we'll
2733 * get rx_desc NULL. Give the read another try with updated cached
2734 * head pointer so that we can reap complete MPDU in the current
2737 if (unlikely(!done
&& ath11k_hal_srng_dst_num_free(ab
, srng
, true))) {
2738 ath11k_hal_srng_access_end(ab
, srng
);
2742 ath11k_hal_srng_access_end(ab
, srng
);
2744 spin_unlock_bh(&srng
->lock
);
2746 if (unlikely(!total_msdu_reaped
))
2749 for (i
= 0; i
< ab
->num_radios
; i
++) {
2750 if (!num_buffs_reaped
[i
])
2753 ath11k_dp_rx_process_received_packets(ab
, napi
, &msdu_list
[i
], i
);
2755 ar
= ab
->pdevs
[i
].ar
;
2756 rx_ring
= &ar
->dp
.rx_refill_buf_ring
;
2758 ath11k_dp_rxbufs_replenish(ab
, i
, rx_ring
, num_buffs_reaped
[i
],
2759 ab
->hw_params
.hal_params
->rx_buf_rbm
);
2762 return total_msdu_reaped
;
2765 static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta
*arsta
,
2766 struct hal_rx_mon_ppdu_info
*ppdu_info
)
2768 struct ath11k_rx_peer_stats
*rx_stats
= arsta
->rx_stats
;
2775 arsta
->rssi_comb
= ppdu_info
->rssi_comb
;
2776 ewma_avg_rssi_add(&arsta
->avg_rssi
, ppdu_info
->rssi_comb
);
2778 num_msdu
= ppdu_info
->tcp_msdu_count
+ ppdu_info
->tcp_ack_msdu_count
+
2779 ppdu_info
->udp_msdu_count
+ ppdu_info
->other_msdu_count
;
2781 rx_stats
->num_msdu
+= num_msdu
;
2782 rx_stats
->tcp_msdu_count
+= ppdu_info
->tcp_msdu_count
+
2783 ppdu_info
->tcp_ack_msdu_count
;
2784 rx_stats
->udp_msdu_count
+= ppdu_info
->udp_msdu_count
;
2785 rx_stats
->other_msdu_count
+= ppdu_info
->other_msdu_count
;
2787 if (ppdu_info
->preamble_type
== HAL_RX_PREAMBLE_11A
||
2788 ppdu_info
->preamble_type
== HAL_RX_PREAMBLE_11B
) {
2790 ppdu_info
->mcs
= HAL_RX_MAX_MCS
;
2791 ppdu_info
->tid
= IEEE80211_NUM_TIDS
;
2794 if (ppdu_info
->nss
> 0 && ppdu_info
->nss
<= HAL_RX_MAX_NSS
)
2795 rx_stats
->nss_count
[ppdu_info
->nss
- 1] += num_msdu
;
2797 if (ppdu_info
->mcs
<= HAL_RX_MAX_MCS
)
2798 rx_stats
->mcs_count
[ppdu_info
->mcs
] += num_msdu
;
2800 if (ppdu_info
->gi
< HAL_RX_GI_MAX
)
2801 rx_stats
->gi_count
[ppdu_info
->gi
] += num_msdu
;
2803 if (ppdu_info
->bw
< HAL_RX_BW_MAX
)
2804 rx_stats
->bw_count
[ppdu_info
->bw
] += num_msdu
;
2806 if (ppdu_info
->ldpc
< HAL_RX_SU_MU_CODING_MAX
)
2807 rx_stats
->coding_count
[ppdu_info
->ldpc
] += num_msdu
;
2809 if (ppdu_info
->tid
<= IEEE80211_NUM_TIDS
)
2810 rx_stats
->tid_count
[ppdu_info
->tid
] += num_msdu
;
2812 if (ppdu_info
->preamble_type
< HAL_RX_PREAMBLE_MAX
)
2813 rx_stats
->pream_cnt
[ppdu_info
->preamble_type
] += num_msdu
;
2815 if (ppdu_info
->reception_type
< HAL_RX_RECEPTION_TYPE_MAX
)
2816 rx_stats
->reception_type
[ppdu_info
->reception_type
] += num_msdu
;
2818 if (ppdu_info
->is_stbc
)
2819 rx_stats
->stbc_count
+= num_msdu
;
2821 if (ppdu_info
->beamformed
)
2822 rx_stats
->beamformed_count
+= num_msdu
;
2824 if (ppdu_info
->num_mpdu_fcs_ok
> 1)
2825 rx_stats
->ampdu_msdu_count
+= num_msdu
;
2827 rx_stats
->non_ampdu_msdu_count
+= num_msdu
;
2829 rx_stats
->num_mpdu_fcs_ok
+= ppdu_info
->num_mpdu_fcs_ok
;
2830 rx_stats
->num_mpdu_fcs_err
+= ppdu_info
->num_mpdu_fcs_err
;
2831 rx_stats
->dcm_count
+= ppdu_info
->dcm
;
2832 rx_stats
->ru_alloc_cnt
[ppdu_info
->ru_alloc
] += num_msdu
;
2834 arsta
->rssi_comb
= ppdu_info
->rssi_comb
;
2836 BUILD_BUG_ON(ARRAY_SIZE(arsta
->chain_signal
) >
2837 ARRAY_SIZE(ppdu_info
->rssi_chain_pri20
));
2839 for (i
= 0; i
< ARRAY_SIZE(arsta
->chain_signal
); i
++)
2840 arsta
->chain_signal
[i
] = ppdu_info
->rssi_chain_pri20
[i
];
2842 rx_stats
->rx_duration
+= ppdu_info
->rx_duration
;
2843 arsta
->rx_duration
= rx_stats
->rx_duration
;
2846 static struct sk_buff
*ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base
*ab
,
2847 struct dp_rxdma_ring
*rx_ring
,
2850 struct sk_buff
*skb
;
2853 skb
= dev_alloc_skb(DP_RX_BUFFER_SIZE
+
2854 DP_RX_BUFFER_ALIGN_SIZE
);
2857 goto fail_alloc_skb
;
2859 if (!IS_ALIGNED((unsigned long)skb
->data
,
2860 DP_RX_BUFFER_ALIGN_SIZE
)) {
2861 skb_pull(skb
, PTR_ALIGN(skb
->data
, DP_RX_BUFFER_ALIGN_SIZE
) -
2865 paddr
= dma_map_single(ab
->dev
, skb
->data
,
2866 skb
->len
+ skb_tailroom(skb
),
2868 if (unlikely(dma_mapping_error(ab
->dev
, paddr
)))
2871 spin_lock_bh(&rx_ring
->idr_lock
);
2872 *buf_id
= idr_alloc(&rx_ring
->bufs_idr
, skb
, 0,
2873 rx_ring
->bufs_max
, GFP_ATOMIC
);
2874 spin_unlock_bh(&rx_ring
->idr_lock
);
2876 goto fail_dma_unmap
;
2878 ATH11K_SKB_RXCB(skb
)->paddr
= paddr
;
2882 dma_unmap_single(ab
->dev
, paddr
, skb
->len
+ skb_tailroom(skb
),
2885 dev_kfree_skb_any(skb
);
2890 int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base
*ab
, int mac_id
,
2891 struct dp_rxdma_ring
*rx_ring
,
2893 enum hal_rx_buf_return_buf_manager mgr
)
2895 struct hal_srng
*srng
;
2897 struct sk_buff
*skb
;
2904 req_entries
= min(req_entries
, rx_ring
->bufs_max
);
2906 srng
= &ab
->hal
.srng_list
[rx_ring
->refill_buf_ring
.ring_id
];
2908 spin_lock_bh(&srng
->lock
);
2910 ath11k_hal_srng_access_begin(ab
, srng
);
2912 num_free
= ath11k_hal_srng_src_num_free(ab
, srng
, true);
2914 req_entries
= min(num_free
, req_entries
);
2915 num_remain
= req_entries
;
2917 while (num_remain
> 0) {
2918 skb
= ath11k_dp_rx_alloc_mon_status_buf(ab
, rx_ring
,
2922 paddr
= ATH11K_SKB_RXCB(skb
)->paddr
;
2924 desc
= ath11k_hal_srng_src_get_next_entry(ab
, srng
);
2928 cookie
= FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID
, mac_id
) |
2929 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID
, buf_id
);
2933 ath11k_hal_rx_buf_addr_info_set(desc
, paddr
, cookie
, mgr
);
2936 ath11k_hal_srng_access_end(ab
, srng
);
2938 spin_unlock_bh(&srng
->lock
);
2940 return req_entries
- num_remain
;
2943 spin_lock_bh(&rx_ring
->idr_lock
);
2944 idr_remove(&rx_ring
->bufs_idr
, buf_id
);
2945 spin_unlock_bh(&rx_ring
->idr_lock
);
2946 dma_unmap_single(ab
->dev
, paddr
, skb
->len
+ skb_tailroom(skb
),
2948 dev_kfree_skb_any(skb
);
2949 ath11k_hal_srng_access_end(ab
, srng
);
2950 spin_unlock_bh(&srng
->lock
);
2952 return req_entries
- num_remain
;
#define ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP 32535
2958 ath11k_dp_rx_mon_update_status_buf_state(struct ath11k_mon_data
*pmon
,
2959 struct hal_tlv_hdr
*tlv
)
2961 struct hal_rx_ppdu_start
*ppdu_start
;
2962 u16 ppdu_id_diff
, ppdu_id
, tlv_len
;
2965 /* PPDU id is part of second tlv, move ptr to second tlv */
2966 tlv_len
= FIELD_GET(HAL_TLV_HDR_LEN
, tlv
->tl
);
2968 ptr
+= sizeof(*tlv
) + tlv_len
;
2969 tlv
= (struct hal_tlv_hdr
*)ptr
;
2971 if (FIELD_GET(HAL_TLV_HDR_TAG
, tlv
->tl
) != HAL_RX_PPDU_START
)
2974 ptr
+= sizeof(*tlv
);
2975 ppdu_start
= (struct hal_rx_ppdu_start
*)ptr
;
2976 ppdu_id
= FIELD_GET(HAL_RX_PPDU_START_INFO0_PPDU_ID
,
2977 __le32_to_cpu(ppdu_start
->info0
));
2979 if (pmon
->sw_mon_entries
.ppdu_id
< ppdu_id
) {
2980 pmon
->buf_state
= DP_MON_STATUS_LEAD
;
2981 ppdu_id_diff
= ppdu_id
- pmon
->sw_mon_entries
.ppdu_id
;
2982 if (ppdu_id_diff
> ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP
)
2983 pmon
->buf_state
= DP_MON_STATUS_LAG
;
2984 } else if (pmon
->sw_mon_entries
.ppdu_id
> ppdu_id
) {
2985 pmon
->buf_state
= DP_MON_STATUS_LAG
;
2986 ppdu_id_diff
= pmon
->sw_mon_entries
.ppdu_id
- ppdu_id
;
2987 if (ppdu_id_diff
> ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP
)
2988 pmon
->buf_state
= DP_MON_STATUS_LEAD
;
2992 static enum dp_mon_status_buf_state
2993 ath11k_dp_rx_mon_buf_done(struct ath11k_base
*ab
, struct hal_srng
*srng
,
2994 struct dp_rxdma_ring
*rx_ring
)
2996 struct ath11k_skb_rxcb
*rxcb
;
2997 struct hal_tlv_hdr
*tlv
;
2998 struct sk_buff
*skb
;
3005 status_desc
= ath11k_hal_srng_src_next_peek(ab
, srng
);
3007 return DP_MON_STATUS_NO_DMA
;
3009 ath11k_hal_rx_buf_addr_info_get(status_desc
, &paddr
, &cookie
, &rbm
);
3011 buf_id
= FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID
, cookie
);
3013 spin_lock_bh(&rx_ring
->idr_lock
);
3014 skb
= idr_find(&rx_ring
->bufs_idr
, buf_id
);
3015 spin_unlock_bh(&rx_ring
->idr_lock
);
3018 return DP_MON_STATUS_NO_DMA
;
3020 rxcb
= ATH11K_SKB_RXCB(skb
);
3021 dma_sync_single_for_cpu(ab
->dev
, rxcb
->paddr
,
3022 skb
->len
+ skb_tailroom(skb
),
3025 tlv
= (struct hal_tlv_hdr
*)skb
->data
;
3026 if (FIELD_GET(HAL_TLV_HDR_TAG
, tlv
->tl
) != HAL_RX_STATUS_BUFFER_DONE
)
3027 return DP_MON_STATUS_NO_DMA
;
3029 return DP_MON_STATUS_REPLINISH
;
3032 static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base
*ab
, int mac_id
,
3033 int *budget
, struct sk_buff_head
*skb_list
)
3036 const struct ath11k_hw_hal_params
*hal_params
;
3037 enum dp_mon_status_buf_state reap_status
;
3038 struct ath11k_pdev_dp
*dp
;
3039 struct dp_rxdma_ring
*rx_ring
;
3040 struct ath11k_mon_data
*pmon
;
3041 struct hal_srng
*srng
;
3042 void *rx_mon_status_desc
;
3043 struct sk_buff
*skb
;
3044 struct ath11k_skb_rxcb
*rxcb
;
3045 struct hal_tlv_hdr
*tlv
;
3047 int buf_id
, srng_id
;
3050 int num_buffs_reaped
= 0;
3052 ar
= ab
->pdevs
[ath11k_hw_mac_id_to_pdev_id(&ab
->hw_params
, mac_id
)].ar
;
3054 pmon
= &dp
->mon_data
;
3055 srng_id
= ath11k_hw_mac_id_to_srng_id(&ab
->hw_params
, mac_id
);
3056 rx_ring
= &dp
->rx_mon_status_refill_ring
[srng_id
];
3058 srng
= &ab
->hal
.srng_list
[rx_ring
->refill_buf_ring
.ring_id
];
3060 spin_lock_bh(&srng
->lock
);
3062 ath11k_hal_srng_access_begin(ab
, srng
);
3065 rx_mon_status_desc
=
3066 ath11k_hal_srng_src_peek(ab
, srng
);
3067 if (!rx_mon_status_desc
) {
3068 pmon
->buf_state
= DP_MON_STATUS_REPLINISH
;
3072 ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc
, &paddr
,
3075 buf_id
= FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID
, cookie
);
3077 spin_lock_bh(&rx_ring
->idr_lock
);
3078 skb
= idr_find(&rx_ring
->bufs_idr
, buf_id
);
3079 spin_unlock_bh(&rx_ring
->idr_lock
);
3082 ath11k_warn(ab
, "rx monitor status with invalid buf_id %d\n",
3084 pmon
->buf_state
= DP_MON_STATUS_REPLINISH
;
3088 rxcb
= ATH11K_SKB_RXCB(skb
);
3090 dma_sync_single_for_cpu(ab
->dev
, rxcb
->paddr
,
3091 skb
->len
+ skb_tailroom(skb
),
3094 tlv
= (struct hal_tlv_hdr
*)skb
->data
;
3095 if (FIELD_GET(HAL_TLV_HDR_TAG
, tlv
->tl
) !=
3096 HAL_RX_STATUS_BUFFER_DONE
) {
3097 ath11k_warn(ab
, "mon status DONE not set %lx, buf_id %d\n",
3098 FIELD_GET(HAL_TLV_HDR_TAG
,
3100 /* RxDMA status done bit might not be set even
3101 * though tp is moved by HW.
3104 /* If done status is missing:
3105 * 1. As per MAC team's suggestion,
3106 * when HP + 1 entry is peeked and if DMA
3107 * is not done and if HP + 2 entry's DMA done
3108 * is set. skip HP + 1 entry and
3109 * start processing in next interrupt.
3110 * 2. If HP + 2 entry's DMA done is not set,
3111 * poll onto HP + 1 entry DMA done to be set.
3112 * Check status for same buffer for next time
3113 * dp_rx_mon_status_srng_process
3116 reap_status
= ath11k_dp_rx_mon_buf_done(ab
, srng
,
3118 if (reap_status
== DP_MON_STATUS_NO_DMA
)
3121 spin_lock_bh(&rx_ring
->idr_lock
);
3122 idr_remove(&rx_ring
->bufs_idr
, buf_id
);
3123 spin_unlock_bh(&rx_ring
->idr_lock
);
3125 dma_unmap_single(ab
->dev
, rxcb
->paddr
,
3126 skb
->len
+ skb_tailroom(skb
),
3129 dev_kfree_skb_any(skb
);
3130 pmon
->buf_state
= DP_MON_STATUS_REPLINISH
;
3134 spin_lock_bh(&rx_ring
->idr_lock
);
3135 idr_remove(&rx_ring
->bufs_idr
, buf_id
);
3136 spin_unlock_bh(&rx_ring
->idr_lock
);
3137 if (ab
->hw_params
.full_monitor_mode
) {
3138 ath11k_dp_rx_mon_update_status_buf_state(pmon
, tlv
);
3139 if (paddr
== pmon
->mon_status_paddr
)
3140 pmon
->buf_state
= DP_MON_STATUS_MATCH
;
3143 dma_unmap_single(ab
->dev
, rxcb
->paddr
,
3144 skb
->len
+ skb_tailroom(skb
),
3147 __skb_queue_tail(skb_list
, skb
);
3149 pmon
->buf_state
= DP_MON_STATUS_REPLINISH
;
3152 skb
= ath11k_dp_rx_alloc_mon_status_buf(ab
, rx_ring
,
3156 hal_params
= ab
->hw_params
.hal_params
;
3157 ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc
, 0, 0,
3158 hal_params
->rx_buf_rbm
);
3162 rxcb
= ATH11K_SKB_RXCB(skb
);
3164 cookie
= FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID
, mac_id
) |
3165 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID
, buf_id
);
3167 ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc
, rxcb
->paddr
,
3169 ab
->hw_params
.hal_params
->rx_buf_rbm
);
3170 ath11k_hal_srng_src_get_next_entry(ab
, srng
);
3173 ath11k_hal_srng_access_end(ab
, srng
);
3174 spin_unlock_bh(&srng
->lock
);
3176 return num_buffs_reaped
;
static void ath11k_dp_rx_frag_timer(struct timer_list *timer)
{
	struct dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer);

	spin_lock_bh(&rx_tid->ab->base_lock);
	if (rx_tid->last_frag_no &&
	    rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) {
		spin_unlock_bh(&rx_tid->ab->base_lock);
		return;
	}
	ath11k_dp_rx_frags_cleanup(rx_tid, true);
	spin_unlock_bh(&rx_tid->ab->base_lock);
}

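/* The fragment timer is (re)armed for ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS
 * whenever a partial fragment sequence is queued. If the sequence has
 * completed in the meantime the reassembly path owns the queue, otherwise
 * the stale fragments are dropped here.
 */
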
int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id)
{
	struct ath11k_base *ab = ar->ab;
	struct crypto_shash *tfm;
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;
	int i;

	tfm = crypto_alloc_shash("michael_mic", 0, 0);
	if (IS_ERR(tfm)) {
		ath11k_warn(ab, "failed to allocate michael_mic shash: %ld\n",
			    PTR_ERR(tfm));
		return PTR_ERR(tfm);
	}

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to set up fragment info\n");
		spin_unlock_bh(&ab->base_lock);
		crypto_free_shash(tfm);
		return -ENOENT;
	}

	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
		rx_tid = &peer->rx_tid[i];
		rx_tid->ab = ab;
		timer_setup(&rx_tid->frag_timer, ath11k_dp_rx_frag_timer, 0);
		skb_queue_head_init(&rx_tid->rx_frags);
	}

	peer->tfm_mmic = tfm;
	peer->dp_setup_done = true;
	spin_unlock_bh(&ab->base_lock);

	return 0;
}

static int ath11k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,
				      struct ieee80211_hdr *hdr, u8 *data,
				      size_t data_len, u8 *mic)
{
	SHASH_DESC_ON_STACK(desc, tfm);
	u8 mic_hdr[16] = {0};
	u8 tid = 0;
	int ret;

	if (!tfm)
		return -EINVAL;

	desc->tfm = tfm;

	ret = crypto_shash_setkey(tfm, key, 8);
	if (ret)
		goto out;

	ret = crypto_shash_init(desc);
	if (ret)
		goto out;

	/* TKIP MIC header */
	memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN);
	memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN);
	if (ieee80211_is_data_qos(hdr->frame_control))
		tid = ieee80211_get_tid(hdr);
	mic_hdr[12] = tid;

	ret = crypto_shash_update(desc, mic_hdr, 16);
	if (ret)
		goto out;
	ret = crypto_shash_update(desc, data, data_len);
	if (ret)
		goto out;
	ret = crypto_shash_final(desc, mic);
out:
	shash_desc_zero(desc);

	return ret;
}

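/* The 16 byte Michael pseudo-header fed in before the payload follows the
 * IEEE 802.11 TKIP definition: DA (6) | SA (6) | priority (1) | three
 * reserved zero bytes. mic_hdr[] above is zero initialised, so only DA,
 * SA and the TID/priority byte need to be filled in.
 */
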
3273 static int ath11k_dp_rx_h_verify_tkip_mic(struct ath11k
*ar
, struct ath11k_peer
*peer
,
3274 struct sk_buff
*msdu
)
3276 struct hal_rx_desc
*rx_desc
= (struct hal_rx_desc
*)msdu
->data
;
3277 struct ieee80211_rx_status
*rxs
= IEEE80211_SKB_RXCB(msdu
);
3278 struct ieee80211_key_conf
*key_conf
;
3279 struct ieee80211_hdr
*hdr
;
3280 u8 mic
[IEEE80211_CCMP_MIC_LEN
];
3281 int head_len
, tail_len
, ret
;
3283 u32 hdr_len
, hal_rx_desc_sz
= ar
->ab
->hw_params
.hal_desc_sz
;
3287 if (ath11k_dp_rx_h_mpdu_start_enctype(ar
->ab
, rx_desc
) !=
3288 HAL_ENCRYPT_TYPE_TKIP_MIC
)
3291 hdr
= (struct ieee80211_hdr
*)(msdu
->data
+ hal_rx_desc_sz
);
3292 hdr_len
= ieee80211_hdrlen(hdr
->frame_control
);
3293 head_len
= hdr_len
+ hal_rx_desc_sz
+ IEEE80211_TKIP_IV_LEN
;
3294 tail_len
= IEEE80211_CCMP_MIC_LEN
+ IEEE80211_TKIP_ICV_LEN
+ FCS_LEN
;
3296 if (!is_multicast_ether_addr(hdr
->addr1
))
3297 key_idx
= peer
->ucast_keyidx
;
3299 key_idx
= peer
->mcast_keyidx
;
3301 key_conf
= peer
->keys
[key_idx
];
3303 data
= msdu
->data
+ head_len
;
3304 data_len
= msdu
->len
- head_len
- tail_len
;
3305 key
= &key_conf
->key
[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY
];
3307 ret
= ath11k_dp_rx_h_michael_mic(peer
->tfm_mmic
, key
, hdr
, data
, data_len
, mic
);
3308 if (ret
|| memcmp(mic
, data
+ data_len
, IEEE80211_CCMP_MIC_LEN
))
3314 (ATH11K_SKB_RXCB(msdu
))->is_first_msdu
= true;
3315 (ATH11K_SKB_RXCB(msdu
))->is_last_msdu
= true;
3317 rxs
->flag
|= RX_FLAG_MMIC_ERROR
| RX_FLAG_MMIC_STRIPPED
|
3318 RX_FLAG_IV_STRIPPED
| RX_FLAG_DECRYPTED
;
3319 skb_pull(msdu
, hal_rx_desc_sz
);
3321 ath11k_dp_rx_h_ppdu(ar
, rx_desc
, rxs
);
3322 ath11k_dp_rx_h_undecap(ar
, msdu
, rx_desc
,
3323 HAL_ENCRYPT_TYPE_TKIP_MIC
, rxs
, true);
3324 ieee80211_rx(ar
->hw
, msdu
);
static void ath11k_dp_rx_h_undecap_frag(struct ath11k *ar, struct sk_buff *msdu,
					enum hal_encrypt_type enctype, u32 flags)
{
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	size_t crypto_len;
	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;

	if (!flags)
		return;

	hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);

	if (flags & RX_FLAG_MIC_STRIPPED)
		skb_trim(msdu, msdu->len -
			 ath11k_dp_rx_crypto_mic_len(ar, enctype));

	if (flags & RX_FLAG_ICV_STRIPPED)
		skb_trim(msdu, msdu->len -
			 ath11k_dp_rx_crypto_icv_len(ar, enctype));

	if (flags & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);

		memmove((void *)msdu->data + hal_rx_desc_sz + crypto_len,
			(void *)msdu->data + hal_rx_desc_sz, hdr_len);
		skb_pull(msdu, crypto_len);
	}
}

3359 static int ath11k_dp_rx_h_defrag(struct ath11k
*ar
,
3360 struct ath11k_peer
*peer
,
3361 struct dp_rx_tid
*rx_tid
,
3362 struct sk_buff
**defrag_skb
)
3364 struct hal_rx_desc
*rx_desc
;
3365 struct sk_buff
*skb
, *first_frag
, *last_frag
;
3366 struct ieee80211_hdr
*hdr
;
3367 struct rx_attention
*rx_attention
;
3368 enum hal_encrypt_type enctype
;
3369 bool is_decrypted
= false;
3372 u32 flags
, hal_rx_desc_sz
= ar
->ab
->hw_params
.hal_desc_sz
;
3374 first_frag
= skb_peek(&rx_tid
->rx_frags
);
3375 last_frag
= skb_peek_tail(&rx_tid
->rx_frags
);
3377 skb_queue_walk(&rx_tid
->rx_frags
, skb
) {
3379 rx_desc
= (struct hal_rx_desc
*)skb
->data
;
3380 hdr
= (struct ieee80211_hdr
*)(skb
->data
+ hal_rx_desc_sz
);
3382 enctype
= ath11k_dp_rx_h_mpdu_start_enctype(ar
->ab
, rx_desc
);
3383 if (enctype
!= HAL_ENCRYPT_TYPE_OPEN
) {
3384 rx_attention
= ath11k_dp_rx_get_attention(ar
->ab
, rx_desc
);
3385 is_decrypted
= ath11k_dp_rx_h_attn_is_decrypted(rx_attention
);
3389 if (skb
!= first_frag
)
3390 flags
|= RX_FLAG_IV_STRIPPED
;
3391 if (skb
!= last_frag
)
3392 flags
|= RX_FLAG_ICV_STRIPPED
|
3393 RX_FLAG_MIC_STRIPPED
;
3396 /* RX fragments are always raw packets */
3397 if (skb
!= last_frag
)
3398 skb_trim(skb
, skb
->len
- FCS_LEN
);
3399 ath11k_dp_rx_h_undecap_frag(ar
, skb
, enctype
, flags
);
3401 if (skb
!= first_frag
)
3402 skb_pull(skb
, hal_rx_desc_sz
+
3403 ieee80211_hdrlen(hdr
->frame_control
));
3404 msdu_len
+= skb
->len
;
3407 extra_space
= msdu_len
- (DP_RX_BUFFER_SIZE
+ skb_tailroom(first_frag
));
3408 if (extra_space
> 0 &&
3409 (pskb_expand_head(first_frag
, 0, extra_space
, GFP_ATOMIC
) < 0))
3412 __skb_unlink(first_frag
, &rx_tid
->rx_frags
);
3413 while ((skb
= __skb_dequeue(&rx_tid
->rx_frags
))) {
3414 skb_put_data(first_frag
, skb
->data
, skb
->len
);
3415 dev_kfree_skb_any(skb
);
3418 hdr
= (struct ieee80211_hdr
*)(first_frag
->data
+ hal_rx_desc_sz
);
3419 hdr
->frame_control
&= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS
);
3420 ATH11K_SKB_RXCB(first_frag
)->is_frag
= 1;
3422 if (ath11k_dp_rx_h_verify_tkip_mic(ar
, peer
, first_frag
))
3425 *defrag_skb
= first_frag
;
3429 static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k
*ar
, struct dp_rx_tid
*rx_tid
,
3430 struct sk_buff
*defrag_skb
)
3432 struct ath11k_base
*ab
= ar
->ab
;
3433 struct ath11k_pdev_dp
*dp
= &ar
->dp
;
3434 struct dp_rxdma_ring
*rx_refill_ring
= &dp
->rx_refill_buf_ring
;
3435 struct hal_rx_desc
*rx_desc
= (struct hal_rx_desc
*)defrag_skb
->data
;
3436 struct hal_reo_entrance_ring
*reo_ent_ring
;
3437 struct hal_reo_dest_ring
*reo_dest_ring
;
3438 struct dp_link_desc_bank
*link_desc_banks
;
3439 struct hal_rx_msdu_link
*msdu_link
;
3440 struct hal_rx_msdu_details
*msdu0
;
3441 struct hal_srng
*srng
;
3443 u32 desc_bank
, msdu_info
, mpdu_info
;
3444 u32 dst_idx
, cookie
, hal_rx_desc_sz
;
3447 hal_rx_desc_sz
= ab
->hw_params
.hal_desc_sz
;
3448 link_desc_banks
= ab
->dp
.link_desc_banks
;
3449 reo_dest_ring
= rx_tid
->dst_ring_desc
;
3451 ath11k_hal_rx_reo_ent_paddr_get(ab
, reo_dest_ring
, &paddr
, &desc_bank
);
3452 msdu_link
= (struct hal_rx_msdu_link
*)(link_desc_banks
[desc_bank
].vaddr
+
3453 (paddr
- link_desc_banks
[desc_bank
].paddr
));
3454 msdu0
= &msdu_link
->msdu_link
[0];
3455 dst_idx
= FIELD_GET(RX_MSDU_DESC_INFO0_REO_DEST_IND
, msdu0
->rx_msdu_info
.info0
);
3456 memset(msdu0
, 0, sizeof(*msdu0
));
3458 msdu_info
= FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU
, 1) |
3459 FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU
, 1) |
3460 FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_CONTINUATION
, 0) |
3461 FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_LENGTH
,
3462 defrag_skb
->len
- hal_rx_desc_sz
) |
3463 FIELD_PREP(RX_MSDU_DESC_INFO0_REO_DEST_IND
, dst_idx
) |
3464 FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_SA
, 1) |
3465 FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_DA
, 1);
3466 msdu0
->rx_msdu_info
.info0
= msdu_info
;
3468 /* change msdu len in hal rx desc */
3469 ath11k_dp_rxdesc_set_msdu_len(ab
, rx_desc
, defrag_skb
->len
- hal_rx_desc_sz
);
3471 paddr
= dma_map_single(ab
->dev
, defrag_skb
->data
,
3472 defrag_skb
->len
+ skb_tailroom(defrag_skb
),
3474 if (dma_mapping_error(ab
->dev
, paddr
))
3477 spin_lock_bh(&rx_refill_ring
->idr_lock
);
3478 buf_id
= idr_alloc(&rx_refill_ring
->bufs_idr
, defrag_skb
, 0,
3479 rx_refill_ring
->bufs_max
* 3, GFP_ATOMIC
);
3480 spin_unlock_bh(&rx_refill_ring
->idr_lock
);
3486 ATH11K_SKB_RXCB(defrag_skb
)->paddr
= paddr
;
3487 cookie
= FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID
, dp
->mac_id
) |
3488 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID
, buf_id
);
3490 ath11k_hal_rx_buf_addr_info_set(msdu0
, paddr
, cookie
,
3491 ab
->hw_params
.hal_params
->rx_buf_rbm
);
3493 /* Fill mpdu details into reo entrance ring */
3494 srng
= &ab
->hal
.srng_list
[ab
->dp
.reo_reinject_ring
.ring_id
];
3496 spin_lock_bh(&srng
->lock
);
3497 ath11k_hal_srng_access_begin(ab
, srng
);
3499 reo_ent_ring
= (struct hal_reo_entrance_ring
*)
3500 ath11k_hal_srng_src_get_next_entry(ab
, srng
);
3501 if (!reo_ent_ring
) {
3502 ath11k_hal_srng_access_end(ab
, srng
);
3503 spin_unlock_bh(&srng
->lock
);
3507 memset(reo_ent_ring
, 0, sizeof(*reo_ent_ring
));
3509 ath11k_hal_rx_reo_ent_paddr_get(ab
, reo_dest_ring
, &paddr
, &desc_bank
);
3510 ath11k_hal_rx_buf_addr_info_set(reo_ent_ring
, paddr
, desc_bank
,
3511 HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST
);
3513 mpdu_info
= FIELD_PREP(RX_MPDU_DESC_INFO0_MSDU_COUNT
, 1) |
3514 FIELD_PREP(RX_MPDU_DESC_INFO0_SEQ_NUM
, rx_tid
->cur_sn
) |
3515 FIELD_PREP(RX_MPDU_DESC_INFO0_FRAG_FLAG
, 0) |
3516 FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_SA
, 1) |
3517 FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_DA
, 1) |
3518 FIELD_PREP(RX_MPDU_DESC_INFO0_RAW_MPDU
, 1) |
3519 FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_PN
, 1);
3521 reo_ent_ring
->rx_mpdu_info
.info0
= mpdu_info
;
3522 reo_ent_ring
->rx_mpdu_info
.meta_data
= reo_dest_ring
->rx_mpdu_info
.meta_data
;
3523 reo_ent_ring
->queue_addr_lo
= reo_dest_ring
->queue_addr_lo
;
3524 reo_ent_ring
->info0
= FIELD_PREP(HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI
,
3525 FIELD_GET(HAL_REO_DEST_RING_INFO0_QUEUE_ADDR_HI
,
3526 reo_dest_ring
->info0
)) |
3527 FIELD_PREP(HAL_REO_ENTR_RING_INFO0_DEST_IND
, dst_idx
);
3528 ath11k_hal_srng_access_end(ab
, srng
);
3529 spin_unlock_bh(&srng
->lock
);
3534 spin_lock_bh(&rx_refill_ring
->idr_lock
);
3535 idr_remove(&rx_refill_ring
->bufs_idr
, buf_id
);
3536 spin_unlock_bh(&rx_refill_ring
->idr_lock
);
3538 dma_unmap_single(ab
->dev
, paddr
, defrag_skb
->len
+ skb_tailroom(defrag_skb
),
static int ath11k_dp_rx_h_cmp_frags(struct ath11k *ar,
				    struct sk_buff *a, struct sk_buff *b)
{
	int frag1, frag2;

	frag1 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, a);
	frag2 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, b);

	return frag1 - frag2;
}

static void ath11k_dp_rx_h_sort_frags(struct ath11k *ar,
				      struct sk_buff_head *frag_list,
				      struct sk_buff *cur_frag)
{
	struct sk_buff *skb;
	int cmp;

	skb_queue_walk(frag_list, skb) {
		cmp = ath11k_dp_rx_h_cmp_frags(ar, skb, cur_frag);
		if (cmp < 0)
			continue;

		__skb_queue_before(frag_list, skb, cur_frag);
		return;
	}

	__skb_queue_tail(frag_list, cur_frag);
}

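/* Fragments are kept ordered by their fragment number: each new fragment
 * is inserted before the first queued entry with an equal or higher
 * fragment number, so reassembly can simply concatenate the queue from
 * head to tail.
 */
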
static u64 ath11k_dp_rx_h_get_pn(struct ath11k *ar, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	u64 pn = 0;
	u8 *ehdr;
	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;

	hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
	ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control);

	pn = ehdr[0];
	pn |= (u64)ehdr[1] << 8;
	pn |= (u64)ehdr[4] << 16;
	pn |= (u64)ehdr[5] << 24;
	pn |= (u64)ehdr[6] << 32;
	pn |= (u64)ehdr[7] << 40;

	return pn;
}

static bool
ath11k_dp_rx_h_defrag_validate_incr_pn(struct ath11k *ar, struct dp_rx_tid *rx_tid)
{
	enum hal_encrypt_type encrypt_type;
	struct sk_buff *first_frag, *skb;
	struct hal_rx_desc *desc;
	u64 last_pn;
	u64 cur_pn;

	first_frag = skb_peek(&rx_tid->rx_frags);
	desc = (struct hal_rx_desc *)first_frag->data;

	encrypt_type = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, desc);
	if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 &&
	    encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 &&
	    encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 &&
	    encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256)
		return true;

	last_pn = ath11k_dp_rx_h_get_pn(ar, first_frag);
	skb_queue_walk(&rx_tid->rx_frags, skb) {
		if (skb == first_frag)
			continue;

		cur_pn = ath11k_dp_rx_h_get_pn(ar, skb);
		if (cur_pn != last_pn + 1)
			return false;
		last_pn = cur_pn;
	}

	return true;
}

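/* PN layout used above (CCMP/GCMP IV, see IEEE 802.11):
 *
 *   byte 0: PN0   byte 1: PN1   byte 2: reserved   byte 3: key id / Ext IV
 *   byte 4: PN2   byte 5: PN3   byte 6: PN4        byte 7: PN5
 *
 * ath11k_dp_rx_h_get_pn() folds bytes 0, 1 and 4..7 into a 48-bit PN and
 * the check requires strictly consecutive PNs across the fragments of one
 * MPDU, matching the replay rules for fragmented frames.
 */
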
3623 static int ath11k_dp_rx_frag_h_mpdu(struct ath11k
*ar
,
3624 struct sk_buff
*msdu
,
3627 struct ath11k_base
*ab
= ar
->ab
;
3628 struct hal_rx_desc
*rx_desc
;
3629 struct ath11k_peer
*peer
;
3630 struct dp_rx_tid
*rx_tid
;
3631 struct sk_buff
*defrag_skb
= NULL
;
3639 rx_desc
= (struct hal_rx_desc
*)msdu
->data
;
3640 peer_id
= ath11k_dp_rx_h_mpdu_start_peer_id(ar
->ab
, rx_desc
);
3641 tid
= ath11k_dp_rx_h_mpdu_start_tid(ar
->ab
, rx_desc
);
3642 seqno
= ath11k_dp_rx_h_mpdu_start_seq_no(ar
->ab
, rx_desc
);
3643 frag_no
= ath11k_dp_rx_h_mpdu_start_frag_no(ar
->ab
, msdu
);
3644 more_frags
= ath11k_dp_rx_h_mpdu_start_more_frags(ar
->ab
, msdu
);
3645 is_mcbc
= ath11k_dp_rx_h_attn_is_mcbc(ar
->ab
, rx_desc
);
3647 /* Multicast/Broadcast fragments are not expected */
3651 if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(ar
->ab
, rx_desc
) ||
3652 !ath11k_dp_rx_h_mpdu_start_fc_valid(ar
->ab
, rx_desc
) ||
3653 tid
> IEEE80211_NUM_TIDS
)
3656 /* received unfragmented packet in reo
3657 * exception ring, this shouldn't happen
3658 * as these packets typically come from
3661 if (WARN_ON_ONCE(!frag_no
&& !more_frags
))
3664 spin_lock_bh(&ab
->base_lock
);
3665 peer
= ath11k_peer_find_by_id(ab
, peer_id
);
3667 ath11k_warn(ab
, "failed to find the peer to de-fragment received fragment peer_id %d\n",
3672 if (!peer
->dp_setup_done
) {
3673 ath11k_warn(ab
, "The peer %pM [%d] has uninitialized datapath\n",
3674 peer
->addr
, peer_id
);
3679 rx_tid
= &peer
->rx_tid
[tid
];
3681 if ((!skb_queue_empty(&rx_tid
->rx_frags
) && seqno
!= rx_tid
->cur_sn
) ||
3682 skb_queue_empty(&rx_tid
->rx_frags
)) {
3683 /* Flush stored fragments and start a new sequence */
3684 ath11k_dp_rx_frags_cleanup(rx_tid
, true);
3685 rx_tid
->cur_sn
= seqno
;
3688 if (rx_tid
->rx_frag_bitmap
& BIT(frag_no
)) {
3689 /* Fragment already present */
3694 if (!rx_tid
->rx_frag_bitmap
|| (frag_no
> __fls(rx_tid
->rx_frag_bitmap
)))
3695 __skb_queue_tail(&rx_tid
->rx_frags
, msdu
);
3697 ath11k_dp_rx_h_sort_frags(ar
, &rx_tid
->rx_frags
, msdu
);
3699 rx_tid
->rx_frag_bitmap
|= BIT(frag_no
);
3701 rx_tid
->last_frag_no
= frag_no
;
3704 rx_tid
->dst_ring_desc
= kmemdup(ring_desc
,
3705 sizeof(*rx_tid
->dst_ring_desc
),
3707 if (!rx_tid
->dst_ring_desc
) {
3712 ath11k_dp_rx_link_desc_return(ab
, ring_desc
,
3713 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE
);
3716 if (!rx_tid
->last_frag_no
||
3717 rx_tid
->rx_frag_bitmap
!= GENMASK(rx_tid
->last_frag_no
, 0)) {
3718 mod_timer(&rx_tid
->frag_timer
, jiffies
+
3719 ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS
);
3723 spin_unlock_bh(&ab
->base_lock
);
3724 del_timer_sync(&rx_tid
->frag_timer
);
3725 spin_lock_bh(&ab
->base_lock
);
3727 peer
= ath11k_peer_find_by_id(ab
, peer_id
);
3729 goto err_frags_cleanup
;
3731 if (!ath11k_dp_rx_h_defrag_validate_incr_pn(ar
, rx_tid
))
3732 goto err_frags_cleanup
;
3734 if (ath11k_dp_rx_h_defrag(ar
, peer
, rx_tid
, &defrag_skb
))
3735 goto err_frags_cleanup
;
3738 goto err_frags_cleanup
;
3740 if (ath11k_dp_rx_h_defrag_reo_reinject(ar
, rx_tid
, defrag_skb
))
3741 goto err_frags_cleanup
;
3743 ath11k_dp_rx_frags_cleanup(rx_tid
, false);
3747 dev_kfree_skb_any(defrag_skb
);
3748 ath11k_dp_rx_frags_cleanup(rx_tid
, true);
3750 spin_unlock_bh(&ab
->base_lock
);
3755 ath11k_dp_process_rx_err_buf(struct ath11k
*ar
, u32
*ring_desc
, int buf_id
, bool drop
)
3757 struct ath11k_pdev_dp
*dp
= &ar
->dp
;
3758 struct dp_rxdma_ring
*rx_ring
= &dp
->rx_refill_buf_ring
;
3759 struct sk_buff
*msdu
;
3760 struct ath11k_skb_rxcb
*rxcb
;
3761 struct hal_rx_desc
*rx_desc
;
3764 u32 hal_rx_desc_sz
= ar
->ab
->hw_params
.hal_desc_sz
;
3766 spin_lock_bh(&rx_ring
->idr_lock
);
3767 msdu
= idr_find(&rx_ring
->bufs_idr
, buf_id
);
3769 ath11k_warn(ar
->ab
, "rx err buf with invalid buf_id %d\n",
3771 spin_unlock_bh(&rx_ring
->idr_lock
);
3775 idr_remove(&rx_ring
->bufs_idr
, buf_id
);
3776 spin_unlock_bh(&rx_ring
->idr_lock
);
3778 rxcb
= ATH11K_SKB_RXCB(msdu
);
3779 dma_unmap_single(ar
->ab
->dev
, rxcb
->paddr
,
3780 msdu
->len
+ skb_tailroom(msdu
),
3784 dev_kfree_skb_any(msdu
);
3789 if (!rcu_dereference(ar
->ab
->pdevs_active
[ar
->pdev_idx
])) {
3790 dev_kfree_skb_any(msdu
);
3794 if (test_bit(ATH11K_CAC_RUNNING
, &ar
->dev_flags
)) {
3795 dev_kfree_skb_any(msdu
);
3799 rx_desc
= (struct hal_rx_desc
*)msdu
->data
;
3800 msdu_len
= ath11k_dp_rx_h_msdu_start_msdu_len(ar
->ab
, rx_desc
);
3801 if ((msdu_len
+ hal_rx_desc_sz
) > DP_RX_BUFFER_SIZE
) {
3802 hdr_status
= ath11k_dp_rx_h_80211_hdr(ar
->ab
, rx_desc
);
		ath11k_warn(ar->ab, "invalid msdu len %u\n", msdu_len);
3804 ath11k_dbg_dump(ar
->ab
, ATH11K_DBG_DATA
, NULL
, "", hdr_status
,
3805 sizeof(struct ieee80211_hdr
));
3806 ath11k_dbg_dump(ar
->ab
, ATH11K_DBG_DATA
, NULL
, "", rx_desc
,
3807 sizeof(struct hal_rx_desc
));
3808 dev_kfree_skb_any(msdu
);
3812 skb_put(msdu
, hal_rx_desc_sz
+ msdu_len
);
3814 if (ath11k_dp_rx_frag_h_mpdu(ar
, msdu
, ring_desc
)) {
3815 dev_kfree_skb_any(msdu
);
3816 ath11k_dp_rx_link_desc_return(ar
->ab
, ring_desc
,
3817 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE
);
3824 int ath11k_dp_process_rx_err(struct ath11k_base
*ab
, struct napi_struct
*napi
,
3827 u32 msdu_cookies
[HAL_NUM_RX_MSDUS_PER_LINK_DESC
];
3828 struct dp_link_desc_bank
*link_desc_banks
;
3829 enum hal_rx_buf_return_buf_manager rbm
;
3830 int tot_n_bufs_reaped
, quota
, ret
, i
;
3831 int n_bufs_reaped
[MAX_RADIOS
] = {0};
3832 struct dp_rxdma_ring
*rx_ring
;
3833 struct dp_srng
*reo_except
;
3834 u32 desc_bank
, num_msdus
;
3835 struct hal_srng
*srng
;
3836 struct ath11k_dp
*dp
;
3845 tot_n_bufs_reaped
= 0;
3849 reo_except
= &dp
->reo_except_ring
;
3850 link_desc_banks
= dp
->link_desc_banks
;
3852 srng
= &ab
->hal
.srng_list
[reo_except
->ring_id
];
3854 spin_lock_bh(&srng
->lock
);
3856 ath11k_hal_srng_access_begin(ab
, srng
);
3859 (desc
= ath11k_hal_srng_dst_get_next_entry(ab
, srng
))) {
3860 struct hal_reo_dest_ring
*reo_desc
= (struct hal_reo_dest_ring
*)desc
;
3862 ab
->soc_stats
.err_ring_pkts
++;
3863 ret
= ath11k_hal_desc_reo_parse_err(ab
, desc
, &paddr
,
3866 ath11k_warn(ab
, "failed to parse error reo desc %d\n",
3870 link_desc_va
= link_desc_banks
[desc_bank
].vaddr
+
3871 (paddr
- link_desc_banks
[desc_bank
].paddr
);
3872 ath11k_hal_rx_msdu_link_info_get(link_desc_va
, &num_msdus
, msdu_cookies
,
3874 if (rbm
!= HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST
&&
3875 rbm
!= HAL_RX_BUF_RBM_SW3_BM
) {
3876 ab
->soc_stats
.invalid_rbm
++;
3877 ath11k_warn(ab
, "invalid return buffer manager %d\n", rbm
);
3878 ath11k_dp_rx_link_desc_return(ab
, desc
,
3879 HAL_WBM_REL_BM_ACT_REL_MSDU
);
3883 is_frag
= !!(reo_desc
->rx_mpdu_info
.info0
& RX_MPDU_DESC_INFO0_FRAG_FLAG
);
3885 /* Process only rx fragments with one msdu per link desc below, and drop
3886 * msdu's indicated due to error reasons.
3888 if (!is_frag
|| num_msdus
> 1) {
3890 /* Return the link desc back to wbm idle list */
3891 ath11k_dp_rx_link_desc_return(ab
, desc
,
3892 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE
);
3895 for (i
= 0; i
< num_msdus
; i
++) {
3896 buf_id
= FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID
,
3899 mac_id
= FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID
,
3902 ar
= ab
->pdevs
[mac_id
].ar
;
3904 if (!ath11k_dp_process_rx_err_buf(ar
, desc
, buf_id
, drop
)) {
3905 n_bufs_reaped
[mac_id
]++;
3906 tot_n_bufs_reaped
++;
3910 if (tot_n_bufs_reaped
>= quota
) {
3911 tot_n_bufs_reaped
= quota
;
3915 budget
= quota
- tot_n_bufs_reaped
;
3919 ath11k_hal_srng_access_end(ab
, srng
);
3921 spin_unlock_bh(&srng
->lock
);
3923 for (i
= 0; i
< ab
->num_radios
; i
++) {
3924 if (!n_bufs_reaped
[i
])
3927 ar
= ab
->pdevs
[i
].ar
;
3928 rx_ring
= &ar
->dp
.rx_refill_buf_ring
;
3930 ath11k_dp_rxbufs_replenish(ab
, i
, rx_ring
, n_bufs_reaped
[i
],
3931 ab
->hw_params
.hal_params
->rx_buf_rbm
);
3934 return tot_n_bufs_reaped
;
3937 static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k
*ar
,
3939 struct sk_buff_head
*msdu_list
)
3941 struct sk_buff
*skb
, *tmp
;
3942 struct ath11k_skb_rxcb
*rxcb
;
3945 n_buffs
= DIV_ROUND_UP(msdu_len
,
3946 (DP_RX_BUFFER_SIZE
- ar
->ab
->hw_params
.hal_desc_sz
));
3948 skb_queue_walk_safe(msdu_list
, skb
, tmp
) {
3949 rxcb
= ATH11K_SKB_RXCB(skb
);
3950 if (rxcb
->err_rel_src
== HAL_WBM_REL_SRC_MODULE_REO
&&
3951 rxcb
->err_code
== HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO
) {
3954 __skb_unlink(skb
, msdu_list
);
3955 dev_kfree_skb_any(skb
);
3961 static int ath11k_dp_rx_h_null_q_desc(struct ath11k
*ar
, struct sk_buff
*msdu
,
3962 struct ieee80211_rx_status
*status
,
3963 struct sk_buff_head
*msdu_list
)
3966 struct hal_rx_desc
*desc
= (struct hal_rx_desc
*)msdu
->data
;
3967 struct rx_attention
*rx_attention
;
3969 struct ath11k_skb_rxcb
*rxcb
= ATH11K_SKB_RXCB(msdu
);
3970 u32 hal_rx_desc_sz
= ar
->ab
->hw_params
.hal_desc_sz
;
3972 msdu_len
= ath11k_dp_rx_h_msdu_start_msdu_len(ar
->ab
, desc
);
3974 if (!rxcb
->is_frag
&& ((msdu_len
+ hal_rx_desc_sz
) > DP_RX_BUFFER_SIZE
)) {
3975 /* First buffer will be freed by the caller, so deduct it's length */
3976 msdu_len
= msdu_len
- (DP_RX_BUFFER_SIZE
- hal_rx_desc_sz
);
3977 ath11k_dp_rx_null_q_desc_sg_drop(ar
, msdu_len
, msdu_list
);
3981 rx_attention
= ath11k_dp_rx_get_attention(ar
->ab
, desc
);
3982 if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention
)) {
3984 "msdu_done bit not set in null_q_des processing\n");
3985 __skb_queue_purge(msdu_list
);
3989 /* Handle NULL queue descriptor violations arising out a missing
3990 * REO queue for a given peer or a given TID. This typically
3991 * may happen if a packet is received on a QOS enabled TID before the
3992 * ADDBA negotiation for that TID, when the TID queue is setup. Or
3993 * it may also happen for MC/BC frames if they are not routed to the
3994 * non-QOS TID queue, in the absence of any other default TID queue.
3995 * This error can show up both in a REO destination or WBM release ring.
3998 rxcb
->is_first_msdu
= ath11k_dp_rx_h_msdu_end_first_msdu(ar
->ab
, desc
);
3999 rxcb
->is_last_msdu
= ath11k_dp_rx_h_msdu_end_last_msdu(ar
->ab
, desc
);
4001 if (rxcb
->is_frag
) {
4002 skb_pull(msdu
, hal_rx_desc_sz
);
4004 l3pad_bytes
= ath11k_dp_rx_h_msdu_end_l3pad(ar
->ab
, desc
);
4006 if ((hal_rx_desc_sz
+ l3pad_bytes
+ msdu_len
) > DP_RX_BUFFER_SIZE
)
4009 skb_put(msdu
, hal_rx_desc_sz
+ l3pad_bytes
+ msdu_len
);
4010 skb_pull(msdu
, hal_rx_desc_sz
+ l3pad_bytes
);
4012 ath11k_dp_rx_h_ppdu(ar
, desc
, status
);
4014 ath11k_dp_rx_h_mpdu(ar
, msdu
, desc
, status
);
4016 rxcb
->tid
= ath11k_dp_rx_h_mpdu_start_tid(ar
->ab
, desc
);
4018 /* Please note that caller will having the access to msdu and completing
4019 * rx with mac80211. Need not worry about cleaning up amsdu_list.
4025 static bool ath11k_dp_rx_h_reo_err(struct ath11k
*ar
, struct sk_buff
*msdu
,
4026 struct ieee80211_rx_status
*status
,
4027 struct sk_buff_head
*msdu_list
)
4029 struct ath11k_skb_rxcb
*rxcb
= ATH11K_SKB_RXCB(msdu
);
4032 ar
->ab
->soc_stats
.reo_error
[rxcb
->err_code
]++;
4034 switch (rxcb
->err_code
) {
4035 case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO
:
4036 if (ath11k_dp_rx_h_null_q_desc(ar
, msdu
, status
, msdu_list
))
4039 case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED
:
4040 /* TODO: Do not drop PN failed packets in the driver;
4041 * instead, it is good to drop such packets in mac80211
4042 * after incrementing the replay counters.
4046 /* TODO: Review other errors and process them to mac80211
4056 static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k
*ar
, struct sk_buff
*msdu
,
4057 struct ieee80211_rx_status
*status
)
4060 struct hal_rx_desc
*desc
= (struct hal_rx_desc
*)msdu
->data
;
4062 struct ath11k_skb_rxcb
*rxcb
= ATH11K_SKB_RXCB(msdu
);
4063 u32 hal_rx_desc_sz
= ar
->ab
->hw_params
.hal_desc_sz
;
4065 rxcb
->is_first_msdu
= ath11k_dp_rx_h_msdu_end_first_msdu(ar
->ab
, desc
);
4066 rxcb
->is_last_msdu
= ath11k_dp_rx_h_msdu_end_last_msdu(ar
->ab
, desc
);
4068 l3pad_bytes
= ath11k_dp_rx_h_msdu_end_l3pad(ar
->ab
, desc
);
4069 msdu_len
= ath11k_dp_rx_h_msdu_start_msdu_len(ar
->ab
, desc
);
4070 skb_put(msdu
, hal_rx_desc_sz
+ l3pad_bytes
+ msdu_len
);
4071 skb_pull(msdu
, hal_rx_desc_sz
+ l3pad_bytes
);
4073 ath11k_dp_rx_h_ppdu(ar
, desc
, status
);
4075 status
->flag
|= (RX_FLAG_MMIC_STRIPPED
| RX_FLAG_MMIC_ERROR
|
4078 ath11k_dp_rx_h_undecap(ar
, msdu
, desc
,
4079 HAL_ENCRYPT_TYPE_TKIP_MIC
, status
, false);
static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar, struct sk_buff *msdu,
				     struct ieee80211_rx_status *status)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	bool drop = false;

	ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;

	switch (rxcb->err_code) {
	case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
		ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status);
		break;
	default:
		/* TODO: Review other rxdma error code to check if anything is
		 * worth reporting to mac80211
		 */
		drop = true;
		break;
	}

	return drop;
}

static void ath11k_dp_rx_wbm_err(struct ath11k *ar,
				 struct napi_struct *napi,
				 struct sk_buff *msdu,
				 struct sk_buff_head *msdu_list)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	struct ieee80211_rx_status rxs = {0};
	bool drop = true;

	switch (rxcb->err_rel_src) {
	case HAL_WBM_REL_SRC_MODULE_REO:
		drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
		break;
	case HAL_WBM_REL_SRC_MODULE_RXDMA:
		drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
		break;
	default:
		/* msdu will get freed */
		break;
	}

	if (drop) {
		dev_kfree_skb_any(msdu);
		return;
	}

	ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rxs);
}

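/* Frames that land in the WBM release ring carry the module that flagged
 * them (REO vs RXDMA). Only the NULL queue descriptor and TKIP MIC error
 * cases are repaired and handed to mac80211; everything else is dropped
 * here after being counted in soc_stats.
 */
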
4134 int ath11k_dp_rx_process_wbm_err(struct ath11k_base
*ab
,
4135 struct napi_struct
*napi
, int budget
)
4138 struct ath11k_dp
*dp
= &ab
->dp
;
4139 struct dp_rxdma_ring
*rx_ring
;
4140 struct hal_rx_wbm_rel_info err_info
;
4141 struct hal_srng
*srng
;
4142 struct sk_buff
*msdu
;
4143 struct sk_buff_head msdu_list
[MAX_RADIOS
];
4144 struct ath11k_skb_rxcb
*rxcb
;
4147 int num_buffs_reaped
[MAX_RADIOS
] = {0};
4148 int total_num_buffs_reaped
= 0;
4151 for (i
= 0; i
< ab
->num_radios
; i
++)
4152 __skb_queue_head_init(&msdu_list
[i
]);
4154 srng
= &ab
->hal
.srng_list
[dp
->rx_rel_ring
.ring_id
];
4156 spin_lock_bh(&srng
->lock
);
4158 ath11k_hal_srng_access_begin(ab
, srng
);
4161 rx_desc
= ath11k_hal_srng_dst_get_next_entry(ab
, srng
);
4165 ret
= ath11k_hal_wbm_desc_parse_err(ab
, rx_desc
, &err_info
);
4168 "failed to parse rx error in wbm_rel ring desc %d\n",
4173 buf_id
= FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID
, err_info
.cookie
);
4174 mac_id
= FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID
, err_info
.cookie
);
4176 ar
= ab
->pdevs
[mac_id
].ar
;
4177 rx_ring
= &ar
->dp
.rx_refill_buf_ring
;
4179 spin_lock_bh(&rx_ring
->idr_lock
);
4180 msdu
= idr_find(&rx_ring
->bufs_idr
, buf_id
);
4182 ath11k_warn(ab
, "frame rx with invalid buf_id %d pdev %d\n",
4184 spin_unlock_bh(&rx_ring
->idr_lock
);
4188 idr_remove(&rx_ring
->bufs_idr
, buf_id
);
4189 spin_unlock_bh(&rx_ring
->idr_lock
);
4191 rxcb
= ATH11K_SKB_RXCB(msdu
);
4192 dma_unmap_single(ab
->dev
, rxcb
->paddr
,
4193 msdu
->len
+ skb_tailroom(msdu
),
4196 num_buffs_reaped
[mac_id
]++;
4197 total_num_buffs_reaped
++;
4200 if (err_info
.push_reason
!=
4201 HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED
) {
4202 dev_kfree_skb_any(msdu
);
4206 rxcb
->err_rel_src
= err_info
.err_rel_src
;
4207 rxcb
->err_code
= err_info
.err_code
;
4208 rxcb
->rx_desc
= (struct hal_rx_desc
*)msdu
->data
;
4209 __skb_queue_tail(&msdu_list
[mac_id
], msdu
);
4212 ath11k_hal_srng_access_end(ab
, srng
);
4214 spin_unlock_bh(&srng
->lock
);
4216 if (!total_num_buffs_reaped
)
4219 for (i
= 0; i
< ab
->num_radios
; i
++) {
4220 if (!num_buffs_reaped
[i
])
4223 ar
= ab
->pdevs
[i
].ar
;
4224 rx_ring
= &ar
->dp
.rx_refill_buf_ring
;
4226 ath11k_dp_rxbufs_replenish(ab
, i
, rx_ring
, num_buffs_reaped
[i
],
4227 ab
->hw_params
.hal_params
->rx_buf_rbm
);
4231 for (i
= 0; i
< ab
->num_radios
; i
++) {
4232 if (!rcu_dereference(ab
->pdevs_active
[i
])) {
4233 __skb_queue_purge(&msdu_list
[i
]);
4237 ar
= ab
->pdevs
[i
].ar
;
4239 if (test_bit(ATH11K_CAC_RUNNING
, &ar
->dev_flags
)) {
4240 __skb_queue_purge(&msdu_list
[i
]);
4244 while ((msdu
= __skb_dequeue(&msdu_list
[i
])) != NULL
)
4245 ath11k_dp_rx_wbm_err(ar
, napi
, msdu
, &msdu_list
[i
]);
4249 return total_num_buffs_reaped
;
int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget)
{
	struct ath11k *ar;
	struct dp_srng *err_ring;
	struct dp_rxdma_ring *rx_ring;
	struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks;
	struct hal_srng *srng;
	u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
	enum hal_rx_buf_return_buf_manager rbm;
	enum hal_reo_entr_rxdma_ecode rxdma_err_code;
	struct ath11k_skb_rxcb *rxcb;
	struct sk_buff *skb;
	struct hal_reo_entrance_ring *entr_ring;
	void *desc;
	int num_buf_freed = 0;
	int quota = budget;
	dma_addr_t paddr;
	u32 desc_bank;
	void *link_desc_va;
	int num_msdus;
	int i;
	int buf_id;

	ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
	err_ring = &ar->dp.rxdma_err_dst_ring[ath11k_hw_mac_id_to_srng_id(&ab->hw_params,
									  mac_id)];
	rx_ring = &ar->dp.rx_refill_buf_ring;

	srng = &ab->hal.srng_list[err_ring->ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	while (quota-- &&
	       (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
		ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank);

		entr_ring = (struct hal_reo_entrance_ring *)desc;
		rxdma_err_code =
			FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
				  entr_ring->info1);
		ab->soc_stats.rxdma_error[rxdma_err_code]++;

		link_desc_va = link_desc_banks[desc_bank].vaddr +
			       (paddr - link_desc_banks[desc_bank].paddr);
		ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus,
						 msdu_cookies, &rbm);

		for (i = 0; i < num_msdus; i++) {
			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
					   msdu_cookies[i]);

			spin_lock_bh(&rx_ring->idr_lock);
			skb = idr_find(&rx_ring->bufs_idr, buf_id);
			if (!skb) {
				ath11k_warn(ab, "rxdma error with invalid buf_id %d\n",
					    buf_id);
				spin_unlock_bh(&rx_ring->idr_lock);
				continue;
			}

			idr_remove(&rx_ring->bufs_idr, buf_id);
			spin_unlock_bh(&rx_ring->idr_lock);

			rxcb = ATH11K_SKB_RXCB(skb);
			dma_unmap_single(ab->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);

			num_buf_freed++;
		}

		ath11k_dp_rx_link_desc_return(ab, desc,
					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	if (num_buf_freed)
		ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed,
					   ab->hw_params.hal_params->rx_buf_rbm);

	return budget - quota;
}
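/* Process the REO status ring and dispatch each completed status TLV to the
 * handler registered with the matching REO command on dp->reo_cmd_list.
 */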
void ath11k_dp_process_reo_status(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng;
	struct dp_reo_cmd *cmd, *tmp;
	bool found = false;
	u32 *reo_desc;
	u16 tag;
	struct hal_reo_status reo_status;

	srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];

	memset(&reo_status, 0, sizeof(reo_status));

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	while ((reo_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
		tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc);

		switch (tag) {
		case HAL_REO_GET_QUEUE_STATS_STATUS:
			ath11k_hal_reo_status_queue_stats(ab, reo_desc,
							  &reo_status);
			break;
		case HAL_REO_FLUSH_QUEUE_STATUS:
			ath11k_hal_reo_flush_queue_status(ab, reo_desc,
							  &reo_status);
			break;
		case HAL_REO_FLUSH_CACHE_STATUS:
			ath11k_hal_reo_flush_cache_status(ab, reo_desc,
							  &reo_status);
			break;
		case HAL_REO_UNBLOCK_CACHE_STATUS:
			ath11k_hal_reo_unblk_cache_status(ab, reo_desc,
							  &reo_status);
			break;
		case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
			ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc,
								 &reo_status);
			break;
		case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
			ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc,
								  &reo_status);
			break;
		case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
			ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc,
								  &reo_status);
			break;
		default:
			ath11k_warn(ab, "Unknown reo status type %d\n", tag);
			break;
		}

		spin_lock_bh(&dp->reo_cmd_lock);
		list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
			if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
				found = true;
				list_del(&cmd->list);
				break;
			}
		}
		spin_unlock_bh(&dp->reo_cmd_lock);

		if (found) {
			cmd->handler(dp, (void *)&cmd->data,
				     reo_status.uniform_hdr.cmd_status);
			kfree(cmd);
		}

		found = false;
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);
}
void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;

	ath11k_dp_rx_pdev_srng_free(ar);
	ath11k_dp_rxdma_pdev_buf_free(ar);
}
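/* Allocate per-pdev RX srngs and buffers and point the firmware (via HTT)
 * at every RXDMA ring owned by this pdev.
 */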
int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;
	struct ath11k_pdev_dp *dp = &ar->dp;
	u32 ring_id;
	int i;
	int ret;

	ret = ath11k_dp_rx_pdev_srng_alloc(ar);
	if (ret) {
		ath11k_warn(ab, "failed to setup rx srngs\n");
		return ret;
	}

	ret = ath11k_dp_rxdma_pdev_buf_setup(ar);
	if (ret) {
		ath11k_warn(ab, "failed to setup rxdma ring\n");
		return ret;
	}

	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
	ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_BUF);
	if (ret) {
		ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
			    ret);
		return ret;
	}

	if (ab->hw_params.rx_mac_buf_ring) {
		for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
			ring_id = dp->rx_mac_buf_ring[i].ring_id;
			ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
							  mac_id + i, HAL_RXDMA_BUF);
			if (ret) {
				ath11k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n",
					    i, ret);
				return ret;
			}
		}
	}

	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
		ring_id = dp->rxdma_err_dst_ring[i].ring_id;
		ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
						  mac_id + i, HAL_RXDMA_DST);
		if (ret) {
			ath11k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n",
				    i, ret);
			return ret;
		}
	}

	if (!ab->hw_params.rxdma1_enable)
		goto config_refill_ring;

	ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
	ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
					  mac_id, HAL_RXDMA_MONITOR_BUF);
	if (ret) {
		ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
			    ret);
		return ret;
	}

	ret = ath11k_dp_tx_htt_srng_setup(ab,
					  dp->rxdma_mon_dst_ring.ring_id,
					  mac_id, HAL_RXDMA_MONITOR_DST);
	if (ret) {
		ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
			    ret);
		return ret;
	}

	ret = ath11k_dp_tx_htt_srng_setup(ab,
					  dp->rxdma_mon_desc_ring.ring_id,
					  mac_id, HAL_RXDMA_MONITOR_DESC);
	if (ret) {
		ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
			    ret);
		return ret;
	}

config_refill_ring:
	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
		ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
		ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i,
						  HAL_RXDMA_MONITOR_STATUS);
		if (ret) {
			ath11k_warn(ab,
				    "failed to configure mon_status_refill_ring%d %d\n",
				    i, ret);
			return ret;
		}
	}

	return 0;
}
static void ath11k_dp_mon_set_frag_len(u32 *total_len, u32 *frag_len)
{
	if (*total_len >= (DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc))) {
		*frag_len = DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc);
		*total_len -= *frag_len;
	} else {
		*frag_len = *total_len;
		*total_len = 0;
	}
}
int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar,
					  void *p_last_buf_addr_info,
					  u8 mac_id)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct dp_srng *dp_srng;
	void *hal_srng;
	void *src_srng_desc;
	int ret = 0;

	if (ar->ab->hw_params.rxdma1_enable) {
		dp_srng = &dp->rxdma_mon_desc_ring;
		hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
	} else {
		dp_srng = &ar->ab->dp.wbm_desc_rel_ring;
		hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
	}

	ath11k_hal_srng_access_begin(ar->ab, hal_srng);

	src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng);

	if (src_srng_desc) {
		struct ath11k_buffer_addr *src_desc = src_srng_desc;

		*src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info);
	} else {
		ret = -ENOMEM;
		ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
			   "Monitor Link Desc Ring %d Full", mac_id);
	}

	ath11k_hal_srng_access_end(ar->ab, hal_srng);

	return ret;
}
void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc,
					 dma_addr_t *paddr, u32 *sw_cookie,
					 u8 *rbm,
					 void **pp_buf_addr_info)
{
	struct hal_rx_msdu_link *msdu_link = rx_msdu_link_desc;
	struct ath11k_buffer_addr *buf_addr_info;

	buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info;

	ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, rbm);

	*pp_buf_addr_info = (void *)buf_addr_info;
}
static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len)
{
	if (skb->len > len) {
		skb_trim(skb, len);
	} else {
		if (skb_tailroom(skb) < len - skb->len) {
			if ((pskb_expand_head(skb, 0,
					      len - skb->len - skb_tailroom(skb),
					      GFP_ATOMIC))) {
				dev_kfree_skb_any(skb);
				return -ENOMEM;
			}
		}
		skb_put(skb, (len - skb->len));
	}
	return 0;
}
static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar,
					void *msdu_link_desc,
					struct hal_rx_msdu_list *msdu_list,
					u16 *num_msdus)
{
	struct hal_rx_msdu_details *msdu_details = NULL;
	struct rx_msdu_desc *msdu_desc_info = NULL;
	struct hal_rx_msdu_link *msdu_link = NULL;
	int i;
	u32 last = FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1);
	u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1);
	u8 tmp = 0;

	msdu_link = msdu_link_desc;
	msdu_details = &msdu_link->msdu_link[0];

	for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) {
		if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
			      msdu_details[i].buf_addr_info.info0) == 0) {
			msdu_desc_info = &msdu_details[i - 1].rx_msdu_info;
			msdu_desc_info->info0 |= last;
			break;
		}
		msdu_desc_info = &msdu_details[i].rx_msdu_info;

		if (!i)
			msdu_desc_info->info0 |= first;
		else if (i == (HAL_RX_NUM_MSDU_DESC - 1))
			msdu_desc_info->info0 |= last;
		msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0;
		msdu_list->msdu_info[i].msdu_len =
			HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0);
		msdu_list->sw_cookie[i] =
			FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
				  msdu_details[i].buf_addr_info.info1);
		tmp = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
				msdu_details[i].buf_addr_info.info1);
		msdu_list->rbm[i] = tmp;
	}
	*num_msdus = i;
}
static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id,
					u32 *rx_bufs_used)
{
	u32 ret = 0;

	if ((*ppdu_id < msdu_ppdu_id) &&
	    ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) {
		*ppdu_id = msdu_ppdu_id;
		ret = msdu_ppdu_id;
	} else if ((*ppdu_id > msdu_ppdu_id) &&
		   ((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) {
		/* mon_dst is behind than mon_status
		 * skip dst_ring and free it
		 */
		*rx_bufs_used += 1;
		*ppdu_id = msdu_ppdu_id;
		ret = msdu_ppdu_id;
	}

	return ret;
}
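/* Derive the number of payload bytes held by the current buffer from the
 * MSDU descriptor, tracking fragmentation across buffers via *is_frag.
 */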
static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info,
				      bool *is_frag, u32 *total_len,
				      u32 *frag_len, u32 *msdu_cnt)
{
	if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) {
		if (!*is_frag) {
			*total_len = info->msdu_len;
			*is_frag = true;
		}
		ath11k_dp_mon_set_frag_len(total_len,
					   frag_len);
	} else {
		if (*is_frag) {
			ath11k_dp_mon_set_frag_len(total_len,
						   frag_len);
		} else {
			*frag_len = info->msdu_len;
		}
		*is_frag = false;
		*msdu_cnt -= 1;
	}
}
static u32
ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id,
			  void *ring_entry, struct sk_buff **head_msdu,
			  struct sk_buff **tail_msdu, u32 *npackets,
			  u32 *ppdu_id)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
	struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
	struct sk_buff *msdu = NULL, *last = NULL;
	struct hal_rx_msdu_list msdu_list;
	void *p_buf_addr_info, *p_last_buf_addr_info;
	struct hal_rx_desc *rx_desc;
	void *rx_msdu_link_desc;
	dma_addr_t paddr;
	u16 num_msdus = 0;
	u32 rx_buf_size, rx_pkt_offset, sw_cookie;
	u32 rx_bufs_used = 0, i = 0;
	u32 msdu_ppdu_id = 0, msdu_cnt = 0;
	u32 total_len = 0, frag_len = 0;
	bool is_frag, is_first_msdu;
	bool drop_mpdu = false;
	struct ath11k_skb_rxcb *rxcb;
	struct hal_reo_entrance_ring *ent_desc = ring_entry;
	int buf_id;
	u32 rx_link_buf_info[2];
	u8 rbm;

	if (!ar->ab->hw_params.rxdma1_enable)
		rx_ring = &dp->rx_refill_buf_ring;

	ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr,
					    &sw_cookie,
					    &p_last_buf_addr_info, &rbm,
					    &msdu_cnt);

	if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON,
		      ent_desc->info1) ==
	    HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
		u8 rxdma_err =
			FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
				  ent_desc->info1);
		if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
		    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
		    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
			drop_mpdu = true;
			pmon->rx_mon_stats.dest_mpdu_drop++;
		}
	}

	is_frag = false;
	is_first_msdu = true;

	do {
		if (pmon->mon_last_linkdesc_paddr == paddr) {
			pmon->rx_mon_stats.dup_mon_linkdesc_cnt++;
			return rx_bufs_used;
		}

		if (ar->ab->hw_params.rxdma1_enable)
			rx_msdu_link_desc =
				(void *)pmon->link_desc_banks[sw_cookie].vaddr +
				(paddr - pmon->link_desc_banks[sw_cookie].paddr);
		else
			rx_msdu_link_desc =
				(void *)ar->ab->dp.link_desc_banks[sw_cookie].vaddr +
				(paddr - ar->ab->dp.link_desc_banks[sw_cookie].paddr);

		ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
					    &num_msdus);

		for (i = 0; i < num_msdus; i++) {
			u32 l2_hdr_offset;

			if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) {
				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
					   "i %d last_cookie %d is same\n",
					   i, pmon->mon_last_buf_cookie);
				drop_mpdu = true;
				pmon->rx_mon_stats.dup_mon_buf_cnt++;
				continue;
			}
			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
					   msdu_list.sw_cookie[i]);

			spin_lock_bh(&rx_ring->idr_lock);
			msdu = idr_find(&rx_ring->bufs_idr, buf_id);
			spin_unlock_bh(&rx_ring->idr_lock);
			if (!msdu) {
				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
					   "msdu_pop: invalid buf_id %d\n", buf_id);
				break;
			}
			rxcb = ATH11K_SKB_RXCB(msdu);
			if (!rxcb->unmapped) {
				dma_unmap_single(ar->ab->dev, rxcb->paddr,
						 msdu->len +
						 skb_tailroom(msdu),
						 DMA_FROM_DEVICE);
				rxcb->unmapped = 1;
			}
			if (drop_mpdu) {
				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
					   "i %d drop msdu %p *ppdu_id %x\n",
					   i, msdu, *ppdu_id);
				dev_kfree_skb_any(msdu);
				msdu = NULL;
				goto next_msdu;
			}

			rx_desc = (struct hal_rx_desc *)msdu->data;

			rx_pkt_offset = sizeof(struct hal_rx_desc);
			l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc);

			if (is_first_msdu) {
				if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) {
					drop_mpdu = true;
					dev_kfree_skb_any(msdu);
					msdu = NULL;
					pmon->mon_last_linkdesc_paddr = paddr;
					goto next_msdu;
				}

				msdu_ppdu_id =
					ath11k_dp_rxdesc_get_ppduid(ar->ab, rx_desc);

				if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id,
								 ppdu_id,
								 &rx_bufs_used)) {
					if (rx_bufs_used) {
						drop_mpdu = true;
						dev_kfree_skb_any(msdu);
						msdu = NULL;
						goto next_msdu;
					}
					return rx_bufs_used;
				}
				pmon->mon_last_linkdesc_paddr = paddr;
				is_first_msdu = false;
			}
			ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
						  &is_frag, &total_len,
						  &frag_len, &msdu_cnt);
			rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;

			ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size);

			if (!(*head_msdu))
				*head_msdu = msdu;
			else if (last)
				last->next = msdu;

			last = msdu;
next_msdu:
			pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i];
			rx_bufs_used++;
			spin_lock_bh(&rx_ring->idr_lock);
			idr_remove(&rx_ring->bufs_idr, buf_id);
			spin_unlock_bh(&rx_ring->idr_lock);
		}

		ath11k_hal_rx_buf_addr_info_set(rx_link_buf_info, paddr, sw_cookie, rbm);

		ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr,
						    &sw_cookie, &rbm,
						    &p_buf_addr_info);

		if (ar->ab->hw_params.rxdma1_enable) {
			if (ath11k_dp_rx_monitor_link_desc_return(ar,
								  p_last_buf_addr_info,
								  dp->mac_id))
				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
					   "dp_rx_monitor_link_desc_return failed");
		} else {
			ath11k_dp_rx_link_desc_return(ar->ab, rx_link_buf_info,
						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
		}

		p_last_buf_addr_info = p_buf_addr_info;

	} while (paddr && msdu_cnt);

	if (last)
		last->next = NULL;

	*tail_msdu = msdu;

	if (msdu_cnt == 0)
		*npackets = 1;

	return rx_bufs_used;
}
static void ath11k_dp_rx_msdus_set_payload(struct ath11k *ar, struct sk_buff *msdu)
{
	u32 rx_pkt_offset, l2_hdr_offset;

	rx_pkt_offset = ar->ab->hw_params.hal_desc_sz;
	l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab,
						      (struct hal_rx_desc *)msdu->data);
	skb_pull(msdu, rx_pkt_offset + l2_hdr_offset);
}
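/* Merge the popped MSDU chain into one 802.11 frame for mac80211, handling
 * raw and native-wifi decap formats.
 */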
static struct sk_buff *
ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
			    u32 mac_id, struct sk_buff *head_msdu,
			    struct sk_buff *last_msdu,
			    struct ieee80211_rx_status *rxs, bool *fcs_err)
{
	struct ath11k_base *ab = ar->ab;
	struct sk_buff *msdu, *prev_buf;
	struct hal_rx_desc *rx_desc;
	u8 *hdr_desc;
	u8 *dest, decap_format;
	struct ieee80211_hdr_3addr *wh;
	struct rx_attention *rx_attention;
	u32 err_bitmap;

	if (!head_msdu)
		goto err_merge_fail;

	rx_desc = (struct hal_rx_desc *)head_msdu->data;
	rx_attention = ath11k_dp_rx_get_attention(ab, rx_desc);
	err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);

	if (err_bitmap & DP_RX_MPDU_ERR_FCS)
		*fcs_err = true;

	if (ath11k_dp_rxdesc_get_mpdulen_err(rx_attention))
		return NULL;

	decap_format = ath11k_dp_rx_h_msdu_start_decap_type(ab, rx_desc);

	ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);

	if (decap_format == DP_RX_DECAP_TYPE_RAW) {
		ath11k_dp_rx_msdus_set_payload(ar, head_msdu);

		prev_buf = head_msdu;
		msdu = head_msdu->next;

		while (msdu) {
			ath11k_dp_rx_msdus_set_payload(ar, msdu);

			prev_buf = msdu;
			msdu = msdu->next;
		}

		prev_buf->next = NULL;

		skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
	} else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) {
		u8 qos_pkt = 0;

		rx_desc = (struct hal_rx_desc *)head_msdu->data;
		hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc);

		/* Base size */
		wh = (struct ieee80211_hdr_3addr *)hdr_desc;

		if (ieee80211_is_data_qos(wh->frame_control))
			qos_pkt = 1;

		msdu = head_msdu;

		while (msdu) {
			ath11k_dp_rx_msdus_set_payload(ar, msdu);
			if (qos_pkt) {
				dest = skb_push(msdu, sizeof(__le16));
				if (!dest)
					goto err_merge_fail;
				memcpy(dest, hdr_desc, sizeof(struct ieee80211_qos_hdr));
			}
			prev_buf = msdu;
			msdu = msdu->next;
		}
		dest = skb_put(prev_buf, HAL_RX_FCS_LEN);
		if (!dest)
			goto err_merge_fail;

		ath11k_dbg(ab, ATH11K_DBG_DATA,
			   "mpdu_buf %p mpdu_buf->len %u",
			   prev_buf, prev_buf->len);
	} else {
		ath11k_dbg(ab, ATH11K_DBG_DATA,
			   "decap format %d is not supported!\n",
			   decap_format);
		goto err_merge_fail;
	}

	return head_msdu;

err_merge_fail:
	return NULL;
}
static void
ath11k_dp_rx_update_radiotap_he(struct hal_rx_mon_ppdu_info *rx_status,
				u8 *rtap_buf)
{
	u32 rtap_len = 0;

	put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
	rtap_len += 2;

	put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
	rtap_len += 2;

	put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
	rtap_len += 2;

	put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
	rtap_len += 2;

	put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
	rtap_len += 2;

	put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
}
static void
ath11k_dp_rx_update_radiotap_he_mu(struct hal_rx_mon_ppdu_info *rx_status,
				   u8 *rtap_buf)
{
	u32 rtap_len = 0;

	put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
	rtap_len += 2;

	put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
	rtap_len += 2;

	rtap_buf[rtap_len] = rx_status->he_RU[0];
	rtap_len += 1;

	rtap_buf[rtap_len] = rx_status->he_RU[1];
	rtap_len += 1;

	rtap_buf[rtap_len] = rx_status->he_RU[2];
	rtap_len += 1;

	rtap_buf[rtap_len] = rx_status->he_RU[3];
}
static void ath11k_update_radiotap(struct ath11k *ar,
				   struct hal_rx_mon_ppdu_info *ppduinfo,
				   struct sk_buff *mon_skb,
				   struct ieee80211_rx_status *rxs)
{
	struct ieee80211_supported_band *sband;
	u8 *ptr = NULL;

	rxs->flag |= RX_FLAG_MACTIME_START;
	rxs->signal = ppduinfo->rssi_comb + ATH11K_DEFAULT_NOISE_FLOOR;

	if (ppduinfo->nss)
		rxs->nss = ppduinfo->nss;

	if (ppduinfo->he_mu_flags) {
		rxs->flag |= RX_FLAG_RADIOTAP_HE_MU;
		rxs->encoding = RX_ENC_HE;
		ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he_mu));
		ath11k_dp_rx_update_radiotap_he_mu(ppduinfo, ptr);
	} else if (ppduinfo->he_flags) {
		rxs->flag |= RX_FLAG_RADIOTAP_HE;
		rxs->encoding = RX_ENC_HE;
		ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he));
		ath11k_dp_rx_update_radiotap_he(ppduinfo, ptr);
		rxs->rate_idx = ppduinfo->rate;
	} else if (ppduinfo->vht_flags) {
		rxs->encoding = RX_ENC_VHT;
		rxs->rate_idx = ppduinfo->rate;
	} else if (ppduinfo->ht_flags) {
		rxs->encoding = RX_ENC_HT;
		rxs->rate_idx = ppduinfo->rate;
	} else {
		rxs->encoding = RX_ENC_LEGACY;
		sband = &ar->mac.sbands[rxs->band];
		rxs->rate_idx = ath11k_mac_hw_rate_to_idx(sband, ppduinfo->rate,
							  ppduinfo->cck_flag);
	}

	rxs->mactime = ppduinfo->tsft;
}
static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
				    struct sk_buff *head_msdu,
				    struct hal_rx_mon_ppdu_info *ppduinfo,
				    struct sk_buff *tail_msdu,
				    struct napi_struct *napi)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct sk_buff *mon_skb, *skb_next, *header;
	struct ieee80211_rx_status *rxs = &dp->rx_status;
	bool fcs_err = false;

	mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu,
					      tail_msdu, rxs, &fcs_err);

	if (!mon_skb)
		goto mon_deliver_fail;

	header = mon_skb;

	rxs->flag = 0;

	if (fcs_err)
		rxs->flag = RX_FLAG_FAILED_FCS_CRC;

	do {
		skb_next = mon_skb->next;
		if (!skb_next)
			rxs->flag &= ~RX_FLAG_AMSDU_MORE;
		else
			rxs->flag |= RX_FLAG_AMSDU_MORE;

		if (mon_skb == header) {
			header = NULL;
			rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN;
		} else {
			rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
		}
		rxs->flag |= RX_FLAG_ONLY_MONITOR;
		ath11k_update_radiotap(ar, ppduinfo, mon_skb, rxs);

		ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb, rxs);
		mon_skb = skb_next;
	} while (mon_skb);
	rxs->flag = 0;

	return 0;

mon_deliver_fail:
	mon_skb = head_msdu;
	while (mon_skb) {
		skb_next = mon_skb->next;
		dev_kfree_skb_any(mon_skb);
		mon_skb = skb_next;
	}
	return -EINVAL;
}
/* The destination ring processing is stuck if the destination is not
 * moving while status ring moves 16 PPDU. The destination ring processing
 * skips this destination ring PPDU as a workaround.
 */
#define MON_DEST_RING_STUCK_MAX_CNT 16
static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
					  u32 quota, struct napi_struct *napi)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
	const struct ath11k_hw_hal_params *hal_params;
	void *ring_entry;
	void *mon_dst_srng;
	u32 ppdu_id;
	u32 rx_bufs_used = 0, ring_id;
	struct ath11k_pdev_mon_stats *rx_mon_stats;
	u32 npackets = 0;
	u32 mpdu_rx_bufs_used;

	if (ar->ab->hw_params.rxdma1_enable)
		ring_id = dp->rxdma_mon_dst_ring.ring_id;
	else
		ring_id = dp->rxdma_err_dst_ring[mac_id].ring_id;

	mon_dst_srng = &ar->ab->hal.srng_list[ring_id];

	spin_lock_bh(&pmon->mon_lock);

	ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);

	ppdu_id = pmon->mon_ppdu_info.ppdu_id;

	rx_mon_stats = &pmon->rx_mon_stats;

	while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
		struct sk_buff *head_msdu, *tail_msdu;

		head_msdu = NULL;
		tail_msdu = NULL;

		mpdu_rx_bufs_used = ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry,
							      &head_msdu, &tail_msdu,
							      &npackets, &ppdu_id);

		rx_bufs_used += mpdu_rx_bufs_used;

		if (mpdu_rx_bufs_used) {
			dp->mon_dest_ring_stuck_cnt = 0;
		} else {
			dp->mon_dest_ring_stuck_cnt++;
			rx_mon_stats->dest_mon_not_reaped++;
		}

		if (dp->mon_dest_ring_stuck_cnt > MON_DEST_RING_STUCK_MAX_CNT) {
			rx_mon_stats->dest_mon_stuck++;
			ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
				   "status ring ppdu_id=%d dest ring ppdu_id=%d mon_dest_ring_stuck_cnt=%d dest_mon_not_reaped=%u dest_mon_stuck=%u\n",
				   pmon->mon_ppdu_info.ppdu_id, ppdu_id,
				   dp->mon_dest_ring_stuck_cnt,
				   rx_mon_stats->dest_mon_not_reaped,
				   rx_mon_stats->dest_mon_stuck);
			pmon->mon_ppdu_info.ppdu_id = ppdu_id;
			continue;
		}

		if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
			pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
			ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
				   "dest_rx: new ppdu_id %x != status ppdu_id %x dest_mon_not_reaped = %u dest_mon_stuck = %u\n",
				   ppdu_id, pmon->mon_ppdu_info.ppdu_id,
				   rx_mon_stats->dest_mon_not_reaped,
				   rx_mon_stats->dest_mon_stuck);
			break;
		}

		if (head_msdu && tail_msdu) {
			ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu,
						 &pmon->mon_ppdu_info,
						 tail_msdu, napi);
			rx_mon_stats->dest_mpdu_done++;
		}

		ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
								mon_dst_srng);
	}
	ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);

	spin_unlock_bh(&pmon->mon_lock);

	if (rx_bufs_used) {
		rx_mon_stats->dest_ppdu_done++;
		hal_params = ar->ab->hw_params.hal_params;

		if (ar->ab->hw_params.rxdma1_enable)
			ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
						   &dp->rxdma_mon_buf_ring,
						   rx_bufs_used,
						   hal_params->rx_buf_rbm);
		else
			ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
						   &dp->rx_refill_buf_ring,
						   rx_bufs_used,
						   hal_params->rx_buf_rbm);
	}
}
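/* NAPI handler for the monitor status ring: parse status TLVs, trigger
 * destination-ring processing on PPDU completion and update peer RX stats.
 */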
int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
				    struct napi_struct *napi, int budget)
{
	struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
	enum hal_rx_mon_status hal_status;
	struct sk_buff *skb;
	struct sk_buff_head skb_list;
	struct ath11k_peer *peer;
	struct ath11k_sta *arsta;
	int num_buffs_reaped = 0;
	u32 rx_buf_sz;
	u16 log_type;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&ar->dp.mon_data;
	struct ath11k_pdev_mon_stats *rx_mon_stats = &pmon->rx_mon_stats;
	struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;

	__skb_queue_head_init(&skb_list);

	num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget,
							     &skb_list);
	if (!num_buffs_reaped)
		goto exit;

	memset(ppdu_info, 0, sizeof(*ppdu_info));
	ppdu_info->peer_id = HAL_INVALID_PEERID;

	while ((skb = __skb_dequeue(&skb_list))) {
		if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) {
			log_type = ATH11K_PKTLOG_TYPE_LITE_RX;
			rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
		} else if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) {
			log_type = ATH11K_PKTLOG_TYPE_RX_STATBUF;
			rx_buf_sz = DP_RX_BUFFER_SIZE;
		} else {
			log_type = ATH11K_PKTLOG_TYPE_INVALID;
			rx_buf_sz = 0;
		}

		if (log_type != ATH11K_PKTLOG_TYPE_INVALID)
			trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);

		memset(ppdu_info, 0, sizeof(*ppdu_info));
		ppdu_info->peer_id = HAL_INVALID_PEERID;
		hal_status = ath11k_hal_rx_parse_mon_status(ab, ppdu_info, skb);

		if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
		    pmon->mon_ppdu_status == DP_PPDU_STATUS_START &&
		    hal_status == HAL_TLV_STATUS_PPDU_DONE) {
			rx_mon_stats->status_ppdu_done++;
			pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
			if (!ab->hw_params.full_monitor_mode) {
				ath11k_dp_rx_mon_dest_process(ar, mac_id,
							      budget, napi);
				pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
			}
		}

		if (ppdu_info->peer_id == HAL_INVALID_PEERID ||
		    hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
			dev_kfree_skb_any(skb);
			continue;
		}

		spin_lock_bh(&ab->base_lock);
		peer = ath11k_peer_find_by_id(ab, ppdu_info->peer_id);

		if (!peer || !peer->sta) {
			ath11k_dbg(ab, ATH11K_DBG_DATA,
				   "failed to find the peer with peer_id %d\n",
				   ppdu_info->peer_id);
			goto next_skb;
		}

		arsta = ath11k_sta_to_arsta(peer->sta);
		ath11k_dp_rx_update_peer_stats(arsta, ppdu_info);

		if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr))
			trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);

next_skb:
		spin_unlock_bh(&ab->base_lock);

		dev_kfree_skb_any(skb);
		memset(ppdu_info, 0, sizeof(*ppdu_info));
		ppdu_info->peer_id = HAL_INVALID_PEERID;
	}
exit:
	return num_buffs_reaped;
}
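/* Full monitor mode variant of the MPDU pop: walk the SW monitor ring
 * entry and collect the MSDUs belonging to one MPDU.
 */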
static u32
ath11k_dp_rx_full_mon_mpdu_pop(struct ath11k *ar,
			       void *ring_entry, struct sk_buff **head_msdu,
			       struct sk_buff **tail_msdu,
			       struct hal_sw_mon_ring_entries *sw_mon_entries)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = &dp->mon_data;
	struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
	struct sk_buff *msdu = NULL, *last = NULL;
	struct hal_sw_monitor_ring *sw_desc = ring_entry;
	struct hal_rx_msdu_list msdu_list;
	struct hal_rx_desc *rx_desc;
	struct ath11k_skb_rxcb *rxcb;
	void *rx_msdu_link_desc;
	void *p_buf_addr_info, *p_last_buf_addr_info;
	int buf_id, i = 0;
	u32 rx_buf_size, rx_pkt_offset, l2_hdr_offset;
	u32 rx_bufs_used = 0, msdu_cnt = 0;
	u32 total_len = 0, frag_len = 0, sw_cookie;
	u16 num_msdus = 0;
	u8 rxdma_err, rbm;
	bool is_frag, is_first_msdu;
	bool drop_mpdu = false;

	ath11k_hal_rx_sw_mon_ring_buf_paddr_get(ring_entry, sw_mon_entries);

	sw_cookie = sw_mon_entries->mon_dst_sw_cookie;
	sw_mon_entries->end_of_ppdu = false;
	sw_mon_entries->drop_ppdu = false;
	p_last_buf_addr_info = sw_mon_entries->dst_buf_addr_info;
	msdu_cnt = sw_mon_entries->msdu_cnt;

	sw_mon_entries->end_of_ppdu =
		FIELD_GET(HAL_SW_MON_RING_INFO0_END_OF_PPDU, sw_desc->info0);
	if (sw_mon_entries->end_of_ppdu)
		return rx_bufs_used;

	if (FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_PUSH_REASON,
		      sw_desc->info0) ==
	    HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
		rxdma_err =
			FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_ERROR_CODE,
				  sw_desc->info0);
		if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
		    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
		    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
			pmon->rx_mon_stats.dest_mpdu_drop++;
			drop_mpdu = true;
		}
	}

	is_frag = false;
	is_first_msdu = true;

	do {
		rx_msdu_link_desc =
			(u8 *)pmon->link_desc_banks[sw_cookie].vaddr +
			(sw_mon_entries->mon_dst_paddr -
			 pmon->link_desc_banks[sw_cookie].paddr);

		ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
					    &num_msdus);

		for (i = 0; i < num_msdus; i++) {
			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
					   msdu_list.sw_cookie[i]);

			spin_lock_bh(&rx_ring->idr_lock);
			msdu = idr_find(&rx_ring->bufs_idr, buf_id);
			if (!msdu) {
				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
					   "full mon msdu_pop: invalid buf_id %d\n",
					   buf_id);
				spin_unlock_bh(&rx_ring->idr_lock);
				break;
			}
			idr_remove(&rx_ring->bufs_idr, buf_id);
			spin_unlock_bh(&rx_ring->idr_lock);

			rxcb = ATH11K_SKB_RXCB(msdu);
			if (!rxcb->unmapped) {
				dma_unmap_single(ar->ab->dev, rxcb->paddr,
						 msdu->len +
						 skb_tailroom(msdu),
						 DMA_FROM_DEVICE);
				rxcb->unmapped = 1;
			}

			if (drop_mpdu) {
				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
					   "full mon: i %d drop msdu %p *ppdu_id %x\n",
					   i, msdu, sw_mon_entries->ppdu_id);
				dev_kfree_skb_any(msdu);
				msdu_cnt--;
				goto next_msdu;
			}

			rx_desc = (struct hal_rx_desc *)msdu->data;

			rx_pkt_offset = sizeof(struct hal_rx_desc);
			l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc);

			if (is_first_msdu) {
				if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) {
					drop_mpdu = true;
					dev_kfree_skb_any(msdu);
					msdu = NULL;
					goto next_msdu;
				}
				is_first_msdu = false;
			}

			ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
						  &is_frag, &total_len,
						  &frag_len, &msdu_cnt);

			rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;

			ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size);

			if (!(*head_msdu))
				*head_msdu = msdu;
			else if (last)
				last->next = msdu;

			last = msdu;
next_msdu:
			rx_bufs_used++;
		}

		ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc,
						    &sw_mon_entries->mon_dst_paddr,
						    &sw_mon_entries->mon_dst_sw_cookie,
						    &rbm,
						    &p_buf_addr_info);

		if (ath11k_dp_rx_monitor_link_desc_return(ar,
							  p_last_buf_addr_info,
							  dp->mac_id))
			ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
				   "full mon: dp_rx_monitor_link_desc_return failed\n");

		p_last_buf_addr_info = p_buf_addr_info;

	} while (sw_mon_entries->mon_dst_paddr && msdu_cnt);

	if (last)
		last->next = NULL;

	*tail_msdu = msdu;

	return rx_bufs_used;
}
static int ath11k_dp_rx_full_mon_prepare_mpdu(struct ath11k_dp *dp,
					      struct dp_full_mon_mpdu *mon_mpdu,
					      struct sk_buff *head,
					      struct sk_buff *tail)
{
	mon_mpdu = kzalloc(sizeof(*mon_mpdu), GFP_ATOMIC);
	if (!mon_mpdu)
		return -ENOMEM;

	list_add_tail(&mon_mpdu->list, &dp->dp_full_mon_mpdu_list);
	mon_mpdu->head = head;
	mon_mpdu->tail = tail;

	return 0;
}
static void ath11k_dp_rx_full_mon_drop_ppdu(struct ath11k_dp *dp,
					    struct dp_full_mon_mpdu *mon_mpdu)
{
	struct dp_full_mon_mpdu *tmp;
	struct sk_buff *tmp_msdu, *skb_next;

	if (list_empty(&dp->dp_full_mon_mpdu_list))
		return;

	list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) {
		list_del(&mon_mpdu->list);

		tmp_msdu = mon_mpdu->head;
		while (tmp_msdu) {
			skb_next = tmp_msdu->next;
			dev_kfree_skb_any(tmp_msdu);
			tmp_msdu = skb_next;
		}

		kfree(mon_mpdu);
	}
}
static int ath11k_dp_rx_full_mon_deliver_ppdu(struct ath11k *ar,
					       int mac_id,
					       struct ath11k_mon_data *pmon,
					       struct napi_struct *napi)
{
	struct ath11k_pdev_mon_stats *rx_mon_stats;
	struct dp_full_mon_mpdu *tmp;
	struct dp_full_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
	struct sk_buff *head_msdu, *tail_msdu;
	struct ath11k_base *ab = ar->ab;
	struct ath11k_dp *dp = &ab->dp;
	int ret = 0;

	rx_mon_stats = &pmon->rx_mon_stats;

	list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) {
		list_del(&mon_mpdu->list);
		head_msdu = mon_mpdu->head;
		tail_msdu = mon_mpdu->tail;
		if (head_msdu && tail_msdu) {
			ret = ath11k_dp_rx_mon_deliver(ar, mac_id, head_msdu,
						       &pmon->mon_ppdu_info,
						       tail_msdu, napi);
			rx_mon_stats->dest_mpdu_done++;
			ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "full mon: deliver ppdu\n");
		}
		kfree(mon_mpdu);
	}

	return ret;
}
static int
ath11k_dp_rx_process_full_mon_status_ring(struct ath11k_base *ab, int mac_id,
					   struct napi_struct *napi, int budget)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = &dp->mon_data;
	struct hal_sw_mon_ring_entries *sw_mon_entries;
	int quota = 0, work = 0, count;

	sw_mon_entries = &pmon->sw_mon_entries;

	while (pmon->hold_mon_dst_ring) {
		quota = ath11k_dp_rx_process_mon_status(ab, mac_id,
							napi, 1);
		if (pmon->buf_state == DP_MON_STATUS_MATCH) {
			count = sw_mon_entries->status_buf_count;
			if (count > 1)
				quota += ath11k_dp_rx_process_mon_status(ab, mac_id,
									 napi, count);

			ath11k_dp_rx_full_mon_deliver_ppdu(ar, dp->mac_id,
							   pmon, napi);
			pmon->hold_mon_dst_ring = false;
		} else if (!pmon->mon_status_paddr ||
			   pmon->buf_state == DP_MON_STATUS_LEAD) {
			sw_mon_entries->drop_ppdu = true;
			pmon->hold_mon_dst_ring = false;
		}

		if (!quota)
			break;

		work += quota;
	}

	if (sw_mon_entries->drop_ppdu)
		ath11k_dp_rx_full_mon_drop_ppdu(&ab->dp, pmon->mon_mpdu);

	return work;
}
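/* Full monitor mode NAPI handler: reap the monitor destination ring first,
 * then the status ring, and deliver or drop the queued PPDUs accordingly.
 */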
static int ath11k_dp_full_mon_process_rx(struct ath11k_base *ab, int mac_id,
					 struct napi_struct *napi, int budget)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = &dp->mon_data;
	struct hal_sw_mon_ring_entries *sw_mon_entries;
	struct ath11k_pdev_mon_stats *rx_mon_stats;
	struct sk_buff *head_msdu, *tail_msdu;
	void *mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];
	void *ring_entry;
	u32 rx_bufs_used = 0, mpdu_rx_bufs_used;
	int quota = 0, ret;
	bool break_dst_ring = false;

	spin_lock_bh(&pmon->mon_lock);

	sw_mon_entries = &pmon->sw_mon_entries;
	rx_mon_stats = &pmon->rx_mon_stats;

	if (pmon->hold_mon_dst_ring) {
		spin_unlock_bh(&pmon->mon_lock);
		goto reap_status_ring;
	}

	ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
	while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
		head_msdu = NULL;
		tail_msdu = NULL;

		mpdu_rx_bufs_used = ath11k_dp_rx_full_mon_mpdu_pop(ar, ring_entry,
								   &head_msdu,
								   &tail_msdu,
								   sw_mon_entries);
		rx_bufs_used += mpdu_rx_bufs_used;

		if (!sw_mon_entries->end_of_ppdu) {
			if (head_msdu) {
				ret = ath11k_dp_rx_full_mon_prepare_mpdu(&ab->dp,
									 pmon->mon_mpdu,
									 head_msdu,
									 tail_msdu);
				if (ret)
					break_dst_ring = true;
			}

			goto next_entry;
		} else {
			if (!sw_mon_entries->ppdu_id &&
			    !sw_mon_entries->mon_status_paddr) {
				break_dst_ring = true;
				goto next_entry;
			}
		}

		rx_mon_stats->dest_ppdu_done++;
		pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
		pmon->buf_state = DP_MON_STATUS_LAG;
		pmon->mon_status_paddr = sw_mon_entries->mon_status_paddr;
		pmon->hold_mon_dst_ring = true;
next_entry:
		ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
								mon_dst_srng);
		if (break_dst_ring)
			break;
	}

	ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
	spin_unlock_bh(&pmon->mon_lock);

	if (rx_bufs_used) {
		ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
					   &dp->rxdma_mon_buf_ring,
					   rx_bufs_used,
					   HAL_RX_BUF_RBM_SW3_BM);
	}

reap_status_ring:
	quota = ath11k_dp_rx_process_full_mon_status_ring(ab, mac_id,
							  napi, budget);

	return quota;
}
int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
				   struct napi_struct *napi, int budget)
{
	struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
	int ret = 0;

	if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
	    ab->hw_params.full_monitor_mode)
		ret = ath11k_dp_full_mon_process_rx(ab, mac_id, napi, budget);
	else
		ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget);

	return ret;
}
static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;

	skb_queue_head_init(&pmon->rx_status_q);

	pmon->mon_ppdu_status = DP_PPDU_STATUS_START;

	memset(&pmon->rx_mon_stats, 0,
	       sizeof(pmon->rx_mon_stats));
	return 0;
}
int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = &dp->mon_data;
	struct hal_srng *mon_desc_srng = NULL;
	struct dp_srng *dp_srng;
	int ret = 0;
	u32 n_link_desc = 0;

	ret = ath11k_dp_rx_pdev_mon_status_attach(ar);
	if (ret) {
		ath11k_warn(ar->ab, "pdev_mon_status_attach() failed");
		return ret;
	}

	/* if rxdma1_enable is false, no need to setup
	 * rxdma_mon_desc_ring.
	 */
	if (!ar->ab->hw_params.rxdma1_enable)
		return 0;

	dp_srng = &dp->rxdma_mon_desc_ring;
	n_link_desc = dp_srng->size /
		ath11k_hal_srng_get_entrysize(ar->ab, HAL_RXDMA_MONITOR_DESC);
	mon_desc_srng =
		&ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id];

	ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks,
					HAL_RXDMA_MONITOR_DESC, mon_desc_srng,
					n_link_desc);
	if (ret) {
		ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed");
		return ret;
	}
	pmon->mon_last_linkdesc_paddr = 0;
	pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
	spin_lock_init(&pmon->mon_lock);

	return 0;
}
static int ath11k_dp_mon_link_free(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = &dp->mon_data;

	ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks,
				    HAL_RXDMA_MONITOR_DESC,
				    &dp->rxdma_mon_desc_ring);
	return 0;
}
int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar)
{
	ath11k_dp_mon_link_free(ar);
	return 0;
}
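/* Pktlog helpers: arm the monitor reap timer on start and drain the
 * monitor rings on stop.
 */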
int ath11k_dp_rx_pktlog_start(struct ath11k_base *ab)
{
	/* start reap timer */
	mod_timer(&ab->mon_reap_timer,
		  jiffies + msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));

	return 0;
}
int ath11k_dp_rx_pktlog_stop(struct ath11k_base *ab, bool stop_timer)
{
	int ret;

	if (stop_timer)
		del_timer_sync(&ab->mon_reap_timer);

	/* reap all the monitor related rings */
	ret = ath11k_dp_purge_mon_ring(ab);
	if (ret) {
		ath11k_warn(ab, "failed to purge dp mon ring: %d\n", ret);
		return ret;
	}

	return 0;
}