// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 */

#include <linux/ieee80211.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <crypto/hash.h>
#include "core.h"
#include "debug.h"
#include "debugfs_htt_stats.h"
#include "debugfs_sta.h"
#include "hal_desc.h"
#include "hw.h"
#include "dp_rx.h"
#include "hal_rx.h"
#include "dp_tx.h"
#include "peer.h"

#define ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)
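
/* The helpers below decode individual fields of the HAL RX descriptor
 * (struct hal_rx_desc), the TLV block the hardware prepends to every
 * received buffer. Each accessor byte-swaps the relevant info word and
 * extracts a single bitfield with FIELD_GET().
 */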
static u8 *ath11k_dp_rx_h_80211_hdr(struct hal_rx_desc *desc)
{
	return desc->hdr_status;
}

static enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct hal_rx_desc *desc)
{
	if (!(__le32_to_cpu(desc->mpdu_start.info1) &
	    RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID))
		return HAL_ENCRYPT_TYPE_OPEN;

	return FIELD_GET(RX_MPDU_START_INFO2_ENC_TYPE,
			 __le32_to_cpu(desc->mpdu_start.info2));
}

static u8 ath11k_dp_rx_h_msdu_start_decap_type(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
			 __le32_to_cpu(desc->msdu_start.info2));
}

static u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MSDU_START_INFO2_MESH_CTRL_PRESENT,
			 __le32_to_cpu(desc->msdu_start.info2));
}

static bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct hal_rx_desc *desc)
{
	return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID,
			   __le32_to_cpu(desc->mpdu_start.info1));
}

static bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct hal_rx_desc *desc)
{
	return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_FCTRL_VALID,
			   __le32_to_cpu(desc->mpdu_start.info1));
}

static bool ath11k_dp_rx_h_mpdu_start_more_frags(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE);
	return ieee80211_has_morefrags(hdr->frame_control);
}

static u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE);
	return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
}

static u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_NUM,
			 __le32_to_cpu(desc->mpdu_start.info1));
}

static bool ath11k_dp_rx_h_attn_msdu_done(struct hal_rx_desc *desc)
{
	return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE,
			   __le32_to_cpu(desc->attention.info2));
}

static bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct hal_rx_desc *desc)
{
	return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL,
			   __le32_to_cpu(desc->attention.info1));
}

static bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct hal_rx_desc *desc)
{
	return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL,
			   __le32_to_cpu(desc->attention.info1));
}

static bool ath11k_dp_rx_h_attn_is_decrypted(struct hal_rx_desc *desc)
{
	return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE,
			  __le32_to_cpu(desc->attention.info2)) ==
		RX_DESC_DECRYPT_STATUS_CODE_OK);
}

static u32 ath11k_dp_rx_h_attn_mpdu_err(struct hal_rx_desc *desc)
{
	u32 info = __le32_to_cpu(desc->attention.info1);
	u32 errmap = 0;

	if (info & RX_ATTENTION_INFO1_FCS_ERR)
		errmap |= DP_RX_MPDU_ERR_FCS;

	if (info & RX_ATTENTION_INFO1_DECRYPT_ERR)
		errmap |= DP_RX_MPDU_ERR_DECRYPT;

	if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR)
		errmap |= DP_RX_MPDU_ERR_TKIP_MIC;

	if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR)
		errmap |= DP_RX_MPDU_ERR_AMSDU_ERR;

	if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR)
		errmap |= DP_RX_MPDU_ERR_OVERFLOW;

	if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR)
		errmap |= DP_RX_MPDU_ERR_MSDU_LEN;

	if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR)
		errmap |= DP_RX_MPDU_ERR_MPDU_LEN;

	return errmap;
}

static u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH,
			 __le32_to_cpu(desc->msdu_start.info1));
}

static u8 ath11k_dp_rx_h_msdu_start_sgi(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MSDU_START_INFO3_SGI,
			 __le32_to_cpu(desc->msdu_start.info3));
}

static u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS,
			 __le32_to_cpu(desc->msdu_start.info3));
}

static u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW,
			 __le32_to_cpu(desc->msdu_start.info3));
}

static u32 ath11k_dp_rx_h_msdu_start_freq(struct hal_rx_desc *desc)
{
	return __le32_to_cpu(desc->msdu_start.phy_meta_data);
}

static u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE,
			 __le32_to_cpu(desc->msdu_start.info3));
}

static u8 ath11k_dp_rx_h_msdu_start_nss(struct hal_rx_desc *desc)
{
	u8 mimo_ss_bitmap = FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP,
				      __le32_to_cpu(desc->msdu_start.info3));

	return hweight8(mimo_ss_bitmap);
}

static u8 ath11k_dp_rx_h_mpdu_start_tid(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MPDU_START_INFO2_TID,
			 __le32_to_cpu(desc->mpdu_start.info2));
}

static u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct hal_rx_desc *desc)
{
	return __le16_to_cpu(desc->mpdu_start.sw_peer_id);
}

static u8 ath11k_dp_rx_h_msdu_end_l3pad(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING,
			 __le32_to_cpu(desc->msdu_end.info2));
}

static bool ath11k_dp_rx_h_msdu_end_first_msdu(struct hal_rx_desc *desc)
{
	return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU,
			   __le32_to_cpu(desc->msdu_end.info2));
}

static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct hal_rx_desc *desc)
{
	return !!FIELD_GET(RX_MSDU_END_INFO2_LAST_MSDU,
			   __le32_to_cpu(desc->msdu_end.info2));
}

static void ath11k_dp_rx_desc_end_tlv_copy(struct hal_rx_desc *fdesc,
					   struct hal_rx_desc *ldesc)
{
	memcpy((u8 *)&fdesc->msdu_end, (u8 *)&ldesc->msdu_end,
	       sizeof(struct rx_msdu_end));
	memcpy((u8 *)&fdesc->attention, (u8 *)&ldesc->attention,
	       sizeof(struct rx_attention));
	memcpy((u8 *)&fdesc->mpdu_end, (u8 *)&ldesc->mpdu_end,
	       sizeof(struct rx_mpdu_end));
}

static u32 ath11k_dp_rxdesc_get_mpdulen_err(struct hal_rx_desc *rx_desc)
{
	struct rx_attention *rx_attn;

	rx_attn = &rx_desc->attention;

	return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR,
			 __le32_to_cpu(rx_attn->info1));
}

static u32 ath11k_dp_rxdesc_get_decap_format(struct hal_rx_desc *rx_desc)
{
	struct rx_msdu_start *rx_msdu_start;

	rx_msdu_start = &rx_desc->msdu_start;

	return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
			 __le32_to_cpu(rx_msdu_start->info2));
}

static u8 *ath11k_dp_rxdesc_get_80211hdr(struct hal_rx_desc *rx_desc)
{
	u8 *rx_pkt_hdr;

	rx_pkt_hdr = &rx_desc->msdu_payload[0];

	return rx_pkt_hdr;
}

static bool ath11k_dp_rxdesc_mpdu_valid(struct hal_rx_desc *rx_desc)
{
	u32 tlv_tag;

	tlv_tag = FIELD_GET(HAL_TLV_HDR_TAG,
			    __le32_to_cpu(rx_desc->mpdu_start_tag));

	return tlv_tag == HAL_RX_MPDU_START;
}

static u32 ath11k_dp_rxdesc_get_ppduid(struct hal_rx_desc *rx_desc)
{
	return __le16_to_cpu(rx_desc->mpdu_start.phy_ppdu_id);
}
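
/* Timer callback that periodically reaps the monitor rings of every
 * pdev and re-arms itself. It is set up in ath11k_dp_rx_pdev_srng_alloc()
 * for targets with hw_params.rxdma1_enable disabled (e.g. QCA6390),
 * which lack the dedicated rxdma1 monitor machinery.
 */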
static void ath11k_dp_service_mon_ring(struct timer_list *t)
{
	struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer);
	int i;

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++)
		ath11k_dp_rx_process_mon_rings(ab, i, NULL, DP_MON_SERVICE_BUDGET);

	mod_timer(&ab->mon_reap_timer, jiffies +
		  msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
}

static int ath11k_dp_purge_mon_ring(struct ath11k_base *ab)
{
	int i, reaped = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS);

	do {
		for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++)
			reaped += ath11k_dp_rx_process_mon_rings(ab, i,
								 NULL,
								 DP_MON_SERVICE_BUDGET);

		/* nothing more to reap */
		if (reaped < DP_MON_SERVICE_BUDGET)
			return 0;
	} while (time_before(jiffies, timeout));

	ath11k_warn(ab, "dp mon ring purge timeout\n");

	return -ETIMEDOUT;
}

/* Returns number of Rx buffers replenished */
int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
			       struct dp_rxdma_ring *rx_ring,
			       int req_entries,
			       enum hal_rx_buf_return_buf_manager mgr)
{
	struct hal_srng *srng;
	u32 *desc;
	struct sk_buff *skb;
	int num_free;
	int num_remain;
	int buf_id;
	u32 cookie;
	dma_addr_t paddr;

	req_entries = min(req_entries, rx_ring->bufs_max);

	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
	if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
		req_entries = num_free;

	req_entries = min(num_free, req_entries);
	num_remain = req_entries;

	while (num_remain > 0) {
		skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
				    DP_RX_BUFFER_ALIGN_SIZE);
		if (!skb)
			break;

		if (!IS_ALIGNED((unsigned long)skb->data,
				DP_RX_BUFFER_ALIGN_SIZE)) {
			skb_pull(skb,
				 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
				 skb->data);
		}

		paddr = dma_map_single(ab->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(ab->dev, paddr))
			goto fail_free_skb;

		spin_lock_bh(&rx_ring->idr_lock);
		buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
				   rx_ring->bufs_max * 3, GFP_ATOMIC);
		spin_unlock_bh(&rx_ring->idr_lock);
		if (buf_id < 0)
			goto fail_dma_unmap;

		desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
		if (!desc)
			goto fail_idr_remove;

		ATH11K_SKB_RXCB(skb)->paddr = paddr;

		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

		num_remain--;

		ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;

fail_idr_remove:
	spin_lock_bh(&rx_ring->idr_lock);
	idr_remove(&rx_ring->bufs_idr, buf_id);
	spin_unlock_bh(&rx_ring->idr_lock);
fail_dma_unmap:
	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
			 DMA_FROM_DEVICE);
fail_free_skb:
	dev_kfree_skb_any(skb);

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;
}
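
/* Teardown counterpart of the replenish path: walk the buffer IDR of a
 * rxdma ring, unmap and free every skb still posted to the ring, then
 * destroy the IDR itself.
 */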
static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar,
					 struct dp_rxdma_ring *rx_ring)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct sk_buff *skb;
	int buf_id;

	spin_lock_bh(&rx_ring->idr_lock);
	idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
		idr_remove(&rx_ring->bufs_idr, buf_id);
		/* TODO: Understand where internal driver does this dma_unmap
		 * of rxdma_buf_ring buffers.
		 */
		dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}

	idr_destroy(&rx_ring->bufs_idr);
	spin_unlock_bh(&rx_ring->idr_lock);

	/* if rxdma1_enable is false, mon_status_refill_ring
	 * isn't setup, so don't clean.
	 */
	if (!ar->ab->hw_params.rxdma1_enable)
		return 0;

	rx_ring = &dp->rx_mon_status_refill_ring[0];

	spin_lock_bh(&rx_ring->idr_lock);
	idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
		idr_remove(&rx_ring->bufs_idr, buf_id);
		/* XXX: Understand where internal driver does this dma_unmap
		 * of rxdma_mon_status_refill_ring buffers.
		 */
		dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb), DMA_BIDIRECTIONAL);
		dev_kfree_skb_any(skb);
	}

	idr_destroy(&rx_ring->bufs_idr);
	spin_unlock_bh(&rx_ring->idr_lock);

	return 0;
}

static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
	int i;

	ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);

	rx_ring = &dp->rxdma_mon_buf_ring;
	ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		rx_ring = &dp->rx_mon_status_refill_ring[i];
		ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
	}

	return 0;
}

static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar,
					  struct dp_rxdma_ring *rx_ring,
					  u32 ringtype)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	int num_entries;

	num_entries = rx_ring->refill_buf_ring.size /
		      ath11k_hal_srng_get_entrysize(ar->ab, ringtype);

	rx_ring->bufs_max = num_entries;
	ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries,
				   HAL_RX_BUF_RBM_SW3_BM);
	return 0;
}

static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
	int i;

	ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF);

	if (ar->ab->hw_params.rxdma1_enable) {
		rx_ring = &dp->rxdma_mon_buf_ring;
		ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF);
	}

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		rx_ring = &dp->rx_mon_status_refill_ring[i];
		ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS);
	}

	return 0;
}

static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	int i;

	ath11k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		if (ab->hw_params.rx_mac_buf_ring)
			ath11k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);

		ath11k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);
		ath11k_dp_srng_cleanup(ab,
				       &dp->rx_mon_status_refill_ring[i].refill_buf_ring);
	}

	ath11k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
}

void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;

	for (i = 0; i < DP_REO_DST_RING_MAX; i++)
		ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
}

int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int ret;
	int i;

	for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
		ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
					   HAL_REO_DST, i, 0,
					   DP_REO_DST_RING_SIZE);
		if (ret) {
			ath11k_warn(ab, "failed to setup reo_dst_ring\n");
			goto err_reo_cleanup;
		}
	}

	return 0;

err_reo_cleanup:
	ath11k_dp_pdev_reo_cleanup(ab);

	return ret;
}
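
/* Allocate all per-pdev RX SRNGs: the refill buffer ring, the optional
 * per-MAC buffer rings, the rxdma error destination rings, the monitor
 * status rings and, when rxdma1 is enabled, the full monitor
 * buf/dst/desc rings.
 */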
static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	struct dp_srng *srng = NULL;
	int i;
	int ret;

	ret = ath11k_dp_srng_setup(ar->ab,
				   &dp->rx_refill_buf_ring.refill_buf_ring,
				   HAL_RXDMA_BUF, 0,
				   dp->mac_id, DP_RXDMA_BUF_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n");
		return ret;
	}

	if (ar->ab->hw_params.rx_mac_buf_ring) {
		for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
			ret = ath11k_dp_srng_setup(ar->ab,
						   &dp->rx_mac_buf_ring[i],
						   HAL_RXDMA_BUF, 1,
						   dp->mac_id + i, 1024);
			if (ret) {
				ath11k_warn(ar->ab, "failed to setup rx_mac_buf_ring %d\n",
					    i);
				return ret;
			}
		}
	}

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring[i],
					   HAL_RXDMA_DST, 0, dp->mac_id + i,
					   DP_RXDMA_ERR_DST_RING_SIZE);
		if (ret) {
			ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring %d\n", i);
			return ret;
		}
	}

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
		ret = ath11k_dp_srng_setup(ar->ab,
					   srng,
					   HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id + i,
					   DP_RXDMA_MON_STATUS_RING_SIZE);
		if (ret) {
			ath11k_warn(ar->ab,
				    "failed to setup rx_mon_status_refill_ring %d\n", i);
			return ret;
		}
	}

	/* if rxdma1_enable is false, then it doesn't need
	 * to setup rxdma_mon_buf_ring, rxdma_mon_dst_ring
	 * and rxdma_mon_desc_ring.
	 * init reap timer for QCA6390.
	 */
	if (!ar->ab->hw_params.rxdma1_enable) {
		/* init mon status buffer reap timer */
		timer_setup(&ar->ab->mon_reap_timer,
			    ath11k_dp_service_mon_ring, 0);
		return 0;
	}

	ret = ath11k_dp_srng_setup(ar->ab,
				   &dp->rxdma_mon_buf_ring.refill_buf_ring,
				   HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_BUF_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_BUF\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring,
				   HAL_RXDMA_MONITOR_DST, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_DST_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_DST\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring,
				   HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_DESC_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_DESC\n");
		return ret;
	}

	return 0;
}

void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct dp_reo_cmd *cmd, *tmp;
	struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache;

	spin_lock_bh(&dp->reo_cmd_lock);
	list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
		list_del(&cmd->list);
		dma_unmap_single(ab->dev, cmd->data.paddr,
				 cmd->data.size, DMA_BIDIRECTIONAL);
		kfree(cmd->data.vaddr);
		kfree(cmd);
	}

	list_for_each_entry_safe(cmd_cache, tmp_cache,
				 &dp->reo_cmd_cache_flush_list, list) {
		list_del(&cmd_cache->list);
		dp->reo_cmd_cache_flush_count--;
		dma_unmap_single(ab->dev, cmd_cache->data.paddr,
				 cmd_cache->data.size, DMA_BIDIRECTIONAL);
		kfree(cmd_cache->data.vaddr);
		kfree(cmd_cache);
	}
	spin_unlock_bh(&dp->reo_cmd_lock);
}

static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx,
				   enum hal_reo_cmd_status status)
{
	struct dp_rx_tid *rx_tid = ctx;

	if (status != HAL_REO_CMD_SUCCESS)
		ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
			    rx_tid->tid, status);

	dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);
}
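
/* The REO hardware caches queue descriptors, so before the host memory
 * backing a deleted rx tid can be freed the cached copy must be
 * invalidated. The descriptor is flushed in desc_sz chunks; only the
 * final flush requests a status callback (ath11k_dp_reo_cmd_free),
 * which releases the host memory.
 */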
static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab,
				      struct dp_rx_tid *rx_tid)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	unsigned long tot_desc_sz, desc_sz;
	int ret;

	tot_desc_sz = rx_tid->size;
	desc_sz = ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);

	while (tot_desc_sz > desc_sz) {
		tot_desc_sz -= desc_sz;
		cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
		ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
						HAL_REO_CMD_FLUSH_CACHE, &cmd,
						NULL);
		if (ret)
			ath11k_warn(ab,
				    "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
				    rx_tid->tid, ret);
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
	ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
					HAL_REO_CMD_FLUSH_CACHE,
					&cmd, ath11k_dp_reo_cmd_free);
	if (ret) {
		ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
			   rx_tid->tid, ret);
		dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
				 DMA_BIDIRECTIONAL);
		kfree(rx_tid->vaddr);
	}
}

static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
				      enum hal_reo_cmd_status status)
{
	struct ath11k_base *ab = dp->ab;
	struct dp_rx_tid *rx_tid = ctx;
	struct dp_reo_cache_flush_elem *elem, *tmp;

	if (status == HAL_REO_CMD_DRAIN) {
		goto free_desc;
	} else if (status != HAL_REO_CMD_SUCCESS) {
		/* Shouldn't happen! Cleanup in case of other failure? */
		ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
			    rx_tid->tid, status);
		return;
	}

	elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
	if (!elem)
		goto free_desc;

	elem->ts = jiffies;
	memcpy(&elem->data, rx_tid, sizeof(*rx_tid));

	spin_lock_bh(&dp->reo_cmd_lock);
	list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
	dp->reo_cmd_cache_flush_count++;

	/* Flush and invalidate aged REO desc from HW cache */
	list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
				 list) {
		if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD ||
		    time_after(jiffies, elem->ts +
			       msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) {
			list_del(&elem->list);
			dp->reo_cmd_cache_flush_count--;
			spin_unlock_bh(&dp->reo_cmd_lock);

			ath11k_dp_reo_cache_flush(ab, &elem->data);
			kfree(elem);
			spin_lock_bh(&dp->reo_cmd_lock);
		}
	}
	spin_unlock_bh(&dp->reo_cmd_lock);

	return;
free_desc:
	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);
}

void ath11k_peer_rx_tid_delete(struct ath11k *ar,
			       struct ath11k_peer *peer, u8 tid)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	int ret;

	if (!rx_tid->active)
		return;

	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
					ath11k_dp_rx_tid_del_func);
	if (ret) {
		ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
			   tid, ret);
		dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
				 DMA_BIDIRECTIONAL);
		kfree(rx_tid->vaddr);
	}

	rx_tid->active = false;
}

static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
					 u32 *link_desc,
					 enum hal_wbm_rel_bm_act action)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng;
	u32 *desc;
	int ret = 0;

	srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOBUFS;
		goto exit;
	}

	ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc,
					 action);

exit:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return ret;
}

static void ath11k_dp_rx_frags_cleanup(struct dp_rx_tid *rx_tid, bool rel_link_desc)
{
	struct ath11k_base *ab = rx_tid->ab;

	lockdep_assert_held(&ab->base_lock);

	if (rx_tid->dst_ring_desc) {
		if (rel_link_desc)
			ath11k_dp_rx_link_desc_return(ab, (u32 *)rx_tid->dst_ring_desc,
						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
		kfree(rx_tid->dst_ring_desc);
		rx_tid->dst_ring_desc = NULL;
	}

	rx_tid->cur_sn = 0;
	rx_tid->last_frag_no = 0;
	rx_tid->rx_frag_bitmap = 0;
	__skb_queue_purge(&rx_tid->rx_frags);
}

void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
{
	struct dp_rx_tid *rx_tid;
	int i;

	lockdep_assert_held(&ar->ab->base_lock);

	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
		rx_tid = &peer->rx_tid[i];

		ath11k_peer_rx_tid_delete(ar, peer, i);
		ath11k_dp_rx_frags_cleanup(rx_tid, true);

		spin_unlock_bh(&ar->ab->base_lock);
		del_timer_sync(&rx_tid->frag_timer);
		spin_lock_bh(&ar->ab->base_lock);
	}
}

static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar,
					 struct ath11k_peer *peer,
					 struct dp_rx_tid *rx_tid,
					 u32 ba_win_sz, u16 ssn,
					 bool update_ssn)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	int ret;

	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
	cmd.ba_window_size = ba_win_sz;

	if (update_ssn) {
		cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
		cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn);
	}

	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
					NULL);
	if (ret) {
		ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
			    rx_tid->tid, ret);
		return ret;
	}

	rx_tid->ba_win_sz = ba_win_sz;

	return 0;
}

static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
				      const u8 *peer_mac, int vdev_id, u8 tid)
{
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to free up rx tid mem\n");
		goto unlock_exit;
	}

	rx_tid = &peer->rx_tid[tid];
	if (!rx_tid->active)
		goto unlock_exit;

	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);

	rx_tid->active = false;

unlock_exit:
	spin_unlock_bh(&ab->base_lock);
}
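
/* Set up (or update) the REO reorder queue for one peer/tid: allocate
 * an aligned hardware queue descriptor, DMA-map it and point the
 * firmware at it via WMI. For an already-active tid only the queue
 * parameters are updated.
 */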
int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
			     u8 tid, u32 ba_win_sz, u16 ssn,
			     enum hal_pn_type pn_type)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;
	u32 hw_desc_sz;
	u32 *addr_aligned;
	void *vaddr;
	dma_addr_t paddr;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to set up rx tid\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	rx_tid = &peer->rx_tid[tid];
	/* Update the tid queue if it is already setup */
	if (rx_tid->active) {
		paddr = rx_tid->paddr;
		ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid,
						    ba_win_sz, ssn, true);
		spin_unlock_bh(&ab->base_lock);
		if (ret) {
			ath11k_warn(ab, "failed to update reo for rx tid %d\n", tid);
			return ret;
		}

		ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
							     peer_mac, paddr,
							     tid, 1, ba_win_sz);
		if (ret)
			ath11k_warn(ab, "failed to send wmi command to update rx reorder queue, tid :%d (%d)\n",
				    tid, ret);
		return ret;
	}

	rx_tid->tid = tid;

	rx_tid->ba_win_sz = ba_win_sz;

	/* TODO: Optimize the memory allocation for qos tid based on
	 * the actual BA window size in REO tid update path.
	 */
	if (tid == HAL_DESC_REO_NON_QOS_TID)
		hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid);
	else
		hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);

	vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
	if (!vaddr) {
		spin_unlock_bh(&ab->base_lock);
		return -ENOMEM;
	}

	addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);

	ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
				   ssn, pn_type);

	paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
			       DMA_BIDIRECTIONAL);

	ret = dma_mapping_error(ab->dev, paddr);
	if (ret) {
		spin_unlock_bh(&ab->base_lock);
		goto err_mem_free;
	}

	rx_tid->vaddr = vaddr;
	rx_tid->paddr = paddr;
	rx_tid->size = hw_desc_sz;
	rx_tid->active = true;

	spin_unlock_bh(&ab->base_lock);

	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
						     paddr, tid, 1, ba_win_sz);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup rx reorder queue, tid :%d (%d)\n",
			    tid, ret);
		ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid);
	}

	return ret;

err_mem_free:
	kfree(vaddr);

	return ret;
}

int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
			     struct ieee80211_ampdu_params *params)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
	int vdev_id = arsta->arvif->vdev_id;
	int ret;

	ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id,
				       params->tid, params->buf_size,
				       params->ssn, arsta->pn_type);
	if (ret)
		ath11k_warn(ab, "failed to setup rx tid %d\n", ret);

	return ret;
}

int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
			    struct ieee80211_ampdu_params *params)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
	int vdev_id = arsta->arvif->vdev_id;
	dma_addr_t paddr;
	bool active;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, params->sta->addr);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	paddr = peer->rx_tid[params->tid].paddr;
	active = peer->rx_tid[params->tid].active;

	if (!active) {
		spin_unlock_bh(&ab->base_lock);
		return 0;
	}

	ret = ath11k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
	spin_unlock_bh(&ab->base_lock);
	if (ret) {
		ath11k_warn(ab, "failed to update reo for rx tid %d: %d\n",
			    params->tid, ret);
		return ret;
	}

	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
						     params->sta->addr, paddr,
						     params->tid, 1, 1);
	if (ret)
		ath11k_warn(ab, "failed to send wmi to delete rx tid %d\n",
			    ret);

	return ret;
}

int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif,
				       const u8 *peer_addr,
				       enum set_key_cmd key_cmd,
				       struct ieee80211_key_conf *key)
{
	struct ath11k *ar = arvif->ar;
	struct ath11k_base *ab = ar->ab;
	struct ath11k_hal_reo_cmd cmd = {0};
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;
	u8 tid;
	int ret = 0;

	/* NOTE: Enable PN/TSC replay check offload only for unicast frames.
	 * We use mac80211 PN/TSC replay check functionality for bcast/mcast
	 * for now.
	 */
	if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
		return 0;

	cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.upd0 |= HAL_REO_CMD_UPD0_PN |
		    HAL_REO_CMD_UPD0_PN_SIZE |
		    HAL_REO_CMD_UPD0_PN_VALID |
		    HAL_REO_CMD_UPD0_PN_CHECK |
		    HAL_REO_CMD_UPD0_SVLD;

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_CCMP_256:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		if (key_cmd == SET_KEY) {
			cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
			cmd.pn_size = 48;
		}
		break;
	default:
		break;
	}

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to configure pn replay detection\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
		rx_tid = &peer->rx_tid[tid];
		if (!rx_tid->active)
			continue;
		cmd.addr_lo = lower_32_bits(rx_tid->paddr);
		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
		ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
						HAL_REO_CMD_UPDATE_RX_QUEUE,
						&cmd, NULL);
		if (ret) {
			ath11k_warn(ab, "failed to configure rx tid %d queue for pn replay detection %d\n",
				    tid, ret);
			break;
		}
	}

	spin_unlock_bh(&ar->ab->base_lock);

	return ret;
}
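
/* HTT PPDU stats arrive as a TLV stream carrying per-user sub-blocks.
 * The helpers below locate the per-peer slot inside htt_ppdu_stats and
 * copy each recognized TLV into it.
 */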
static inline int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
					     u16 peer_id)
{
	int i;

	for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
		if (ppdu_stats->user_stats[i].is_valid_peer_id) {
			if (peer_id == ppdu_stats->user_stats[i].peer_id)
				return i;
		} else {
			return i;
		}
	}

	return -EINVAL;
}

static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab,
					   u16 tag, u16 len, const void *ptr,
					   void *data)
{
	struct htt_ppdu_stats_info *ppdu_info;
	struct htt_ppdu_user_stats *user_stats;
	int cur_user;
	u16 peer_id;

	ppdu_info = (struct htt_ppdu_stats_info *)data;

	switch (tag) {
	case HTT_PPDU_STATS_TAG_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_common)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}
		memcpy((void *)&ppdu_info->ppdu_stats.common, ptr,
		       sizeof(struct htt_ppdu_stats_common));
		break;
	case HTT_PPDU_STATS_TAG_USR_RATE:
		if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->rate, ptr,
		       sizeof(struct htt_ppdu_stats_user_rate));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->cmpltn_cmn, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
		if (len <
		    sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id =
		((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->ack_ba, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
		user_stats->tlv_flags |= BIT(tag);
		break;
	}
	return 0;
}

int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
			   int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
				       const void *ptr, void *data),
			   void *data)
{
	const struct htt_tlv *tlv;
	const void *begin = ptr;
	u16 tlv_tag, tlv_len;
	int ret = -EINVAL;

	while (len > 0) {
		if (len < sizeof(*tlv)) {
			ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}
		tlv = (struct htt_tlv *)ptr;
		tlv_tag = FIELD_GET(HTT_TLV_TAG, tlv->header);
		tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		if (tlv_len > len) {
			ath11k_err(ab, "htt tlv parse failure of tag %hhu at byte %zd (%zu bytes left, %hhu expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}
		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
		if (ret == -ENOMEM)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}
	return 0;
}
1309 static inline u32
ath11k_he_gi_to_nl80211_he_gi(u8 sgi
)
1314 case RX_MSDU_START_SGI_0_8_US
:
1315 ret
= NL80211_RATE_INFO_HE_GI_0_8
;
1317 case RX_MSDU_START_SGI_1_6_US
:
1318 ret
= NL80211_RATE_INFO_HE_GI_1_6
;
1320 case RX_MSDU_START_SGI_3_2_US
:
1321 ret
= NL80211_RATE_INFO_HE_GI_3_2
;
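
/* Translate one user's PPDU stats into mac80211 rate info. The rate
 * fields (preamble, bw, nss, mcs, gi, dcm) are packed into
 * user_rate->rate_flags and unpacked with the HTT_USR_RATE_* macros;
 * note that nss is stored zero-based and the bw field is offset by 2.
 */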
static void
ath11k_update_per_peer_tx_stats(struct ath11k *ar,
				struct htt_ppdu_stats *ppdu_stats, u8 user)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct ieee80211_sta *sta;
	struct ath11k_sta *arsta;
	struct htt_ppdu_stats_user_rate *user_rate;
	struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
	struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
	struct htt_ppdu_stats_common *common = &ppdu_stats->common;
	int ret;
	u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
	u32 succ_bytes = 0;
	u16 rate = 0, succ_pkts = 0;
	u32 tx_duration = 0;
	u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
	bool is_ampdu = false;

	if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
		return;

	if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
		is_ampdu =
			HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);

	if (usr_stats->tlv_flags &
	    BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
		succ_bytes = usr_stats->ack_ba.success_bytes;
		succ_pkts = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M,
				      usr_stats->ack_ba.info);
		tid = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM,
				usr_stats->ack_ba.info);
	}

	if (common->fes_duration_us)
		tx_duration = common->fes_duration_us;

	user_rate = &usr_stats->rate;
	flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
	bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
	nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
	mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
	sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
	dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);

	/* Note: If host configured fixed rates and in some other special
	 * cases, the broadcast/management frames are sent in different rates.
	 * Firmware rate's control to be skipped for this?
	 */

	if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH11K_HE_MCS_MAX) {
		ath11k_warn(ab, "Invalid HE mcs %hhd peer stats",  mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH11K_VHT_MCS_MAX) {
		ath11k_warn(ab, "Invalid VHT mcs %hhd peer stats",  mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH11K_HT_MCS_MAX || nss < 1)) {
		ath11k_warn(ab, "Invalid HT mcs %hhd nss %hhd peer stats",
			    mcs, nss);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
		ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
							    flags,
							    &rate_idx,
							    &rate);
		if (ret < 0)
			return;
	}

	rcu_read_lock();
	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id);

	if (!peer || !peer->sta) {
		spin_unlock_bh(&ab->base_lock);
		rcu_read_unlock();
		return;
	}

	sta = peer->sta;
	arsta = (struct ath11k_sta *)sta->drv_priv;

	memset(&arsta->txrate, 0, sizeof(arsta->txrate));

	switch (flags) {
	case WMI_RATE_PREAMBLE_OFDM:
		arsta->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_CCK:
		arsta->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_HT:
		arsta->txrate.mcs = mcs + 8 * (nss - 1);
		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_VHT:
		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_HE:
		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
		arsta->txrate.he_dcm = dcm;
		arsta->txrate.he_gi = ath11k_he_gi_to_nl80211_he_gi(sgi);
		arsta->txrate.he_ru_alloc = ath11k_he_ru_tones_to_nl80211_he_ru_alloc(
						(user_rate->ru_end -
						 user_rate->ru_start) + 1);
		break;
	}

	arsta->txrate.nss = nss;
	arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
	arsta->tx_duration += tx_duration;
	memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));

	/* PPDU stats reported for mgmt packet doesn't have valid tx bytes.
	 * So skip peer stats update for mgmt packets.
	 */
	if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
		memset(peer_stats, 0, sizeof(*peer_stats));
		peer_stats->succ_pkts = succ_pkts;
		peer_stats->succ_bytes = succ_bytes;
		peer_stats->is_ampdu = is_ampdu;
		peer_stats->duration = tx_duration;
		peer_stats->ba_fails =
			HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
			HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);

		if (ath11k_debugfs_is_extd_tx_stats_enabled(ar))
			ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx);
	}

	spin_unlock_bh(&ab->base_lock);
	rcu_read_unlock();
}

static void ath11k_htt_update_ppdu_stats(struct ath11k *ar,
					 struct htt_ppdu_stats *ppdu_stats)
{
	u8 user;

	for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
		ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user);
}

static
struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
							u32 ppdu_id)
{
	struct htt_ppdu_stats_info *ppdu_info;

	spin_lock_bh(&ar->data_lock);
	if (!list_empty(&ar->ppdu_stats_info)) {
		list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
			if (ppdu_info->ppdu_id == ppdu_id) {
				spin_unlock_bh(&ar->data_lock);
				return ppdu_info;
			}
		}

		if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
			ppdu_info = list_first_entry(&ar->ppdu_stats_info,
						     typeof(*ppdu_info), list);
			list_del(&ppdu_info->list);
			ar->ppdu_stat_list_depth--;
			ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
			kfree(ppdu_info);
		}
	}
	spin_unlock_bh(&ar->data_lock);

	ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
	if (!ppdu_info)
		return NULL;

	spin_lock_bh(&ar->data_lock);
	list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
	ar->ppdu_stat_list_depth++;
	spin_unlock_bh(&ar->data_lock);

	return ppdu_info;
}

static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab,
				      struct sk_buff *skb)
{
	struct ath11k_htt_ppdu_stats_msg *msg;
	struct htt_ppdu_stats_info *ppdu_info;
	struct ath11k *ar;
	int ret;
	u8 pdev_id;
	u32 ppdu_id, len;

	msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data;
	len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->info);
	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info);
	ppdu_id = msg->ppdu_id;

	rcu_read_lock();
	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
	if (!ar) {
		ret = -EINVAL;
		goto exit;
	}

	if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar))
		trace_ath11k_htt_ppdu_stats(ar, skb->data, len);

	ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id);
	if (!ppdu_info) {
		ret = -EINVAL;
		goto exit;
	}

	ppdu_info->ppdu_id = ppdu_id;
	ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len,
				     ath11k_htt_tlv_ppdu_stats_parse,
				     (void *)ppdu_info);
	if (ret) {
		ath11k_warn(ab, "Failed to parse tlv %d\n", ret);
		goto exit;
	}

exit:
	rcu_read_unlock();

	return ret;
}

static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data;
	struct ath_pktlog_hdr *hdr = (struct ath_pktlog_hdr *)data;
	struct ath11k *ar;
	u8 pdev_id;

	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr);
	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
	if (!ar) {
		ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id);
		return;
	}

	trace_ath11k_htt_pktlog(ar, data->payload, hdr->size,
				ar->ab->pktlog_defs_checksum);
}

static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,
						  struct sk_buff *skb)
{
	u32 *data = (u32 *)skb->data;
	u8 pdev_id, ring_type, ring_id, pdev_idx;
	u16 hp, tp;
	u32 backpressure_time;
	struct ath11k_bp_stats *bp_stats;

	pdev_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_PDEV_ID_M, *data);
	ring_type = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_TYPE_M, *data);
	ring_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_ID_M, *data);
	++data;

	hp = FIELD_GET(HTT_BACKPRESSURE_EVENT_HP_M, *data);
	tp = FIELD_GET(HTT_BACKPRESSURE_EVENT_TP_M, *data);
	++data;

	backpressure_time = *data;

	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "htt backpressure event, pdev %d, ring type %d, ring id %d, hp %d tp %d, backpressure time %d\n",
		   pdev_id, ring_type, ring_id, hp, tp, backpressure_time);

	if (ring_type == HTT_BACKPRESSURE_UMAC_RING_TYPE) {
		if (ring_id >= HTT_SW_UMAC_RING_IDX_MAX)
			return;

		bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[ring_id];
	} else if (ring_type == HTT_BACKPRESSURE_LMAC_RING_TYPE) {
		pdev_idx = DP_HW2SW_MACID(pdev_id);

		if (ring_id >= HTT_SW_LMAC_RING_IDX_MAX || pdev_idx >= MAX_RADIOS)
			return;

		bp_stats = &ab->soc_stats.bp_stats.lmac_ring_bp_stats[ring_id][pdev_idx];
	} else {
		ath11k_warn(ab, "unknown ring type received in htt bp event %d\n",
			    ring_type);
		return;
	}

	spin_lock_bh(&ab->base_lock);
	bp_stats->hp = hp;
	bp_stats->tp = tp;
	bp_stats->count++;
	bp_stats->jiffies = jiffies;
	spin_unlock_bh(&ab->base_lock);
}
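
/* Top-level dispatcher for HTT target-to-host messages: the message
 * type is carried in the first dword and selects version, peer
 * map/unmap, PPDU stats, pktlog or backpressure handling. The skb is
 * always consumed here.
 */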
void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
				       struct sk_buff *skb)
{
	struct ath11k_dp *dp = &ab->dp;
	struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
	enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)resp);
	u16 peer_id;
	u8 vdev_id;
	u8 mac_addr[ETH_ALEN];
	u16 peer_mac_h16;
	u16 ast_hash;

	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type);

	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF:
		dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR,
						  resp->version_msg.version);
		dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR,
						  resp->version_msg.version);
		complete(&dp->htt_tgt_version_received);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP:
		vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
				    resp->peer_map_ev.info);
		peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
				    resp->peer_map_ev.info);
		peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
					 resp->peer_map_ev.info1);
		ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
				       peer_mac_h16, mac_addr);
		ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP2:
		vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
				    resp->peer_map_ev.info);
		peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
				    resp->peer_map_ev.info);
		peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
					 resp->peer_map_ev.info1);
		ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
				       peer_mac_h16, mac_addr);
		ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL,
				     resp->peer_map_ev.info2);
		ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash);
		break;
	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
	case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
		peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID,
				    resp->peer_unmap_ev.info);
		ath11k_peer_unmap_event(ab, peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
		ath11k_htt_pull_ppdu_stats(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
		ath11k_debugfs_htt_ext_stats_handler(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG:
		ath11k_htt_pktlog(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
		ath11k_htt_backpressure_event_handler(ab, skb);
		break;
	default:
		ath11k_warn(ab, "htt event %d not handled\n", type);
		break;
	}

	dev_kfree_skb_any(skb);
}
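
/* An MSDU longer than one rx buffer arrives as a chain of buffers
 * flagged with is_continuation. The coalesce helper below merges the
 * chain into 'first': it strips the per-buffer HAL descriptor and L3
 * padding, grows the head skb if needed and appends the payload of
 * each continuation buffer.
 */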
static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar,
				      struct sk_buff_head *msdu_list,
				      struct sk_buff *first, struct sk_buff *last,
				      u8 l3pad_bytes, int msdu_len)
{
	struct sk_buff *skb;
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
	int buf_first_hdr_len, buf_first_len;
	struct hal_rx_desc *ldesc;
	int space_extra;
	int rem_len;
	int buf_len;

	/* As the msdu is spread across multiple rx buffers,
	 * find the offset to the start of msdu for computing
	 * the length of the msdu in the first buffer.
	 */
	buf_first_hdr_len = HAL_RX_DESC_SIZE + l3pad_bytes;
	buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;

	if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
		skb_put(first, buf_first_hdr_len + msdu_len);
		skb_pull(first, buf_first_hdr_len);
		return 0;
	}

	ldesc = (struct hal_rx_desc *)last->data;
	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ldesc);
	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ldesc);

	/* MSDU spans over multiple buffers because the length of the MSDU
	 * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data
	 * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
	 */
	skb_put(first, DP_RX_BUFFER_SIZE);
	skb_pull(first, buf_first_hdr_len);

	/* When an MSDU spread over multiple buffers attention, MSDU_END and
	 * MPDU_END tlvs are valid only in the last buffer. Copy those tlvs.
	 */
	ath11k_dp_rx_desc_end_tlv_copy(rxcb->rx_desc, ldesc);

	space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
	if (space_extra > 0 &&
	    (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
		/* Free up all buffers of the MSDU */
		while ((skb = __skb_dequeue(msdu_list)) != NULL) {
			rxcb = ATH11K_SKB_RXCB(skb);
			if (!rxcb->is_continuation) {
				dev_kfree_skb_any(skb);
				break;
			}
			dev_kfree_skb_any(skb);
		}
		return -ENOMEM;
	}

	rem_len = msdu_len - buf_first_len;
	while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
		rxcb = ATH11K_SKB_RXCB(skb);
		if (rxcb->is_continuation)
			buf_len = DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE;
		else
			buf_len = rem_len;

		if (buf_len > (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE)) {
			WARN_ON_ONCE(1);
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		skb_put(skb, buf_len + HAL_RX_DESC_SIZE);
		skb_pull(skb, HAL_RX_DESC_SIZE);
		skb_copy_from_linear_data(skb, skb_put(first, buf_len),
					  buf_len);
		dev_kfree_skb_any(skb);

		rem_len -= buf_len;
		if (!rxcb->is_continuation)
			break;
	}

	return 0;
}

static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
						      struct sk_buff *first)
{
	struct sk_buff *skb;
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);

	if (!rxcb->is_continuation)
		return first;

	skb_queue_walk(msdu_list, skb) {
		rxcb = ATH11K_SKB_RXCB(skb);
		if (!rxcb->is_continuation)
			return skb;
	}

	return NULL;
}

static void ath11k_dp_rx_h_csum_offload(struct sk_buff *msdu)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	bool ip_csum_fail, l4_csum_fail;

	ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rxcb->rx_desc);
	l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rxcb->rx_desc);

	msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
			  CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
}

static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar,
				       enum hal_encrypt_type enctype)
{
	switch (enctype) {
	case HAL_ENCRYPT_TYPE_OPEN:
	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
	case HAL_ENCRYPT_TYPE_TKIP_MIC:
		return 0;
	case HAL_ENCRYPT_TYPE_CCMP_128:
		return IEEE80211_CCMP_MIC_LEN;
	case HAL_ENCRYPT_TYPE_CCMP_256:
		return IEEE80211_CCMP_256_MIC_LEN;
	case HAL_ENCRYPT_TYPE_GCMP_128:
	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
		return IEEE80211_GCMP_MIC_LEN;
	case HAL_ENCRYPT_TYPE_WEP_40:
	case HAL_ENCRYPT_TYPE_WEP_104:
	case HAL_ENCRYPT_TYPE_WEP_128:
	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
	case HAL_ENCRYPT_TYPE_WAPI:
		break;
	}

	ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
	return 0;
}

static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar,
					 enum hal_encrypt_type enctype)
{
	switch (enctype) {
	case HAL_ENCRYPT_TYPE_OPEN:
		return 0;
	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
	case HAL_ENCRYPT_TYPE_TKIP_MIC:
		return IEEE80211_TKIP_IV_LEN;
	case HAL_ENCRYPT_TYPE_CCMP_128:
		return IEEE80211_CCMP_HDR_LEN;
	case HAL_ENCRYPT_TYPE_CCMP_256:
		return IEEE80211_CCMP_256_HDR_LEN;
	case HAL_ENCRYPT_TYPE_GCMP_128:
	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
		return IEEE80211_GCMP_HDR_LEN;
	case HAL_ENCRYPT_TYPE_WEP_40:
	case HAL_ENCRYPT_TYPE_WEP_104:
	case HAL_ENCRYPT_TYPE_WEP_128:
	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
	case HAL_ENCRYPT_TYPE_WAPI:
		break;
	}

	ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
	return 0;
}

static int ath11k_dp_rx_crypto_icv_len(struct ath11k *ar,
				       enum hal_encrypt_type enctype)
{
	switch (enctype) {
	case HAL_ENCRYPT_TYPE_OPEN:
	case HAL_ENCRYPT_TYPE_CCMP_128:
	case HAL_ENCRYPT_TYPE_CCMP_256:
	case HAL_ENCRYPT_TYPE_GCMP_128:
	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
		return 0;
	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
	case HAL_ENCRYPT_TYPE_TKIP_MIC:
		return IEEE80211_TKIP_ICV_LEN;
	case HAL_ENCRYPT_TYPE_WEP_40:
	case HAL_ENCRYPT_TYPE_WEP_104:
	case HAL_ENCRYPT_TYPE_WEP_128:
	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
	case HAL_ENCRYPT_TYPE_WAPI:
		break;
	}

	ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
	return 0;
}
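
/* Undecap handlers: rebuild a plain 802.11 frame from whichever decap
 * format the hardware delivered (Native WiFi, raw or Ethernet). The
 * crypto param/MIC/ICV length helpers above determine how much of the
 * security header and trailer must be preserved or stripped.
 */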
static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar,
					 struct sk_buff *msdu,
					 u8 *first_hdr,
					 enum hal_encrypt_type enctype,
					 struct ieee80211_rx_status *status)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN];
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	u16 qos_ctl = 0;
	u8 *qos;

	/* copy SA & DA and pull decapped header */
	hdr = (struct ieee80211_hdr *)msdu->data;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
	skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control));

	if (rxcb->is_first_msdu) {
		/* original 802.11 header is valid for the first msdu
		 * hence we can reuse the same header
		 */
		hdr = (struct ieee80211_hdr *)first_hdr;
		hdr_len = ieee80211_hdrlen(hdr->frame_control);

		/* Each A-MSDU subframe will be reported as a separate MSDU,
		 * so strip the A-MSDU bit from QoS Ctl.
		 */
		if (ieee80211_is_data_qos(hdr->frame_control)) {
			qos = ieee80211_get_qos_ctl(hdr);
			qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
		}
	} else {
		/* Rebuild qos header if this is a middle/last msdu */
		hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA);

		/* Reset the order bit as the HT_Control header is stripped */
		hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER));

		qos_ctl = rxcb->tid;

		if (ath11k_dp_rx_h_msdu_start_mesh_ctl_present(rxcb->rx_desc))
			qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;

		/* TODO Add other QoS ctl fields when required */

		/* copy decap header before overwriting for reuse below */
		memcpy(decap_hdr, (uint8_t *)hdr, hdr_len);
	}

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath11k_dp_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + hdr_len,
		       ath11k_dp_rx_crypto_param_len(ar, enctype));
	}

	if (!rxcb->is_first_msdu) {
		memcpy(skb_push(msdu,
				IEEE80211_QOS_CTL_LEN), &qos_ctl,
		       IEEE80211_QOS_CTL_LEN);
		memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len);
		return;
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu,
				       enum hal_encrypt_type enctype,
				       struct ieee80211_rx_status *status,
				       bool decrypted)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	size_t crypto_len;

	if (!rxcb->is_first_msdu ||
	    !(rxcb->is_first_msdu && rxcb->is_last_msdu)) {
		WARN_ON_ONCE(1);
		return;
	}

	skb_trim(msdu, msdu->len - FCS_LEN);

	if (!decrypted)
		return;

	hdr = (void *)msdu->data;

	/* Tail */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		skb_trim(msdu, msdu->len -
			 ath11k_dp_rx_crypto_mic_len(ar, enctype));

		skb_trim(msdu, msdu->len -
			 ath11k_dp_rx_crypto_icv_len(ar, enctype));
	} else {
		/* MIC */
		if (status->flag & RX_FLAG_MIC_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath11k_dp_rx_crypto_mic_len(ar, enctype));

		/* ICV */
		if (status->flag & RX_FLAG_ICV_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath11k_dp_rx_crypto_icv_len(ar, enctype));
	}

	/* MMIC */
	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
		skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);

	/* Head */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);

		memmove((void *)msdu->data + crypto_len,
			(void *)msdu->data, hdr_len);
		skb_pull(msdu, crypto_len);
	}
}

static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar,
					 struct sk_buff *msdu,
					 enum hal_encrypt_type enctype)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	struct ieee80211_hdr *hdr;
	size_t hdr_len, crypto_len;
	void *rfc1042;
	bool is_amsdu;

	is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu);
	hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(rxcb->rx_desc);
	rfc1042 = hdr;

	if (rxcb->is_first_msdu) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);

		rfc1042 += hdr_len + crypto_len;
	}

	if (is_amsdu)
		rfc1042 += sizeof(struct ath11k_dp_amsdu_subframe_hdr);

	return rfc1042;
}

static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar,
				       struct sk_buff *msdu,
				       u8 *first_hdr,
				       enum hal_encrypt_type enctype,
				       struct ieee80211_rx_status *status)
{
	struct ieee80211_hdr *hdr;
	struct ethhdr *eth;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	void *rfc1042;

	rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype);
	if (WARN_ON_ONCE(!rfc1042))
		return;

	/* pull decapped header and copy SA & DA */
	eth = (struct ethhdr *)msdu->data;
	ether_addr_copy(da, eth->h_dest);
	ether_addr_copy(sa, eth->h_source);
	skb_pull(msdu, sizeof(struct ethhdr));

	/* push rfc1042/llc/snap */
	memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042,
	       sizeof(struct ath11k_dp_rfc1042_hdr));

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath11k_dp_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + hdr_len,
		       ath11k_dp_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,
				   struct hal_rx_desc *rx_desc,
				   enum hal_encrypt_type enctype,
				   struct ieee80211_rx_status *status,
				   bool decrypted)
{
	u8 *first_hdr;
	u8 decap;

	first_hdr = ath11k_dp_rx_h_80211_hdr(rx_desc);
	decap = ath11k_dp_rx_h_msdu_start_decap_type(rx_desc);

	switch (decap) {
	case DP_RX_DECAP_TYPE_NATIVE_WIFI:
		ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr,
					     enctype, status);
		break;
	case DP_RX_DECAP_TYPE_RAW:
		ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status,
					   decrypted);
		break;
	case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
		/* TODO undecap support for middle/last msdu's of amsdu */
		ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
					   enctype, status);
		break;
	case DP_RX_DECAP_TYPE_8023:
		/* TODO: Handle undecap for these formats */
		break;
	}
}
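
/* Per-MPDU receive fixups: look up the peer to determine the cipher,
 * derive the decryption status from the attention TLV, update the
 * ieee80211_rx_status flags, record checksum-offload results and
 * undecap the frame.
 */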
static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,
				struct sk_buff *msdu,
				struct hal_rx_desc *rx_desc,
				struct ieee80211_rx_status *rx_status)
{
	bool fill_crypto_hdr, mcast;
	enum hal_encrypt_type enctype;
	bool is_decrypted = false;
	struct ieee80211_hdr *hdr;
	struct ath11k_peer *peer;
	u32 err_bitmap;

	hdr = (struct ieee80211_hdr *)msdu->data;

	/* PN for multicast packets will be checked in mac80211 */

	mcast = is_multicast_ether_addr(hdr->addr1);
	fill_crypto_hdr = mcast;

	spin_lock_bh(&ar->ab->base_lock);
	peer = ath11k_peer_find_by_addr(ar->ab, hdr->addr2);
	if (peer) {
		if (mcast)
			enctype = peer->sec_type_grp;
		else
			enctype = peer->sec_type;
	} else {
		enctype = HAL_ENCRYPT_TYPE_OPEN;
	}
	spin_unlock_bh(&ar->ab->base_lock);

	err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_desc);
	if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap)
		is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc);

	/* Clear per-MPDU flags while leaving per-PPDU flags intact */
	rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
			     RX_FLAG_MMIC_ERROR |
			     RX_FLAG_DECRYPTED |
			     RX_FLAG_IV_STRIPPED |
			     RX_FLAG_MMIC_STRIPPED);

	if (err_bitmap & DP_RX_MPDU_ERR_FCS)
		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
	if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC)
		rx_status->flag |= RX_FLAG_MMIC_ERROR;

	if (is_decrypted) {
		rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;

		if (fill_crypto_hdr)
			rx_status->flag |= RX_FLAG_MIC_STRIPPED |
					   RX_FLAG_ICV_STRIPPED;
		else
			rx_status->flag |= RX_FLAG_IV_STRIPPED |
					   RX_FLAG_PN_VALIDATED;
	}

	ath11k_dp_rx_h_csum_offload(msdu);
	ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
			       enctype, rx_status, is_decrypted);

	if (!is_decrypted || fill_crypto_hdr)
		return;

	hdr = (void *)msdu->data;
	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
}

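/* Translate the hardware rate fields (pkt_type, mcs, nss, bw, sgi) into
 * mac80211's ieee80211_rx_status encoding. Out-of-range MCS values are
 * logged and the rate left unset rather than reported incorrectly.
 */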
static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
				struct ieee80211_rx_status *rx_status)
{
	struct ieee80211_supported_band *sband;
	enum rx_msdu_start_pkt_type pkt_type;
	u8 bw;
	u8 rate_mcs, nss;
	u8 sgi;
	bool is_cck;

	pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(rx_desc);
	bw = ath11k_dp_rx_h_msdu_start_rx_bw(rx_desc);
	rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(rx_desc);
	nss = ath11k_dp_rx_h_msdu_start_nss(rx_desc);
	sgi = ath11k_dp_rx_h_msdu_start_sgi(rx_desc);

	switch (pkt_type) {
	case RX_MSDU_START_PKT_TYPE_11A:
	case RX_MSDU_START_PKT_TYPE_11B:
		is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
		sband = &ar->mac.sbands[rx_status->band];
		rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs,
								is_cck);
		break;
	case RX_MSDU_START_PKT_TYPE_11N:
		rx_status->encoding = RX_ENC_HT;
		if (rate_mcs > ATH11K_HT_MCS_MAX) {
			ath11k_warn(ar->ab,
				    "Received with invalid mcs in HT mode %d\n",
				    rate_mcs);
			break;
		}
		rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
		if (sgi)
			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
		break;
	case RX_MSDU_START_PKT_TYPE_11AC:
		rx_status->encoding = RX_ENC_VHT;
		rx_status->rate_idx = rate_mcs;
		if (rate_mcs > ATH11K_VHT_MCS_MAX) {
			ath11k_warn(ar->ab,
				    "Received with invalid mcs in VHT mode %d\n",
				    rate_mcs);
			break;
		}
		rx_status->nss = nss;
		if (sgi)
			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
		break;
	case RX_MSDU_START_PKT_TYPE_11AX:
		rx_status->rate_idx = rate_mcs;
		if (rate_mcs > ATH11K_HE_MCS_MAX) {
			ath11k_warn(ar->ab,
				    "Received with invalid mcs in HE mode %d\n",
				    rate_mcs);
			break;
		}
		rx_status->encoding = RX_ENC_HE;
		rx_status->nss = nss;
		rx_status->he_gi = ath11k_he_gi_to_nl80211_he_gi(sgi);
		rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
		break;
	}
}

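/* Derive the band from the descriptor: 6 GHz if the center frequency is
 * within 5935-7105 MHz, 2 GHz for channels 1-14, 5 GHz for channels
 * 36-173; otherwise fall back to the pdev's current rx channel and dump
 * the descriptor for debugging.
 */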
static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
				struct ieee80211_rx_status *rx_status)
{
	u8 channel_num;
	u32 center_freq;
	struct ieee80211_channel *channel;

	rx_status->freq = 0;
	rx_status->rate_idx = 0;
	rx_status->nss = 0;
	rx_status->encoding = RX_ENC_LEGACY;
	rx_status->bw = RATE_INFO_BW_20;

	rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;

	channel_num = ath11k_dp_rx_h_msdu_start_freq(rx_desc);
	center_freq = ath11k_dp_rx_h_msdu_start_freq(rx_desc) >> 16;

	if (center_freq >= 5935 && center_freq <= 7105) {
		rx_status->band = NL80211_BAND_6GHZ;
	} else if (channel_num >= 1 && channel_num <= 14) {
		rx_status->band = NL80211_BAND_2GHZ;
	} else if (channel_num >= 36 && channel_num <= 173) {
		rx_status->band = NL80211_BAND_5GHZ;
	} else {
		spin_lock_bh(&ar->data_lock);
		channel = ar->rx_channel;
		if (channel) {
			rx_status->band = channel->band;
			channel_num =
				ieee80211_frequency_to_channel(channel->center_freq);
		}
		spin_unlock_bh(&ar->data_lock);
		ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ",
				rx_desc, sizeof(struct hal_rx_desc));
	}

	rx_status->freq = ieee80211_channel_to_frequency(channel_num,
							 rx_status->band);

	ath11k_dp_rx_h_rate(ar, rx_desc, rx_status);
}

static char *ath11k_print_get_tid(struct ieee80211_hdr *hdr, char *out,
				  size_t size)
{
	u8 *qc;
	u8 tid;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return "";

	qc = ieee80211_get_qos_ctl(hdr);
	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
	snprintf(out, size, "tid %d", tid);

	return out;
}

static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi,
				      struct sk_buff *msdu)
{
	static const struct ieee80211_radiotap_he known = {
		.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
				     IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
		.data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
	};
	struct ieee80211_rx_status *status;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ieee80211_radiotap_he *he = NULL;
	char tid[32];

	status = IEEE80211_SKB_RXCB(msdu);
	if (status->encoding == RX_ENC_HE) {
		he = skb_push(msdu, sizeof(known));
		memcpy(he, &known, sizeof(known));
		status->flag |= RX_FLAG_RADIOTAP_HE;
	}

	ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
		   "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
		   msdu,
		   msdu->len,
		   ieee80211_get_SA(hdr),
		   ath11k_print_get_tid(hdr, tid, sizeof(tid)),
		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
							"mcast" : "ucast",
		   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
		   (status->encoding == RX_ENC_HT) ? "ht" : "",
		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
		   (status->encoding == RX_ENC_HE) ? "he" : "",
		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));

	ath11k_dbg_dump(ar->ab, ATH11K_DBG_DP_RX, NULL, "dp rx msdu: ",
			msdu->data, msdu->len);

	/* TODO: trace rx packet */

	ieee80211_rx_napi(ar->hw, NULL, msdu, napi);
}

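/* Prepare a reaped MSDU for delivery: locate the last buffer of the MSDU
 * (which carries the valid attention/msdu_end TLVs), strip the hal rx
 * descriptor and l3 padding, or coalesce continuation buffers into one
 * skb, then run the ppdu/mpdu handlers to fill in the rx status.
 */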
static int ath11k_dp_rx_process_msdu(struct ath11k *ar,
				     struct sk_buff *msdu,
				     struct sk_buff_head *msdu_list)
{
	struct hal_rx_desc *rx_desc, *lrx_desc;
	struct ieee80211_rx_status rx_status = {0};
	struct ieee80211_rx_status *status;
	struct ath11k_skb_rxcb *rxcb;
	struct ieee80211_hdr *hdr;
	struct sk_buff *last_buf;
	u8 l3_pad_bytes;
	u8 *hdr_status;
	u16 msdu_len;
	int ret;

	last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
	if (!last_buf) {
		ath11k_warn(ar->ab,
			    "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n");
		ret = -EIO;
		goto free_out;
	}

	rx_desc = (struct hal_rx_desc *)msdu->data;
	lrx_desc = (struct hal_rx_desc *)last_buf->data;
	if (!ath11k_dp_rx_h_attn_msdu_done(lrx_desc)) {
		ath11k_warn(ar->ab, "msdu_done bit in attention is not set\n");
		ret = -EIO;
		goto free_out;
	}

	rxcb = ATH11K_SKB_RXCB(msdu);
	rxcb->rx_desc = rx_desc;
	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc);
	l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(lrx_desc);

	if (rxcb->is_frag) {
		skb_pull(msdu, HAL_RX_DESC_SIZE);
	} else if (!rxcb->is_continuation) {
		if ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) {
			hdr_status = ath11k_dp_rx_h_80211_hdr(rx_desc);
			ret = -EINVAL;
			ath11k_warn(ar->ab, "invalid msdu len %u\n", msdu_len);
			ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
					sizeof(struct ieee80211_hdr));
			ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
					sizeof(struct hal_rx_desc));
			goto free_out;
		}
		skb_put(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes + msdu_len);
		skb_pull(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes);
	} else {
		ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list,
						 msdu, last_buf,
						 l3_pad_bytes, msdu_len);
		if (ret) {
			ath11k_warn(ar->ab,
				    "failed to coalesce msdu rx buffer %d\n", ret);
			goto free_out;
		}
	}

	hdr = (struct ieee80211_hdr *)msdu->data;

	/* Process only data frames */
	if (!ieee80211_is_data(hdr->frame_control))
		return -EINVAL;

	ath11k_dp_rx_h_ppdu(ar, rx_desc, &rx_status);
	ath11k_dp_rx_h_mpdu(ar, msdu, rx_desc, &rx_status);

	rx_status.flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;

	status = IEEE80211_SKB_RXCB(msdu);
	*status = rx_status;

	return 0;

free_out:
	return ret;
}

static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab,
						  struct napi_struct *napi,
						  struct sk_buff_head *msdu_list,
						  int *quota, int ring_id)
{
	struct ath11k_skb_rxcb *rxcb;
	struct sk_buff *msdu;
	struct ath11k *ar;
	u8 mac_id;
	int ret;

	if (skb_queue_empty(msdu_list))
		return;

	rcu_read_lock();

	while (*quota && (msdu = __skb_dequeue(msdu_list))) {
		rxcb = ATH11K_SKB_RXCB(msdu);
		mac_id = rxcb->mac_id;
		ar = ab->pdevs[mac_id].ar;
		if (!rcu_dereference(ab->pdevs_active[mac_id])) {
			dev_kfree_skb_any(msdu);
			continue;
		}

		if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
			dev_kfree_skb_any(msdu);
			continue;
		}

		ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list);
		if (ret) {
			ath11k_dbg(ab, ATH11K_DBG_DATA,
				   "Unable to process msdu %d", ret);
			dev_kfree_skb_any(msdu);
			continue;
		}

		ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
		(*quota)--;
	}

	rcu_read_unlock();
}

int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
			 struct napi_struct *napi, int budget)
{
	struct ath11k_dp *dp = &ab->dp;
	struct dp_rxdma_ring *rx_ring;
	int num_buffs_reaped[MAX_RADIOS] = {0};
	struct sk_buff_head msdu_list;
	struct ath11k_skb_rxcb *rxcb;
	int total_msdu_reaped = 0;
	struct hal_srng *srng;
	struct sk_buff *msdu;
	int quota = budget;
	bool done = false;
	int buf_id, mac_id;
	struct ath11k *ar;
	u32 *rx_desc;
	int i;

	__skb_queue_head_init(&msdu_list);

	srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

try_again:
	while ((rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
		struct hal_reo_dest_ring desc = *(struct hal_reo_dest_ring *)rx_desc;
		enum hal_reo_dest_ring_push_reason push_reason;
		u32 cookie;

		cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
				   desc.buf_addr_info.info1);
		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
				   cookie);
		mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);

		ar = ab->pdevs[mac_id].ar;
		rx_ring = &ar->dp.rx_refill_buf_ring;
		spin_lock_bh(&rx_ring->idr_lock);
		msdu = idr_find(&rx_ring->bufs_idr, buf_id);
		if (!msdu) {
			ath11k_warn(ab, "frame rx with invalid buf_id %d\n",
				    buf_id);
			spin_unlock_bh(&rx_ring->idr_lock);
			continue;
		}

		idr_remove(&rx_ring->bufs_idr, buf_id);
		spin_unlock_bh(&rx_ring->idr_lock);

		rxcb = ATH11K_SKB_RXCB(msdu);
		dma_unmap_single(ab->dev, rxcb->paddr,
				 msdu->len + skb_tailroom(msdu),
				 DMA_FROM_DEVICE);

		num_buffs_reaped[mac_id]++;
		total_msdu_reaped++;

		push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
					desc.info0);
		if (push_reason !=
		    HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
			dev_kfree_skb_any(msdu);
			ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++;
			continue;
		}

		rxcb->is_first_msdu = !!(desc.rx_msdu_info.info0 &
					 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
		rxcb->is_last_msdu = !!(desc.rx_msdu_info.info0 &
					RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
		rxcb->is_continuation = !!(desc.rx_msdu_info.info0 &
					   RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
		rxcb->mac_id = mac_id;
		rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM,
				      desc.info0);

		__skb_queue_tail(&msdu_list, msdu);

		if (total_msdu_reaped >= quota && !rxcb->is_continuation) {
			done = true;
			break;
		}
	}

	/* Hw might have updated the head pointer after we cached it.
	 * In this case, even though there are entries in the ring we'll
	 * get rx_desc NULL. Give the read another try with updated cached
	 * head pointer so that we can reap complete MPDU in the current
	 * rx processing.
	 */
	if (!done && ath11k_hal_srng_dst_num_free(ab, srng, true)) {
		ath11k_hal_srng_access_end(ab, srng);
		goto try_again;
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	if (!total_msdu_reaped)
		goto exit;

	for (i = 0; i < ab->num_radios; i++) {
		if (!num_buffs_reaped[i])
			continue;

		ar = ab->pdevs[i].ar;
		rx_ring = &ar->dp.rx_refill_buf_ring;

		ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
					   HAL_RX_BUF_RBM_SW3_BM);
	}

	ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list,
					      &quota, ring_id);

exit:
	return budget - quota;
}

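/* Fold the counters parsed from one monitor-status PPDU into the
 * per-station rx statistics. Legacy (11a/11b) PPDUs carry no HT/VHT MCS
 * or QoS TID, so those fields are pinned to their catch-all indices
 * before the bucketed counters are updated.
 */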
static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
					   struct hal_rx_mon_ppdu_info *ppdu_info)
{
	struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats;
	u32 num_msdu;

	if (!rx_stats)
		return;

	num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +
		   ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;

	rx_stats->num_msdu += num_msdu;
	rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count +
				    ppdu_info->tcp_ack_msdu_count;
	rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count;
	rx_stats->other_msdu_count += ppdu_info->other_msdu_count;

	if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A ||
	    ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) {
		ppdu_info->nss = 1;
		ppdu_info->mcs = HAL_RX_MAX_MCS;
		ppdu_info->tid = IEEE80211_NUM_TIDS;
	}

	if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS)
		rx_stats->nss_count[ppdu_info->nss - 1] += num_msdu;

	if (ppdu_info->mcs <= HAL_RX_MAX_MCS)
		rx_stats->mcs_count[ppdu_info->mcs] += num_msdu;

	if (ppdu_info->gi < HAL_RX_GI_MAX)
		rx_stats->gi_count[ppdu_info->gi] += num_msdu;

	if (ppdu_info->bw < HAL_RX_BW_MAX)
		rx_stats->bw_count[ppdu_info->bw] += num_msdu;

	if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX)
		rx_stats->coding_count[ppdu_info->ldpc] += num_msdu;

	if (ppdu_info->tid <= IEEE80211_NUM_TIDS)
		rx_stats->tid_count[ppdu_info->tid] += num_msdu;

	if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX)
		rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu;

	if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX)
		rx_stats->reception_type[ppdu_info->reception_type] += num_msdu;

	if (ppdu_info->is_stbc)
		rx_stats->stbc_count += num_msdu;

	if (ppdu_info->beamformed)
		rx_stats->beamformed_count += num_msdu;

	if (ppdu_info->num_mpdu_fcs_ok > 1)
		rx_stats->ampdu_msdu_count += num_msdu;
	else
		rx_stats->non_ampdu_msdu_count += num_msdu;

	rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok;
	rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err;
	rx_stats->dcm_count += ppdu_info->dcm;
	rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu;

	arsta->rssi_comb = ppdu_info->rssi_comb;
	rx_stats->rx_duration += ppdu_info->rx_duration;
	arsta->rx_duration = rx_stats->rx_duration;
}

static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab,
							 struct dp_rxdma_ring *rx_ring,
							 int *buf_id)
{
	struct sk_buff *skb;
	dma_addr_t paddr;

	skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
			    DP_RX_BUFFER_ALIGN_SIZE);

	if (!skb)
		goto fail_alloc_skb;

	if (!IS_ALIGNED((unsigned long)skb->data,
			DP_RX_BUFFER_ALIGN_SIZE)) {
		skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
			 skb->data);
	}

	paddr = dma_map_single(ab->dev, skb->data,
			       skb->len + skb_tailroom(skb),
			       DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(ab->dev, paddr)))
		goto fail_free_skb;

	spin_lock_bh(&rx_ring->idr_lock);
	*buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
			    rx_ring->bufs_max, GFP_ATOMIC);
	spin_unlock_bh(&rx_ring->idr_lock);
	if (*buf_id < 0)
		goto fail_dma_unmap;

	ATH11K_SKB_RXCB(skb)->paddr = paddr;
	return skb;

fail_dma_unmap:
	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
			 DMA_FROM_DEVICE);
fail_free_skb:
	dev_kfree_skb_any(skb);
fail_alloc_skb:
	return NULL;
}

int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
					   struct dp_rxdma_ring *rx_ring,
					   int req_entries,
					   enum hal_rx_buf_return_buf_manager mgr)
{
	struct hal_srng *srng;
	u32 *desc;
	struct sk_buff *skb;
	int num_free;
	int num_remain;
	int buf_id;
	u32 cookie;
	dma_addr_t paddr;

	req_entries = min(req_entries, rx_ring->bufs_max);

	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	num_free = ath11k_hal_srng_src_num_free(ab, srng, true);

	req_entries = min(num_free, req_entries);
	num_remain = req_entries;

	while (num_remain > 0) {
		skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
							&buf_id);
		if (!skb)
			break;
		paddr = ATH11K_SKB_RXCB(skb)->paddr;

		desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
		if (!desc)
			goto fail_desc_get;

		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

		num_remain--;

		ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;

fail_desc_get:
	spin_lock_bh(&rx_ring->idr_lock);
	idr_remove(&rx_ring->bufs_idr, buf_id);
	spin_unlock_bh(&rx_ring->idr_lock);
	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
			 DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
	ath11k_hal_srng_access_end(ab, srng);
	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;
}

static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
					     int *budget, struct sk_buff_head *skb_list)
{
	struct ath11k *ar;
	struct ath11k_pdev_dp *dp;
	struct dp_rxdma_ring *rx_ring;
	struct hal_srng *srng;
	void *rx_mon_status_desc;
	struct sk_buff *skb;
	struct ath11k_skb_rxcb *rxcb;
	struct hal_tlv_hdr *tlv;
	u32 cookie;
	int buf_id, srng_id;
	dma_addr_t paddr;
	u8 rbm;
	int num_buffs_reaped = 0;

	ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
	dp = &ar->dp;
	srng_id = ath11k_hw_mac_id_to_srng_id(&ab->hw_params, mac_id);
	rx_ring = &dp->rx_mon_status_refill_ring[srng_id];

	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);
	while (*budget) {
		*budget -= 1;
		rx_mon_status_desc =
			ath11k_hal_srng_src_peek(ab, srng);
		if (!rx_mon_status_desc)
			break;

		ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr,
						&cookie, &rbm);
		if (paddr) {
			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);

			spin_lock_bh(&rx_ring->idr_lock);
			skb = idr_find(&rx_ring->bufs_idr, buf_id);
			if (!skb) {
				ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n",
					    buf_id);
				spin_unlock_bh(&rx_ring->idr_lock);
				goto move_next;
			}

			idr_remove(&rx_ring->bufs_idr, buf_id);
			spin_unlock_bh(&rx_ring->idr_lock);

			rxcb = ATH11K_SKB_RXCB(skb);

			dma_unmap_single(ab->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);

			tlv = (struct hal_tlv_hdr *)skb->data;
			if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) !=
			    HAL_RX_STATUS_BUFFER_DONE) {
				ath11k_warn(ab, "mon status DONE not set %lx\n",
					    FIELD_GET(HAL_TLV_HDR_TAG,
						      tlv->tl));
				dev_kfree_skb_any(skb);
				goto move_next;
			}

			__skb_queue_tail(skb_list, skb);
		}
move_next:
		skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
							&buf_id);
		if (!skb) {
			ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
							HAL_RX_BUF_RBM_SW3_BM);
			num_buffs_reaped++;
			break;
		}
		rxcb = ATH11K_SKB_RXCB(skb);

		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

		ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr,
						cookie, HAL_RX_BUF_RBM_SW3_BM);
		ath11k_hal_srng_src_get_next_entry(ab, srng);
		num_buffs_reaped++;
	}
	ath11k_hal_srng_access_end(ab, srng);
	spin_unlock_bh(&srng->lock);

	return num_buffs_reaped;
}

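/* NAPI handler for the monitor status ring: reap completed status buffers,
 * parse their TLVs into a hal_rx_mon_ppdu_info, and credit the resulting
 * counters to the station that transmitted the PPDU.
 */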
int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
				    struct napi_struct *napi, int budget)
{
	struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
	enum hal_rx_mon_status hal_status;
	struct sk_buff *skb;
	struct sk_buff_head skb_list;
	struct hal_rx_mon_ppdu_info ppdu_info;
	struct ath11k_peer *peer;
	struct ath11k_sta *arsta;
	int num_buffs_reaped = 0;

	__skb_queue_head_init(&skb_list);

	num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget,
							     &skb_list);
	if (!num_buffs_reaped)
		goto exit;

	while ((skb = __skb_dequeue(&skb_list))) {
		memset(&ppdu_info, 0, sizeof(ppdu_info));
		ppdu_info.peer_id = HAL_INVALID_PEERID;

		if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar))
			trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE);

		hal_status = ath11k_hal_rx_parse_mon_status(ab, &ppdu_info, skb);

		if (ppdu_info.peer_id == HAL_INVALID_PEERID ||
		    hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
			dev_kfree_skb_any(skb);
			continue;
		}

		spin_lock_bh(&ab->base_lock);
		peer = ath11k_peer_find_by_id(ab, ppdu_info.peer_id);

		if (!peer || !peer->sta) {
			ath11k_dbg(ab, ATH11K_DBG_DATA,
				   "failed to find the peer with peer_id %d\n",
				   ppdu_info.peer_id);
			spin_unlock_bh(&ab->base_lock);
			dev_kfree_skb_any(skb);
			continue;
		}

		arsta = (struct ath11k_sta *)peer->sta->drv_priv;
		ath11k_dp_rx_update_peer_stats(arsta, &ppdu_info);

		if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr))
			trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE);

		spin_unlock_bh(&ab->base_lock);

		dev_kfree_skb_any(skb);
	}
exit:
	return num_buffs_reaped;
}

static void ath11k_dp_rx_frag_timer(struct timer_list *timer)
{
	struct dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer);

	spin_lock_bh(&rx_tid->ab->base_lock);
	if (rx_tid->last_frag_no &&
	    rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) {
		spin_unlock_bh(&rx_tid->ab->base_lock);
		return;
	}
	ath11k_dp_rx_frags_cleanup(rx_tid, true);
	spin_unlock_bh(&rx_tid->ab->base_lock);
}

int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id)
{
	struct ath11k_base *ab = ar->ab;
	struct crypto_shash *tfm;
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;
	int i;

	tfm = crypto_alloc_shash("michael_mic", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to set up fragment info\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
		rx_tid = &peer->rx_tid[i];
		rx_tid->ab = ab;
		timer_setup(&rx_tid->frag_timer, ath11k_dp_rx_frag_timer, 0);
		skb_queue_head_init(&rx_tid->rx_frags);
	}

	peer->tfm_mmic = tfm;
	spin_unlock_bh(&ab->base_lock);

	return 0;
}

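/* Compute the TKIP Michael MIC over the standard pseudo header (DA, SA,
 * priority/TID plus three zero pad bytes) followed by the payload, using
 * the kernel "michael_mic" shash with the 8-byte rx MIC key.
 */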
static int ath11k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,
				      struct ieee80211_hdr *hdr, u8 *data,
				      size_t data_len, u8 *mic)
{
	SHASH_DESC_ON_STACK(desc, tfm);
	u8 mic_hdr[16] = {0};
	u8 tid = 0;
	int ret;

	if (!tfm)
		return -EINVAL;

	desc->tfm = tfm;

	ret = crypto_shash_setkey(tfm, key, 8);
	if (ret)
		goto out;

	ret = crypto_shash_init(desc);
	if (ret)
		goto out;

	/* TKIP MIC header */
	memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN);
	memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN);
	if (ieee80211_is_data_qos(hdr->frame_control))
		tid = ieee80211_get_tid(hdr);
	mic_hdr[12] = tid;

	ret = crypto_shash_update(desc, mic_hdr, 16);
	if (ret)
		goto out;
	ret = crypto_shash_update(desc, data, data_len);
	if (ret)
		goto out;
	ret = crypto_shash_final(desc, mic);
out:
	shash_desc_zero(desc);
	return ret;
}

static int ath11k_dp_rx_h_verify_tkip_mic(struct ath11k *ar, struct ath11k_peer *peer,
					  struct sk_buff *msdu)
{
	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
	struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
	struct ieee80211_key_conf *key_conf;
	struct ieee80211_hdr *hdr;
	u8 mic[IEEE80211_CCMP_MIC_LEN];
	int head_len, tail_len, ret;
	size_t data_len;
	u32 hdr_len;
	u8 *key, *data;
	u8 key_idx;

	if (ath11k_dp_rx_h_mpdu_start_enctype(rx_desc) != HAL_ENCRYPT_TYPE_TKIP_MIC)
		return 0;

	hdr = (struct ieee80211_hdr *)(msdu->data + HAL_RX_DESC_SIZE);
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	head_len = hdr_len + HAL_RX_DESC_SIZE + IEEE80211_TKIP_IV_LEN;
	tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN;

	if (!is_multicast_ether_addr(hdr->addr1))
		key_idx = peer->ucast_keyidx;
	else
		key_idx = peer->mcast_keyidx;

	key_conf = peer->keys[key_idx];

	data = msdu->data + head_len;
	data_len = msdu->len - head_len - tail_len;
	key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];

	ret = ath11k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic);
	if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN))
		goto mic_fail;

	return 0;

mic_fail:
	(ATH11K_SKB_RXCB(msdu))->is_first_msdu = true;
	(ATH11K_SKB_RXCB(msdu))->is_last_msdu = true;

	rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
		     RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
	skb_pull(msdu, HAL_RX_DESC_SIZE);

	ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);
	ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
			       HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
	ieee80211_rx(ar->hw, msdu);
	return -EINVAL;
}

static void ath11k_dp_rx_h_undecap_frag(struct ath11k *ar, struct sk_buff *msdu,
					enum hal_encrypt_type enctype, u32 flags)
{
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	size_t crypto_len;

	if (!flags)
		return;

	hdr = (struct ieee80211_hdr *)(msdu->data + HAL_RX_DESC_SIZE);

	if (flags & RX_FLAG_MIC_STRIPPED)
		skb_trim(msdu, msdu->len -
			 ath11k_dp_rx_crypto_mic_len(ar, enctype));

	if (flags & RX_FLAG_ICV_STRIPPED)
		skb_trim(msdu, msdu->len -
			 ath11k_dp_rx_crypto_icv_len(ar, enctype));

	if (flags & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);

		memmove((void *)msdu->data + HAL_RX_DESC_SIZE + crypto_len,
			(void *)msdu->data + HAL_RX_DESC_SIZE, hdr_len);
		skb_pull(msdu, crypto_len);
	}
}

static int ath11k_dp_rx_h_defrag(struct ath11k *ar,
				 struct ath11k_peer *peer,
				 struct dp_rx_tid *rx_tid,
				 struct sk_buff **defrag_skb)
{
	struct hal_rx_desc *rx_desc;
	struct sk_buff *skb, *first_frag, *last_frag;
	struct ieee80211_hdr *hdr;
	enum hal_encrypt_type enctype;
	bool is_decrypted = false;
	int msdu_len = 0;
	int extra_space;
	u32 flags;

	first_frag = skb_peek(&rx_tid->rx_frags);
	last_frag = skb_peek_tail(&rx_tid->rx_frags);

	skb_queue_walk(&rx_tid->rx_frags, skb) {
		flags = 0;
		rx_desc = (struct hal_rx_desc *)skb->data;
		hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE);

		enctype = ath11k_dp_rx_h_mpdu_start_enctype(rx_desc);
		if (enctype != HAL_ENCRYPT_TYPE_OPEN)
			is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc);

		if (is_decrypted) {
			if (skb != first_frag)
				flags |= RX_FLAG_IV_STRIPPED;
			if (skb != last_frag)
				flags |= RX_FLAG_ICV_STRIPPED |
					 RX_FLAG_MIC_STRIPPED;
		}

		/* RX fragments are always raw packets */
		if (skb != last_frag)
			skb_trim(skb, skb->len - FCS_LEN);
		ath11k_dp_rx_h_undecap_frag(ar, skb, enctype, flags);

		if (skb != first_frag)
			skb_pull(skb, HAL_RX_DESC_SIZE +
				      ieee80211_hdrlen(hdr->frame_control));
		msdu_len += skb->len;
	}

	extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag));
	if (extra_space > 0 &&
	    (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0))
		return -ENOMEM;

	__skb_unlink(first_frag, &rx_tid->rx_frags);
	while ((skb = __skb_dequeue(&rx_tid->rx_frags))) {
		skb_put_data(first_frag, skb->data, skb->len);
		dev_kfree_skb_any(skb);
	}

	hdr = (struct ieee80211_hdr *)(first_frag->data + HAL_RX_DESC_SIZE);
	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
	ATH11K_SKB_RXCB(first_frag)->is_frag = 1;

	if (ath11k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag))
		first_frag = NULL;

	*defrag_skb = first_frag;
	return 0;
}

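/* Hand the reassembled MPDU back to the hardware: rewrite the msdu link
 * descriptor and the length field in the hal rx descriptor, DMA-map the
 * new buffer, and queue it on the REO reinject (entrance) ring so it is
 * delivered through the normal REO destination path with the PN marked
 * valid.
 */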
static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_tid *rx_tid,
					      struct sk_buff *defrag_skb)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct dp_rxdma_ring *rx_refill_ring = &dp->rx_refill_buf_ring;
	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data;
	struct hal_reo_entrance_ring *reo_ent_ring;
	struct hal_reo_dest_ring *reo_dest_ring;
	struct dp_link_desc_bank *link_desc_banks;
	struct hal_rx_msdu_link *msdu_link;
	struct hal_rx_msdu_details *msdu0;
	struct hal_srng *srng;
	dma_addr_t paddr;
	u32 desc_bank, msdu_info, mpdu_info;
	u32 dst_idx, cookie;
	u32 *msdu_len_offset;
	int ret, buf_id;

	link_desc_banks = ab->dp.link_desc_banks;
	reo_dest_ring = rx_tid->dst_ring_desc;

	ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
	msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr +
			(paddr - link_desc_banks[desc_bank].paddr));
	msdu0 = &msdu_link->msdu_link[0];
	dst_idx = FIELD_GET(RX_MSDU_DESC_INFO0_REO_DEST_IND, msdu0->rx_msdu_info.info0);
	memset(msdu0, 0, sizeof(*msdu0));

	msdu_info = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1) |
		    FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1) |
		    FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_CONTINUATION, 0) |
		    FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_LENGTH,
			       defrag_skb->len - HAL_RX_DESC_SIZE) |
		    FIELD_PREP(RX_MSDU_DESC_INFO0_REO_DEST_IND, dst_idx) |
		    FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_SA, 1) |
		    FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_DA, 1);
	msdu0->rx_msdu_info.info0 = msdu_info;

	/* change msdu len in hal rx desc */
	msdu_len_offset = (u32 *)&rx_desc->msdu_start;
	*msdu_len_offset &= ~(RX_MSDU_START_INFO1_MSDU_LENGTH);
	*msdu_len_offset |= defrag_skb->len - HAL_RX_DESC_SIZE;

	paddr = dma_map_single(ab->dev, defrag_skb->data,
			       defrag_skb->len + skb_tailroom(defrag_skb),
			       DMA_TO_DEVICE);
	if (dma_mapping_error(ab->dev, paddr))
		return -ENOMEM;

	spin_lock_bh(&rx_refill_ring->idr_lock);
	buf_id = idr_alloc(&rx_refill_ring->bufs_idr, defrag_skb, 0,
			   rx_refill_ring->bufs_max * 3, GFP_ATOMIC);
	spin_unlock_bh(&rx_refill_ring->idr_lock);
	if (buf_id < 0) {
		ret = -ENOMEM;
		goto err_unmap_dma;
	}

	ATH11K_SKB_RXCB(defrag_skb)->paddr = paddr;
	cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, dp->mac_id) |
		 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

	ath11k_hal_rx_buf_addr_info_set(msdu0, paddr, cookie, HAL_RX_BUF_RBM_SW3_BM);

	/* Fill mpdu details into reo entrance ring */
	srng = &ab->hal.srng_list[ab->dp.reo_reinject_ring.ring_id];

	spin_lock_bh(&srng->lock);
	ath11k_hal_srng_access_begin(ab, srng);

	reo_ent_ring = (struct hal_reo_entrance_ring *)
			ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!reo_ent_ring) {
		ath11k_hal_srng_access_end(ab, srng);
		spin_unlock_bh(&srng->lock);
		ret = -ENOSPC;
		goto err_free_idr;
	}
	memset(reo_ent_ring, 0, sizeof(*reo_ent_ring));

	ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
	ath11k_hal_rx_buf_addr_info_set(reo_ent_ring, paddr, desc_bank,
					HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST);

	mpdu_info = FIELD_PREP(RX_MPDU_DESC_INFO0_MSDU_COUNT, 1) |
		    FIELD_PREP(RX_MPDU_DESC_INFO0_SEQ_NUM, rx_tid->cur_sn) |
		    FIELD_PREP(RX_MPDU_DESC_INFO0_FRAG_FLAG, 0) |
		    FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_SA, 1) |
		    FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_DA, 1) |
		    FIELD_PREP(RX_MPDU_DESC_INFO0_RAW_MPDU, 1) |
		    FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_PN, 1);

	reo_ent_ring->rx_mpdu_info.info0 = mpdu_info;
	reo_ent_ring->rx_mpdu_info.meta_data = reo_dest_ring->rx_mpdu_info.meta_data;
	reo_ent_ring->queue_addr_lo = reo_dest_ring->queue_addr_lo;
	reo_ent_ring->info0 = FIELD_PREP(HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI,
					 FIELD_GET(HAL_REO_DEST_RING_INFO0_QUEUE_ADDR_HI,
						   reo_dest_ring->info0)) |
			      FIELD_PREP(HAL_REO_ENTR_RING_INFO0_DEST_IND, dst_idx);
	ath11k_hal_srng_access_end(ab, srng);
	spin_unlock_bh(&srng->lock);

	return 0;

err_free_idr:
	spin_lock_bh(&rx_refill_ring->idr_lock);
	idr_remove(&rx_refill_ring->bufs_idr, buf_id);
	spin_unlock_bh(&rx_refill_ring->idr_lock);
err_unmap_dma:
	dma_unmap_single(ab->dev, paddr, defrag_skb->len + skb_tailroom(defrag_skb),
			 DMA_TO_DEVICE);
	return ret;
}

static int ath11k_dp_rx_h_cmp_frags(struct sk_buff *a, struct sk_buff *b)
{
	int frag1, frag2;

	frag1 = ath11k_dp_rx_h_mpdu_start_frag_no(a);
	frag2 = ath11k_dp_rx_h_mpdu_start_frag_no(b);

	return frag1 - frag2;
}

static void ath11k_dp_rx_h_sort_frags(struct sk_buff_head *frag_list,
				      struct sk_buff *cur_frag)
{
	struct sk_buff *skb;
	int cmp;

	skb_queue_walk(frag_list, skb) {
		cmp = ath11k_dp_rx_h_cmp_frags(skb, cur_frag);
		if (cmp < 0)
			continue;

		__skb_queue_before(frag_list, skb, cur_frag);
		return;
	}

	__skb_queue_tail(frag_list, cur_frag);
}

static u64 ath11k_dp_rx_h_get_pn(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	u64 pn = 0;
	u8 *ehdr;

	hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE);
	ehdr = skb->data + HAL_RX_DESC_SIZE + ieee80211_hdrlen(hdr->frame_control);

	pn = ehdr[0];
	pn |= (u64)ehdr[1] << 8;
	pn |= (u64)ehdr[4] << 16;
	pn |= (u64)ehdr[5] << 24;
	pn |= (u64)ehdr[6] << 32;
	pn |= (u64)ehdr[7] << 40;

	return pn;
}

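/* For CCMP/GCMP ciphers the fragments of one MPDU must carry strictly
 * consecutive packet numbers; any gap indicates reordering or a possible
 * replay, and the caller then discards the whole fragment sequence.
 */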
static bool
ath11k_dp_rx_h_defrag_validate_incr_pn(struct ath11k *ar, struct dp_rx_tid *rx_tid)
{
	enum hal_encrypt_type encrypt_type;
	struct sk_buff *first_frag, *skb;
	struct hal_rx_desc *desc;
	u64 last_pn;
	u64 cur_pn;

	first_frag = skb_peek(&rx_tid->rx_frags);
	desc = (struct hal_rx_desc *)first_frag->data;

	encrypt_type = ath11k_dp_rx_h_mpdu_start_enctype(desc);
	if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 &&
	    encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 &&
	    encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 &&
	    encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256)
		return true;

	last_pn = ath11k_dp_rx_h_get_pn(first_frag);
	skb_queue_walk(&rx_tid->rx_frags, skb) {
		if (skb == first_frag)
			continue;

		cur_pn = ath11k_dp_rx_h_get_pn(skb);
		if (cur_pn != last_pn + 1)
			return false;
		last_pn = cur_pn;
	}
	return true;
}

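/* Core of the rx defragmentation state machine: validate the fragment,
 * insert it in fragment-number order into the per-TID queue, track
 * arrival via rx_frag_bitmap, and either arm the reassembly timeout or,
 * once all fragments are present, verify PN/MIC, reassemble and reinject.
 */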
static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
				    struct sk_buff *msdu,
				    u32 *ring_desc)
{
	struct ath11k_base *ab = ar->ab;
	struct hal_rx_desc *rx_desc;
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;
	struct sk_buff *defrag_skb = NULL;
	u32 peer_id;
	u16 seqno, frag_no;
	u8 tid;
	int ret = 0;
	bool more_frags;

	rx_desc = (struct hal_rx_desc *)msdu->data;
	peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(rx_desc);
	tid = ath11k_dp_rx_h_mpdu_start_tid(rx_desc);
	seqno = ath11k_dp_rx_h_mpdu_start_seq_no(rx_desc);
	frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(msdu);
	more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(msdu);

	if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(rx_desc) ||
	    !ath11k_dp_rx_h_mpdu_start_fc_valid(rx_desc) ||
	    tid > IEEE80211_NUM_TIDS)
		return -EINVAL;

	/* received unfragmented packet in reo
	 * exception ring, this shouldn't happen
	 * as these packets typically come from
	 * reo2sw srngs.
	 */
	if (WARN_ON_ONCE(!frag_no && !more_frags))
		return -EINVAL;

	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find_by_id(ab, peer_id);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n",
			    peer_id);
		ret = -ENOENT;
		goto out_unlock;
	}
	rx_tid = &peer->rx_tid[tid];

	if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
	    skb_queue_empty(&rx_tid->rx_frags)) {
		/* Flush stored fragments and start a new sequence */
		ath11k_dp_rx_frags_cleanup(rx_tid, true);
		rx_tid->cur_sn = seqno;
	}

	if (rx_tid->rx_frag_bitmap & BIT(frag_no)) {
		/* Fragment already present */
		ret = -EINVAL;
		goto out_unlock;
	}

	if (frag_no > __fls(rx_tid->rx_frag_bitmap))
		__skb_queue_tail(&rx_tid->rx_frags, msdu);
	else
		ath11k_dp_rx_h_sort_frags(&rx_tid->rx_frags, msdu);

	rx_tid->rx_frag_bitmap |= BIT(frag_no);
	if (!more_frags)
		rx_tid->last_frag_no = frag_no;

	if (frag_no == 0) {
		rx_tid->dst_ring_desc = kmemdup(ring_desc,
						sizeof(*rx_tid->dst_ring_desc),
						GFP_ATOMIC);
		if (!rx_tid->dst_ring_desc) {
			ret = -ENOMEM;
			goto out_unlock;
		}
	} else {
		ath11k_dp_rx_link_desc_return(ab, ring_desc,
					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
	}

	if (!rx_tid->last_frag_no ||
	    rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) {
		mod_timer(&rx_tid->frag_timer, jiffies +
					       ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS);
		goto out_unlock;
	}

	spin_unlock_bh(&ab->base_lock);
	del_timer_sync(&rx_tid->frag_timer);
	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find_by_id(ab, peer_id);
	if (!peer)
		goto err_frags_cleanup;

	if (!ath11k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid))
		goto err_frags_cleanup;

	if (ath11k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb))
		goto err_frags_cleanup;

	if (!defrag_skb)
		goto err_frags_cleanup;

	if (ath11k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb))
		goto err_frags_cleanup;

	ath11k_dp_rx_frags_cleanup(rx_tid, false);
	goto out_unlock;

err_frags_cleanup:
	dev_kfree_skb_any(defrag_skb);
	ath11k_dp_rx_frags_cleanup(rx_tid, true);
out_unlock:
	spin_unlock_bh(&ab->base_lock);
	return ret;
}

static int
ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool drop)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
	struct sk_buff *msdu;
	struct ath11k_skb_rxcb *rxcb;
	struct hal_rx_desc *rx_desc;
	u8 *hdr_status;
	u16 msdu_len;

	spin_lock_bh(&rx_ring->idr_lock);
	msdu = idr_find(&rx_ring->bufs_idr, buf_id);
	if (!msdu) {
		ath11k_warn(ar->ab, "rx err buf with invalid buf_id %d\n",
			    buf_id);
		spin_unlock_bh(&rx_ring->idr_lock);
		return -EINVAL;
	}

	idr_remove(&rx_ring->bufs_idr, buf_id);
	spin_unlock_bh(&rx_ring->idr_lock);

	rxcb = ATH11K_SKB_RXCB(msdu);
	dma_unmap_single(ar->ab->dev, rxcb->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);

	if (drop) {
		dev_kfree_skb_any(msdu);
		return 0;
	}

	rcu_read_lock();
	if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {
		dev_kfree_skb_any(msdu);
		goto exit;
	}

	if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
		dev_kfree_skb_any(msdu);
		goto exit;
	}

	rx_desc = (struct hal_rx_desc *)msdu->data;
	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc);
	if ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) {
		hdr_status = ath11k_dp_rx_h_80211_hdr(rx_desc);
		ath11k_warn(ar->ab, "invalid msdu len %u", msdu_len);
		ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
				sizeof(struct ieee80211_hdr));
		ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
				sizeof(struct hal_rx_desc));
		dev_kfree_skb_any(msdu);
		goto exit;
	}

	skb_put(msdu, HAL_RX_DESC_SIZE + msdu_len);

	if (ath11k_dp_rx_frag_h_mpdu(ar, msdu, ring_desc)) {
		dev_kfree_skb_any(msdu);
		ath11k_dp_rx_link_desc_return(ar->ab, ring_desc,
					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
	}
exit:
	rcu_read_unlock();
	return 0;
}

int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
			     int budget)
{
	u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
	struct dp_link_desc_bank *link_desc_banks;
	enum hal_rx_buf_return_buf_manager rbm;
	int tot_n_bufs_reaped, quota, ret, i;
	int n_bufs_reaped[MAX_RADIOS] = {0};
	struct dp_rxdma_ring *rx_ring;
	struct dp_srng *reo_except;
	u32 desc_bank, num_msdus;
	struct hal_srng *srng;
	struct ath11k_dp *dp;
	void *link_desc_va;
	int buf_id, mac_id;
	struct ath11k *ar;
	dma_addr_t paddr;
	u32 *desc;
	bool is_frag;
	bool drop = false;

	tot_n_bufs_reaped = 0;
	quota = budget;

	dp = &ab->dp;
	reo_except = &dp->reo_except_ring;
	link_desc_banks = dp->link_desc_banks;

	srng = &ab->hal.srng_list[reo_except->ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	while (budget &&
	       (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
		struct hal_reo_dest_ring *reo_desc = (struct hal_reo_dest_ring *)desc;

		ab->soc_stats.err_ring_pkts++;
		ret = ath11k_hal_desc_reo_parse_err(ab, desc, &paddr,
						    &desc_bank);
		if (ret) {
			ath11k_warn(ab, "failed to parse error reo desc %d\n",
				    ret);
			continue;
		}
		link_desc_va = link_desc_banks[desc_bank].vaddr +
			       (paddr - link_desc_banks[desc_bank].paddr);
		ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
						 &rbm);
		if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
		    rbm != HAL_RX_BUF_RBM_SW3_BM) {
			ab->soc_stats.invalid_rbm++;
			ath11k_warn(ab, "invalid return buffer manager %d\n", rbm);
			ath11k_dp_rx_link_desc_return(ab, desc,
						      HAL_WBM_REL_BM_ACT_REL_MSDU);
			continue;
		}

		is_frag = !!(reo_desc->rx_mpdu_info.info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG);

		/* Process only rx fragments with one msdu per link desc below, and drop
		 * msdu's indicated due to error reasons.
		 */
		if (!is_frag || num_msdus > 1) {
			drop = true;
			/* Return the link desc back to wbm idle list */
			ath11k_dp_rx_link_desc_return(ab, desc,
						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
		}

		for (i = 0; i < num_msdus; i++) {
			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
					   msdu_cookies[i]);
			mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID,
					   msdu_cookies[i]);

			ar = ab->pdevs[mac_id].ar;

			if (!ath11k_dp_process_rx_err_buf(ar, desc, buf_id, drop)) {
				n_bufs_reaped[mac_id]++;
				tot_n_bufs_reaped++;
			}
		}

		if (tot_n_bufs_reaped >= quota) {
			tot_n_bufs_reaped = quota;
			goto exit;
		}

		budget = quota - tot_n_bufs_reaped;
	}

exit:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	for (i = 0; i < ab->num_radios; i++) {
		if (!n_bufs_reaped[i])
			continue;

		ar = ab->pdevs[i].ar;
		rx_ring = &ar->dp.rx_refill_buf_ring;

		ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i],
					   HAL_RX_BUF_RBM_SW3_BM);
	}

	return tot_n_bufs_reaped;
}

static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar,
					     int msdu_len,
					     struct sk_buff_head *msdu_list)
{
	struct sk_buff *skb, *tmp;
	struct ath11k_skb_rxcb *rxcb;
	int n_buffs;

	n_buffs = DIV_ROUND_UP(msdu_len,
			       (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE));

	skb_queue_walk_safe(msdu_list, skb, tmp) {
		rxcb = ATH11K_SKB_RXCB(skb);
		if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
		    rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
			if (!n_buffs)
				break;
			__skb_unlink(skb, msdu_list);
			dev_kfree_skb_any(skb);
			n_buffs--;
		}
	}
}

static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu,
				      struct ieee80211_rx_status *status,
				      struct sk_buff_head *msdu_list)
{
	u16 msdu_len;
	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
	u8 l3pad_bytes;
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);

	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc);

	if (!rxcb->is_frag && ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE)) {
		/* First buffer will be freed by the caller, so deduct its length */
		msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE);
		ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
		return -EINVAL;
	}

	if (!ath11k_dp_rx_h_attn_msdu_done(desc)) {
		ath11k_warn(ar->ab,
			    "msdu_done bit not set in null_q_des processing\n");
		__skb_queue_purge(msdu_list);
		return -EIO;
	}

	/* Handle NULL queue descriptor violations arising out of a missing
	 * REO queue for a given peer or a given TID. This typically
	 * may happen if a packet is received on a QOS enabled TID before the
	 * ADDBA negotiation for that TID, while the TID queue is set up. Or
	 * it may also happen for MC/BC frames if they are not routed to the
	 * non-QOS TID queue, in the absence of any other default TID queue.
	 * This error can show up both in a REO destination or WBM release ring.
	 */

	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc);
	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc);

	if (rxcb->is_frag) {
		skb_pull(msdu, HAL_RX_DESC_SIZE);
	} else {
		l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc);

		if ((HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
			return -EINVAL;

		skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len);
		skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes);
	}
	ath11k_dp_rx_h_ppdu(ar, desc, status);

	ath11k_dp_rx_h_mpdu(ar, msdu, desc, status);

	rxcb->tid = ath11k_dp_rx_h_mpdu_start_tid(desc);

	/* Please note that the caller will have access to msdu and complete
	 * rx with mac80211. Need not worry about cleaning up amsdu_list.
	 */

	return 0;
}

static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu,
				   struct ieee80211_rx_status *status,
				   struct sk_buff_head *msdu_list)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	bool drop = false;

	ar->ab->soc_stats.reo_error[rxcb->err_code]++;

	switch (rxcb->err_code) {
	case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
		if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
			drop = true;
		break;
	case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
		/* TODO: Do not drop PN failed packets in the driver;
		 * instead, it is good to drop such packets in mac80211
		 * after incrementing the replay counters.
		 */
		fallthrough;
	default:
		/* TODO: Review other errors and process them to mac80211
		 * as appropriate.
		 */
		drop = true;
		break;
	}

	return drop;
}

static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu,
					struct ieee80211_rx_status *status)
{
	u16 msdu_len;
	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
	u8 l3pad_bytes;
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);

	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc);
	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc);

	l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc);
	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc);
	skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len);
	skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes);

	ath11k_dp_rx_h_ppdu(ar, desc, status);

	status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
			 RX_FLAG_DECRYPTED);

	ath11k_dp_rx_h_undecap(ar, msdu, desc,
			       HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
}

static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar, struct sk_buff *msdu,
				     struct ieee80211_rx_status *status)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	bool drop = false;

	ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;

	switch (rxcb->err_code) {
	case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
		ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status);
		break;
	default:
		/* TODO: Review other rxdma error code to check if anything is
		 * worth reporting to mac80211
		 */
		drop = true;
		break;
	}

	return drop;
}

static void ath11k_dp_rx_wbm_err(struct ath11k *ar,
				 struct napi_struct *napi,
				 struct sk_buff *msdu,
				 struct sk_buff_head *msdu_list)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	struct ieee80211_rx_status rxs = {0};
	struct ieee80211_rx_status *status;
	bool drop = true;

	switch (rxcb->err_rel_src) {
	case HAL_WBM_REL_SRC_MODULE_REO:
		drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
		break;
	case HAL_WBM_REL_SRC_MODULE_RXDMA:
		drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
		break;
	default:
		/* msdu will get freed */
		break;
	}

	if (drop) {
		dev_kfree_skb_any(msdu);
		return;
	}

	status = IEEE80211_SKB_RXCB(msdu);
	*status = rxs;

	ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
}

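/* NAPI handler for the WBM release ring, which carries buffers the
 * hardware could not deliver normally. Reaped buffers are sorted per
 * pdev, the refill rings replenished, and each msdu run through the
 * REO/RXDMA error handlers before a possible delivery to mac80211.
 */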
int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
				 struct napi_struct *napi, int budget)
{
	struct ath11k *ar;
	struct ath11k_dp *dp = &ab->dp;
	struct dp_rxdma_ring *rx_ring;
	struct hal_rx_wbm_rel_info err_info;
	struct hal_srng *srng;
	struct sk_buff *msdu;
	struct sk_buff_head msdu_list[MAX_RADIOS];
	struct ath11k_skb_rxcb *rxcb;
	u32 *rx_desc;
	int buf_id, mac_id;
	int num_buffs_reaped[MAX_RADIOS] = {0};
	int total_num_buffs_reaped = 0;
	int ret, i;

	for (i = 0; i < ab->num_radios; i++)
		__skb_queue_head_init(&msdu_list[i]);

	srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	while (budget) {
		rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
		if (!rx_desc)
			break;

		ret = ath11k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
		if (ret) {
			ath11k_warn(ab,
				    "failed to parse rx error in wbm_rel ring desc %d\n",
				    ret);
			continue;
		}

		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie);
		mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie);

		ar = ab->pdevs[mac_id].ar;
		rx_ring = &ar->dp.rx_refill_buf_ring;

		spin_lock_bh(&rx_ring->idr_lock);
		msdu = idr_find(&rx_ring->bufs_idr, buf_id);
		if (!msdu) {
			ath11k_warn(ab, "frame rx with invalid buf_id %d pdev %d\n",
				    buf_id, mac_id);
			spin_unlock_bh(&rx_ring->idr_lock);
			continue;
		}

		idr_remove(&rx_ring->bufs_idr, buf_id);
		spin_unlock_bh(&rx_ring->idr_lock);

		rxcb = ATH11K_SKB_RXCB(msdu);
		dma_unmap_single(ab->dev, rxcb->paddr,
				 msdu->len + skb_tailroom(msdu),
				 DMA_FROM_DEVICE);

		num_buffs_reaped[mac_id]++;
		total_num_buffs_reaped++;
		budget--;

		if (err_info.push_reason !=
		    HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
			dev_kfree_skb_any(msdu);
			continue;
		}

		rxcb->err_rel_src = err_info.err_rel_src;
		rxcb->err_code = err_info.err_code;
		rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
		__skb_queue_tail(&msdu_list[mac_id], msdu);
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	if (!total_num_buffs_reaped)
		goto done;

	for (i = 0; i < ab->num_radios; i++) {
		if (!num_buffs_reaped[i])
			continue;

		ar = ab->pdevs[i].ar;
		rx_ring = &ar->dp.rx_refill_buf_ring;

		ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
					   HAL_RX_BUF_RBM_SW3_BM);
	}

	rcu_read_lock();
	for (i = 0; i < ab->num_radios; i++) {
		if (!rcu_dereference(ab->pdevs_active[i])) {
			__skb_queue_purge(&msdu_list[i]);
			continue;
		}

		ar = ab->pdevs[i].ar;

		if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
			__skb_queue_purge(&msdu_list[i]);
			continue;
		}

		while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL)
			ath11k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]);
	}
	rcu_read_unlock();
done:
	return total_num_buffs_reaped;
}

int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget)
{
	struct ath11k *ar;
	struct dp_srng *err_ring;
	struct dp_rxdma_ring *rx_ring;
	struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks;
	struct hal_srng *srng;
	u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
	enum hal_rx_buf_return_buf_manager rbm;
	enum hal_reo_entr_rxdma_ecode rxdma_err_code;
	struct ath11k_skb_rxcb *rxcb;
	struct sk_buff *skb;
	struct hal_reo_entrance_ring *entr_ring;
	void *desc;
	int num_buf_freed = 0;
	int quota = budget;
	dma_addr_t paddr;
	u32 desc_bank;
	void *link_desc_va;
	int num_msdus;
	int i;
	int buf_id;

	ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
	err_ring = &ar->dp.rxdma_err_dst_ring[ath11k_hw_mac_id_to_srng_id(&ab->hw_params,
									  mac_id)];
	rx_ring = &ar->dp.rx_refill_buf_ring;

	srng = &ab->hal.srng_list[err_ring->ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	while (quota-- &&
	       (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
		ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank);

		entr_ring = (struct hal_reo_entrance_ring *)desc;
		rxdma_err_code =
			FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
				  entr_ring->info1);
		ab->soc_stats.rxdma_error[rxdma_err_code]++;

		link_desc_va = link_desc_banks[desc_bank].vaddr +
			       (paddr - link_desc_banks[desc_bank].paddr);
		ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus,
						 msdu_cookies, &rbm);

		for (i = 0; i < num_msdus; i++) {
			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
					   msdu_cookies[i]);

			spin_lock_bh(&rx_ring->idr_lock);
			skb = idr_find(&rx_ring->bufs_idr, buf_id);
			if (!skb) {
				ath11k_warn(ab, "rxdma error with invalid buf_id %d\n",
					    buf_id);
				spin_unlock_bh(&rx_ring->idr_lock);
				continue;
			}

			idr_remove(&rx_ring->bufs_idr, buf_id);
			spin_unlock_bh(&rx_ring->idr_lock);

			rxcb = ATH11K_SKB_RXCB(skb);
			dma_unmap_single(ab->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);

			num_buf_freed++;
		}

		ath11k_dp_rx_link_desc_return(ab, desc,
					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	if (num_buf_freed)
		ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed,
					   HAL_RX_BUF_RBM_SW3_BM);

	return budget - quota;
}

void ath11k_dp_process_reo_status(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng;
	struct dp_reo_cmd *cmd, *tmp;
	bool found = false;
	u32 *reo_desc;
	u16 tag;
	struct hal_reo_status reo_status;

	srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];

	memset(&reo_status, 0, sizeof(reo_status));

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	while ((reo_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
		tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc);

		switch (tag) {
		case HAL_REO_GET_QUEUE_STATS_STATUS:
			ath11k_hal_reo_status_queue_stats(ab, reo_desc,
							  &reo_status);
			break;
		case HAL_REO_FLUSH_QUEUE_STATUS:
			ath11k_hal_reo_flush_queue_status(ab, reo_desc,
							  &reo_status);
			break;
		case HAL_REO_FLUSH_CACHE_STATUS:
			ath11k_hal_reo_flush_cache_status(ab, reo_desc,
							  &reo_status);
			break;
		case HAL_REO_UNBLOCK_CACHE_STATUS:
			ath11k_hal_reo_unblk_cache_status(ab, reo_desc,
							  &reo_status);
			break;
		case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
			ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc,
								 &reo_status);
			break;
		case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
			ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc,
								  &reo_status);
			break;
		case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
			ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc,
								  &reo_status);
			break;
		default:
			ath11k_warn(ab, "Unknown reo status type %d\n", tag);
			continue;
		}

		spin_lock_bh(&dp->reo_cmd_lock);
		list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
			if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
				found = true;
				list_del(&cmd->list);
				break;
			}
		}
		spin_unlock_bh(&dp->reo_cmd_lock);

		if (found) {
			cmd->handler(dp, (void *)&cmd->data,
				     reo_status.uniform_hdr.cmd_status);
			kfree(cmd);
		}

		found = false;
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);
}

void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;

	ath11k_dp_rx_pdev_srng_free(ar);
	ath11k_dp_rxdma_pdev_buf_free(ar);
}

int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;
	struct ath11k_pdev_dp *dp = &ar->dp;
	u32 ring_id;
	int i;
	int ret;

	ret = ath11k_dp_rx_pdev_srng_alloc(ar);
	if (ret) {
		ath11k_warn(ab, "failed to setup rx srngs\n");
		return ret;
	}

	ret = ath11k_dp_rxdma_pdev_buf_setup(ar);
	if (ret) {
		ath11k_warn(ab, "failed to setup rxdma ring\n");
		return ret;
	}

	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
	ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_BUF);
	if (ret) {
		ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
			    ret);
		return ret;
	}

	if (ab->hw_params.rx_mac_buf_ring) {
		for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
			ring_id = dp->rx_mac_buf_ring[i].ring_id;
			ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
							  mac_id + i, HAL_RXDMA_BUF);
			if (ret) {
				ath11k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n",
					    i, ret);
				return ret;
			}
		}
	}

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		ring_id = dp->rxdma_err_dst_ring[i].ring_id;
		ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
						  mac_id + i, HAL_RXDMA_DST);
		if (ret) {
			ath11k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n",
				    i, ret);
			return ret;
		}
	}

	if (!ab->hw_params.rxdma1_enable)
		goto config_refill_ring;

	ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
	ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
					  mac_id, HAL_RXDMA_MONITOR_BUF);
	if (ret) {
		ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
			    ret);
		return ret;
	}

	ret = ath11k_dp_tx_htt_srng_setup(ab,
					  dp->rxdma_mon_dst_ring.ring_id,
					  mac_id, HAL_RXDMA_MONITOR_DST);
	if (ret) {
		ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
			    ret);
		return ret;
	}

	ret = ath11k_dp_tx_htt_srng_setup(ab,
					  dp->rxdma_mon_desc_ring.ring_id,
					  mac_id, HAL_RXDMA_MONITOR_DESC);
	if (ret) {
		ath11k_warn(ab, "failed to configure rxdma_mon_desc_ring %d\n",
			    ret);
		return ret;
	}

config_refill_ring:
	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
		ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i,
						  HAL_RXDMA_MONITOR_STATUS);
		if (ret) {
			ath11k_warn(ab,
				    "failed to configure mon_status_refill_ring%d %d\n",
				    i, ret);
			return ret;
		}
	}

	return 0;
}

static void ath11k_dp_mon_set_frag_len(u32 *total_len, u32 *frag_len)
{
	if (*total_len >= (DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc))) {
		*frag_len = DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc);
		*total_len -= *frag_len;
	} else {
		*frag_len = *total_len;
		*total_len = 0;
	}
}

static int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar,
						 void *p_last_buf_addr_info,
						 int mac_id)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct dp_srng *dp_srng;
	void *hal_srng;
	void *src_srng_desc;
	int ret = 0;

	if (ar->ab->hw_params.rxdma1_enable) {
		dp_srng = &dp->rxdma_mon_desc_ring;
		hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
	} else {
		dp_srng = &ar->ab->dp.wbm_desc_rel_ring;
		hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
	}

	ath11k_hal_srng_access_begin(ar->ab, hal_srng);

	src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng);

	if (src_srng_desc) {
		struct ath11k_buffer_addr *src_desc =
				(struct ath11k_buffer_addr *)src_srng_desc;

		*src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info);
	} else {
		ret = -EINVAL;
		ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
			   "Monitor Link Desc Ring %d Full", mac_id);
	}

	ath11k_hal_srng_access_end(ar->ab, hal_srng);
	return ret;
}

static void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc,
						dma_addr_t *paddr, u32 *sw_cookie,
						u8 *rbm,
						void **pp_buf_addr_info)
{
	struct hal_rx_msdu_link *msdu_link =
			(struct hal_rx_msdu_link *)rx_msdu_link_desc;
	struct ath11k_buffer_addr *buf_addr_info;

	buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info;

	ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, rbm);

	*pp_buf_addr_info = (void *)buf_addr_info;
}

static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len)
{
	if (skb->len > len) {
		skb_trim(skb, len);
	} else {
		if (skb_tailroom(skb) < len - skb->len) {
			if ((pskb_expand_head(skb, 0,
					      len - skb->len - skb_tailroom(skb),
					      GFP_ATOMIC))) {
				dev_kfree_skb_any(skb);
				return -ENOMEM;
			}
		}
		skb_put(skb, (len - skb->len));
	}
	return 0;
}

static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar,
					void *msdu_link_desc,
					struct hal_rx_msdu_list *msdu_list,
					u16 *num_msdus)
{
	struct hal_rx_msdu_details *msdu_details = NULL;
	struct rx_msdu_desc *msdu_desc_info = NULL;
	struct hal_rx_msdu_link *msdu_link = NULL;
	int i;
	u32 last = FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1);
	u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1);
	u8 tmp = 0;

	msdu_link = (struct hal_rx_msdu_link *)msdu_link_desc;
	msdu_details = &msdu_link->msdu_link[0];

	for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) {
		if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
			      msdu_details[i].buf_addr_info.info0) == 0) {
			msdu_desc_info = &msdu_details[i - 1].rx_msdu_info;
			msdu_desc_info->info0 |= last;
			break;
		}
		msdu_desc_info = &msdu_details[i].rx_msdu_info;

		if (!i)
			msdu_desc_info->info0 |= first;
		else if (i == (HAL_RX_NUM_MSDU_DESC - 1))
			msdu_desc_info->info0 |= last;
		msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0;
		msdu_list->msdu_info[i].msdu_len =
			HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0);
		msdu_list->sw_cookie[i] =
			FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
				  msdu_details[i].buf_addr_info.info1);
		tmp = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
				msdu_details[i].buf_addr_info.info1);
		msdu_list->rbm[i] = tmp;
	}

	*num_msdus = i;
}

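/* Reconcile the PPDU id seen on the destination ring with the one the
 * status ring is processing, tolerating wrap-around within the
 * DP_NOT_PPDU_ID_WRAP_AROUND window. A non-zero return tells the caller
 * to resynchronize instead of linking the current MSDU.
 */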
static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id,
					u32 *rx_bufs_used)
{
	u32 ret = 0;

	if ((*ppdu_id < msdu_ppdu_id) &&
	    ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) {
		*ppdu_id = msdu_ppdu_id;
		ret = msdu_ppdu_id;
	} else if ((*ppdu_id > msdu_ppdu_id) &&
		   ((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) {
		/* mon_dst is behind mon_status, so skip this dst_ring
		 * entry and free it
		 */
		*rx_bufs_used += 1;
		*ppdu_id = msdu_ppdu_id;
		ret = msdu_ppdu_id;
	}

	return ret;
}

static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info,
				      bool *is_frag, u32 *total_len,
				      u32 *frag_len, u32 *msdu_cnt)
{
	if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) {
		if (!*is_frag) {
			*total_len = info->msdu_len;
			*is_frag = true;
		}
		ath11k_dp_mon_set_frag_len(total_len,
					   frag_len);
	} else {
		if (*is_frag) {
			ath11k_dp_mon_set_frag_len(total_len,
						   frag_len);
		} else {
			*frag_len = info->msdu_len;
		}
		*is_frag = false;
		*msdu_cnt -= 1;
	}
}

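/* Pop one MPDU off the monitor destination ring: walk its chain of link
 * descriptors, unmap the backing MSDU skbs and link them into a
 * head/tail list, returning each consumed link descriptor to the
 * hardware. The return value is the number of rx buffers used, which the
 * caller must replenish.
 */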
static u32
ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id,
			  void *ring_entry, struct sk_buff **head_msdu,
			  struct sk_buff **tail_msdu, u32 *npackets,
			  u32 *ppdu_id)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
	struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
	struct sk_buff *msdu = NULL, *last = NULL;
	struct hal_rx_msdu_list msdu_list;
	void *p_buf_addr_info, *p_last_buf_addr_info;
	struct hal_rx_desc *rx_desc;
	void *rx_msdu_link_desc;
	dma_addr_t paddr;
	u16 num_msdus = 0;
	u32 rx_buf_size, rx_pkt_offset, sw_cookie;
	u32 rx_bufs_used = 0, i = 0;
	u32 msdu_ppdu_id = 0, msdu_cnt = 0;
	u32 total_len = 0, frag_len = 0;
	bool is_frag, is_first_msdu;
	bool drop_mpdu = false;
	struct ath11k_skb_rxcb *rxcb;
	struct hal_reo_entrance_ring *ent_desc =
			(struct hal_reo_entrance_ring *)ring_entry;
	int buf_id;
	u32 rx_link_buf_info[2];
	u8 rbm;

	if (!ar->ab->hw_params.rxdma1_enable)
		rx_ring = &dp->rx_refill_buf_ring;

	ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr,
					    &sw_cookie,
					    &p_last_buf_addr_info, &rbm,
					    &msdu_cnt);

	if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON,
		      ent_desc->info1) ==
	    HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
		u8 rxdma_err =
			FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
				  ent_desc->info1);
		if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
		    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
		    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
			drop_mpdu = true;
			pmon->rx_mon_stats.dest_mpdu_drop++;
		}
	}

	is_frag = false;
	is_first_msdu = true;

	do {
		if (pmon->mon_last_linkdesc_paddr == paddr) {
			pmon->rx_mon_stats.dup_mon_linkdesc_cnt++;
			return rx_bufs_used;
		}

		if (ar->ab->hw_params.rxdma1_enable)
			rx_msdu_link_desc =
				(void *)pmon->link_desc_banks[sw_cookie].vaddr +
				(paddr - pmon->link_desc_banks[sw_cookie].paddr);
		else
			rx_msdu_link_desc =
				(void *)ar->ab->dp.link_desc_banks[sw_cookie].vaddr +
				(paddr - ar->ab->dp.link_desc_banks[sw_cookie].paddr);

		ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
					    &num_msdus);

		for (i = 0; i < num_msdus; i++) {
			u32 l2_hdr_offset;

			if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) {
				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
					   "i %d last_cookie %d is same\n",
					   i, pmon->mon_last_buf_cookie);
				drop_mpdu = true;
				pmon->rx_mon_stats.dup_mon_buf_cnt++;
				continue;
			}
			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
					   msdu_list.sw_cookie[i]);

			spin_lock_bh(&rx_ring->idr_lock);
			msdu = idr_find(&rx_ring->bufs_idr, buf_id);
			spin_unlock_bh(&rx_ring->idr_lock);
			if (!msdu) {
				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
					   "msdu_pop: invalid buf_id %d\n", buf_id);
				break;
			}
			rxcb = ATH11K_SKB_RXCB(msdu);
			if (!rxcb->unmapped) {
				dma_unmap_single(ar->ab->dev, rxcb->paddr,
						 msdu->len +
						 skb_tailroom(msdu),
						 DMA_FROM_DEVICE);
				rxcb->unmapped = 1;
			}
			if (drop_mpdu) {
				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
					   "i %d drop msdu %p *ppdu_id %x\n",
					   i, msdu, *ppdu_id);
				dev_kfree_skb_any(msdu);
				msdu = NULL;
				goto next_msdu;
			}

			rx_desc = (struct hal_rx_desc *)msdu->data;

			rx_pkt_offset = sizeof(struct hal_rx_desc);
			l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(rx_desc);

			if (is_first_msdu) {
				if (!ath11k_dp_rxdesc_mpdu_valid(rx_desc)) {
					drop_mpdu = true;
					dev_kfree_skb_any(msdu);
					msdu = NULL;
					pmon->mon_last_linkdesc_paddr = paddr;
					goto next_msdu;
				}

				msdu_ppdu_id =
					ath11k_dp_rxdesc_get_ppduid(rx_desc);

				if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id,
								 ppdu_id,
								 &rx_bufs_used)) {
					if (rx_bufs_used) {
						drop_mpdu = true;
						dev_kfree_skb_any(msdu);
						msdu = NULL;
						goto next_msdu;
					}
					return rx_bufs_used;
				}
				pmon->mon_last_linkdesc_paddr = paddr;
				is_first_msdu = false;
			}
			ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
						  &is_frag, &total_len,
						  &frag_len, &msdu_cnt);
			rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;

			ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size);

			if (!(*head_msdu))
				*head_msdu = msdu;
			else if (last)
				last->next = msdu;

			last = msdu;
next_msdu:
			pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i];
			rx_bufs_used++;
			spin_lock_bh(&rx_ring->idr_lock);
			idr_remove(&rx_ring->bufs_idr, buf_id);
			spin_unlock_bh(&rx_ring->idr_lock);
		}

		ath11k_hal_rx_buf_addr_info_set(rx_link_buf_info, paddr, sw_cookie, rbm);

		ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr,
						    &sw_cookie, &rbm,
						    &p_buf_addr_info);

		if (ar->ab->hw_params.rxdma1_enable) {
			if (ath11k_dp_rx_monitor_link_desc_return(ar,
								  p_last_buf_addr_info,
								  dp->mac_id))
				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
					   "dp_rx_monitor_link_desc_return failed");
		} else {
			ath11k_dp_rx_link_desc_return(ar->ab, rx_link_buf_info,
						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
		}

		p_last_buf_addr_info = p_buf_addr_info;

	} while (paddr && msdu_cnt);

	if (last)
		last->next = NULL;

	*tail_msdu = msdu;

	if (msdu_cnt == 0)
		*npackets = 1;

	return rx_bufs_used;
}

static void ath11k_dp_rx_msdus_set_payload(struct sk_buff *msdu)
{
	u32 rx_pkt_offset, l2_hdr_offset;

	rx_pkt_offset = sizeof(struct hal_rx_desc);
	l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad((struct hal_rx_desc *)msdu->data);
	skb_pull(msdu, rx_pkt_offset + l2_hdr_offset);
}

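/* Stitch the MSDUs of one monitor MPDU into a single delivery chain. Raw
 * decap only needs the payload offsets applied; native-wifi decap also
 * restores the 802.11 header (and QoS control field, when present) in
 * front of each MSDU payload.
 */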
static struct sk_buff *
ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
			    u32 mac_id, struct sk_buff *head_msdu,
			    struct sk_buff *last_msdu,
			    struct ieee80211_rx_status *rxs)
{
	struct sk_buff *msdu, *mpdu_buf, *prev_buf;
	u32 decap_format, wifi_hdr_len;
	struct hal_rx_desc *rx_desc;
	char *hdr_desc;
	u8 *dest;
	struct ieee80211_hdr_3addr *wh;

	mpdu_buf = NULL;

	if (!head_msdu)
		goto err_merge_fail;

	rx_desc = (struct hal_rx_desc *)head_msdu->data;

	if (ath11k_dp_rxdesc_get_mpdulen_err(rx_desc))
		return NULL;

	decap_format = ath11k_dp_rxdesc_get_decap_format(rx_desc);

	ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);

	if (decap_format == DP_RX_DECAP_TYPE_RAW) {
		ath11k_dp_rx_msdus_set_payload(head_msdu);

		prev_buf = head_msdu;
		msdu = head_msdu->next;

		while (msdu) {
			ath11k_dp_rx_msdus_set_payload(msdu);

			prev_buf = msdu;
			msdu = msdu->next;
		}

		prev_buf->next = NULL;

		skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
	} else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) {
		__le16 qos_field;
		u8 qos_pkt = 0;

		rx_desc = (struct hal_rx_desc *)head_msdu->data;
		hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc);

		/* Base size */
		wifi_hdr_len = sizeof(struct ieee80211_hdr_3addr);
		wh = (struct ieee80211_hdr_3addr *)hdr_desc;

		if (ieee80211_is_data_qos(wh->frame_control)) {
			struct ieee80211_qos_hdr *qwh =
					(struct ieee80211_qos_hdr *)hdr_desc;

			qos_field = qwh->qos_ctrl;
			qos_pkt = 1;
		}
		msdu = head_msdu;

		while (msdu) {
			rx_desc = (struct hal_rx_desc *)msdu->data;
			hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc);

			if (qos_pkt) {
				dest = skb_push(msdu, sizeof(__le16));
				if (!dest)
					goto err_merge_fail;
				memcpy(dest, hdr_desc, wifi_hdr_len);
				memcpy(dest + wifi_hdr_len,
				       (u8 *)&qos_field, sizeof(__le16));
			}
			ath11k_dp_rx_msdus_set_payload(msdu);
			prev_buf = msdu;
			msdu = msdu->next;
		}
		dest = skb_put(prev_buf, HAL_RX_FCS_LEN);
		if (!dest)
			goto err_merge_fail;

		ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
			   "mpdu_buf %pK mpdu_buf->len %u",
			   prev_buf, prev_buf->len);
	} else {
		ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
			   "decap format %d is not supported!\n",
			   decap_format);
		goto err_merge_fail;
	}

	return head_msdu;

err_merge_fail:
	if (mpdu_buf && decap_format != DP_RX_DECAP_TYPE_RAW) {
		ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
			   "err_merge_fail mpdu_buf %pK", mpdu_buf);
		/* Free the head buffer */
		dev_kfree_skb_any(mpdu_buf);
	}
	return NULL;
}

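/* Deliver a merged monitor MPDU to mac80211: follow-on skbs are flagged
 * with RX_FLAG_AMSDU_MORE / RX_FLAG_ALLOW_SAME_PN and every skb is marked
 * RX_FLAG_ONLY_MONITOR before being handed up.
 */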
static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
				    struct sk_buff *head_msdu,
				    struct sk_buff *tail_msdu,
				    struct napi_struct *napi)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct sk_buff *mon_skb, *skb_next, *header;
	struct ieee80211_rx_status *rxs = &dp->rx_status, *status;

	mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu,
					      tail_msdu, rxs);

	if (!mon_skb)
		goto mon_deliver_fail;

	header = mon_skb;

	rxs->flag = 0;

	do {
		skb_next = mon_skb->next;
		if (!skb_next)
			rxs->flag &= ~RX_FLAG_AMSDU_MORE;
		else
			rxs->flag |= RX_FLAG_AMSDU_MORE;

		if (mon_skb == header) {
			header = NULL;
			rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN;
		} else {
			rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
		}
		rxs->flag |= RX_FLAG_ONLY_MONITOR;

		status = IEEE80211_SKB_RXCB(mon_skb);
		*status = *rxs;

		ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb);
		mon_skb = skb_next;
	} while (mon_skb);
	rxs->flag = 0;

	return 0;

mon_deliver_fail:
	mon_skb = head_msdu;
	while (mon_skb) {
		skb_next = mon_skb->next;
		dev_kfree_skb_any(mon_skb);
		mon_skb = skb_next;
	}
	return -EINVAL;
}

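/* Reap the monitor destination ring (or the rxdma error destination ring
 * when rxdma1 is absent) for the PPDU tracked by the status ring, deliver
 * completed MPDUs and replenish the rx buffers that were consumed.
 */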
static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
					  u32 quota, struct napi_struct *napi)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
	void *ring_entry;
	void *mon_dst_srng;
	u32 ppdu_id;
	u32 rx_bufs_used;
	u32 ring_id;
	struct ath11k_pdev_mon_stats *rx_mon_stats;
	u32 npackets = 0;

	if (ar->ab->hw_params.rxdma1_enable)
		ring_id = dp->rxdma_mon_dst_ring.ring_id;
	else
		ring_id = dp->rxdma_err_dst_ring[mac_id].ring_id;

	mon_dst_srng = &ar->ab->hal.srng_list[ring_id];

	if (!mon_dst_srng) {
		ath11k_warn(ar->ab,
			    "HAL Monitor Destination Ring Init Failed -- %pK",
			    mon_dst_srng);
		return;
	}

	spin_lock_bh(&pmon->mon_lock);

	ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);

	ppdu_id = pmon->mon_ppdu_info.ppdu_id;
	rx_bufs_used = 0;
	rx_mon_stats = &pmon->rx_mon_stats;

	while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
		struct sk_buff *head_msdu, *tail_msdu;

		head_msdu = NULL;
		tail_msdu = NULL;

		rx_bufs_used += ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry,
							  &head_msdu,
							  &tail_msdu,
							  &npackets, &ppdu_id);

		if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
			pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
			ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
				   "dest_rx: new ppdu_id %x != status ppdu_id %x",
				   ppdu_id, pmon->mon_ppdu_info.ppdu_id);
			break;
		}
		if (head_msdu && tail_msdu) {
			ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu,
						 tail_msdu, napi);
			rx_mon_stats->dest_mpdu_done++;
		}

		ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
								mon_dst_srng);
	}
	ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);

	spin_unlock_bh(&pmon->mon_lock);

	if (rx_bufs_used) {
		rx_mon_stats->dest_ppdu_done++;
		if (ar->ab->hw_params.rxdma1_enable)
			ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
						   &dp->rxdma_mon_buf_ring,
						   rx_bufs_used,
						   HAL_RX_BUF_RBM_SW3_BM);
		else
			ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
						   &dp->rx_refill_buf_ring,
						   rx_bufs_used,
						   HAL_RX_BUF_RBM_SW3_BM);
	}
}

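/* Drain the queued status-ring buffers TLV by TLV; each time a full PPDU
 * has been parsed, process the matching destination-ring entries and
 * re-arm for the next PPDU.
 */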
static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar,
						int mac_id, u32 quota,
						struct napi_struct *napi)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
	struct hal_rx_mon_ppdu_info *ppdu_info;
	struct sk_buff *status_skb;
	u32 tlv_status = HAL_TLV_STATUS_BUF_DONE;
	struct ath11k_pdev_mon_stats *rx_mon_stats;

	ppdu_info = &pmon->mon_ppdu_info;
	rx_mon_stats = &pmon->rx_mon_stats;

	if (pmon->mon_ppdu_status != DP_PPDU_STATUS_START)
		return;

	while (!skb_queue_empty(&pmon->rx_status_q)) {
		status_skb = skb_dequeue(&pmon->rx_status_q);

		tlv_status = ath11k_hal_rx_parse_mon_status(ar->ab, ppdu_info,
							    status_skb);
		if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) {
			rx_mon_stats->status_ppdu_done++;
			pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
			ath11k_dp_rx_mon_dest_process(ar, mac_id, quota, napi);
			pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
		}
		dev_kfree_skb_any(status_skb);
	}
}

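/* NAPI processing when monitor mode is enabled: reap the status ring into
 * rx_status_q and, if anything arrived, run TLV parsing which in turn
 * drives destination-ring processing.
 */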
static int ath11k_dp_mon_process_rx(struct ath11k_base *ab, int mac_id,
				    struct napi_struct *napi, int budget)
{
	struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
	int num_buffs_reaped = 0;

	num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ar->ab, mac_id, &budget,
							     &pmon->rx_status_q);
	if (num_buffs_reaped)
		ath11k_dp_rx_mon_status_process_tlv(ar, mac_id, budget, napi);

	return num_buffs_reaped;
}

int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
				   struct napi_struct *napi, int budget)
{
	struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
	int ret = 0;

	if (test_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags))
		ret = ath11k_dp_mon_process_rx(ab, mac_id, napi, budget);
	else
		ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget);

	return ret;
}

static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;

	skb_queue_head_init(&pmon->rx_status_q);

	pmon->mon_ppdu_status = DP_PPDU_STATUS_START;

	memset(&pmon->rx_mon_stats, 0,
	       sizeof(pmon->rx_mon_stats));
	return 0;
}

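/* Per-pdev monitor attach: set up status-ring bookkeeping and, when
 * rxdma1 is available, size and allocate the link descriptor pool backing
 * rxdma_mon_desc_ring.
 */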
int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = &dp->mon_data;
	struct hal_srng *mon_desc_srng = NULL;
	struct dp_srng *dp_srng;
	int ret = 0;
	u32 n_link_desc = 0;

	ret = ath11k_dp_rx_pdev_mon_status_attach(ar);
	if (ret) {
		ath11k_warn(ar->ab, "pdev_mon_status_attach() failed");
		return ret;
	}

	/* if rxdma1_enable is false, no need to setup
	 * rxdma_mon_desc_ring.
	 */
	if (!ar->ab->hw_params.rxdma1_enable)
		return 0;

	dp_srng = &dp->rxdma_mon_desc_ring;
	n_link_desc = dp_srng->size /
		ath11k_hal_srng_get_entrysize(ar->ab, HAL_RXDMA_MONITOR_DESC);
	mon_desc_srng =
		&ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id];

	ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks,
					HAL_RXDMA_MONITOR_DESC, mon_desc_srng,
					n_link_desc);
	if (ret) {
		ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed");
		return ret;
	}
	pmon->mon_last_linkdesc_paddr = 0;
	pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
	spin_lock_init(&pmon->mon_lock);

	return 0;
}

static int ath11k_dp_mon_link_free(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = &dp->mon_data;

	ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks,
				    HAL_RXDMA_MONITOR_DESC,
				    &dp->rxdma_mon_desc_ring);
	return 0;
}

int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar)
{
	ath11k_dp_mon_link_free(ar);
	return 0;
}

int ath11k_dp_rx_pktlog_start(struct ath11k_base *ab)
{
	/* start reap timer */
	mod_timer(&ab->mon_reap_timer,
		  jiffies + msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));

	return 0;
}

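/* Stop pktlog: optionally cancel the reap timer, then drain whatever is
 * still pending on the monitor rings.
 */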
int ath11k_dp_rx_pktlog_stop(struct ath11k_base *ab, bool stop_timer)
{
	int ret;

	if (stop_timer)
		del_timer_sync(&ab->mon_reap_timer);

	/* reap all the monitor related rings */
	ret = ath11k_dp_purge_mon_ring(ab);
	if (ret) {
		ath11k_warn(ab, "failed to purge dp mon ring: %d\n", ret);
		return ret;
	}

	return 0;
}