// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 */

#include <linux/ieee80211.h>

static u8 *ath11k_dp_rx_h_80211_hdr(struct hal_rx_desc *desc)
{
	return desc->hdr_status;
}

static enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct hal_rx_desc *desc)
{
	if (!(__le32_to_cpu(desc->mpdu_start.info1) &
	    RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID))
		return HAL_ENCRYPT_TYPE_OPEN;

	return FIELD_GET(RX_MPDU_START_INFO2_ENC_TYPE,
			 __le32_to_cpu(desc->mpdu_start.info2));
}

static u8 ath11k_dp_rx_h_mpdu_start_decap_type(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MPDU_START_INFO5_DECAP_TYPE,
			 __le32_to_cpu(desc->mpdu_start.info5));
}

static bool ath11k_dp_rx_h_attn_msdu_done(struct hal_rx_desc *desc)
{
	return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE,
			   __le32_to_cpu(desc->attention.info2));
}

static bool ath11k_dp_rx_h_attn_first_mpdu(struct hal_rx_desc *desc)
{
	return !!FIELD_GET(RX_ATTENTION_INFO1_FIRST_MPDU,
			   __le32_to_cpu(desc->attention.info1));
}

static bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct hal_rx_desc *desc)
{
	return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL,
			   __le32_to_cpu(desc->attention.info1));
}

static bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct hal_rx_desc *desc)
{
	return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL,
			   __le32_to_cpu(desc->attention.info1));
}

static bool ath11k_dp_rx_h_attn_is_decrypted(struct hal_rx_desc *desc)
{
	return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE,
			  __le32_to_cpu(desc->attention.info2)) ==
		RX_DESC_DECRYPT_STATUS_CODE_OK);
}

static u32 ath11k_dp_rx_h_attn_mpdu_err(struct hal_rx_desc *desc)
{
	u32 info = __le32_to_cpu(desc->attention.info1);
	u32 errmap = 0;

	if (info & RX_ATTENTION_INFO1_FCS_ERR)
		errmap |= DP_RX_MPDU_ERR_FCS;

	if (info & RX_ATTENTION_INFO1_DECRYPT_ERR)
		errmap |= DP_RX_MPDU_ERR_DECRYPT;

	if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR)
		errmap |= DP_RX_MPDU_ERR_TKIP_MIC;

	if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR)
		errmap |= DP_RX_MPDU_ERR_AMSDU_ERR;

	if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR)
		errmap |= DP_RX_MPDU_ERR_OVERFLOW;

	if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR)
		errmap |= DP_RX_MPDU_ERR_MSDU_LEN;

	if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR)
		errmap |= DP_RX_MPDU_ERR_MPDU_LEN;

	return errmap;
}

static u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH,
			 __le32_to_cpu(desc->msdu_start.info1));
}

static u8 ath11k_dp_rx_h_msdu_start_sgi(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MSDU_START_INFO3_SGI,
			 __le32_to_cpu(desc->msdu_start.info3));
}

static u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS,
			 __le32_to_cpu(desc->msdu_start.info3));
}

static u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW,
			 __le32_to_cpu(desc->msdu_start.info3));
}

static u32 ath11k_dp_rx_h_msdu_start_freq(struct hal_rx_desc *desc)
{
	return __le32_to_cpu(desc->msdu_start.phy_meta_data);
}

static u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE,
			 __le32_to_cpu(desc->msdu_start.info3));
}

static u8 ath11k_dp_rx_h_msdu_start_nss(struct hal_rx_desc *desc)
{
	u8 mimo_ss_bitmap = FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP,
				      __le32_to_cpu(desc->msdu_start.info3));

	return hweight8(mimo_ss_bitmap);
}

static u8 ath11k_dp_rx_h_msdu_end_l3pad(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING,
			 __le32_to_cpu(desc->msdu_end.info2));
}

static bool ath11k_dp_rx_h_msdu_end_first_msdu(struct hal_rx_desc *desc)
{
	return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU,
			   __le32_to_cpu(desc->msdu_end.info2));
}

static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct hal_rx_desc *desc)
{
	return !!FIELD_GET(RX_MSDU_END_INFO2_LAST_MSDU,
			   __le32_to_cpu(desc->msdu_end.info2));
}

static void ath11k_dp_rx_desc_end_tlv_copy(struct hal_rx_desc *fdesc,
					   struct hal_rx_desc *ldesc)
{
	memcpy((u8 *)&fdesc->msdu_end, (u8 *)&ldesc->msdu_end,
	       sizeof(struct rx_msdu_end));
	memcpy((u8 *)&fdesc->attention, (u8 *)&ldesc->attention,
	       sizeof(struct rx_attention));
	memcpy((u8 *)&fdesc->mpdu_end, (u8 *)&ldesc->mpdu_end,
	       sizeof(struct rx_mpdu_end));
}

static u32 ath11k_dp_rxdesc_get_mpdulen_err(struct hal_rx_desc *rx_desc)
{
	struct rx_attention *rx_attn;

	rx_attn = &rx_desc->attention;

	return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR,
			 __le32_to_cpu(rx_attn->info1));
}

static u32 ath11k_dp_rxdesc_get_decap_format(struct hal_rx_desc *rx_desc)
{
	struct rx_msdu_start *rx_msdu_start;

	rx_msdu_start = &rx_desc->msdu_start;

	return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
			 __le32_to_cpu(rx_msdu_start->info2));
}

static u8 *ath11k_dp_rxdesc_get_80211hdr(struct hal_rx_desc *rx_desc)
{
	u8 *rx_pkt_hdr;

	rx_pkt_hdr = &rx_desc->msdu_payload[0];

	return rx_pkt_hdr;
}

static bool ath11k_dp_rxdesc_mpdu_valid(struct hal_rx_desc *rx_desc)
{
	u32 tlv_tag;

	tlv_tag = FIELD_GET(HAL_TLV_HDR_TAG,
			    __le32_to_cpu(rx_desc->mpdu_start_tag));

	return tlv_tag == HAL_RX_MPDU_START ? true : false;
}

static u32 ath11k_dp_rxdesc_get_ppduid(struct hal_rx_desc *rx_desc)
{
	return __le16_to_cpu(rx_desc->mpdu_start.phy_ppdu_id);
}

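/* Buffer replenish for the RXDMA refill ring: every skb handed to the ring
 * is DMA-mapped and tracked in the ring's IDR, and the IDR slot id plus the
 * pdev id are packed into the descriptor cookie so the reap path can map a
 * completed descriptor back to its skb.
 */
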
/* Returns number of Rx buffers replenished */
int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
			       struct dp_rxdma_ring *rx_ring,
			       int req_entries,
			       enum hal_rx_buf_return_buf_manager mgr,
			       gfp_t gfp)
{
	struct hal_srng *srng;
	u32 *desc;
	struct sk_buff *skb;
	int num_free;
	int num_remain;
	int buf_id;
	u32 cookie;
	dma_addr_t paddr;

	req_entries = min(req_entries, rx_ring->bufs_max);

	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
	if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
		req_entries = num_free;

	req_entries = min(num_free, req_entries);
	num_remain = req_entries;

	while (num_remain > 0) {
		skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
				    DP_RX_BUFFER_ALIGN_SIZE);
		if (!skb)
			break;

		if (!IS_ALIGNED((unsigned long)skb->data,
				DP_RX_BUFFER_ALIGN_SIZE)) {
			skb_pull(skb,
				 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
				 skb->data);
		}

		paddr = dma_map_single(ab->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(ab->dev, paddr))
			goto fail_free_skb;

		spin_lock_bh(&rx_ring->idr_lock);
		buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
				   rx_ring->bufs_max * 3, gfp);
		spin_unlock_bh(&rx_ring->idr_lock);
		if (buf_id < 0)
			goto fail_dma_unmap;

		desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
		if (!desc)
			goto fail_idr_remove;

		ATH11K_SKB_RXCB(skb)->paddr = paddr;

		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

		num_remain--;

		ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;

fail_idr_remove:
	spin_lock_bh(&rx_ring->idr_lock);
	idr_remove(&rx_ring->bufs_idr, buf_id);
	spin_unlock_bh(&rx_ring->idr_lock);
fail_dma_unmap:
	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
			 DMA_FROM_DEVICE);
fail_free_skb:
	dev_kfree_skb_any(skb);

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;
}

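/* Teardown helpers: walk the IDR of outstanding rx buffers, unmap and free
 * each skb, then destroy the IDR itself.
 */
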
static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar,
					 struct dp_rxdma_ring *rx_ring)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct sk_buff *skb;
	int buf_id;

	spin_lock_bh(&rx_ring->idr_lock);
	idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
		idr_remove(&rx_ring->bufs_idr, buf_id);
		/* TODO: Understand where internal driver does this dma_unmap
		 * of rxdma_buffer.
		 */
		dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}

	idr_destroy(&rx_ring->bufs_idr);
	spin_unlock_bh(&rx_ring->idr_lock);

	rx_ring = &dp->rx_mon_status_refill_ring;

	spin_lock_bh(&rx_ring->idr_lock);
	idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
		idr_remove(&rx_ring->bufs_idr, buf_id);
		/* XXX: Understand where internal driver does this dma_unmap
		 * of rxdma_buffer.
		 */
		dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb), DMA_BIDIRECTIONAL);
		dev_kfree_skb_any(skb);
	}

	idr_destroy(&rx_ring->bufs_idr);
	spin_unlock_bh(&rx_ring->idr_lock);

	return 0;
}

static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;

	ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);

	rx_ring = &dp->rxdma_mon_buf_ring;
	ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);

	rx_ring = &dp->rx_mon_status_refill_ring;
	ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);

	return 0;
}

static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar,
					  struct dp_rxdma_ring *rx_ring,
					  u32 ringtype)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	int num_entries;

	num_entries = rx_ring->refill_buf_ring.size /
		      ath11k_hal_srng_get_entrysize(ringtype);

	rx_ring->bufs_max = num_entries;
	ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries,
				   HAL_RX_BUF_RBM_SW3_BM, GFP_KERNEL);
	return 0;
}

static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;

	ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF);

	rx_ring = &dp->rxdma_mon_buf_ring;
	ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF);

	rx_ring = &dp->rx_mon_status_refill_ring;
	ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS);

	return 0;
}

static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;

	ath11k_dp_srng_cleanup(ar->ab, &dp->rx_refill_buf_ring.refill_buf_ring);
	ath11k_dp_srng_cleanup(ar->ab, &dp->rxdma_err_dst_ring);
	ath11k_dp_srng_cleanup(ar->ab, &dp->rx_mon_status_refill_ring.refill_buf_ring);
	ath11k_dp_srng_cleanup(ar->ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
}

void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab)
{
	struct ath11k_pdev_dp *dp;
	struct ath11k *ar;
	int i;

	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		dp = &ar->dp;
		ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring);
	}
}

int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab)
{
	struct ath11k *ar;
	struct ath11k_pdev_dp *dp;
	int ret;
	int i;

	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		dp = &ar->dp;

		ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring, HAL_REO_DST,
					   dp->mac_id, dp->mac_id,
					   DP_REO_DST_RING_SIZE);
		if (ret) {
			ath11k_warn(ar->ab, "failed to setup reo_dst_ring\n");
			goto err_reo_cleanup;
		}
	}

	return 0;

err_reo_cleanup:
	ath11k_dp_pdev_reo_cleanup(ab);

	return ret;
}

static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct dp_srng *srng = NULL;
	int ret;

	ret = ath11k_dp_srng_setup(ar->ab,
				   &dp->rx_refill_buf_ring.refill_buf_ring,
				   HAL_RXDMA_BUF, 0,
				   dp->mac_id, DP_RXDMA_BUF_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring,
				   HAL_RXDMA_DST, 0, dp->mac_id,
				   DP_RXDMA_ERR_DST_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring\n");
		return ret;
	}

	srng = &dp->rx_mon_status_refill_ring.refill_buf_ring;
	ret = ath11k_dp_srng_setup(ar->ab, srng,
				   HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id,
				   DP_RXDMA_MON_STATUS_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup rx_mon_status_refill_ring\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab,
				   &dp->rxdma_mon_buf_ring.refill_buf_ring,
				   HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_BUF_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_BUF\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring,
				   HAL_RXDMA_MONITOR_DST, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_DST_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_DST\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring,
				   HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_DESC_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_DESC\n");
		return ret;
	}

	return 0;
}

void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct dp_reo_cmd *cmd, *tmp;
	struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache;

	spin_lock_bh(&dp->reo_cmd_lock);
	list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
		list_del(&cmd->list);
		dma_unmap_single(ab->dev, cmd->data.paddr,
				 cmd->data.size, DMA_BIDIRECTIONAL);
		kfree(cmd->data.vaddr);
		kfree(cmd);
	}

	list_for_each_entry_safe(cmd_cache, tmp_cache,
				 &dp->reo_cmd_cache_flush_list, list) {
		list_del(&cmd_cache->list);
		dma_unmap_single(ab->dev, cmd_cache->data.paddr,
				 cmd_cache->data.size, DMA_BIDIRECTIONAL);
		kfree(cmd_cache->data.vaddr);
		kfree(cmd_cache);
	}
	spin_unlock_bh(&dp->reo_cmd_lock);
}

static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx,
				   enum hal_reo_cmd_status status)
{
	struct dp_rx_tid *rx_tid = ctx;

	if (status != HAL_REO_CMD_SUCCESS)
		ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
			    rx_tid->tid, status);

	dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);
}

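/* Flush a stale REO queue descriptor out of the HW cache. The descriptor is
 * flushed in per-TID-descriptor-sized chunks from the tail towards the head;
 * only the final FLUSH_CACHE command requests a status, and its completion
 * callback (ath11k_dp_reo_cmd_free) releases the host memory.
 */
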
static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab,
				      struct dp_rx_tid *rx_tid)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	unsigned long tot_desc_sz, desc_sz;
	int ret;

	tot_desc_sz = rx_tid->size;
	desc_sz = ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);

	while (tot_desc_sz > desc_sz) {
		tot_desc_sz -= desc_sz;
		cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
		ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
						HAL_REO_CMD_FLUSH_CACHE, &cmd,
						NULL);
		if (ret)
			ath11k_warn(ab,
				    "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
				    rx_tid->tid, ret);
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
	ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
					HAL_REO_CMD_FLUSH_CACHE,
					&cmd, ath11k_dp_reo_cmd_free);
	if (ret) {
		ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
			   rx_tid->tid, ret);
		dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
				 DMA_BIDIRECTIONAL);
		kfree(rx_tid->vaddr);
	}
}

static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
				      enum hal_reo_cmd_status status)
{
	struct ath11k_base *ab = dp->ab;
	struct dp_rx_tid *rx_tid = ctx;
	struct dp_reo_cache_flush_elem *elem, *tmp;

	if (status == HAL_REO_CMD_DRAIN) {
		goto free_desc;
	} else if (status != HAL_REO_CMD_SUCCESS) {
		/* Shouldn't happen! Cleanup in case of other failure? */
		ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
			    rx_tid->tid, status);
		return;
	}

	elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
	if (!elem)
		goto free_desc;

	elem->ts = jiffies;
	memcpy(&elem->data, rx_tid, sizeof(*rx_tid));

	spin_lock_bh(&dp->reo_cmd_lock);
	list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
	spin_unlock_bh(&dp->reo_cmd_lock);

	/* Flush and invalidate aged REO desc from HW cache */
	spin_lock_bh(&dp->reo_cmd_lock);
	list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
				 list) {
		if (time_after(jiffies, elem->ts +
			       msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) {
			list_del(&elem->list);
			spin_unlock_bh(&dp->reo_cmd_lock);

			ath11k_dp_reo_cache_flush(ab, &elem->data);
			kfree(elem);
			spin_lock_bh(&dp->reo_cmd_lock);
		}
	}
	spin_unlock_bh(&dp->reo_cmd_lock);

	return;
free_desc:
	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);
}

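/* Deleting an rx TID: clear the VLD bit of the REO queue via
 * HAL_REO_CMD_UPDATE_RX_QUEUE and let ath11k_dp_rx_tid_del_func defer the
 * qdesc free until the HW cache has been flushed (or free it immediately
 * when the command could not be sent).
 */
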
static void ath11k_peer_rx_tid_delete(struct ath11k *ar,
				      struct ath11k_peer *peer, u8 tid)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	int ret;

	if (!rx_tid->active)
		return;

	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
					ath11k_dp_rx_tid_del_func);
	if (ret) {
		ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
			   tid, ret);
		dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
				 DMA_BIDIRECTIONAL);
		kfree(rx_tid->vaddr);
	}

	rx_tid->active = false;
}

void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
{
	int i;

	for (i = 0; i <= IEEE80211_NUM_TIDS; i++)
		ath11k_peer_rx_tid_delete(ar, peer, i);
}

static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar,
					 struct ath11k_peer *peer,
					 struct dp_rx_tid *rx_tid,
					 u32 ba_win_sz, u16 ssn,
					 bool update_ssn)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	int ret;

	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
	cmd.ba_window_size = ba_win_sz;

	if (update_ssn) {
		cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
		cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn);
	}

	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
					NULL);
	if (ret) {
		ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
			    rx_tid->tid, ret);
		return ret;
	}

	rx_tid->ba_win_sz = ba_win_sz;

	return 0;
}

static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
				      const u8 *peer_mac, int vdev_id, u8 tid)
{
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to free up rx tid mem\n");
		goto unlock_exit;
	}

	rx_tid = &peer->rx_tid[tid];
	if (!rx_tid->active)
		goto unlock_exit;

	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);

	rx_tid->active = false;

unlock_exit:
	spin_unlock_bh(&ab->base_lock);
}

int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
			     u8 tid, u32 ba_win_sz, u16 ssn)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;
	u32 hw_desc_sz;
	u32 *addr_aligned;
	void *vaddr;
	dma_addr_t paddr;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to set up rx tid\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	rx_tid = &peer->rx_tid[tid];
	/* Update the tid queue if it is already setup */
	if (rx_tid->active) {
		paddr = rx_tid->paddr;
		ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid,
						    ba_win_sz, ssn, true);
		spin_unlock_bh(&ab->base_lock);
		if (ret) {
			ath11k_warn(ab, "failed to update reo for rx tid %d\n", tid);
			return ret;
		}

		ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
							     peer_mac, paddr,
							     tid, 1, ba_win_sz);
		if (ret)
			ath11k_warn(ab, "failed to send wmi command to update rx reorder queue, tid :%d (%d)\n",
				    tid, ret);
		return ret;
	}

	rx_tid->tid = tid;

	rx_tid->ba_win_sz = ba_win_sz;

	/* TODO: Optimize the memory allocation for qos tid based on the
	 * the actual BA window size in REO tid update path.
	 */
	if (tid == HAL_DESC_REO_NON_QOS_TID)
		hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid);
	else
		hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);

	vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_KERNEL);
	if (!vaddr) {
		spin_unlock_bh(&ab->base_lock);
		return -ENOMEM;
	}

	addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);

	ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz, ssn);

	paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
			       DMA_BIDIRECTIONAL);

	ret = dma_mapping_error(ab->dev, paddr);
	if (ret) {
		spin_unlock_bh(&ab->base_lock);
		goto err_mem_free;
	}

	rx_tid->vaddr = vaddr;
	rx_tid->paddr = paddr;
	rx_tid->size = hw_desc_sz;
	rx_tid->active = true;

	spin_unlock_bh(&ab->base_lock);

	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
						     paddr, tid, 1, ba_win_sz);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup rx reorder queue, tid :%d (%d)\n",
			    tid, ret);
		ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid);
	}

	return ret;

err_mem_free:
	kfree(vaddr);

	return ret;
}

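/* mac80211 AMPDU action handlers: rx aggregation start/stop map to setting
 * up or shrinking the per-peer, per-TID REO reorder queue.
 */
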
int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
			     struct ieee80211_ampdu_params *params)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
	int vdev_id = arsta->arvif->vdev_id;
	int ret;

	ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id,
				       params->tid, params->buf_size,
				       params->ssn);
	if (ret)
		ath11k_warn(ab, "failed to setup rx tid %d\n", ret);

	return ret;
}

int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
			    struct ieee80211_ampdu_params *params)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
	int vdev_id = arsta->arvif->vdev_id;
	dma_addr_t paddr;
	bool active;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, params->sta->addr);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	paddr = peer->rx_tid[params->tid].paddr;
	active = peer->rx_tid[params->tid].active;

	if (!active) {
		spin_unlock_bh(&ab->base_lock);
		return 0;
	}

	ret = ath11k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
	spin_unlock_bh(&ab->base_lock);
	if (ret) {
		ath11k_warn(ab, "failed to update reo for rx tid %d: %d\n",
			    params->tid, ret);
		return ret;
	}

	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
						     params->sta->addr, paddr,
						     params->tid, 1, 1);
	if (ret)
		ath11k_warn(ab, "failed to send wmi to delete rx tid %d\n",
			    ret);

	return ret;
}

static int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
				      u16 peer_id)
{
	int i;

	for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
		if (ppdu_stats->user_stats[i].is_valid_peer_id) {
			if (peer_id == ppdu_stats->user_stats[i].peer_id)
				return i;
		} else {
			return i;
		}
	}

	return -EINVAL;
}

static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab,
					   u16 tag, u16 len, const void *ptr,
					   void *data)
{
	struct htt_ppdu_stats_info *ppdu_info;
	struct htt_ppdu_user_stats *user_stats;
	int cur_user;
	u16 peer_id;

	ppdu_info = (struct htt_ppdu_stats_info *)data;

	switch (tag) {
	case HTT_PPDU_STATS_TAG_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_common)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}
		memcpy((void *)&ppdu_info->ppdu_stats.common, ptr,
		       sizeof(struct htt_ppdu_stats_common));
		break;
	case HTT_PPDU_STATS_TAG_USR_RATE:
		if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->rate, ptr,
		       sizeof(struct htt_ppdu_stats_user_rate));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->cmpltn_cmn, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
		if (len <
		    sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id =
		((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->ack_ba, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
		user_stats->tlv_flags |= BIT(tag);
		break;
	}

	return 0;
}

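/* Generic HTT TLV walker: each TLV carries tag and length in its header;
 * the supplied iterator callback is invoked once per TLV with its payload.
 */
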
int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
			   int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
				       const void *ptr, void *data),
			   void *data)
{
	const struct htt_tlv *tlv;
	const void *begin = ptr;
	u16 tlv_tag, tlv_len;
	int ret = -EINVAL;

	while (len > 0) {
		if (len < sizeof(*tlv)) {
			ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}
		tlv = (struct htt_tlv *)ptr;
		tlv_tag = FIELD_GET(HTT_TLV_TAG, tlv->header);
		tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		if (tlv_len > len) {
			ath11k_err(ab, "htt tlv parse failure of tag %hhu at byte %zd (%zu bytes left, %hhu expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}
		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
		if (ret)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}

	return 0;
}

static u32 ath11k_bw_to_mac80211_bwflags(u8 bw)
{
	u32 bwflags = 0;

	switch (bw) {
	case ATH11K_BW_40:
		bwflags = IEEE80211_TX_RC_40_MHZ_WIDTH;
		break;
	case ATH11K_BW_80:
		bwflags = IEEE80211_TX_RC_80_MHZ_WIDTH;
		break;
	case ATH11K_BW_160:
		bwflags = IEEE80211_TX_RC_160_MHZ_WIDTH;
		break;
	}

	return bwflags;
}

static void
ath11k_update_per_peer_tx_stats(struct ath11k *ar,
				struct htt_ppdu_stats *ppdu_stats, u8 user)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct ieee80211_sta *sta;
	struct ath11k_sta *arsta;
	struct htt_ppdu_stats_user_rate *user_rate;
	struct ieee80211_chanctx_conf *conf = NULL;
	struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
	struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
	struct htt_ppdu_stats_common *common = &ppdu_stats->common;
	int ret;
	u8 flags, mcs, nss, bw, sgi, rate_idx = 0;
	u32 succ_bytes = 0;
	u16 rate = 0, succ_pkts = 0;
	u32 tx_duration = 0;
	u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
	bool is_ampdu = false;

	if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
		return;

	if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
		is_ampdu =
			HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);

	if (usr_stats->tlv_flags &
	    BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
		succ_bytes = usr_stats->ack_ba.success_bytes;
		succ_pkts = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M,
				      usr_stats->ack_ba.info);
		tid = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM,
				usr_stats->ack_ba.info);
	}

	if (common->fes_duration_us)
		tx_duration = common->fes_duration_us;

	user_rate = &usr_stats->rate;
	flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
	bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
	nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
	mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
	sgi = HTT_USR_RATE_GI(user_rate->rate_flags);

	/* Note: If host configured fixed rates and in some other special
	 * cases, the broadcast/management frames are sent in different rates.
	 * Firmware rate's control to be skipped for this?
	 */

	if (flags == WMI_RATE_PREAMBLE_VHT && mcs > 9) {
		ath11k_warn(ab, "Invalid VHT mcs %hhd peer stats", mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_HT && (mcs > 7 || nss < 1)) {
		ath11k_warn(ab, "Invalid HT mcs %hhd nss %hhd peer stats",
			    mcs, nss);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
		ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs, flags,
							    &rate_idx, &rate);
		if (ret < 0)
			return;
	}

	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id);

	if (!peer || !peer->sta) {
		spin_unlock_bh(&ab->base_lock);
		return;
	}

	sta = peer->sta;
	arsta = (struct ath11k_sta *)sta->drv_priv;

	memset(&arsta->txrate, 0, sizeof(arsta->txrate));
	memset(&arsta->tx_info.status, 0, sizeof(arsta->tx_info.status));

	switch (flags) {
	case WMI_RATE_PREAMBLE_OFDM:
		arsta->txrate.legacy = rate;
		if (arsta->arvif && arsta->arvif->vif)
			conf = rcu_dereference(arsta->arvif->vif->chanctx_conf);
		if (conf && conf->def.chan->band == NL80211_BAND_5GHZ)
			arsta->tx_info.status.rates[0].idx = rate_idx - 4;
		break;
	case WMI_RATE_PREAMBLE_CCK:
		arsta->txrate.legacy = rate;
		arsta->tx_info.status.rates[0].idx = rate_idx;
		if (mcs > ATH11K_HW_RATE_CCK_LP_1M &&
		    mcs <= ATH11K_HW_RATE_CCK_SP_2M)
			arsta->tx_info.status.rates[0].flags |=
					IEEE80211_TX_RC_USE_SHORT_PREAMBLE;
		break;
	case WMI_RATE_PREAMBLE_HT:
		arsta->txrate.mcs = mcs + 8 * (nss - 1);
		arsta->tx_info.status.rates[0].idx = arsta->txrate.mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
		arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_MCS;
		if (sgi) {
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
			arsta->tx_info.status.rates[0].flags |=
					IEEE80211_TX_RC_SHORT_GI;
		}
		break;
	case WMI_RATE_PREAMBLE_VHT:
		arsta->txrate.mcs = mcs;
		ieee80211_rate_set_vht(&arsta->tx_info.status.rates[0], mcs, nss);
		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
		arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_VHT_MCS;
		if (sgi) {
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
			arsta->tx_info.status.rates[0].flags |=
					IEEE80211_TX_RC_SHORT_GI;
		}
		break;
	}

	arsta->txrate.nss = nss;
	arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
	arsta->tx_info.status.rates[0].flags |= ath11k_bw_to_mac80211_bwflags(bw);
	arsta->tx_duration += tx_duration;
	memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));

	if (succ_pkts) {
		arsta->tx_info.flags = IEEE80211_TX_STAT_ACK;
		arsta->tx_info.status.rates[0].count = 1;
		ieee80211_tx_rate_update(ar->hw, sta, &arsta->tx_info);
	}

	/* PPDU stats reported for mgmt packet doesn't have valid tx bytes.
	 * So skip peer stats update for mgmt packets.
	 */
	if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
		memset(peer_stats, 0, sizeof(*peer_stats));
		peer_stats->succ_pkts = succ_pkts;
		peer_stats->succ_bytes = succ_bytes;
		peer_stats->is_ampdu = is_ampdu;
		peer_stats->duration = tx_duration;
		peer_stats->ba_fails =
			HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
			HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);

		if (ath11k_debug_is_extd_tx_stats_enabled(ar))
			ath11k_accumulate_per_peer_tx_stats(arsta,
							    peer_stats, rate_idx);
	}

	spin_unlock_bh(&ab->base_lock);
}

static void ath11k_htt_update_ppdu_stats(struct ath11k *ar,
					 struct htt_ppdu_stats *ppdu_stats)
{
	u8 user;

	for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
		ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user);
}

static struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
							       u32 ppdu_id)
{
	struct htt_ppdu_stats_info *ppdu_info;

	spin_lock_bh(&ar->data_lock);
	if (!list_empty(&ar->ppdu_stats_info)) {
		list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
			if (ppdu_info->ppdu_id == ppdu_id) {
				spin_unlock_bh(&ar->data_lock);
				return ppdu_info;
			}
		}

		if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
			ppdu_info = list_first_entry(&ar->ppdu_stats_info,
						     typeof(*ppdu_info), list);
			list_del(&ppdu_info->list);
			ar->ppdu_stat_list_depth--;
			ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
			kfree(ppdu_info);
		}
	}
	spin_unlock_bh(&ar->data_lock);

	ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_KERNEL);
	if (!ppdu_info)
		return NULL;

	spin_lock_bh(&ar->data_lock);
	list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
	ar->ppdu_stat_list_depth++;
	spin_unlock_bh(&ar->data_lock);

	return ppdu_info;
}

static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab,
				      struct sk_buff *skb)
{
	struct ath11k_htt_ppdu_stats_msg *msg;
	struct htt_ppdu_stats_info *ppdu_info;
	struct ath11k *ar;
	int ret;
	u8 pdev_id;
	u32 ppdu_id, len;

	msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data;
	len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->info);
	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info);
	ppdu_id = msg->ppdu_id;

	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
	if (!ar)
		return -EINVAL;

	if (ath11k_debug_is_pktlog_lite_mode_enabled(ar))
		trace_ath11k_htt_ppdu_stats(ar, skb->data, len);

	ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id);
	if (!ppdu_info)
		return -EINVAL;

	ppdu_info->ppdu_id = ppdu_id;
	ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len,
				     ath11k_htt_tlv_ppdu_stats_parse,
				     (void *)ppdu_info);
	if (ret) {
		ath11k_warn(ab, "Failed to parse tlv %d\n", ret);
		return ret;
	}

	return 0;
}

static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data;
	struct ath11k *ar;
	u32 len;
	u8 pdev_id;

	len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, data->hdr);
	if (len > ATH11K_HTT_PKTLOG_MAX_SIZE) {
		ath11k_warn(ab, "htt pktlog buffer size %d, expected < %d\n",
			    len, ATH11K_HTT_PKTLOG_MAX_SIZE);
		return;
	}

	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr);
	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
	if (!ar) {
		ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id);
		return;
	}

	trace_ath11k_htt_pktlog(ar, data->payload, len);
}

void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
				       struct sk_buff *skb)
{
	struct ath11k_dp *dp = &ab->dp;
	struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
	enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)resp);
	u16 peer_id;
	u8 vdev_id;
	u8 mac_addr[ETH_ALEN];
	u16 peer_mac_h16;
	u16 ast_hash;

	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type);

	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF:
		dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR,
						  resp->version_msg.version);
		dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR,
						  resp->version_msg.version);
		complete(&dp->htt_tgt_version_received);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP:
		vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
				    resp->peer_map_ev.info);
		peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
				    resp->peer_map_ev.info);
		peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
					 resp->peer_map_ev.info1);
		ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
				       peer_mac_h16, mac_addr);
		ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL,
				     resp->peer_map_ev.info2);
		ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash);
		break;
	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
		peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID,
				    resp->peer_unmap_ev.info);
		ath11k_peer_unmap_event(ab, peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
		ath11k_htt_pull_ppdu_stats(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
		ath11k_dbg_htt_ext_stats_handler(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG:
		ath11k_htt_pktlog(ab, skb);
		break;
	default:
		ath11k_warn(ab, "htt event %d not handled\n", type);
		break;
	}

	dev_kfree_skb_any(skb);
}

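/* An MSDU longer than one rx buffer arrives as a chain of buffers with the
 * continuation bit set; coalesce the pieces into the first skb before the
 * frame is handed up.
 */
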
static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar,
				      struct sk_buff_head *msdu_list,
				      struct sk_buff *first, struct sk_buff *last,
				      u8 l3pad_bytes, int msdu_len)
{
	struct sk_buff *skb;
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
	int buf_first_hdr_len, buf_first_len;
	struct hal_rx_desc *ldesc;
	int space_extra;
	int rem_len;
	int buf_len;

	/* As the msdu is spread across multiple rx buffers,
	 * find the offset to the start of msdu for computing
	 * the length of the msdu in the first buffer.
	 */
	buf_first_hdr_len = HAL_RX_DESC_SIZE + l3pad_bytes;
	buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;

	if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
		skb_put(first, buf_first_hdr_len + msdu_len);
		skb_pull(first, buf_first_hdr_len);
		return 0;
	}

	ldesc = (struct hal_rx_desc *)last->data;
	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ldesc);
	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ldesc);

	/* MSDU spans over multiple buffers because the length of the MSDU
	 * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data
	 * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
	 */
	skb_put(first, DP_RX_BUFFER_SIZE);
	skb_pull(first, buf_first_hdr_len);

	/* When an MSDU spread over multiple buffers attention, MSDU_END and
	 * MPDU_END tlvs are valid only in the last buffer. Copy those tlvs.
	 */
	ath11k_dp_rx_desc_end_tlv_copy(rxcb->rx_desc, ldesc);

	space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
	if (space_extra > 0 &&
	    (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
		/* Free up all buffers of the MSDU */
		while ((skb = __skb_dequeue(msdu_list)) != NULL) {
			rxcb = ATH11K_SKB_RXCB(skb);
			if (!rxcb->is_continuation) {
				dev_kfree_skb_any(skb);
				break;
			}
			dev_kfree_skb_any(skb);
		}
		return -ENOMEM;
	}

	rem_len = msdu_len - buf_first_len;
	while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
		rxcb = ATH11K_SKB_RXCB(skb);
		if (rxcb->is_continuation)
			buf_len = DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE;
		else
			buf_len = rem_len;

		if (buf_len > (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE)) {
			WARN_ON_ONCE(1);
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		skb_put(skb, buf_len + HAL_RX_DESC_SIZE);
		skb_pull(skb, HAL_RX_DESC_SIZE);
		skb_copy_from_linear_data(skb, skb_put(first, buf_len),
					  buf_len);
		dev_kfree_skb_any(skb);

		rem_len -= buf_len;
		if (!rxcb->is_continuation)
			break;
	}

	return 0;
}

static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
						      struct sk_buff *first)
{
	struct sk_buff *skb;
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);

	if (!rxcb->is_continuation)
		return first;

	skb_queue_walk(msdu_list, skb) {
		rxcb = ATH11K_SKB_RXCB(skb);
		if (!rxcb->is_continuation)
			return skb;
	}

	return NULL;
}

static int ath11k_dp_rx_retrieve_amsdu(struct ath11k *ar,
				       struct sk_buff_head *msdu_list,
				       struct sk_buff_head *amsdu_list)
{
	struct sk_buff *msdu = skb_peek(msdu_list);
	struct sk_buff *last_buf;
	struct ath11k_skb_rxcb *rxcb;
	struct ieee80211_hdr *hdr;
	struct hal_rx_desc *rx_desc, *lrx_desc;
	u16 msdu_len;
	u8 *hdr_status;
	u8 l3_pad_bytes;
	int ret;

	if (!msdu)
		return -ENOENT;

	do {
		rx_desc = (struct hal_rx_desc *)msdu->data;
		hdr_status = ath11k_dp_rx_h_80211_hdr(rx_desc);
		hdr = (struct ieee80211_hdr *)hdr_status;
		/* Process only data frames */
		if (!ieee80211_is_data(hdr->frame_control)) {
			__skb_unlink(msdu, msdu_list);
			dev_kfree_skb_any(msdu);
			continue;
		}

		__skb_unlink(msdu, msdu_list);
		last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
		if (!last_buf) {
			ath11k_warn(ar->ab,
				    "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n");
			ret = -EIO;
			goto free_out;
		}

		rx_desc = (struct hal_rx_desc *)msdu->data;
		lrx_desc = (struct hal_rx_desc *)last_buf->data;

		if (!ath11k_dp_rx_h_attn_msdu_done(lrx_desc)) {
			ath11k_warn(ar->ab, "msdu_done bit in attention is not set\n");
			ret = -EIO;
			goto free_out;
		}

		rxcb = ATH11K_SKB_RXCB(msdu);
		rxcb->rx_desc = rx_desc;
		msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc);
		l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(lrx_desc);

		if (!rxcb->is_continuation) {
			skb_put(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes + msdu_len);
			skb_pull(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes);
		} else {
			ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list,
							 msdu, last_buf,
							 l3_pad_bytes, msdu_len);
			if (ret) {
				ath11k_warn(ar->ab,
					    "failed to coalesce msdu rx buffer%d\n", ret);
				goto free_out;
			}
		}
		__skb_queue_tail(amsdu_list, msdu);

		/* Should we also consider msdu_cnt from mpdu_meta while
		 * preparing amsdu list?
		 */
		if (rxcb->is_last_msdu)
			break;
	} while ((msdu = skb_peek(msdu_list)) != NULL);

	return 0;

free_out:
	dev_kfree_skb_any(msdu);
	__skb_queue_purge(amsdu_list);

	return ret;
}

static void ath11k_dp_rx_h_csum_offload(struct sk_buff *msdu)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	bool ip_csum_fail, l4_csum_fail;

	ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rxcb->rx_desc);
	l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rxcb->rx_desc);

	msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
			  CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
}

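/* Crypto length helpers: per-cipher MIC, IV/param and ICV sizes, used by the
 * undecap paths below when trimming trailers and rebuilding headers.
 */
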
static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar,
				       enum hal_encrypt_type enctype)
{
	switch (enctype) {
	case HAL_ENCRYPT_TYPE_OPEN:
	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
	case HAL_ENCRYPT_TYPE_TKIP_MIC:
		return 0;
	case HAL_ENCRYPT_TYPE_CCMP_128:
		return IEEE80211_CCMP_MIC_LEN;
	case HAL_ENCRYPT_TYPE_CCMP_256:
		return IEEE80211_CCMP_256_MIC_LEN;
	case HAL_ENCRYPT_TYPE_GCMP_128:
	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
		return IEEE80211_GCMP_MIC_LEN;
	case HAL_ENCRYPT_TYPE_WEP_40:
	case HAL_ENCRYPT_TYPE_WEP_104:
	case HAL_ENCRYPT_TYPE_WEP_128:
	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
	case HAL_ENCRYPT_TYPE_WAPI:
		break;
	}

	ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
	return 0;
}

static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar,
					 enum hal_encrypt_type enctype)
{
	switch (enctype) {
	case HAL_ENCRYPT_TYPE_OPEN:
		return 0;
	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
	case HAL_ENCRYPT_TYPE_TKIP_MIC:
		return IEEE80211_TKIP_IV_LEN;
	case HAL_ENCRYPT_TYPE_CCMP_128:
		return IEEE80211_CCMP_HDR_LEN;
	case HAL_ENCRYPT_TYPE_CCMP_256:
		return IEEE80211_CCMP_256_HDR_LEN;
	case HAL_ENCRYPT_TYPE_GCMP_128:
	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
		return IEEE80211_GCMP_HDR_LEN;
	case HAL_ENCRYPT_TYPE_WEP_40:
	case HAL_ENCRYPT_TYPE_WEP_104:
	case HAL_ENCRYPT_TYPE_WEP_128:
	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
	case HAL_ENCRYPT_TYPE_WAPI:
		break;
	}

	ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
	return 0;
}

static int ath11k_dp_rx_crypto_icv_len(struct ath11k *ar,
				       enum hal_encrypt_type enctype)
{
	switch (enctype) {
	case HAL_ENCRYPT_TYPE_OPEN:
	case HAL_ENCRYPT_TYPE_CCMP_128:
	case HAL_ENCRYPT_TYPE_CCMP_256:
	case HAL_ENCRYPT_TYPE_GCMP_128:
	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
		return 0;
	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
	case HAL_ENCRYPT_TYPE_TKIP_MIC:
		return IEEE80211_TKIP_ICV_LEN;
	case HAL_ENCRYPT_TYPE_WEP_40:
	case HAL_ENCRYPT_TYPE_WEP_104:
	case HAL_ENCRYPT_TYPE_WEP_128:
	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
	case HAL_ENCRYPT_TYPE_WAPI:
		break;
	}

	ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
	return 0;
}

static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar,
					 struct sk_buff *msdu,
					 u8 *first_hdr,
					 enum hal_encrypt_type enctype,
					 struct ieee80211_rx_status *status)
{
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];

	/* pull decapped header and copy SA & DA */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
	skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control));

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath11k_dp_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + hdr_len,
		       ath11k_dp_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu,
				       enum hal_encrypt_type enctype,
				       struct ieee80211_rx_status *status,
				       bool decrypted)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	size_t crypto_len;

	if (!rxcb->is_first_msdu ||
	    !(rxcb->is_first_msdu && rxcb->is_last_msdu)) {
		WARN_ON_ONCE(1);
		return;
	}

	skb_trim(msdu, msdu->len - FCS_LEN);

	if (!decrypted)
		return;

	hdr = (void *)msdu->data;

	/* Tail */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		skb_trim(msdu, msdu->len -
			 ath11k_dp_rx_crypto_mic_len(ar, enctype));

		skb_trim(msdu, msdu->len -
			 ath11k_dp_rx_crypto_icv_len(ar, enctype));
	} else {
		/* MIC */
		if (status->flag & RX_FLAG_MIC_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath11k_dp_rx_crypto_mic_len(ar, enctype));

		/* ICV */
		if (status->flag & RX_FLAG_ICV_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath11k_dp_rx_crypto_icv_len(ar, enctype));
	}

	/* MMIC */
	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
		skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);

	/* Head */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);

		memmove((void *)msdu->data + crypto_len,
			(void *)msdu->data, hdr_len);
		skb_pull(msdu, crypto_len);
	}
}

static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar,
					 struct sk_buff *msdu,
					 enum hal_encrypt_type enctype)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	struct ieee80211_hdr *hdr;
	size_t hdr_len, crypto_len;
	void *rfc1042;
	bool is_amsdu;

	is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu);
	hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(rxcb->rx_desc);
	rfc1042 = hdr;

	if (rxcb->is_first_msdu) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);

		rfc1042 += hdr_len + crypto_len;
	}

	if (is_amsdu)
		rfc1042 += sizeof(struct ath11k_dp_amsdu_subframe_hdr);

	return rfc1042;
}

static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar,
				       struct sk_buff *msdu,
				       u8 *first_hdr,
				       enum hal_encrypt_type enctype,
				       struct ieee80211_rx_status *status)
{
	struct ieee80211_hdr *hdr;
	struct ethhdr *eth;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	void *rfc1042;

	rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype);
	if (WARN_ON_ONCE(!rfc1042))
		return;

	/* pull decapped header and copy SA & DA */
	eth = (struct ethhdr *)msdu->data;
	ether_addr_copy(da, eth->h_dest);
	ether_addr_copy(sa, eth->h_source);
	skb_pull(msdu, sizeof(struct ethhdr));

	/* push rfc1042/llc/snap */
	memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042,
	       sizeof(struct ath11k_dp_rfc1042_hdr));

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath11k_dp_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + hdr_len,
		       ath11k_dp_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,
				   struct hal_rx_desc *rx_desc,
				   enum hal_encrypt_type enctype,
				   struct ieee80211_rx_status *status,
				   bool decrypted)
{
	u8 *first_hdr;
	u8 decap;

	first_hdr = ath11k_dp_rx_h_80211_hdr(rx_desc);
	decap = ath11k_dp_rx_h_mpdu_start_decap_type(rx_desc);

	switch (decap) {
	case DP_RX_DECAP_TYPE_NATIVE_WIFI:
		ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr,
					     enctype, status);
		break;
	case DP_RX_DECAP_TYPE_RAW:
		ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status,
					   decrypted);
		break;
	case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
		ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
					   enctype, status);
		break;
	case DP_RX_DECAP_TYPE_8023:
		/* TODO: Handle undecap for these formats */
		break;
	}
}

static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,
				struct sk_buff_head *amsdu_list,
				struct hal_rx_desc *rx_desc,
				struct ieee80211_rx_status *rx_status)
{
	struct ieee80211_hdr *hdr;
	enum hal_encrypt_type enctype;
	struct sk_buff *last_msdu;
	struct sk_buff *msdu;
	struct ath11k_skb_rxcb *last_rxcb;
	bool is_decrypted;
	u32 err_bitmap;
	u8 *qos;

	if (skb_queue_empty(amsdu_list))
		return;

	hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(rx_desc);

	/* Each A-MSDU subframe will use the original header as the base and be
	 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
	 */
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		qos = ieee80211_get_qos_ctl(hdr);
		qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
	}

	is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc);
	enctype = ath11k_dp_rx_h_mpdu_start_enctype(rx_desc);

	/* Some attention flags are valid only in the last MSDU. */
	last_msdu = skb_peek_tail(amsdu_list);
	last_rxcb = ATH11K_SKB_RXCB(last_msdu);

	err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(last_rxcb->rx_desc);

	/* Clear per-MPDU flags while leaving per-PPDU flags intact. */
	rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
			     RX_FLAG_MMIC_ERROR |
			     RX_FLAG_DECRYPTED |
			     RX_FLAG_IV_STRIPPED |
			     RX_FLAG_MMIC_STRIPPED);

	if (err_bitmap & DP_RX_MPDU_ERR_FCS)
		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC)
		rx_status->flag |= RX_FLAG_MMIC_ERROR;

	if (is_decrypted)
		rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED |
				   RX_FLAG_MIC_STRIPPED | RX_FLAG_ICV_STRIPPED;

	skb_queue_walk(amsdu_list, msdu) {
		ath11k_dp_rx_h_csum_offload(msdu);
		ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
				       enctype, rx_status, is_decrypted);
	}
}

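/* Translate the HW rate fields (pkt type, MCS, NSS, bandwidth, short GI)
 * from the rx descriptor into mac80211's ieee80211_rx_status.
 */
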
static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
				struct ieee80211_rx_status *rx_status)
{
	struct ieee80211_supported_band *sband;
	enum rx_msdu_start_pkt_type pkt_type;
	u8 bw;
	u8 rate_mcs, nss;
	u8 sgi;
	bool is_cck;

	pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(rx_desc);
	bw = ath11k_dp_rx_h_msdu_start_rx_bw(rx_desc);
	rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(rx_desc);
	nss = ath11k_dp_rx_h_msdu_start_nss(rx_desc);
	sgi = ath11k_dp_rx_h_msdu_start_sgi(rx_desc);

	switch (pkt_type) {
	case RX_MSDU_START_PKT_TYPE_11A:
	case RX_MSDU_START_PKT_TYPE_11B:
		is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
		sband = &ar->mac.sbands[rx_status->band];
		rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs,
								is_cck);
		break;
	case RX_MSDU_START_PKT_TYPE_11N:
		rx_status->encoding = RX_ENC_HT;
		if (rate_mcs > ATH11K_HT_MCS_MAX) {
			ath11k_warn(ar->ab,
				    "Received with invalid mcs in HT mode %d\n",
				    rate_mcs);
			break;
		}
		rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
		if (sgi)
			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
		break;
	case RX_MSDU_START_PKT_TYPE_11AC:
		rx_status->encoding = RX_ENC_VHT;
		rx_status->rate_idx = rate_mcs;
		if (rate_mcs > ATH11K_VHT_MCS_MAX) {
			ath11k_warn(ar->ab,
				    "Received with invalid mcs in VHT mode %d\n",
				    rate_mcs);
			break;
		}
		rx_status->nss = nss;
		if (sgi)
			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
		break;
	case RX_MSDU_START_PKT_TYPE_11AX:
		rx_status->rate_idx = rate_mcs;
		if (rate_mcs > ATH11K_HE_MCS_MAX) {
			ath11k_warn(ar->ab,
				    "Received with invalid mcs in HE mode %d\n",
				    rate_mcs);
			break;
		}
		rx_status->encoding = RX_ENC_HE;
		rx_status->nss = nss;
		rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
		break;
	}
}

static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
				struct ieee80211_rx_status *rx_status)
{
	u8 channel_num;

	rx_status->freq = 0;
	rx_status->rate_idx = 0;
	rx_status->nss = 0;
	rx_status->encoding = RX_ENC_LEGACY;
	rx_status->bw = RATE_INFO_BW_20;

	rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;

	channel_num = ath11k_dp_rx_h_msdu_start_freq(rx_desc);

	if (channel_num >= 1 && channel_num <= 14) {
		rx_status->band = NL80211_BAND_2GHZ;
	} else if (channel_num >= 36 && channel_num <= 173) {
		rx_status->band = NL80211_BAND_5GHZ;
	} else {
		ath11k_warn(ar->ab, "Unsupported Channel info received %d\n",
			    channel_num);
		return;
	}

	rx_status->freq = ieee80211_channel_to_frequency(channel_num,
							 rx_status->band);

	ath11k_dp_rx_h_rate(ar, rx_desc, rx_status);
}

static void ath11k_dp_rx_process_amsdu(struct ath11k *ar,
				       struct sk_buff_head *amsdu_list,
				       struct ieee80211_rx_status *rx_status)
{
	struct sk_buff *first;
	struct ath11k_skb_rxcb *rxcb;
	struct hal_rx_desc *rx_desc;
	bool first_mpdu;

	if (skb_queue_empty(amsdu_list))
		return;

	first = skb_peek(amsdu_list);
	rxcb = ATH11K_SKB_RXCB(first);
	rx_desc = rxcb->rx_desc;

	first_mpdu = ath11k_dp_rx_h_attn_first_mpdu(rx_desc);
	if (first_mpdu)
		ath11k_dp_rx_h_ppdu(ar, rx_desc, rx_status);

	ath11k_dp_rx_h_mpdu(ar, amsdu_list, rx_desc, rx_status);
}

static char *ath11k_print_get_tid(struct ieee80211_hdr *hdr, char *out,
				  size_t size)
{
	u8 *qc;
	int tid;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return "";

	qc = ieee80211_get_qos_ctl(hdr);
	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
	snprintf(out, size, "tid %d", tid);

	return out;
}

static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi,
				      struct sk_buff *msdu)
{
	static const struct ieee80211_radiotap_he known = {
		.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
				     IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
		.data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
	};
	struct ieee80211_rx_status *status;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ieee80211_radiotap_he *he = NULL;
	char tid[32];

	status = IEEE80211_SKB_RXCB(msdu);
	if (status->encoding == RX_ENC_HE) {
		he = skb_push(msdu, sizeof(known));
		memcpy(he, &known, sizeof(known));
		status->flag |= RX_FLAG_RADIOTAP_HE;
	}

	ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
		   "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
		   msdu,
		   msdu->len,
		   ieee80211_get_SA(hdr),
		   ath11k_print_get_tid(hdr, tid, sizeof(tid)),
		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
							"mcast" : "ucast",
		   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
		   (status->encoding == RX_ENC_HT) ? "ht" : "",
		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
		   (status->encoding == RX_ENC_HE) ? "he" : "",
		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));

	/* TODO: trace rx packet */

	ieee80211_rx_napi(ar->hw, NULL, msdu, napi);
}

static void ath11k_dp_rx_pre_deliver_amsdu(struct ath11k *ar,
					   struct sk_buff_head *amsdu_list,
					   struct ieee80211_rx_status *rxs)
{
	struct sk_buff *msdu;
	struct sk_buff *first_subframe;
	struct ieee80211_rx_status *status;

	first_subframe = skb_peek(amsdu_list);

	skb_queue_walk(amsdu_list, msdu) {
		/* Setup per-MSDU flags */
		if (skb_queue_empty(amsdu_list))
			rxs->flag &= ~RX_FLAG_AMSDU_MORE;
		else
			rxs->flag |= RX_FLAG_AMSDU_MORE;

		if (msdu == first_subframe) {
			first_subframe = NULL;
			rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN;
		} else {
			rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
		}
		rxs->flag |= RX_FLAG_SKIP_MONITOR;

		status = IEEE80211_SKB_RXCB(msdu);
		*status = *rxs;
	}
}

static void ath11k_dp_rx_process_pending_packets(struct ath11k_base *ab,
						 struct napi_struct *napi,
						 struct sk_buff_head *pending_q,
						 int *quota, u8 mac_id)
{
	struct ath11k *ar;
	struct sk_buff *msdu;
	struct ath11k_pdev *pdev;

	if (skb_queue_empty(pending_q))
		return;

	ar = ab->pdevs[mac_id].ar;

	rcu_read_lock();
	pdev = rcu_dereference(ab->pdevs_active[mac_id]);

	while (*quota && (msdu = __skb_dequeue(pending_q))) {
		if (!pdev) {
			dev_kfree_skb_any(msdu);
			continue;
		}

		ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
		(*quota)--;
	}
	rcu_read_unlock();
}

int ath11k_dp_process_rx(struct ath11k_base *ab, int mac_id,
			 struct napi_struct *napi, struct sk_buff_head *pending_q,
			 int budget)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ieee80211_rx_status *rx_status = &dp->rx_status;
	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
	struct hal_srng *srng;
	struct sk_buff *msdu;
	struct sk_buff_head msdu_list;
	struct sk_buff_head amsdu_list;
	struct ath11k_skb_rxcb *rxcb;
	u32 *rx_desc;
	int buf_id;
	int num_buffs_reaped = 0;
	int quota = budget;
	int ret;
	bool done = false;

	/* Process any pending packets from the previous napi poll.
	 * Note: All msdu's in this pending_q corresponds to the same mac id
	 * due to pdev based reo dest mapping and also since each irq group id
	 * maps to specific reo dest ring.
	 */
	ath11k_dp_rx_process_pending_packets(ab, napi, pending_q, &quota,
					     mac_id);

	/* If all quota is exhausted by processing the pending_q,
	 * Wait for the next napi poll to reap the new info
	 */
	if (!quota)
		goto exit;

	__skb_queue_head_init(&msdu_list);

	srng = &ab->hal.srng_list[dp->reo_dst_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

try_again:
	while ((rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
		struct hal_reo_dest_ring *desc = (struct hal_reo_dest_ring *)rx_desc;
		enum hal_reo_dest_ring_push_reason push_reason;
		u32 cookie;

		cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
				   desc->buf_addr_info.info1);
		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
				   cookie);

		spin_lock_bh(&rx_ring->idr_lock);
		msdu = idr_find(&rx_ring->bufs_idr, buf_id);
		if (!msdu) {
			ath11k_warn(ab, "frame rx with invalid buf_id %d\n",
				    buf_id);
			spin_unlock_bh(&rx_ring->idr_lock);
			continue;
		}

		idr_remove(&rx_ring->bufs_idr, buf_id);
		spin_unlock_bh(&rx_ring->idr_lock);

		rxcb = ATH11K_SKB_RXCB(msdu);
		dma_unmap_single(ab->dev, rxcb->paddr,
				 msdu->len + skb_tailroom(msdu),
				 DMA_FROM_DEVICE);

		num_buffs_reaped++;

		push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
					desc->info0);
		if (push_reason !=
		    HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
			/* TODO: Check if the msdu can be sent up for processing */
			dev_kfree_skb_any(msdu);
			ab->soc_stats.hal_reo_error[dp->reo_dst_ring.ring_id]++;
			continue;
		}

		rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 &
					 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
		rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 &
					RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
		rxcb->is_continuation = !!(desc->rx_msdu_info.info0 &
					   RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
		rxcb->mac_id = mac_id;
		__skb_queue_tail(&msdu_list, msdu);

		/* Stop reaping from the ring once quota is exhausted
		 * and we've received all msdu's in the the AMSDU. The
		 * additional msdu's reaped in excess of quota here would
		 * be pushed into the pending queue to be processed during
		 * the next napi poll.
		 * Note: More profiling can be done to see the impact on
		 * pending_q and throughput during various traffic & density
		 * and how use of budget instead of remaining quota affects it.
		 */
		if (num_buffs_reaped >= quota && rxcb->is_last_msdu &&
		    !rxcb->is_continuation) {
			done = true;
			break;
		}
	}

	/* Hw might have updated the head pointer after we cached it.
	 * In this case, even though there are entries in the ring we'll
	 * get rx_desc NULL. Give the read another try with updated cached
	 * head pointer so that we can reap complete MPDU in the current
	 * rx processing.
	 */
	if (!done && ath11k_hal_srng_dst_num_free(ab, srng, true)) {
		ath11k_hal_srng_access_end(ab, srng);
		goto try_again;
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	if (!num_buffs_reaped)
		goto exit;

	/* Should we reschedule it later if we are not able to replenish all
	 * the buffers?
	 */
	ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buffs_reaped,
				   HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
2306 if (!rcu_dereference(ab
->pdevs_active
[mac_id
])) {
2307 __skb_queue_purge(&msdu_list
);
2311 if (test_bit(ATH11K_CAC_RUNNING
, &ar
->dev_flags
)) {
2312 __skb_queue_purge(&msdu_list
);
2316 while (!skb_queue_empty(&msdu_list
)) {
2317 __skb_queue_head_init(&amsdu_list
);
2318 ret
= ath11k_dp_rx_retrieve_amsdu(ar
, &msdu_list
, &amsdu_list
);
2321 ath11k_err(ab
, "rx ring got corrupted %d\n", ret
);
2322 __skb_queue_purge(&msdu_list
);
2323 /* Should stop processing any more rx in
2324 * future from this ring?
2329 /* A-MSDU retrieval got failed due to non-fatal condition,
2330 * continue processing with the next msdu.
2335 ath11k_dp_rx_process_amsdu(ar
, &amsdu_list
, rx_status
);
2337 ath11k_dp_rx_pre_deliver_amsdu(ar
, &amsdu_list
, rx_status
);
2338 skb_queue_splice_tail(&amsdu_list
, pending_q
);
2341 while (quota
&& (msdu
= __skb_dequeue(pending_q
))) {
2342 ath11k_dp_rx_deliver_msdu(ar
, napi
, msdu
);
2349 return budget
- quota
;
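
/* Fold the per-PPDU counters parsed from a monitor status buffer into the
 * per-station rx statistics. num_msdu is the sum of the TCP, TCP-ACK, UDP
 * and "other" MSDU counts reported for the PPDU; each histogram (NSS, MCS,
 * GI, BW, coding, TID, preamble, reception type) is bumped by that amount
 * after a bounds check on the parsed index.
 */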
static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
					   struct hal_rx_mon_ppdu_info *ppdu_info)
{
	struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats;
	u32 num_msdu;

	if (!rx_stats)
		return;

	num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +
		   ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;

	rx_stats->num_msdu += num_msdu;
	rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count +
				    ppdu_info->tcp_ack_msdu_count;
	rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count;
	rx_stats->other_msdu_count += ppdu_info->other_msdu_count;

	if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A ||
	    ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) {
		ppdu_info->nss = 1;
		ppdu_info->mcs = HAL_RX_MAX_MCS;
		ppdu_info->tid = IEEE80211_NUM_TIDS;
	}

	if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS)
		rx_stats->nss_count[ppdu_info->nss - 1] += num_msdu;

	if (ppdu_info->mcs <= HAL_RX_MAX_MCS)
		rx_stats->mcs_count[ppdu_info->mcs] += num_msdu;

	if (ppdu_info->gi < HAL_RX_GI_MAX)
		rx_stats->gi_count[ppdu_info->gi] += num_msdu;

	if (ppdu_info->bw < HAL_RX_BW_MAX)
		rx_stats->bw_count[ppdu_info->bw] += num_msdu;

	if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX)
		rx_stats->coding_count[ppdu_info->ldpc] += num_msdu;

	if (ppdu_info->tid <= IEEE80211_NUM_TIDS)
		rx_stats->tid_count[ppdu_info->tid] += num_msdu;

	if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX)
		rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu;

	if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX)
		rx_stats->reception_type[ppdu_info->reception_type] += num_msdu;

	if (ppdu_info->is_stbc)
		rx_stats->stbc_count += num_msdu;

	if (ppdu_info->beamformed)
		rx_stats->beamformed_count += num_msdu;

	if (ppdu_info->num_mpdu_fcs_ok > 1)
		rx_stats->ampdu_msdu_count += num_msdu;
	else
		rx_stats->non_ampdu_msdu_count += num_msdu;

	rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok;
	rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err;

	arsta->rssi_comb = ppdu_info->rssi_comb;
	rx_stats->rx_duration += ppdu_info->rx_duration;
	arsta->rx_duration = rx_stats->rx_duration;
}
static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab,
							 struct dp_rxdma_ring *rx_ring,
							 int *buf_id, gfp_t gfp)
{
	struct sk_buff *skb;
	dma_addr_t paddr;

	skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
			    DP_RX_BUFFER_ALIGN_SIZE);

	if (!skb)
		goto fail_alloc_skb;

	if (!IS_ALIGNED((unsigned long)skb->data,
			DP_RX_BUFFER_ALIGN_SIZE)) {
		skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
			 skb->data);
	}

	paddr = dma_map_single(ab->dev, skb->data,
			       skb->len + skb_tailroom(skb),
			       DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(ab->dev, paddr)))
		goto fail_free_skb;

	spin_lock_bh(&rx_ring->idr_lock);
	*buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
			    rx_ring->bufs_max, gfp);
	spin_unlock_bh(&rx_ring->idr_lock);
	if (*buf_id < 0)
		goto fail_dma_unmap;

	ATH11K_SKB_RXCB(skb)->paddr = paddr;
	return skb;

fail_dma_unmap:
	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
			 DMA_FROM_DEVICE);
fail_free_skb:
	dev_kfree_skb_any(skb);
fail_alloc_skb:
	return NULL;
}
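
/* Refill the monitor status ring with freshly allocated, DMA-mapped status
 * buffers. The number of entries actually programmed is bounded by both the
 * caller's request and the free space reported by the SRNG; the return value
 * is the count of buffers that were successfully queued.
 */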
int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
					   struct dp_rxdma_ring *rx_ring,
					   int req_entries,
					   enum hal_rx_buf_return_buf_manager mgr,
					   gfp_t gfp)
{
	struct hal_srng *srng;
	u32 *desc;
	struct sk_buff *skb;
	int num_free;
	int num_remain;
	int buf_id;
	u32 cookie;
	dma_addr_t paddr;

	req_entries = min(req_entries, rx_ring->bufs_max);

	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	num_free = ath11k_hal_srng_src_num_free(ab, srng, true);

	req_entries = min(num_free, req_entries);
	num_remain = req_entries;

	while (num_remain > 0) {
		skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
							&buf_id, gfp);
		if (!skb)
			break;
		paddr = ATH11K_SKB_RXCB(skb)->paddr;

		desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
		if (!desc)
			goto fail_desc_get;

		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

		num_remain--;

		ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;

fail_desc_get:
	spin_lock_bh(&rx_ring->idr_lock);
	idr_remove(&rx_ring->bufs_idr, buf_id);
	spin_unlock_bh(&rx_ring->idr_lock);
	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
			 DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
	ath11k_hal_srng_access_end(ab, srng);
	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;
}
static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
					     int *budget, struct sk_buff_head *skb_list)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct dp_rxdma_ring *rx_ring = &dp->rx_mon_status_refill_ring;
	struct hal_srng *srng;
	void *rx_mon_status_desc;
	struct sk_buff *skb;
	struct ath11k_skb_rxcb *rxcb;
	struct hal_tlv_hdr *tlv;
	u32 cookie;
	int buf_id;
	dma_addr_t paddr;
	u8 rbm;
	int num_buffs_reaped = 0;

	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	while (*budget) {
		*budget -= 1;
		rx_mon_status_desc =
			ath11k_hal_srng_src_peek(ab, srng);
		if (!rx_mon_status_desc)
			break;

		ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr,
						&cookie, &rbm);
		if (paddr) {
			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);

			spin_lock_bh(&rx_ring->idr_lock);
			skb = idr_find(&rx_ring->bufs_idr, buf_id);
			if (!skb) {
				ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n",
					    buf_id);
				spin_unlock_bh(&rx_ring->idr_lock);
				continue;
			}

			idr_remove(&rx_ring->bufs_idr, buf_id);
			spin_unlock_bh(&rx_ring->idr_lock);

			rxcb = ATH11K_SKB_RXCB(skb);

			dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
						skb->len + skb_tailroom(skb),
						DMA_FROM_DEVICE);

			dma_unmap_single(ab->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);

			tlv = (struct hal_tlv_hdr *)skb->data;
			if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) !=
			    HAL_RX_STATUS_BUFFER_DONE) {
				ath11k_hal_srng_src_get_next_entry(ab, srng);
				continue;
			}

			__skb_queue_tail(skb_list, skb);
		}

		skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
							&buf_id, GFP_ATOMIC);

		if (!skb) {
			ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
							HAL_RX_BUF_RBM_SW3_BM);
			num_buffs_reaped++;
			break;
		}
		rxcb = ATH11K_SKB_RXCB(skb);

		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

		ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr,
						cookie, HAL_RX_BUF_RBM_SW3_BM);
		ath11k_hal_srng_src_get_next_entry(ab, srng);
		num_buffs_reaped++;
	}
	ath11k_hal_srng_access_end(ab, srng);
	spin_unlock_bh(&srng->lock);

	return num_buffs_reaped;
}
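
/* NAPI handler for the monitor status ring: reap completed status buffers,
 * parse their TLVs into a ppdu_info and credit the resulting counters to the
 * peer identified by the parsed peer_id. Status buffers are always consumed
 * here; nothing is handed to mac80211 from this path.
 */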
int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
				    struct napi_struct *napi, int budget)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;
	enum hal_rx_mon_status hal_status;
	struct sk_buff *skb;
	struct sk_buff_head skb_list;
	struct hal_rx_mon_ppdu_info ppdu_info;
	struct ath11k_peer *peer;
	struct ath11k_sta *arsta;
	int num_buffs_reaped = 0;

	__skb_queue_head_init(&skb_list);

	num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget,
							     &skb_list);
	if (!num_buffs_reaped)
		goto exit;

	while ((skb = __skb_dequeue(&skb_list))) {
		memset(&ppdu_info, 0, sizeof(ppdu_info));
		ppdu_info.peer_id = HAL_INVALID_PEERID;

		if (ath11k_debug_is_pktlog_rx_stats_enabled(ar))
			trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE);

		hal_status = ath11k_hal_rx_parse_mon_status(ab, &ppdu_info, skb);

		if (ppdu_info.peer_id == HAL_INVALID_PEERID ||
		    hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
			dev_kfree_skb_any(skb);
			continue;
		}

		spin_lock_bh(&ab->base_lock);
		peer = ath11k_peer_find_by_id(ab, ppdu_info.peer_id);

		if (!peer || !peer->sta) {
			ath11k_dbg(ab, ATH11K_DBG_DATA,
				   "failed to find the peer with peer_id %d\n",
				   ppdu_info.peer_id);
			spin_unlock_bh(&ab->base_lock);
			dev_kfree_skb_any(skb);
			continue;
		}

		arsta = (struct ath11k_sta *)peer->sta->drv_priv;
		ath11k_dp_rx_update_peer_stats(arsta, &ppdu_info);

		if (ath11k_debug_is_pktlog_peer_valid(ar, peer->addr))
			trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE);

		spin_unlock_bh(&ab->base_lock);

		dev_kfree_skb_any(skb);
	}
exit:
	return num_buffs_reaped;
}
static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
					 u32 *link_desc,
					 enum hal_wbm_rel_bm_act action)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng;
	u32 *desc;
	int ret = 0;

	srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOBUFS;
		goto exit;
	}

	ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc,
					 action);

exit:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return ret;
}
static void ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
				     struct sk_buff *msdu,
				     struct hal_rx_desc *rx_desc,
				     struct ieee80211_rx_status *rx_status)
{
	u8 rx_channel;
	enum hal_encrypt_type enctype;
	bool is_decrypted;
	u32 err_bitmap;

	is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc);
	enctype = ath11k_dp_rx_h_mpdu_start_enctype(rx_desc);
	err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_desc);

	if (err_bitmap & DP_RX_MPDU_ERR_FCS)
		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC)
		rx_status->flag |= RX_FLAG_MMIC_ERROR;

	rx_status->encoding = RX_ENC_LEGACY;
	rx_status->bw = RATE_INFO_BW_20;

	rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;

	rx_channel = ath11k_dp_rx_h_msdu_start_freq(rx_desc);

	if (rx_channel >= 1 && rx_channel <= 14) {
		rx_status->band = NL80211_BAND_2GHZ;
	} else if (rx_channel >= 36 && rx_channel <= 173) {
		rx_status->band = NL80211_BAND_5GHZ;
	} else {
		ath11k_warn(ar->ab, "Unsupported Channel info received %d\n",
			    rx_channel);
		return;
	}

	rx_status->freq = ieee80211_channel_to_frequency(rx_channel,
							 rx_status->band);

	ath11k_dp_rx_h_rate(ar, rx_desc, rx_status);

	/* Rx fragments are received in raw mode */
	skb_trim(msdu, msdu->len - FCS_LEN);

	if (is_decrypted) {
		rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MIC_STRIPPED;
		skb_trim(msdu, msdu->len -
			 ath11k_dp_rx_crypto_mic_len(ar, enctype));
	}
}
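
/* Handle one buffer referenced by an error-path link descriptor. Non-fragment
 * MSDUs that reached this path due to an error are simply dropped;
 * defragmentation candidates are trimmed, annotated via
 * ath11k_dp_rx_frag_h_mpdu() and delivered to mac80211.
 */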
static int
ath11k_dp_process_rx_err_buf(struct ath11k *ar, struct napi_struct *napi,
			     int buf_id, bool frag)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
	struct ieee80211_rx_status rx_status = {0};
	struct sk_buff *msdu;
	struct ath11k_skb_rxcb *rxcb;
	struct ieee80211_rx_status *status;
	struct hal_rx_desc *rx_desc;
	u16 msdu_len;

	spin_lock_bh(&rx_ring->idr_lock);
	msdu = idr_find(&rx_ring->bufs_idr, buf_id);
	if (!msdu) {
		ath11k_warn(ar->ab, "rx err buf with invalid buf_id %d\n",
			    buf_id);
		spin_unlock_bh(&rx_ring->idr_lock);
		return -EINVAL;
	}

	idr_remove(&rx_ring->bufs_idr, buf_id);
	spin_unlock_bh(&rx_ring->idr_lock);

	rxcb = ATH11K_SKB_RXCB(msdu);
	dma_unmap_single(ar->ab->dev, rxcb->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);

	if (!frag) {
		/* Process only rx fragments below, and drop
		 * msdu's indicated due to error reasons.
		 */
		dev_kfree_skb_any(msdu);
		return 0;
	}

	rcu_read_lock();
	if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {
		dev_kfree_skb_any(msdu);
		goto exit;
	}

	if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
		dev_kfree_skb_any(msdu);
		goto exit;
	}

	rx_desc = (struct hal_rx_desc *)msdu->data;
	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc);
	skb_put(msdu, HAL_RX_DESC_SIZE + msdu_len);
	skb_pull(msdu, HAL_RX_DESC_SIZE);

	ath11k_dp_rx_frag_h_mpdu(ar, msdu, rx_desc, &rx_status);

	status = IEEE80211_SKB_RXCB(msdu);

	*status = rx_status;

	ath11k_dp_rx_deliver_msdu(ar, napi, msdu);

exit:
	rcu_read_unlock();
	return 0;
}
int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
			     int budget)
{
	u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
	struct dp_link_desc_bank *link_desc_banks;
	enum hal_rx_buf_return_buf_manager rbm;
	int tot_n_bufs_reaped, quota, ret, i;
	int n_bufs_reaped[MAX_RADIOS] = {0};
	struct dp_rxdma_ring *rx_ring;
	struct dp_srng *reo_except;
	u32 desc_bank, num_msdus;
	struct hal_srng *srng;
	struct ath11k_dp *dp;
	void *link_desc_va;
	int buf_id, mac_id;
	struct ath11k *ar;
	dma_addr_t paddr;
	u32 *desc;
	bool is_frag;

	tot_n_bufs_reaped = 0;
	quota = budget;

	dp = &ab->dp;
	reo_except = &dp->reo_except_ring;
	link_desc_banks = dp->link_desc_banks;

	srng = &ab->hal.srng_list[reo_except->ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	while (budget &&
	       (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
		struct hal_reo_dest_ring *reo_desc = (struct hal_reo_dest_ring *)desc;

		ab->soc_stats.err_ring_pkts++;
		ret = ath11k_hal_desc_reo_parse_err(ab, desc, &paddr,
						    &desc_bank);
		if (ret) {
			ath11k_warn(ab, "failed to parse error reo desc %d\n",
				    ret);
			continue;
		}
		link_desc_va = link_desc_banks[desc_bank].vaddr +
			       (paddr - link_desc_banks[desc_bank].paddr);
		ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
						 &rbm);
		if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
		    rbm != HAL_RX_BUF_RBM_SW3_BM) {
			ab->soc_stats.invalid_rbm++;
			ath11k_warn(ab, "invalid return buffer manager %d\n", rbm);
			ath11k_dp_rx_link_desc_return(ab, desc,
						      HAL_WBM_REL_BM_ACT_REL_MSDU);
			continue;
		}

		is_frag = !!(reo_desc->rx_mpdu_info.info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG);

		/* Return the link desc back to wbm idle list */
		ath11k_dp_rx_link_desc_return(ab, desc,
					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);

		for (i = 0; i < num_msdus; i++) {
			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
					   msdu_cookies[i]);

			mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID,
					   msdu_cookies[i]);

			ar = ab->pdevs[mac_id].ar;

			if (!ath11k_dp_process_rx_err_buf(ar, napi, buf_id,
							  is_frag)) {
				n_bufs_reaped[mac_id]++;
				tot_n_bufs_reaped++;
			}
		}

		if (tot_n_bufs_reaped >= quota) {
			tot_n_bufs_reaped = quota;
			goto exit;
		}

		budget = quota - tot_n_bufs_reaped;
	}

exit:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	for (i = 0; i < ab->num_radios; i++) {
		if (!n_bufs_reaped[i])
			continue;

		ar = ab->pdevs[i].ar;
		rx_ring = &ar->dp.rx_refill_buf_ring;

		ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i],
					   HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
	}

	return tot_n_bufs_reaped;
}
static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar,
					     int msdu_len,
					     struct sk_buff_head *msdu_list)
{
	struct sk_buff *skb, *tmp;
	struct ath11k_skb_rxcb *rxcb;
	int n_buffs;

	n_buffs = DIV_ROUND_UP(msdu_len,
			       (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE));

	skb_queue_walk_safe(msdu_list, skb, tmp) {
		rxcb = ATH11K_SKB_RXCB(skb);
		if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
		    rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
			if (!n_buffs)
				break;
			__skb_unlink(skb, msdu_list);
			dev_kfree_skb_any(skb);
			n_buffs--;
		}
	}
}
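
/* Recover an MSDU that was released with a NULL REO queue descriptor error.
 * Returns 0 if the frame was fixed up and can be passed on, or a negative
 * error if it (and any scatter-gather continuation buffers) had to be
 * dropped.
 */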
static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu,
				      struct ieee80211_rx_status *status,
				      struct sk_buff_head *msdu_list)
{
	struct sk_buff_head amsdu_list;
	u16 msdu_len;
	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
	u8 l3pad_bytes;
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);

	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc);

	if ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) {
		/* First buffer will be freed by the caller, so deduct its length */
		msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE);
		ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
		return -EINVAL;
	}

	if (!ath11k_dp_rx_h_attn_msdu_done(desc)) {
		ath11k_warn(ar->ab,
			    "msdu_done bit not set in null_q_des processing\n");
		__skb_queue_purge(msdu_list);
		return -EIO;
	}

	/* Handle NULL queue descriptor violations arising out a missing
	 * REO queue for a given peer or a given TID. This typically
	 * may happen if a packet is received on a QOS enabled TID before the
	 * ADDBA negotiation for that TID, when the TID queue is setup. Or
	 * it may also happen for MC/BC frames if they are not routed to the
	 * non-QOS TID queue, in the absence of any other default TID queue.
	 * This error can show up both in a REO destination or WBM release ring.
	 */

	__skb_queue_head_init(&amsdu_list);

	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc);
	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc);

	l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc);

	if ((HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
		return -EINVAL;

	skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len);
	skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes);

	ath11k_dp_rx_h_ppdu(ar, desc, status);

	__skb_queue_tail(&amsdu_list, msdu);

	ath11k_dp_rx_h_mpdu(ar, &amsdu_list, desc, status);

	/* Please note that caller will having the access to msdu and completing
	 * rx with mac80211. Need not worry about cleaning up amsdu_list.
	 */

	return 0;
}
static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu,
				   struct ieee80211_rx_status *status,
				   struct sk_buff_head *msdu_list)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	bool drop = false;

	ar->ab->soc_stats.reo_error[rxcb->err_code]++;

	switch (rxcb->err_code) {
	case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
		if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
			drop = true;
		break;
	default:
		/* TODO: Review other errors and process them to mac80211
		 * as appropriate.
		 */
		drop = true;
		break;
	}

	return drop;
}
static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu,
					struct ieee80211_rx_status *status)
{
	u16 msdu_len;
	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
	u8 l3pad_bytes;
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);

	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc);
	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc);

	l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc);
	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc);
	skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len);
	skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes);

	ath11k_dp_rx_h_ppdu(ar, desc, status);

	status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
			 RX_FLAG_DECRYPTED);

	ath11k_dp_rx_h_undecap(ar, msdu, desc,
			       HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
}
static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar, struct sk_buff *msdu,
				     struct ieee80211_rx_status *status)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	bool drop = false;

	ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;

	switch (rxcb->err_code) {
	case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
		ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status);
		break;
	default:
		/* TODO: Review other rxdma error code to check if anything is
		 * worth reporting to mac80211
		 */
		drop = true;
		break;
	}

	return drop;
}
static void ath11k_dp_rx_wbm_err(struct ath11k *ar,
				 struct napi_struct *napi,
				 struct sk_buff *msdu,
				 struct sk_buff_head *msdu_list)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	struct ieee80211_rx_status rxs = {0};
	struct ieee80211_rx_status *status;
	bool drop = true;

	switch (rxcb->err_rel_src) {
	case HAL_WBM_REL_SRC_MODULE_REO:
		drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
		break;
	case HAL_WBM_REL_SRC_MODULE_RXDMA:
		drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
		break;
	default:
		/* msdu will get freed */
		break;
	}

	if (drop) {
		dev_kfree_skb_any(msdu);
		return;
	}

	status = IEEE80211_SKB_RXCB(msdu);
	*status = rxs;

	ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
}
int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
				 struct napi_struct *napi, int budget)
{
	struct ath11k *ar;
	struct ath11k_dp *dp = &ab->dp;
	struct dp_rxdma_ring *rx_ring;
	struct hal_rx_wbm_rel_info err_info;
	struct hal_srng *srng;
	struct sk_buff *msdu;
	struct sk_buff_head msdu_list[MAX_RADIOS];
	struct ath11k_skb_rxcb *rxcb;
	u32 *rx_desc;
	int buf_id, mac_id;
	int num_buffs_reaped[MAX_RADIOS] = {0};
	int total_num_buffs_reaped = 0;
	int ret, i;

	for (i = 0; i < MAX_RADIOS; i++)
		__skb_queue_head_init(&msdu_list[i]);

	srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	while (budget) {
		rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
		if (!rx_desc)
			break;

		ret = ath11k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
		if (ret) {
			ath11k_warn(ab,
				    "failed to parse rx error in wbm_rel ring desc %d\n",
				    ret);
			continue;
		}

		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie);
		mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie);

		ar = ab->pdevs[mac_id].ar;
		rx_ring = &ar->dp.rx_refill_buf_ring;

		spin_lock_bh(&rx_ring->idr_lock);
		msdu = idr_find(&rx_ring->bufs_idr, buf_id);
		if (!msdu) {
			ath11k_warn(ab, "frame rx with invalid buf_id %d pdev %d\n",
				    buf_id, mac_id);
			spin_unlock_bh(&rx_ring->idr_lock);
			continue;
		}

		idr_remove(&rx_ring->bufs_idr, buf_id);
		spin_unlock_bh(&rx_ring->idr_lock);

		rxcb = ATH11K_SKB_RXCB(msdu);
		dma_unmap_single(ab->dev, rxcb->paddr,
				 msdu->len + skb_tailroom(msdu),
				 DMA_FROM_DEVICE);

		num_buffs_reaped[mac_id]++;
		total_num_buffs_reaped++;
		budget--;

		if (err_info.push_reason !=
		    HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
			dev_kfree_skb_any(msdu);
			continue;
		}

		rxcb->err_rel_src = err_info.err_rel_src;
		rxcb->err_code = err_info.err_code;
		rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
		__skb_queue_tail(&msdu_list[mac_id], msdu);
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	if (!total_num_buffs_reaped)
		goto done;

	for (i = 0; i < ab->num_radios; i++) {
		if (!num_buffs_reaped[i])
			continue;

		ar = ab->pdevs[i].ar;
		rx_ring = &ar->dp.rx_refill_buf_ring;

		ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
					   HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
	}

	rcu_read_lock();
	for (i = 0; i < ab->num_radios; i++) {
		if (!rcu_dereference(ab->pdevs_active[i])) {
			__skb_queue_purge(&msdu_list[i]);
			continue;
		}

		ar = ab->pdevs[i].ar;

		if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
			__skb_queue_purge(&msdu_list[i]);
			continue;
		}

		while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL)
			ath11k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]);
	}
	rcu_read_unlock();
done:
	return total_num_buffs_reaped;
}
int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;
	struct dp_srng *err_ring = &ar->dp.rxdma_err_dst_ring;
	struct dp_rxdma_ring *rx_ring = &ar->dp.rx_refill_buf_ring;
	struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks;
	struct hal_srng *srng;
	u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
	enum hal_rx_buf_return_buf_manager rbm;
	enum hal_reo_entr_rxdma_ecode rxdma_err_code;
	struct ath11k_skb_rxcb *rxcb;
	struct sk_buff *skb;
	struct hal_reo_entrance_ring *entr_ring;
	void *desc;
	int num_buf_freed = 0;
	int quota = budget;
	dma_addr_t paddr;
	u32 desc_bank;
	void *link_desc_va;
	int num_msdus;
	int i;
	int buf_id;

	srng = &ab->hal.srng_list[err_ring->ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	while (quota-- &&
	       (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
		ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank);

		entr_ring = (struct hal_reo_entrance_ring *)desc;
		rxdma_err_code =
			FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
				  entr_ring->info1);
		ab->soc_stats.rxdma_error[rxdma_err_code]++;

		link_desc_va = link_desc_banks[desc_bank].vaddr +
			       (paddr - link_desc_banks[desc_bank].paddr);
		ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus,
						 msdu_cookies, &rbm);

		for (i = 0; i < num_msdus; i++) {
			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
					   msdu_cookies[i]);

			spin_lock_bh(&rx_ring->idr_lock);
			skb = idr_find(&rx_ring->bufs_idr, buf_id);
			if (!skb) {
				ath11k_warn(ab, "rxdma error with invalid buf_id %d\n",
					    buf_id);
				spin_unlock_bh(&rx_ring->idr_lock);
				continue;
			}

			idr_remove(&rx_ring->bufs_idr, buf_id);
			spin_unlock_bh(&rx_ring->idr_lock);

			rxcb = ATH11K_SKB_RXCB(skb);
			dma_unmap_single(ab->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);

			num_buf_freed++;
		}

		ath11k_dp_rx_link_desc_return(ab, desc,
					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	if (num_buf_freed)
		ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed,
					   HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);

	return budget - quota;
}
void ath11k_dp_process_reo_status(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng;
	struct dp_reo_cmd *cmd, *tmp;
	bool found = false;
	u32 *reo_desc;
	u16 tag;
	struct hal_reo_status reo_status;

	srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];

	memset(&reo_status, 0, sizeof(reo_status));

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	while ((reo_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
		tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc);

		switch (tag) {
		case HAL_REO_GET_QUEUE_STATS_STATUS:
			ath11k_hal_reo_status_queue_stats(ab, reo_desc,
							  &reo_status);
			break;
		case HAL_REO_FLUSH_QUEUE_STATUS:
			ath11k_hal_reo_flush_queue_status(ab, reo_desc,
							  &reo_status);
			break;
		case HAL_REO_FLUSH_CACHE_STATUS:
			ath11k_hal_reo_flush_cache_status(ab, reo_desc,
							  &reo_status);
			break;
		case HAL_REO_UNBLOCK_CACHE_STATUS:
			ath11k_hal_reo_unblk_cache_status(ab, reo_desc,
							  &reo_status);
			break;
		case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
			ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc,
								 &reo_status);
			break;
		case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
			ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc,
								  &reo_status);
			break;
		case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
			ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc,
								  &reo_status);
			break;
		default:
			ath11k_warn(ab, "Unknown reo status type %d\n", tag);
			continue;
		}

		spin_lock_bh(&dp->reo_cmd_lock);
		list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
			if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
				found = true;
				list_del(&cmd->list);
				break;
			}
		}
		spin_unlock_bh(&dp->reo_cmd_lock);

		if (found) {
			cmd->handler(dp, (void *)&cmd->data,
				     reo_status.uniform_hdr.cmd_status);
			kfree(cmd);
		}

		found = false;
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);
}
void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;

	ath11k_dp_rx_pdev_srng_free(ar);
	ath11k_dp_rxdma_pdev_buf_free(ar);
}
int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;
	struct ath11k_pdev_dp *dp = &ar->dp;
	u32 ring_id;
	int ret;

	ret = ath11k_dp_rx_pdev_srng_alloc(ar);
	if (ret) {
		ath11k_warn(ab, "failed to setup rx srngs\n");
		return ret;
	}

	ret = ath11k_dp_rxdma_pdev_buf_setup(ar);
	if (ret) {
		ath11k_warn(ab, "failed to setup rxdma ring\n");
		return ret;
	}

	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
	ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_BUF);
	if (ret) {
		ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
			    ret);
		return ret;
	}

	ring_id = dp->rxdma_err_dst_ring.ring_id;
	ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_DST);
	if (ret) {
		ath11k_warn(ab, "failed to configure rxdma_err_dest_ring %d\n",
			    ret);
		return ret;
	}

	ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
	ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
					  mac_id, HAL_RXDMA_MONITOR_BUF);
	if (ret) {
		ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
			    ret);
		return ret;
	}

	ret = ath11k_dp_tx_htt_srng_setup(ab,
					  dp->rxdma_mon_dst_ring.ring_id,
					  mac_id, HAL_RXDMA_MONITOR_DST);
	if (ret) {
		ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
			    ret);
		return ret;
	}

	ret = ath11k_dp_tx_htt_srng_setup(ab,
					  dp->rxdma_mon_desc_ring.ring_id,
					  mac_id, HAL_RXDMA_MONITOR_DESC);
	if (ret) {
		ath11k_warn(ab, "failed to configure rxdma_mon_desc_ring %d\n",
			    ret);
		return ret;
	}

	ring_id = dp->rx_mon_status_refill_ring.refill_buf_ring.ring_id;
	ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id,
					  HAL_RXDMA_MONITOR_STATUS);
	if (ret) {
		ath11k_warn(ab,
			    "failed to configure mon_status_refill_ring %d\n",
			    ret);
		return ret;
	}

	return 0;
}
static void ath11k_dp_mon_set_frag_len(u32 *total_len, u32 *frag_len)
{
	if (*total_len >= (DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc))) {
		*frag_len = DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc);
		*total_len -= *frag_len;
	} else {
		*frag_len = *total_len;
		*total_len = 0;
	}
}
int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar,
					  void *p_last_buf_addr_info,
					  u8 mac_id)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct dp_srng *dp_srng;
	void *hal_srng;
	void *src_srng_desc;
	int ret = 0;

	dp_srng = &dp->rxdma_mon_desc_ring;
	hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];

	ath11k_hal_srng_access_begin(ar->ab, hal_srng);

	src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng);

	if (src_srng_desc) {
		struct ath11k_buffer_addr *src_desc =
				(struct ath11k_buffer_addr *)src_srng_desc;

		*src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info);
	} else {
		ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
			   "Monitor Link Desc Ring %d Full", mac_id);
		ret = -ENOMEM;
	}

	ath11k_hal_srng_access_end(ar->ab, hal_srng);
	return ret;
}
void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc,
					 dma_addr_t *paddr, u32 *sw_cookie,
					 void **pp_buf_addr_info)
{
	struct hal_rx_msdu_link *msdu_link =
			(struct hal_rx_msdu_link *)rx_msdu_link_desc;
	struct ath11k_buffer_addr *buf_addr_info;
	u8 rbm;

	buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info;

	ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, &rbm);

	*pp_buf_addr_info = (void *)buf_addr_info;
}
static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len)
{
	if (skb->len > len) {
		skb_trim(skb, len);
	} else {
		if (skb_tailroom(skb) < len - skb->len) {
			if ((pskb_expand_head(skb, 0,
					      len - skb->len - skb_tailroom(skb),
					      GFP_ATOMIC))) {
				dev_kfree_skb_any(skb);
				return -ENOMEM;
			}
		}
		skb_put(skb, (len - skb->len));
	}
	return 0;
}
static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar,
					void *msdu_link_desc,
					struct hal_rx_msdu_list *msdu_list,
					u16 *num_msdus)
{
	struct hal_rx_msdu_details *msdu_details = NULL;
	struct rx_msdu_desc *msdu_desc_info = NULL;
	struct hal_rx_msdu_link *msdu_link = NULL;
	int i;
	u32 last = FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1);
	u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1);
	u8 tmp = 0;

	msdu_link = (struct hal_rx_msdu_link *)msdu_link_desc;
	msdu_details = &msdu_link->msdu_link[0];

	for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) {
		if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
			      msdu_details[i].buf_addr_info.info0) == 0) {
			msdu_desc_info = &msdu_details[i - 1].rx_msdu_info;
			msdu_desc_info->info0 |= last;
			break;
		}
		msdu_desc_info = &msdu_details[i].rx_msdu_info;

		if (!i)
			msdu_desc_info->info0 |= first;
		else if (i == (HAL_RX_NUM_MSDU_DESC - 1))
			msdu_desc_info->info0 |= last;
		msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0;
		msdu_list->msdu_info[i].msdu_len =
			 HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0);
		msdu_list->sw_cookie[i] =
			FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
				  msdu_details[i].buf_addr_info.info1);
		tmp = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
				msdu_details[i].buf_addr_info.info1);
		msdu_list->rbm[i] = tmp;
	}
	*num_msdus = i;
}
static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id,
					u32 *rx_bufs_used)
{
	u32 ret = 0;

	if ((*ppdu_id < msdu_ppdu_id) &&
	    ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) {
		*ppdu_id = msdu_ppdu_id;
		ret = msdu_ppdu_id;
	} else if ((*ppdu_id > msdu_ppdu_id) &&
		   ((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) {
		/* mon_dst is behind than mon_status
		 * skip dst_ring and free it
		 */
		*rx_bufs_used += 1;
		*ppdu_id = msdu_ppdu_id;
		ret = msdu_ppdu_id;
	}
	return ret;
}
static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info,
				      bool *is_frag, u32 *total_len,
				      u32 *frag_len, u32 *msdu_cnt)
{
	if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) {
		if (!*is_frag) {
			*total_len = info->msdu_len;
			*is_frag = true;
		}
		ath11k_dp_mon_set_frag_len(total_len,
					   frag_len);
	} else {
		if (*is_frag) {
			ath11k_dp_mon_set_frag_len(total_len,
						   frag_len);
		} else {
			*frag_len = info->msdu_len;
		}
		*is_frag = false;
		*msdu_cnt -= 1;
	}
}
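
/* Walk one REO entrance ring entry on the monitor destination path and pop
 * every MSDU buffer of the MPDU it describes, chaining them into a head/tail
 * skb list. Duplicate link descriptors and buffers (tracked via
 * mon_last_linkdesc_paddr and mon_last_buf_cookie) and MPDUs flagged with
 * fatal rxdma errors are dropped. Returns the number of rx buffers consumed
 * so the caller can replenish the monitor buffer ring.
 */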
static u32
ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar,
			  void *ring_entry, struct sk_buff **head_msdu,
			  struct sk_buff **tail_msdu, u32 *npackets,
			  u32 *ppdu_id)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
	struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
	struct sk_buff *msdu = NULL, *last = NULL;
	struct hal_rx_msdu_list msdu_list;
	void *p_buf_addr_info, *p_last_buf_addr_info;
	struct hal_rx_desc *rx_desc;
	void *rx_msdu_link_desc;
	dma_addr_t paddr;
	u16 num_msdus = 0;
	u32 rx_buf_size, rx_pkt_offset, sw_cookie;
	u32 rx_bufs_used = 0, i = 0;
	u32 msdu_ppdu_id = 0, msdu_cnt = 0;
	u32 total_len = 0, frag_len = 0;
	bool is_frag, is_first_msdu;
	bool drop_mpdu = false;
	struct ath11k_skb_rxcb *rxcb;
	struct hal_reo_entrance_ring *ent_desc =
			(struct hal_reo_entrance_ring *)ring_entry;
	int buf_id;

	ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr,
					    &sw_cookie, &p_last_buf_addr_info,
					    &msdu_cnt);

	if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON,
		      ent_desc->info1) ==
		      HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
		u8 rxdma_err =
			FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
				  ent_desc->info1);
		if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
		    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
		    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
			drop_mpdu = true;
			pmon->rx_mon_stats.dest_mpdu_drop++;
		}
	}

	is_frag = false;
	is_first_msdu = true;

	do {
		if (pmon->mon_last_linkdesc_paddr == paddr) {
			pmon->rx_mon_stats.dup_mon_linkdesc_cnt++;
			return rx_bufs_used;
		}

		rx_msdu_link_desc =
			(void *)pmon->link_desc_banks[sw_cookie].vaddr +
			(paddr - pmon->link_desc_banks[sw_cookie].paddr);

		ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
					    &num_msdus);

		for (i = 0; i < num_msdus; i++) {
			u32 l2_hdr_offset;

			if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) {
				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
					   "i %d last_cookie %d is same\n",
					   i, pmon->mon_last_buf_cookie);
				drop_mpdu = true;
				pmon->rx_mon_stats.dup_mon_buf_cnt++;
				continue;
			}
			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
					   msdu_list.sw_cookie[i]);

			spin_lock_bh(&rx_ring->idr_lock);
			msdu = idr_find(&rx_ring->bufs_idr, buf_id);
			spin_unlock_bh(&rx_ring->idr_lock);
			if (!msdu) {
				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
					   "msdu_pop: invalid buf_id %d\n", buf_id);
				break;
			}
			rxcb = ATH11K_SKB_RXCB(msdu);
			if (!rxcb->unmapped) {
				dma_unmap_single(ar->ab->dev, rxcb->paddr,
						 msdu->len +
						 skb_tailroom(msdu),
						 DMA_FROM_DEVICE);
				rxcb->unmapped = 1;
			}
			if (drop_mpdu) {
				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
					   "i %d drop msdu %p *ppdu_id %x\n",
					   i, msdu, *ppdu_id);
				dev_kfree_skb_any(msdu);
				msdu = NULL;
				goto next_msdu;
			}

			rx_desc = (struct hal_rx_desc *)msdu->data;

			rx_pkt_offset = sizeof(struct hal_rx_desc);
			l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(rx_desc);

			if (is_first_msdu) {
				if (!ath11k_dp_rxdesc_mpdu_valid(rx_desc)) {
					drop_mpdu = true;
					dev_kfree_skb_any(msdu);
					msdu = NULL;
					pmon->mon_last_linkdesc_paddr = paddr;
					goto next_msdu;
				}

				msdu_ppdu_id =
					ath11k_dp_rxdesc_get_ppduid(rx_desc);

				if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id,
								 ppdu_id,
								 &rx_bufs_used)) {
					if (rx_bufs_used) {
						drop_mpdu = true;
						dev_kfree_skb_any(msdu);
						msdu = NULL;
						goto next_msdu;
					}
					return rx_bufs_used;
				}
				pmon->mon_last_linkdesc_paddr = paddr;
				is_first_msdu = false;
			}
			ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
						  &is_frag, &total_len,
						  &frag_len, &msdu_cnt);
			rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;

			ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size);

			if (!(*head_msdu))
				*head_msdu = msdu;
			else if (last)
				last->next = msdu;

			last = msdu;
next_msdu:
			pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i];
			rx_bufs_used++;
			spin_lock_bh(&rx_ring->idr_lock);
			idr_remove(&rx_ring->bufs_idr, buf_id);
			spin_unlock_bh(&rx_ring->idr_lock);
		}

		ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr,
						    &sw_cookie,
						    &p_buf_addr_info);

		if (ath11k_dp_rx_monitor_link_desc_return(ar,
							  p_last_buf_addr_info,
							  dp->mac_id))
			ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
				   "dp_rx_monitor_link_desc_return failed");

		p_last_buf_addr_info = p_buf_addr_info;

	} while (paddr && msdu_cnt);

	if (last)
		last->next = NULL;

	*tail_msdu = msdu;

	if (msdu_cnt == 0)
		*npackets = 1;

	return rx_bufs_used;
}
static void ath11k_dp_rx_msdus_set_payload(struct sk_buff *msdu)
{
	u32 rx_pkt_offset, l2_hdr_offset;

	rx_pkt_offset = sizeof(struct hal_rx_desc);
	l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad((struct hal_rx_desc *)msdu->data);
	skb_pull(msdu, rx_pkt_offset + l2_hdr_offset);
}
static struct sk_buff *
ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
			    u32 mac_id, struct sk_buff *head_msdu,
			    struct sk_buff *last_msdu,
			    struct ieee80211_rx_status *rxs)
{
	struct sk_buff *msdu, *mpdu_buf, *prev_buf;
	u32 decap_format, wifi_hdr_len;
	struct hal_rx_desc *rx_desc;
	char *hdr_desc;
	u8 *dest;
	struct ieee80211_hdr_3addr *wh;

	mpdu_buf = NULL;

	if (!head_msdu)
		goto err_merge_fail;

	rx_desc = (struct hal_rx_desc *)head_msdu->data;

	if (ath11k_dp_rxdesc_get_mpdulen_err(rx_desc))
		return NULL;

	decap_format = ath11k_dp_rxdesc_get_decap_format(rx_desc);

	ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);

	if (decap_format == DP_RX_DECAP_TYPE_RAW) {
		ath11k_dp_rx_msdus_set_payload(head_msdu);

		prev_buf = head_msdu;
		msdu = head_msdu->next;

		while (msdu) {
			ath11k_dp_rx_msdus_set_payload(msdu);

			prev_buf = msdu;
			msdu = msdu->next;
		}

		prev_buf->next = NULL;

		skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
	} else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) {
		__le16 qos_field;
		u8 qos_pkt = 0;

		rx_desc = (struct hal_rx_desc *)head_msdu->data;
		hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc);

		/* Base size */
		wifi_hdr_len = sizeof(struct ieee80211_hdr_3addr);
		wh = (struct ieee80211_hdr_3addr *)hdr_desc;

		if (ieee80211_is_data_qos(wh->frame_control)) {
			struct ieee80211_qos_hdr *qwh =
					(struct ieee80211_qos_hdr *)hdr_desc;

			qos_field = qwh->qos_ctrl;
			qos_pkt = 1;
		}
		msdu = head_msdu;

		while (msdu) {
			rx_desc = (struct hal_rx_desc *)msdu->data;
			hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc);

			if (qos_pkt) {
				dest = skb_push(msdu, sizeof(__le16));
				if (!dest)
					goto err_merge_fail;
				memcpy(dest, hdr_desc, wifi_hdr_len);
				memcpy(dest + wifi_hdr_len,
				       (u8 *)&qos_field, sizeof(__le16));
			}
			ath11k_dp_rx_msdus_set_payload(msdu);
			prev_buf = msdu;
			msdu = msdu->next;
		}
		dest = skb_put(prev_buf, HAL_RX_FCS_LEN);
		if (!dest)
			goto err_merge_fail;

		ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
			   "mpdu_buf %pK mpdu_buf->len %u",
			   prev_buf, prev_buf->len);
	} else {
		ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
			   "decap format %d is not supported!\n",
			   decap_format);
		goto err_merge_fail;
	}

	return head_msdu;

err_merge_fail:
	if (mpdu_buf && decap_format != DP_RX_DECAP_TYPE_RAW) {
		ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
			   "err_merge_fail mpdu_buf %pK", mpdu_buf);
		/* Free the head buffer */
		dev_kfree_skb_any(mpdu_buf);
	}
	return NULL;
}
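
/* Merge the popped MSDU chain into an MPDU suitable for the monitor
 * interface and hand each resulting skb to mac80211 with
 * RX_FLAG_ONLY_MONITOR set. On merge failure the whole chain is freed.
 */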
static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
				    struct sk_buff *head_msdu,
				    struct sk_buff *tail_msdu,
				    struct napi_struct *napi)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct sk_buff *mon_skb, *skb_next, *header;
	struct ieee80211_rx_status *rxs = &dp->rx_status, *status;

	mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu,
					      tail_msdu, rxs);

	if (!mon_skb)
		goto mon_deliver_fail;

	header = mon_skb;

	rxs->flag = 0;
	do {
		skb_next = mon_skb->next;
		if (!skb_next)
			rxs->flag &= ~RX_FLAG_AMSDU_MORE;
		else
			rxs->flag |= RX_FLAG_AMSDU_MORE;

		if (mon_skb == header) {
			header = NULL;
			rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN;
		} else {
			rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
		}
		rxs->flag |= RX_FLAG_ONLY_MONITOR;

		status = IEEE80211_SKB_RXCB(mon_skb);
		*status = *rxs;

		ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb);
		mon_skb = skb_next;
	} while (mon_skb);
	rxs->flag = 0;

	return 0;

mon_deliver_fail:
	mon_skb = head_msdu;
	while (mon_skb) {
		skb_next = mon_skb->next;
		dev_kfree_skb_any(mon_skb);
		mon_skb = skb_next;
	}
	return -EINVAL;
}
static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, u32 quota,
					  struct napi_struct *napi)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
	void *ring_entry;
	void *mon_dst_srng;
	u32 ppdu_id;
	u32 rx_bufs_used;
	struct ath11k_pdev_mon_stats *rx_mon_stats;
	u32 npackets = 0;

	mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];

	if (!mon_dst_srng) {
		ath11k_warn(ar->ab,
			    "HAL Monitor Destination Ring Init Failed -- %pK",
			    mon_dst_srng);
		return;
	}

	spin_lock_bh(&pmon->mon_lock);

	ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);

	ppdu_id = pmon->mon_ppdu_info.ppdu_id;
	rx_bufs_used = 0;
	rx_mon_stats = &pmon->rx_mon_stats;

	while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
		struct sk_buff *head_msdu, *tail_msdu;

		head_msdu = NULL;
		tail_msdu = NULL;

		rx_bufs_used += ath11k_dp_rx_mon_mpdu_pop(ar, ring_entry,
							  &head_msdu, &tail_msdu,
							  &npackets, &ppdu_id);

		if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
			pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
			ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
				   "dest_rx: new ppdu_id %x != status ppdu_id %x",
				   ppdu_id, pmon->mon_ppdu_info.ppdu_id);
			break;
		}
		if (head_msdu && tail_msdu) {
			ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu,
						 tail_msdu, napi);
			rx_mon_stats->dest_mpdu_done++;
		}

		ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
								mon_dst_srng);
	}
	ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);

	spin_unlock_bh(&pmon->mon_lock);

	if (rx_bufs_used) {
		rx_mon_stats->dest_ppdu_done++;
		ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
					   &dp->rxdma_mon_buf_ring,
					   rx_bufs_used,
					   HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
	}
}
static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar,
						u32 quota,
						struct napi_struct *napi)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
	struct hal_rx_mon_ppdu_info *ppdu_info;
	struct sk_buff *status_skb;
	u32 tlv_status = HAL_TLV_STATUS_BUF_DONE;
	struct ath11k_pdev_mon_stats *rx_mon_stats;

	ppdu_info = &pmon->mon_ppdu_info;
	rx_mon_stats = &pmon->rx_mon_stats;

	if (pmon->mon_ppdu_status != DP_PPDU_STATUS_START)
		return;

	while (!skb_queue_empty(&pmon->rx_status_q)) {
		status_skb = skb_dequeue(&pmon->rx_status_q);

		tlv_status = ath11k_hal_rx_parse_mon_status(ar->ab, ppdu_info,
							    status_skb);
		if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) {
			rx_mon_stats->status_ppdu_done++;
			pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
			ath11k_dp_rx_mon_dest_process(ar, quota, napi);
			pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
		}
		dev_kfree_skb_any(status_skb);
	}
}
static int ath11k_dp_mon_process_rx(struct ath11k_base *ab, int mac_id,
				    struct napi_struct *napi, int budget)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
	int num_buffs_reaped = 0;

	num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ar->ab, dp->mac_id, &budget,
							     &pmon->rx_status_q);
	if (num_buffs_reaped)
		ath11k_dp_rx_mon_status_process_tlv(ar, budget, napi);

	return num_buffs_reaped;
}
int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
				   struct napi_struct *napi, int budget)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;
	int ret = 0;

	if (test_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags))
		ret = ath11k_dp_mon_process_rx(ab, mac_id, napi, budget);
	else
		ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget);

	return ret;
}
static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;

	skb_queue_head_init(&pmon->rx_status_q);

	pmon->mon_ppdu_status = DP_PPDU_STATUS_START;

	memset(&pmon->rx_mon_stats, 0,
	       sizeof(pmon->rx_mon_stats));
	return 0;
}
int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = &dp->mon_data;
	struct hal_srng *mon_desc_srng = NULL;
	struct dp_srng *dp_srng;
	int ret = 0;
	u32 n_link_desc = 0;

	ret = ath11k_dp_rx_pdev_mon_status_attach(ar);
	if (ret) {
		ath11k_warn(ar->ab, "pdev_mon_status_attach() failed");
		return ret;
	}

	dp_srng = &dp->rxdma_mon_desc_ring;
	n_link_desc = dp_srng->size /
		ath11k_hal_srng_get_entrysize(HAL_RXDMA_MONITOR_DESC);
	mon_desc_srng =
		&ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id];

	ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks,
					HAL_RXDMA_MONITOR_DESC, mon_desc_srng,
					n_link_desc);
	if (ret) {
		ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed");
		return ret;
	}
	pmon->mon_last_linkdesc_paddr = 0;
	pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
	spin_lock_init(&pmon->mon_lock);

	return 0;
}
static int ath11k_dp_mon_link_free(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = &dp->mon_data;

	ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks,
				    HAL_RXDMA_MONITOR_DESC,
				    &dp->rxdma_mon_desc_ring);
	return 0;
}

int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar)
{
	ath11k_dp_mon_link_free(ar);
	return 0;
}