// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/ieee80211.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <crypto/hash.h>
#include "core.h"
#include "debug.h"
#include "hal_desc.h"
#include "hw.h"
#include "dp_rx.h"
#include "hal_rx.h"
#include "dp_tx.h"
#include "peer.h"
#include "dp_mon.h"
#include "debugfs_htt_stats.h"

#define ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)
static enum hal_encrypt_type ath12k_dp_rx_h_enctype(struct ath12k_base *ab,
						    struct hal_rx_desc *desc)
{
	if (!ab->hal_rx_ops->rx_desc_encrypt_valid(desc))
		return HAL_ENCRYPT_TYPE_OPEN;

	return ab->hal_rx_ops->rx_desc_get_encrypt_type(desc);
}

u8 ath12k_dp_rx_h_decap_type(struct ath12k_base *ab,
			     struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_decap_type(desc);
}

static u8 ath12k_dp_rx_h_mesh_ctl_present(struct ath12k_base *ab,
					  struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_mesh_ctl(desc);
}

static bool ath12k_dp_rx_h_seq_ctrl_valid(struct ath12k_base *ab,
					  struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
}

static bool ath12k_dp_rx_h_fc_valid(struct ath12k_base *ab,
				    struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_mpdu_fc_valid(desc);
}

static bool ath12k_dp_rx_h_more_frags(struct ath12k_base *ab,
				      struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data + ab->hal.hal_desc_sz);
	return ieee80211_has_morefrags(hdr->frame_control);
}

static u16 ath12k_dp_rx_h_frag_no(struct ath12k_base *ab,
				  struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data + ab->hal.hal_desc_sz);
	return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
}

static u16 ath12k_dp_rx_h_seq_no(struct ath12k_base *ab,
				 struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_mpdu_start_seq_no(desc);
}

static bool ath12k_dp_rx_h_msdu_done(struct ath12k_base *ab,
				     struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->dp_rx_h_msdu_done(desc);
}

static bool ath12k_dp_rx_h_l4_cksum_fail(struct ath12k_base *ab,
					 struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->dp_rx_h_l4_cksum_fail(desc);
}

static bool ath12k_dp_rx_h_ip_cksum_fail(struct ath12k_base *ab,
					 struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->dp_rx_h_ip_cksum_fail(desc);
}

static bool ath12k_dp_rx_h_is_decrypted(struct ath12k_base *ab,
					struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->dp_rx_h_is_decrypted(desc);
}

u32 ath12k_dp_rx_h_mpdu_err(struct ath12k_base *ab,
			    struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->dp_rx_h_mpdu_err(desc);
}

static u16 ath12k_dp_rx_h_msdu_len(struct ath12k_base *ab,
				   struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_msdu_len(desc);
}

static u8 ath12k_dp_rx_h_sgi(struct ath12k_base *ab,
			     struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_msdu_sgi(desc);
}

static u8 ath12k_dp_rx_h_rate_mcs(struct ath12k_base *ab,
				  struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_msdu_rate_mcs(desc);
}

static u8 ath12k_dp_rx_h_rx_bw(struct ath12k_base *ab,
			       struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_msdu_rx_bw(desc);
}

static u32 ath12k_dp_rx_h_freq(struct ath12k_base *ab,
			       struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_msdu_freq(desc);
}

static u8 ath12k_dp_rx_h_pkt_type(struct ath12k_base *ab,
				  struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_msdu_pkt_type(desc);
}

static u8 ath12k_dp_rx_h_nss(struct ath12k_base *ab,
			     struct hal_rx_desc *desc)
{
	return hweight8(ab->hal_rx_ops->rx_desc_get_msdu_nss(desc));
}

static u8 ath12k_dp_rx_h_tid(struct ath12k_base *ab,
			     struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_mpdu_tid(desc);
}

static u16 ath12k_dp_rx_h_peer_id(struct ath12k_base *ab,
				  struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_mpdu_peer_id(desc);
}

u8 ath12k_dp_rx_h_l3pad(struct ath12k_base *ab,
			struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_l3_pad_bytes(desc);
}

static bool ath12k_dp_rx_h_first_msdu(struct ath12k_base *ab,
				      struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_first_msdu(desc);
}

static bool ath12k_dp_rx_h_last_msdu(struct ath12k_base *ab,
				     struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_last_msdu(desc);
}

static void ath12k_dp_rx_desc_end_tlv_copy(struct ath12k_base *ab,
					   struct hal_rx_desc *fdesc,
					   struct hal_rx_desc *ldesc)
{
	ab->hal_rx_ops->rx_desc_copy_end_tlv(fdesc, ldesc);
}

static void ath12k_dp_rxdesc_set_msdu_len(struct ath12k_base *ab,
					  struct hal_rx_desc *desc,
					  u16 len)
{
	ab->hal_rx_ops->rx_desc_set_msdu_len(desc, len);
}

static bool ath12k_dp_rx_h_is_da_mcbc(struct ath12k_base *ab,
				      struct hal_rx_desc *desc)
{
	return (ath12k_dp_rx_h_first_msdu(ab, desc) &&
		ab->hal_rx_ops->rx_desc_is_da_mcbc(desc));
}

static bool ath12k_dp_rxdesc_mac_addr2_valid(struct ath12k_base *ab,
					     struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_mac_addr2_valid(desc);
}

static u8 *ath12k_dp_rxdesc_get_mpdu_start_addr2(struct ath12k_base *ab,
						 struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_mpdu_start_addr2(desc);
}

static void ath12k_dp_rx_desc_get_dot11_hdr(struct ath12k_base *ab,
					    struct hal_rx_desc *desc,
					    struct ieee80211_hdr *hdr)
{
	ab->hal_rx_ops->rx_desc_get_dot11_hdr(desc, hdr);
}

static void ath12k_dp_rx_desc_get_crypto_header(struct ath12k_base *ab,
						struct hal_rx_desc *desc,
						u8 *crypto_hdr,
						enum hal_encrypt_type enctype)
{
	ab->hal_rx_ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype);
}

static u16 ath12k_dp_rxdesc_get_mpdu_frame_ctrl(struct ath12k_base *ab,
						struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_mpdu_frame_ctl(desc);
}

static inline u8 ath12k_dp_rx_get_msdu_src_link(struct ath12k_base *ab,
						struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_msdu_src_link_id(desc);
}
static void ath12k_dp_clean_up_skb_list(struct sk_buff_head *skb_list)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(skb_list)))
		dev_kfree_skb_any(skb);
}
static size_t ath12k_dp_list_cut_nodes(struct list_head *list,
				       struct list_head *head,
				       size_t count)
{
	struct list_head *cur;
	struct ath12k_rx_desc_info *rx_desc;
	size_t nodes = 0;

	if (!count) {
		INIT_LIST_HEAD(list);
		goto out;
	}

	list_for_each(cur, head) {
		if (!count)
			break;

		rx_desc = list_entry(cur, struct ath12k_rx_desc_info, list);
		rx_desc->in_use = true;

		count--;
		nodes++;
	}

	list_cut_before(list, head, cur);
out:
	return nodes;
}
static void ath12k_dp_rx_enqueue_free(struct ath12k_dp *dp,
				      struct list_head *used_list)
{
	struct ath12k_rx_desc_info *rx_desc, *safe;

	/* Reset the use flag */
	list_for_each_entry_safe(rx_desc, safe, used_list, list)
		rx_desc->in_use = false;

	spin_lock_bh(&dp->rx_desc_lock);
	list_splice_tail(used_list, &dp->rx_desc_free_list);
	spin_unlock_bh(&dp->rx_desc_lock);
}
/* Returns number of Rx buffers replenished */
int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
				struct dp_rxdma_ring *rx_ring,
				struct list_head *used_list,
				int req_entries)
{
	struct ath12k_buffer_addr *desc;
	struct hal_srng *srng;
	struct sk_buff *skb;
	int num_free;
	int num_remain;
	u32 cookie;
	dma_addr_t paddr;
	struct ath12k_dp *dp = &ab->dp;
	struct ath12k_rx_desc_info *rx_desc;
	enum hal_rx_buf_return_buf_manager mgr = ab->hw_params->hal_params->rx_buf_rbm;

	req_entries = min(req_entries, rx_ring->bufs_max);

	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath12k_hal_srng_access_begin(ab, srng);

	num_free = ath12k_hal_srng_src_num_free(ab, srng, true);
	if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
		req_entries = num_free;

	req_entries = min(num_free, req_entries);
	num_remain = req_entries;

	if (!num_remain)
		goto out;

	/* Get the descriptor from free list */
	if (list_empty(used_list)) {
		spin_lock_bh(&dp->rx_desc_lock);
		req_entries = ath12k_dp_list_cut_nodes(used_list,
						       &dp->rx_desc_free_list,
						       req_entries);
		spin_unlock_bh(&dp->rx_desc_lock);
		num_remain = req_entries;
	}

	while (num_remain > 0) {
		skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
				    DP_RX_BUFFER_ALIGN_SIZE);
		if (!skb)
			break;

		if (!IS_ALIGNED((unsigned long)skb->data,
				DP_RX_BUFFER_ALIGN_SIZE)) {
			skb_pull(skb,
				 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
				 skb->data);
		}

		paddr = dma_map_single(ab->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(ab->dev, paddr))
			goto fail_free_skb;

		rx_desc = list_first_entry_or_null(used_list,
						   struct ath12k_rx_desc_info,
						   list);
		if (!rx_desc)
			goto fail_dma_unmap;

		rx_desc->skb = skb;
		cookie = rx_desc->cookie;

		desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
		if (!desc)
			goto fail_dma_unmap;

		list_del(&rx_desc->list);
		ATH12K_SKB_RXCB(skb)->paddr = paddr;

		num_remain--;

		ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
	}

	goto out;

fail_dma_unmap:
	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
			 DMA_FROM_DEVICE);
fail_free_skb:
	dev_kfree_skb_any(skb);

out:
	ath12k_hal_srng_access_end(ab, srng);

	if (!list_empty(used_list))
		ath12k_dp_rx_enqueue_free(dp, used_list);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;
}
static int ath12k_dp_rxdma_mon_buf_ring_free(struct ath12k_base *ab,
					     struct dp_rxdma_mon_ring *rx_ring)
{
	struct sk_buff *skb;
	int buf_id;

	spin_lock_bh(&rx_ring->idr_lock);
	idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
		idr_remove(&rx_ring->bufs_idr, buf_id);
		/* TODO: Understand where internal driver does this dma_unmap
		 * of rxdma_buf_ring buffers.
		 */
		dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}

	idr_destroy(&rx_ring->bufs_idr);
	spin_unlock_bh(&rx_ring->idr_lock);

	return 0;
}
static int ath12k_dp_rxdma_buf_free(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;

	ath12k_dp_rxdma_mon_buf_ring_free(ab, &dp->rxdma_mon_buf_ring);

	return 0;
}
static int ath12k_dp_rxdma_mon_ring_buf_setup(struct ath12k_base *ab,
					      struct dp_rxdma_mon_ring *rx_ring,
					      u32 ringtype)
{
	int num_entries;

	num_entries = rx_ring->refill_buf_ring.size /
		ath12k_hal_srng_get_entrysize(ab, ringtype);

	rx_ring->bufs_max = num_entries;
	ath12k_dp_mon_buf_replenish(ab, rx_ring, num_entries);

	return 0;
}
static int ath12k_dp_rxdma_ring_buf_setup(struct ath12k_base *ab,
					  struct dp_rxdma_ring *rx_ring)
{
	LIST_HEAD(list);

	rx_ring->bufs_max = rx_ring->refill_buf_ring.size /
			ath12k_hal_srng_get_entrysize(ab, HAL_RXDMA_BUF);

	ath12k_dp_rx_bufs_replenish(ab, rx_ring, &list, 0);

	return 0;
}
static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	int ret;

	ret = ath12k_dp_rxdma_ring_buf_setup(ab, &dp->rx_refill_buf_ring);
	if (ret) {
		ath12k_warn(ab,
			    "failed to setup HAL_RXDMA_BUF\n");
		return ret;
	}

	if (ab->hw_params->rxdma1_enable) {
		ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab,
							 &dp->rxdma_mon_buf_ring,
							 HAL_RXDMA_MONITOR_BUF);
		if (ret) {
			ath12k_warn(ab,
				    "failed to setup HAL_RXDMA_MONITOR_BUF\n");
			return ret;
		}
	}

	return 0;
}
static void ath12k_dp_rx_pdev_srng_free(struct ath12k *ar)
{
	struct ath12k_pdev_dp *dp = &ar->dp;
	struct ath12k_base *ab = ar->ab;
	int i;

	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++)
		ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_dst_ring[i]);
}
void ath12k_dp_rx_pdev_reo_cleanup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	int i;

	for (i = 0; i < DP_REO_DST_RING_MAX; i++)
		ath12k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
}
int ath12k_dp_rx_pdev_reo_setup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	int ret;
	int i;

	for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
		ret = ath12k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
					   HAL_REO_DST, i, 0,
					   DP_REO_DST_RING_SIZE);
		if (ret) {
			ath12k_warn(ab, "failed to setup reo_dst_ring\n");
			goto err_reo_cleanup;
		}
	}

	return 0;

err_reo_cleanup:
	ath12k_dp_rx_pdev_reo_cleanup(ab);

	return ret;
}
static int ath12k_dp_rx_pdev_srng_alloc(struct ath12k *ar)
{
	struct ath12k_pdev_dp *dp = &ar->dp;
	struct ath12k_base *ab = ar->ab;
	int i;
	int ret;
	u32 mac_id = dp->mac_id;

	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
		ret = ath12k_dp_srng_setup(ar->ab,
					   &dp->rxdma_mon_dst_ring[i],
					   HAL_RXDMA_MONITOR_DST,
					   0, mac_id + i,
					   DP_RXDMA_MONITOR_DST_RING_SIZE);
		if (ret) {
			ath12k_warn(ar->ab,
				    "failed to setup HAL_RXDMA_MONITOR_DST\n");
			return ret;
		}
	}

	return 0;
}
void ath12k_dp_rx_reo_cmd_list_cleanup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
	struct ath12k_dp_rx_reo_cache_flush_elem *cmd_cache, *tmp_cache;

	spin_lock_bh(&dp->reo_cmd_lock);
	list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
		list_del(&cmd->list);
		dma_unmap_single(ab->dev, cmd->data.paddr,
				 cmd->data.size, DMA_BIDIRECTIONAL);
		kfree(cmd->data.vaddr);
		kfree(cmd);
	}

	list_for_each_entry_safe(cmd_cache, tmp_cache,
				 &dp->reo_cmd_cache_flush_list, list) {
		list_del(&cmd_cache->list);
		dp->reo_cmd_cache_flush_count--;
		dma_unmap_single(ab->dev, cmd_cache->data.paddr,
				 cmd_cache->data.size, DMA_BIDIRECTIONAL);
		kfree(cmd_cache->data.vaddr);
		kfree(cmd_cache);
	}
	spin_unlock_bh(&dp->reo_cmd_lock);
}
static void ath12k_dp_reo_cmd_free(struct ath12k_dp *dp, void *ctx,
				   enum hal_reo_cmd_status status)
{
	struct ath12k_dp_rx_tid *rx_tid = ctx;

	if (status != HAL_REO_CMD_SUCCESS)
		ath12k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
			    rx_tid->tid, status);

	dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);
	rx_tid->vaddr = NULL;
}
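
/* Post a REO command to the HW command ring; when a callback is supplied,
 * the command is tracked on reo_cmd_list so that the completion status can
 * later be dispatched to the handler.
 */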
static int ath12k_dp_reo_cmd_send(struct ath12k_base *ab, struct ath12k_dp_rx_tid *rx_tid,
				  enum hal_reo_cmd_type type,
				  struct ath12k_hal_reo_cmd *cmd,
				  void (*cb)(struct ath12k_dp *dp, void *ctx,
					     enum hal_reo_cmd_status status))
{
	struct ath12k_dp *dp = &ab->dp;
	struct ath12k_dp_rx_reo_cmd *dp_cmd;
	struct hal_srng *cmd_ring;
	int cmd_num;

	cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
	cmd_num = ath12k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);

	/* cmd_num should start from 1, during failure return the error code */
	if (cmd_num < 0)
		return cmd_num;

	/* reo cmd ring descriptors has cmd_num starting from 1 */
	if (cmd_num == 0)
		return -EINVAL;

	if (!cb)
		return 0;

	/* Can this be optimized so that we keep the pending command list only
	 * for tid delete command to free up the resource on the command status
	 * indication?
	 */
	dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC);
	if (!dp_cmd)
		return -ENOMEM;

	memcpy(&dp_cmd->data, rx_tid, sizeof(*rx_tid));
	dp_cmd->cmd_num = cmd_num;
	dp_cmd->handler = cb;

	spin_lock_bh(&dp->reo_cmd_lock);
	list_add_tail(&dp_cmd->list, &dp->reo_cmd_list);
	spin_unlock_bh(&dp->reo_cmd_lock);

	return 0;
}
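
/* Flush the deleted TID's queue descriptor out of the HW cache in
 * qdesc-sized chunks, then issue a final flush with status so that the
 * host memory backing the descriptor can be unmapped and freed.
 */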
static void ath12k_dp_reo_cache_flush(struct ath12k_base *ab,
				      struct ath12k_dp_rx_tid *rx_tid)
{
	struct ath12k_hal_reo_cmd cmd = {0};
	unsigned long tot_desc_sz, desc_sz;
	int ret;

	tot_desc_sz = rx_tid->size;
	desc_sz = ath12k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);

	while (tot_desc_sz > desc_sz) {
		tot_desc_sz -= desc_sz;
		cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
		ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
					     HAL_REO_CMD_FLUSH_CACHE, &cmd,
					     NULL);
		if (ret)
			ath12k_warn(ab,
				    "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
				    rx_tid->tid, ret);
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
				     HAL_REO_CMD_FLUSH_CACHE,
				     &cmd, ath12k_dp_reo_cmd_free);
	if (ret) {
		ath12k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
			   rx_tid->tid, ret);
		dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
				 DMA_BIDIRECTIONAL);
		kfree(rx_tid->vaddr);
		rx_tid->vaddr = NULL;
	}
}
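
/* Completion handler for the RX queue invalidation command: queue the
 * TID's descriptor on reo_cmd_cache_flush_list and flush entries that are
 * aged out or exceed the free threshold.
 */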
static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
				      enum hal_reo_cmd_status status)
{
	struct ath12k_base *ab = dp->ab;
	struct ath12k_dp_rx_tid *rx_tid = ctx;
	struct ath12k_dp_rx_reo_cache_flush_elem *elem, *tmp;

	if (status == HAL_REO_CMD_DRAIN) {
		goto free_desc;
	} else if (status != HAL_REO_CMD_SUCCESS) {
		/* Shouldn't happen! Cleanup in case of other failure? */
		ath12k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
			    rx_tid->tid, status);
		return;
	}

	elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
	if (!elem)
		goto free_desc;

	elem->ts = jiffies;
	memcpy(&elem->data, rx_tid, sizeof(*rx_tid));

	spin_lock_bh(&dp->reo_cmd_lock);
	list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
	dp->reo_cmd_cache_flush_count++;

	/* Flush and invalidate aged REO desc from HW cache */
	list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
				 list) {
		if (dp->reo_cmd_cache_flush_count > ATH12K_DP_RX_REO_DESC_FREE_THRES ||
		    time_after(jiffies, elem->ts +
			       msecs_to_jiffies(ATH12K_DP_RX_REO_DESC_FREE_TIMEOUT_MS))) {
			list_del(&elem->list);
			dp->reo_cmd_cache_flush_count--;

			/* Unlock the reo_cmd_lock before using ath12k_dp_reo_cmd_send()
			 * within ath12k_dp_reo_cache_flush. The reo_cmd_cache_flush_list
			 * is used in only two contexts, one is in this function called
			 * from napi and the other in ath12k_dp_free during core destroy.
			 * Before dp_free, the irqs would be disabled and would wait to
			 * synchronize. Hence there wouldn't be any race against add or
			 * delete to this list. Hence unlock-lock is safe here.
			 */
			spin_unlock_bh(&dp->reo_cmd_lock);

			ath12k_dp_reo_cache_flush(ab, &elem->data);
			kfree(elem);
			spin_lock_bh(&dp->reo_cmd_lock);
		}
	}
	spin_unlock_bh(&dp->reo_cmd_lock);

	return;
free_desc:
	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);
	rx_tid->vaddr = NULL;
}
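
/* Program the REO queue reference LUT entry for this peer/tid with the
 * queue descriptor address so the HW can locate the reorder queue.
 */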
static void ath12k_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u16 tid,
					  dma_addr_t paddr)
{
	struct ath12k_reo_queue_ref *qref;
	struct ath12k_dp *dp = &ab->dp;

	if (!ab->hw_params->reoq_lut_support)
		return;

	/* TODO: based on ML peer or not, select the LUT. below assumes non
	 * ML peer.
	 */
	qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
			(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);

	qref->info0 = u32_encode_bits(lower_32_bits(paddr),
				      BUFFER_ADDR_INFO0_ADDR);
	qref->info1 = u32_encode_bits(upper_32_bits(paddr),
				      BUFFER_ADDR_INFO1_ADDR) |
		      u32_encode_bits(tid, DP_REO_QREF_NUM);
}
static void ath12k_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u16 tid)
{
	struct ath12k_reo_queue_ref *qref;
	struct ath12k_dp *dp = &ab->dp;

	if (!ab->hw_params->reoq_lut_support)
		return;

	/* TODO: based on ML peer or not, select the LUT. below assumes non
	 * ML peer.
	 */
	qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
			(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);

	qref->info0 = u32_encode_bits(0, BUFFER_ADDR_INFO0_ADDR);
	qref->info1 = u32_encode_bits(0, BUFFER_ADDR_INFO1_ADDR) |
		      u32_encode_bits(tid, DP_REO_QREF_NUM);
}
void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
				  struct ath12k_peer *peer, u8 tid)
{
	struct ath12k_hal_reo_cmd cmd = {0};
	struct ath12k_dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	int ret;

	if (!rx_tid->active)
		return;

	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.upd0 = HAL_REO_CMD_UPD0_VLD;
	ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,
				     HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
				     ath12k_dp_rx_tid_del_func);
	if (ret) {
		ath12k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
			   tid, ret);
		dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
				 DMA_BIDIRECTIONAL);
		kfree(rx_tid->vaddr);
		rx_tid->vaddr = NULL;
	}

	ath12k_peer_rx_tid_qref_reset(ar->ab, peer->peer_id, tid);

	rx_tid->active = false;
}
/* TODO: it's strange (and ugly) that struct hal_reo_dest_ring is converted
 * to struct hal_wbm_release_ring, I couldn't figure out the logic behind
 * this.
 */
static int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab,
					 struct hal_reo_dest_ring *ring,
					 enum hal_wbm_rel_bm_act action)
{
	struct hal_wbm_release_ring *link_desc = (struct hal_wbm_release_ring *)ring;
	struct hal_wbm_release_ring *desc;
	struct ath12k_dp *dp = &ab->dp;
	struct hal_srng *srng;
	int ret = 0;

	srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath12k_hal_srng_access_begin(ab, srng);

	desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOBUFS;
		goto exit;
	}

	ath12k_hal_rx_msdu_link_desc_set(ab, desc, link_desc, action);

exit:
	ath12k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return ret;
}
static void ath12k_dp_rx_frags_cleanup(struct ath12k_dp_rx_tid *rx_tid,
				       bool rel_link_desc)
{
	struct ath12k_base *ab = rx_tid->ab;

	lockdep_assert_held(&ab->base_lock);

	if (rx_tid->dst_ring_desc) {
		if (rel_link_desc)
			ath12k_dp_rx_link_desc_return(ab, rx_tid->dst_ring_desc,
						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
		kfree(rx_tid->dst_ring_desc);
		rx_tid->dst_ring_desc = NULL;
	}

	rx_tid->cur_sn = 0;
	rx_tid->last_frag_no = 0;
	rx_tid->rx_frag_bitmap = 0;
	__skb_queue_purge(&rx_tid->rx_frags);
}
void ath12k_dp_rx_peer_tid_cleanup(struct ath12k *ar, struct ath12k_peer *peer)
{
	struct ath12k_dp_rx_tid *rx_tid;
	int i;

	lockdep_assert_held(&ar->ab->base_lock);

	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
		rx_tid = &peer->rx_tid[i];

		ath12k_dp_rx_peer_tid_delete(ar, peer, i);
		ath12k_dp_rx_frags_cleanup(rx_tid, true);

		spin_unlock_bh(&ar->ab->base_lock);
		del_timer_sync(&rx_tid->frag_timer);
		spin_lock_bh(&ar->ab->base_lock);
	}
}
static int ath12k_peer_rx_tid_reo_update(struct ath12k *ar,
					 struct ath12k_peer *peer,
					 struct ath12k_dp_rx_tid *rx_tid,
					 u32 ba_win_sz, u16 ssn,
					 bool update_ssn)
{
	struct ath12k_hal_reo_cmd cmd = {0};
	int ret;

	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
	cmd.ba_window_size = ba_win_sz;

	if (update_ssn) {
		cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
		cmd.upd2 = u32_encode_bits(ssn, HAL_REO_CMD_UPD2_SSN);
	}

	ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,
				     HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
				     NULL);
	if (ret) {
		ath12k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
			    rx_tid->tid, ret);
		return ret;
	}

	rx_tid->ba_win_sz = ba_win_sz;

	return 0;
}
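
/* Allocate and map the HW reorder queue descriptor for a peer TID and
 * register it either through the REO queue LUT or the WMI reorder queue
 * setup command; an already active queue is updated in place.
 */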
int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id,
				u8 tid, u32 ba_win_sz, u16 ssn,
				enum hal_pn_type pn_type)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_dp *dp = &ab->dp;
	struct hal_rx_reo_queue *addr_aligned;
	struct ath12k_peer *peer;
	struct ath12k_dp_rx_tid *rx_tid;
	u32 hw_desc_sz;
	void *vaddr;
	dma_addr_t paddr;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath12k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		spin_unlock_bh(&ab->base_lock);
		ath12k_warn(ab, "failed to find the peer to set up rx tid\n");
		return -ENOENT;
	}

	if (ab->hw_params->reoq_lut_support && !dp->reoq_lut.vaddr) {
		spin_unlock_bh(&ab->base_lock);
		ath12k_warn(ab, "reo qref table is not setup\n");
		return -EINVAL;
	}

	if (peer->peer_id > DP_MAX_PEER_ID || tid > IEEE80211_NUM_TIDS) {
		ath12k_warn(ab, "peer id of peer %d or tid %d doesn't allow reoq setup\n",
			    peer->peer_id, tid);
		spin_unlock_bh(&ab->base_lock);
		return -EINVAL;
	}

	rx_tid = &peer->rx_tid[tid];
	/* Update the tid queue if it is already setup */
	if (rx_tid->active) {
		paddr = rx_tid->paddr;
		ret = ath12k_peer_rx_tid_reo_update(ar, peer, rx_tid,
						    ba_win_sz, ssn, true);
		spin_unlock_bh(&ab->base_lock);
		if (ret) {
			ath12k_warn(ab, "failed to update reo for rx tid %d\n", tid);
			return ret;
		}

		if (!ab->hw_params->reoq_lut_support) {
			ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
								     peer_mac, paddr,
								     tid, 1, ba_win_sz);
			if (ret) {
				ath12k_warn(ab, "failed to setup peer rx reorder queue for tid %d: %d\n",
					    tid, ret);
				return ret;
			}
		}

		return 0;
	}

	rx_tid->ba_win_sz = ba_win_sz;

	/* TODO: Optimize the memory allocation for qos tid based on
	 * the actual BA window size in REO tid update path.
	 */
	if (tid == HAL_DESC_REO_NON_QOS_TID)
		hw_desc_sz = ath12k_hal_reo_qdesc_size(ba_win_sz, tid);
	else
		hw_desc_sz = ath12k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);

	vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
	if (!vaddr) {
		spin_unlock_bh(&ab->base_lock);
		return -ENOMEM;
	}

	addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);

	ath12k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
				   ssn, pn_type);

	paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
			       DMA_BIDIRECTIONAL);

	ret = dma_mapping_error(ab->dev, paddr);
	if (ret) {
		spin_unlock_bh(&ab->base_lock);
		goto err_mem_free;
	}

	rx_tid->vaddr = vaddr;
	rx_tid->paddr = paddr;
	rx_tid->size = hw_desc_sz;
	rx_tid->active = true;

	if (ab->hw_params->reoq_lut_support) {
		/* Update the REO queue LUT at the corresponding peer id
		 * and tid with qaddr.
		 */
		ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid, paddr);
		spin_unlock_bh(&ab->base_lock);
	} else {
		spin_unlock_bh(&ab->base_lock);
		ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
							     paddr, tid, 1, ba_win_sz);
	}

	return ret;

err_mem_free:
	kfree(vaddr);

	return ret;
}
int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
			     struct ieee80211_ampdu_params *params)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta);
	struct ath12k_link_sta *arsta = &ahsta->deflink;
	int vdev_id = arsta->arvif->vdev_id;
	int ret;

	ret = ath12k_dp_rx_peer_tid_setup(ar, params->sta->addr, vdev_id,
					  params->tid, params->buf_size,
					  params->ssn, arsta->ahsta->pn_type);
	if (ret)
		ath12k_warn(ab, "failed to setup rx tid %d\n", ret);

	return ret;
}
int ath12k_dp_rx_ampdu_stop(struct ath12k *ar,
			    struct ieee80211_ampdu_params *params)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_peer *peer;
	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta);
	struct ath12k_link_sta *arsta = &ahsta->deflink;
	int vdev_id = arsta->arvif->vdev_id;
	bool active;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath12k_peer_find(ab, vdev_id, params->sta->addr);
	if (!peer) {
		spin_unlock_bh(&ab->base_lock);
		ath12k_warn(ab, "failed to find the peer to stop rx aggregation\n");
		return -ENOENT;
	}

	active = peer->rx_tid[params->tid].active;

	if (!active) {
		spin_unlock_bh(&ab->base_lock);
		return 0;
	}

	ret = ath12k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
	spin_unlock_bh(&ab->base_lock);
	if (ret) {
		ath12k_warn(ab, "failed to update reo for rx tid %d: %d\n",
			    params->tid, ret);
		return ret;
	}

	return ret;
}
int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif,
				       const u8 *peer_addr,
				       enum set_key_cmd key_cmd,
				       struct ieee80211_key_conf *key)
{
	struct ath12k *ar = arvif->ar;
	struct ath12k_base *ab = ar->ab;
	struct ath12k_hal_reo_cmd cmd = {0};
	struct ath12k_peer *peer;
	struct ath12k_dp_rx_tid *rx_tid;
	u8 tid;
	int ret = 0;

	/* NOTE: Enable PN/TSC replay check offload only for unicast frames.
	 * We use mac80211 PN/TSC replay check functionality for bcast/mcast
	 * for now.
	 */
	if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
		return 0;

	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.upd0 = HAL_REO_CMD_UPD0_PN |
		   HAL_REO_CMD_UPD0_PN_SIZE |
		   HAL_REO_CMD_UPD0_PN_VALID |
		   HAL_REO_CMD_UPD0_PN_CHECK |
		   HAL_REO_CMD_UPD0_SVLD;

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_CCMP_256:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		if (key_cmd == SET_KEY) {
			cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
			cmd.pn_size = 48;
		}
		break;
	default:
		break;
	}

	spin_lock_bh(&ab->base_lock);

	peer = ath12k_peer_find(ab, arvif->vdev_id, peer_addr);
	if (!peer) {
		spin_unlock_bh(&ab->base_lock);
		ath12k_warn(ab, "failed to find the peer %pM to configure pn replay detection\n",
			    peer_addr);
		return -ENOENT;
	}

	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
		rx_tid = &peer->rx_tid[tid];
		if (!rx_tid->active)
			continue;
		cmd.addr_lo = lower_32_bits(rx_tid->paddr);
		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
		ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
					     HAL_REO_CMD_UPDATE_RX_QUEUE,
					     &cmd, NULL);
		if (ret) {
			ath12k_warn(ab, "failed to configure rx tid %d queue of peer %pM for pn replay detection %d\n",
				    tid, peer_addr, ret);
			break;
		}
	}

	spin_unlock_bh(&ab->base_lock);

	return ret;
}
static int ath12k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
				      u16 peer_id)
{
	int i;

	for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
		if (ppdu_stats->user_stats[i].is_valid_peer_id) {
			if (peer_id == ppdu_stats->user_stats[i].peer_id)
				return i;
		} else {
			return i;
		}
	}

	return -EINVAL;
}
static int ath12k_htt_tlv_ppdu_stats_parse(struct ath12k_base *ab,
					   u16 tag, u16 len, const void *ptr,
					   void *data)
{
	const struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *ba_status;
	const struct htt_ppdu_stats_usr_cmpltn_cmn *cmplt_cmn;
	const struct htt_ppdu_stats_user_rate *user_rate;
	struct htt_ppdu_stats_info *ppdu_info;
	struct htt_ppdu_user_stats *user_stats;
	int cur_user;
	u16 peer_id;

	ppdu_info = data;

	switch (tag) {
	case HTT_PPDU_STATS_TAG_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_common)) {
			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}
		memcpy(&ppdu_info->ppdu_stats.common, ptr,
		       sizeof(struct htt_ppdu_stats_common));
		break;
	case HTT_PPDU_STATS_TAG_USR_RATE:
		if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}
		user_rate = ptr;
		peer_id = le16_to_cpu(user_rate->sw_peer_id);
		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy(&user_stats->rate, ptr,
		       sizeof(struct htt_ppdu_stats_user_rate));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}
		cmplt_cmn = ptr;
		peer_id = le16_to_cpu(cmplt_cmn->sw_peer_id);
		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy(&user_stats->cmpltn_cmn, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
		if (len <
		    sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}
		ba_status = ptr;
		peer_id = le16_to_cpu(ba_status->sw_peer_id);
		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy(&user_stats->ack_ba, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
		user_stats->tlv_flags |= BIT(tag);
		break;
	}

	return 0;
}
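
/* Walk the HTT TLV stream and hand each tag/length/value to the supplied
 * iterator, validating the header and length of every TLV on the way.
 */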
int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
			   int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
				       const void *ptr, void *data),
			   void *data)
{
	const struct htt_tlv *tlv;
	const void *begin = ptr;
	u16 tlv_tag, tlv_len;
	int ret = -EINVAL;

	while (len > 0) {
		if (len < sizeof(*tlv)) {
			ath12k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}
		tlv = (struct htt_tlv *)ptr;
		tlv_tag = le32_get_bits(tlv->header, HTT_TLV_TAG);
		tlv_len = le32_get_bits(tlv->header, HTT_TLV_LEN);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		if (tlv_len > len) {
			ath12k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}
		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
		if (ret == -ENOMEM)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}

	return 0;
}
static void
ath12k_update_per_peer_tx_stats(struct ath12k *ar,
				struct htt_ppdu_stats *ppdu_stats, u8 user)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_peer *peer;
	struct ieee80211_sta *sta;
	struct ath12k_sta *ahsta;
	struct ath12k_link_sta *arsta;
	struct htt_ppdu_stats_user_rate *user_rate;
	struct ath12k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
	struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
	struct htt_ppdu_stats_common *common = &ppdu_stats->common;
	int ret;
	u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
	u32 v, succ_bytes = 0;
	u16 tones, rate = 0, succ_pkts = 0;
	u32 tx_duration = 0;
	u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
	bool is_ampdu = false;

	if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
		return;

	if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
		is_ampdu =
			HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);

	if (usr_stats->tlv_flags &
	    BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
		succ_bytes = le32_to_cpu(usr_stats->ack_ba.success_bytes);
		succ_pkts = le32_get_bits(usr_stats->ack_ba.info,
					  HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M);
		tid = le32_get_bits(usr_stats->ack_ba.info,
				    HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM);
	}

	if (common->fes_duration_us)
		tx_duration = le32_to_cpu(common->fes_duration_us);

	user_rate = &usr_stats->rate;
	flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
	bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
	nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
	mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
	sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
	dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);

	/* Note: If host configured fixed rates and in some other special
	 * cases, the broadcast/management frames are sent in different rates.
	 * Firmware rate's control to be skipped for this?
	 */

	if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH12K_HE_MCS_MAX) {
		ath12k_warn(ab, "Invalid HE mcs %d peer stats", mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH12K_VHT_MCS_MAX) {
		ath12k_warn(ab, "Invalid VHT mcs %d peer stats", mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH12K_HT_MCS_MAX || nss < 1)) {
		ath12k_warn(ab, "Invalid HT mcs %d nss %d peer stats",
			    mcs, nss);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
		ret = ath12k_mac_hw_ratecode_to_legacy_rate(mcs,
							    flags,
							    &rate_idx,
							    &rate);
		if (ret < 0)
			return;
	}

	spin_lock_bh(&ab->base_lock);
	peer = ath12k_peer_find_by_id(ab, usr_stats->peer_id);

	if (!peer || !peer->sta) {
		spin_unlock_bh(&ab->base_lock);
		return;
	}

	sta = peer->sta;
	ahsta = ath12k_sta_to_ahsta(sta);
	arsta = &ahsta->deflink;

	memset(&arsta->txrate, 0, sizeof(arsta->txrate));

	switch (flags) {
	case WMI_RATE_PREAMBLE_OFDM:
		arsta->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_CCK:
		arsta->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_HT:
		arsta->txrate.mcs = mcs + 8 * (nss - 1);
		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_VHT:
		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_HE:
		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
		arsta->txrate.he_dcm = dcm;
		arsta->txrate.he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
		tones = le16_to_cpu(user_rate->ru_end) -
			le16_to_cpu(user_rate->ru_start) + 1;
		v = ath12k_he_ru_tones_to_nl80211_he_ru_alloc(tones);
		arsta->txrate.he_ru_alloc = v;
		break;
	}

	arsta->txrate.nss = nss;
	arsta->txrate.bw = ath12k_mac_bw_to_mac80211_bw(bw);
	arsta->tx_duration += tx_duration;
	memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));

	/* PPDU stats reported for mgmt packet doesn't have valid tx bytes.
	 * So skip peer stats update for mgmt packets.
	 */
	if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
		memset(peer_stats, 0, sizeof(*peer_stats));
		peer_stats->succ_pkts = succ_pkts;
		peer_stats->succ_bytes = succ_bytes;
		peer_stats->is_ampdu = is_ampdu;
		peer_stats->duration = tx_duration;
		peer_stats->ba_fails =
			HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
			HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
	}

	spin_unlock_bh(&ab->base_lock);
}
static void ath12k_htt_update_ppdu_stats(struct ath12k *ar,
					 struct htt_ppdu_stats *ppdu_stats)
{
	u8 user;

	for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
		ath12k_update_per_peer_tx_stats(ar, ppdu_stats, user);
}
static
struct htt_ppdu_stats_info *ath12k_dp_htt_get_ppdu_desc(struct ath12k *ar,
							 u32 ppdu_id)
{
	struct htt_ppdu_stats_info *ppdu_info;

	lockdep_assert_held(&ar->data_lock);
	if (!list_empty(&ar->ppdu_stats_info)) {
		list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
			if (ppdu_info->ppdu_id == ppdu_id)
				return ppdu_info;
		}

		if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
			ppdu_info = list_first_entry(&ar->ppdu_stats_info,
						     typeof(*ppdu_info), list);
			list_del(&ppdu_info->list);
			ar->ppdu_stat_list_depth--;
			ath12k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
			kfree(ppdu_info);
		}
	}

	ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
	if (!ppdu_info)
		return NULL;

	list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
	ar->ppdu_stat_list_depth++;

	return ppdu_info;
}
static void ath12k_copy_to_delay_stats(struct ath12k_peer *peer,
				       struct htt_ppdu_user_stats *usr_stats)
{
	peer->ppdu_stats_delayba.sw_peer_id = le16_to_cpu(usr_stats->rate.sw_peer_id);
	peer->ppdu_stats_delayba.info0 = le32_to_cpu(usr_stats->rate.info0);
	peer->ppdu_stats_delayba.ru_end = le16_to_cpu(usr_stats->rate.ru_end);
	peer->ppdu_stats_delayba.ru_start = le16_to_cpu(usr_stats->rate.ru_start);
	peer->ppdu_stats_delayba.info1 = le32_to_cpu(usr_stats->rate.info1);
	peer->ppdu_stats_delayba.rate_flags = le32_to_cpu(usr_stats->rate.rate_flags);
	peer->ppdu_stats_delayba.resp_rate_flags =
		le32_to_cpu(usr_stats->rate.resp_rate_flags);

	peer->delayba_flag = true;
}
static void ath12k_copy_to_bar(struct ath12k_peer *peer,
			       struct htt_ppdu_user_stats *usr_stats)
{
	usr_stats->rate.sw_peer_id = cpu_to_le16(peer->ppdu_stats_delayba.sw_peer_id);
	usr_stats->rate.info0 = cpu_to_le32(peer->ppdu_stats_delayba.info0);
	usr_stats->rate.ru_end = cpu_to_le16(peer->ppdu_stats_delayba.ru_end);
	usr_stats->rate.ru_start = cpu_to_le16(peer->ppdu_stats_delayba.ru_start);
	usr_stats->rate.info1 = cpu_to_le32(peer->ppdu_stats_delayba.info1);
	usr_stats->rate.rate_flags = cpu_to_le32(peer->ppdu_stats_delayba.rate_flags);
	usr_stats->rate.resp_rate_flags =
		cpu_to_le32(peer->ppdu_stats_delayba.resp_rate_flags);

	peer->delayba_flag = false;
}
static int ath12k_htt_pull_ppdu_stats(struct ath12k_base *ab,
				      struct sk_buff *skb)
{
	struct ath12k_htt_ppdu_stats_msg *msg;
	struct htt_ppdu_stats_info *ppdu_info;
	struct ath12k_peer *peer = NULL;
	struct htt_ppdu_user_stats *usr_stats = NULL;
	u32 peer_id = 0;
	struct ath12k *ar;
	int ret, i;
	u8 pdev_id;
	u32 ppdu_id, len;

	msg = (struct ath12k_htt_ppdu_stats_msg *)skb->data;
	len = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE);
	if (len > (skb->len - struct_size(msg, data, 0))) {
		ath12k_warn(ab,
			    "HTT PPDU STATS event has unexpected payload size %u, should be smaller than %u\n",
			    len, skb->len);
		return -EINVAL;
	}

	pdev_id = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PDEV_ID);
	ppdu_id = le32_to_cpu(msg->ppdu_id);

	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
	if (!ar) {
		ret = -EINVAL;
		goto exit;
	}

	spin_lock_bh(&ar->data_lock);
	ppdu_info = ath12k_dp_htt_get_ppdu_desc(ar, ppdu_id);
	if (!ppdu_info) {
		spin_unlock_bh(&ar->data_lock);
		ret = -EINVAL;
		goto exit;
	}

	ppdu_info->ppdu_id = ppdu_id;
	ret = ath12k_dp_htt_tlv_iter(ab, msg->data, len,
				     ath12k_htt_tlv_ppdu_stats_parse,
				     (void *)ppdu_info);
	if (ret) {
		spin_unlock_bh(&ar->data_lock);
		ath12k_warn(ab, "Failed to parse tlv %d\n", ret);
		goto exit;
	}

	if (ppdu_info->ppdu_stats.common.num_users >= HTT_PPDU_STATS_MAX_USERS) {
		spin_unlock_bh(&ar->data_lock);
		ath12k_warn(ab,
			    "HTT PPDU STATS event has unexpected num_users %u, should be smaller than %u\n",
			    ppdu_info->ppdu_stats.common.num_users,
			    HTT_PPDU_STATS_MAX_USERS);
		ret = -EINVAL;
		goto exit;
	}

	/* back up data rate tlv for all peers */
	if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_DATA &&
	    (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON)) &&
	    ppdu_info->delay_ba) {
		for (i = 0; i < ppdu_info->ppdu_stats.common.num_users; i++) {
			peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id;
			spin_lock_bh(&ab->base_lock);
			peer = ath12k_peer_find_by_id(ab, peer_id);
			if (!peer) {
				spin_unlock_bh(&ab->base_lock);
				continue;
			}

			usr_stats = &ppdu_info->ppdu_stats.user_stats[i];
			if (usr_stats->delay_ba)
				ath12k_copy_to_delay_stats(peer, usr_stats);
			spin_unlock_bh(&ab->base_lock);
		}
	}

	/* restore all peers' data rate tlv to mu-bar tlv */
	if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_BAR &&
	    (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON))) {
		for (i = 0; i < ppdu_info->bar_num_users; i++) {
			peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id;
			spin_lock_bh(&ab->base_lock);
			peer = ath12k_peer_find_by_id(ab, peer_id);
			if (!peer) {
				spin_unlock_bh(&ab->base_lock);
				continue;
			}

			usr_stats = &ppdu_info->ppdu_stats.user_stats[i];
			if (peer->delayba_flag)
				ath12k_copy_to_bar(peer, usr_stats);
			spin_unlock_bh(&ab->base_lock);
		}
	}

	spin_unlock_bh(&ar->data_lock);

exit:
	rcu_read_unlock();

	return ret;
}
static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab,
						struct sk_buff *skb)
{
	struct ath12k_htt_mlo_offset_msg *msg;
	struct ath12k_pdev *pdev;
	struct ath12k *ar;
	u8 pdev_id;

	msg = (struct ath12k_htt_mlo_offset_msg *)skb->data;
	pdev_id = u32_get_bits(__le32_to_cpu(msg->info),
			       HTT_T2H_MLO_OFFSET_INFO_PDEV_ID);

	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
	if (!ar) {
		ath12k_warn(ab, "invalid pdev id %d on htt mlo offset\n", pdev_id);
		goto exit;
	}

	spin_lock_bh(&ar->data_lock);
	pdev = ar->pdev;

	pdev->timestamp.info = __le32_to_cpu(msg->info);
	pdev->timestamp.sync_timestamp_lo_us = __le32_to_cpu(msg->sync_timestamp_lo_us);
	pdev->timestamp.sync_timestamp_hi_us = __le32_to_cpu(msg->sync_timestamp_hi_us);
	pdev->timestamp.mlo_offset_lo = __le32_to_cpu(msg->mlo_offset_lo);
	pdev->timestamp.mlo_offset_hi = __le32_to_cpu(msg->mlo_offset_hi);
	pdev->timestamp.mlo_offset_clks = __le32_to_cpu(msg->mlo_offset_clks);
	pdev->timestamp.mlo_comp_clks = __le32_to_cpu(msg->mlo_comp_clks);
	pdev->timestamp.mlo_comp_timer = __le32_to_cpu(msg->mlo_comp_timer);

	spin_unlock_bh(&ar->data_lock);
exit:
	rcu_read_unlock();
}
void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab,
				       struct sk_buff *skb)
{
	struct ath12k_dp *dp = &ab->dp;
	struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
	enum htt_t2h_msg_type type;
	u16 peer_id;
	u8 vdev_id;
	u8 mac_addr[ETH_ALEN];
	u16 peer_mac_h16;
	u16 ast_hash = 0;
	u16 hw_peer_id;

	type = le32_get_bits(resp->version_msg.version, HTT_T2H_MSG_TYPE);

	ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type);

	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF:
		dp->htt_tgt_ver_major = le32_get_bits(resp->version_msg.version,
						      HTT_T2H_VERSION_CONF_MAJOR);
		dp->htt_tgt_ver_minor = le32_get_bits(resp->version_msg.version,
						      HTT_T2H_VERSION_CONF_MINOR);
		complete(&dp->htt_tgt_version_received);
		break;
	/* TODO: remove unused peer map versions after testing */
	case HTT_T2H_MSG_TYPE_PEER_MAP:
		vdev_id = le32_get_bits(resp->peer_map_ev.info,
					HTT_T2H_PEER_MAP_INFO_VDEV_ID);
		peer_id = le32_get_bits(resp->peer_map_ev.info,
					HTT_T2H_PEER_MAP_INFO_PEER_ID);
		peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
					     HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
		ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
				       peer_mac_h16, mac_addr);
		ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP2:
		vdev_id = le32_get_bits(resp->peer_map_ev.info,
					HTT_T2H_PEER_MAP_INFO_VDEV_ID);
		peer_id = le32_get_bits(resp->peer_map_ev.info,
					HTT_T2H_PEER_MAP_INFO_PEER_ID);
		peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
					     HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
		ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
				       peer_mac_h16, mac_addr);
		ast_hash = le32_get_bits(resp->peer_map_ev.info2,
					 HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL);
		hw_peer_id = le32_get_bits(resp->peer_map_ev.info1,
					   HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID);
		ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
				      hw_peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP3:
		vdev_id = le32_get_bits(resp->peer_map_ev.info,
					HTT_T2H_PEER_MAP_INFO_VDEV_ID);
		peer_id = le32_get_bits(resp->peer_map_ev.info,
					HTT_T2H_PEER_MAP_INFO_PEER_ID);
		peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
					     HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
		ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
				       peer_mac_h16, mac_addr);
		ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
				      peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
	case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
		peer_id = le32_get_bits(resp->peer_unmap_ev.info,
					HTT_T2H_PEER_UNMAP_INFO_PEER_ID);
		ath12k_peer_unmap_event(ab, peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
		ath12k_htt_pull_ppdu_stats(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
		ath12k_debugfs_htt_ext_stats_handler(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND:
		ath12k_htt_mlo_offset_event_handler(ab, skb);
		break;
	default:
		ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt event %d not handled\n",
			   type);
		break;
	}

	dev_kfree_skb_any(skb);
}
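
/* Coalesce an MSDU that spans multiple rx buffers into the first skb by
 * copying the payload of each continuation buffer and the MSDU_END TLVs
 * from the last buffer.
 */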
static int ath12k_dp_rx_msdu_coalesce(struct ath12k *ar,
				      struct sk_buff_head *msdu_list,
				      struct sk_buff *first, struct sk_buff *last,
				      u8 l3pad_bytes, int msdu_len)
{
	struct ath12k_base *ab = ar->ab;
	struct sk_buff *skb;
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first);
	int buf_first_hdr_len, buf_first_len;
	struct hal_rx_desc *ldesc;
	int space_extra, rem_len, buf_len;
	u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;

	/* As the msdu is spread across multiple rx buffers,
	 * find the offset to the start of msdu for computing
	 * the length of the msdu in the first buffer.
	 */
	buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes;
	buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;

	if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
		skb_put(first, buf_first_hdr_len + msdu_len);
		skb_pull(first, buf_first_hdr_len);
		return 0;
	}

	ldesc = (struct hal_rx_desc *)last->data;
	rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, ldesc);
	rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, ldesc);

	/* MSDU spans over multiple buffers because the length of the MSDU
	 * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data
	 * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
	 */
	skb_put(first, DP_RX_BUFFER_SIZE);
	skb_pull(first, buf_first_hdr_len);

	/* When an MSDU spread over multiple buffers MSDU_END
	 * tlvs are valid only in the last buffer. Copy those tlvs.
	 */
	ath12k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc);

	space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
	if (space_extra > 0 &&
	    (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
		/* Free up all buffers of the MSDU */
		while ((skb = __skb_dequeue(msdu_list)) != NULL) {
			rxcb = ATH12K_SKB_RXCB(skb);
			if (!rxcb->is_continuation) {
				dev_kfree_skb_any(skb);
				break;
			}
			dev_kfree_skb_any(skb);
		}
		return -ENOMEM;
	}

	rem_len = msdu_len - buf_first_len;
	while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
		rxcb = ATH12K_SKB_RXCB(skb);
		if (rxcb->is_continuation)
			buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
		else
			buf_len = rem_len;

		if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) {
			WARN_ON_ONCE(1);
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		skb_put(skb, buf_len + hal_rx_desc_sz);
		skb_pull(skb, hal_rx_desc_sz);
		skb_copy_from_linear_data(skb, skb_put(first, buf_len),
					  buf_len);
		dev_kfree_skb_any(skb);

		rem_len -= buf_len;
		if (!rxcb->is_continuation)
			break;
	}

	return 0;
}
static struct sk_buff *ath12k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
						      struct sk_buff *first)
{
	struct sk_buff *skb;
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first);

	if (!rxcb->is_continuation)
		return first;

	skb_queue_walk(msdu_list, skb) {
		rxcb = ATH12K_SKB_RXCB(skb);
		if (!rxcb->is_continuation)
			return skb;
	}

	return NULL;
}
static void ath12k_dp_rx_h_csum_offload(struct ath12k *ar, struct sk_buff *msdu)
{
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	struct ath12k_base *ab = ar->ab;
	bool ip_csum_fail, l4_csum_fail;

	ip_csum_fail = ath12k_dp_rx_h_ip_cksum_fail(ab, rxcb->rx_desc);
	l4_csum_fail = ath12k_dp_rx_h_l4_cksum_fail(ab, rxcb->rx_desc);

	msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
			  CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
}
static int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar,
				       enum hal_encrypt_type enctype)
{
	switch (enctype) {
	case HAL_ENCRYPT_TYPE_OPEN:
	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
	case HAL_ENCRYPT_TYPE_TKIP_MIC:
		return 0;
	case HAL_ENCRYPT_TYPE_CCMP_128:
		return IEEE80211_CCMP_MIC_LEN;
	case HAL_ENCRYPT_TYPE_CCMP_256:
		return IEEE80211_CCMP_256_MIC_LEN;
	case HAL_ENCRYPT_TYPE_GCMP_128:
	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
		return IEEE80211_GCMP_MIC_LEN;
	case HAL_ENCRYPT_TYPE_WEP_40:
	case HAL_ENCRYPT_TYPE_WEP_104:
	case HAL_ENCRYPT_TYPE_WEP_128:
	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
	case HAL_ENCRYPT_TYPE_WAPI:
		break;
	}

	ath12k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
	return 0;
}
static int ath12k_dp_rx_crypto_param_len(struct ath12k *ar,
					 enum hal_encrypt_type enctype)
{
	switch (enctype) {
	case HAL_ENCRYPT_TYPE_OPEN:
		return 0;
	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
	case HAL_ENCRYPT_TYPE_TKIP_MIC:
		return IEEE80211_TKIP_IV_LEN;
	case HAL_ENCRYPT_TYPE_CCMP_128:
		return IEEE80211_CCMP_HDR_LEN;
	case HAL_ENCRYPT_TYPE_CCMP_256:
		return IEEE80211_CCMP_256_HDR_LEN;
	case HAL_ENCRYPT_TYPE_GCMP_128:
	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
		return IEEE80211_GCMP_HDR_LEN;
	case HAL_ENCRYPT_TYPE_WEP_40:
	case HAL_ENCRYPT_TYPE_WEP_104:
	case HAL_ENCRYPT_TYPE_WEP_128:
	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
	case HAL_ENCRYPT_TYPE_WAPI:
		break;
	}

	ath12k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
	return 0;
}
static int ath12k_dp_rx_crypto_icv_len(struct ath12k *ar,
				       enum hal_encrypt_type enctype)
{
	switch (enctype) {
	case HAL_ENCRYPT_TYPE_OPEN:
	case HAL_ENCRYPT_TYPE_CCMP_128:
	case HAL_ENCRYPT_TYPE_CCMP_256:
	case HAL_ENCRYPT_TYPE_GCMP_128:
	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
		return 0;
	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
	case HAL_ENCRYPT_TYPE_TKIP_MIC:
		return IEEE80211_TKIP_ICV_LEN;
	case HAL_ENCRYPT_TYPE_WEP_40:
	case HAL_ENCRYPT_TYPE_WEP_104:
	case HAL_ENCRYPT_TYPE_WEP_128:
	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
	case HAL_ENCRYPT_TYPE_WAPI:
		break;
	}

	ath12k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
	return 0;
}
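
/* Convert a native wifi decapped MSDU back into a full 802.11 frame for
 * mac80211: rebuild the QoS control field and, if the IV was not stripped,
 * the crypto header.
 */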
static void ath12k_dp_rx_h_undecap_nwifi(struct ath12k *ar,
					 struct sk_buff *msdu,
					 enum hal_encrypt_type enctype,
					 struct ieee80211_rx_status *status)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN];
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	u8 *crypto_hdr;
	u16 qos_ctl;

	/* pull decapped header */
	hdr = (struct ieee80211_hdr *)msdu->data;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	skb_pull(msdu, hdr_len);

	/* Rebuild qos header */
	hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA);

	/* Reset the order bit as the HT_Control header is stripped */
	hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER));

	qos_ctl = rxcb->tid;

	if (ath12k_dp_rx_h_mesh_ctl_present(ab, rxcb->rx_desc))
		qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;

	/* TODO: Add other QoS ctl fields when required */

	/* copy decap header before overwriting for reuse below */
	memcpy(decap_hdr, hdr, hdr_len);

	/* Rebuild crypto header for mac80211 use */
	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		crypto_hdr = skb_push(msdu, ath12k_dp_rx_crypto_param_len(ar, enctype));
		ath12k_dp_rx_desc_get_crypto_header(ar->ab,
						    rxcb->rx_desc, crypto_hdr,
						    enctype);
	}

	memcpy(skb_push(msdu,
			IEEE80211_QOS_CTL_LEN), &qos_ctl,
	       IEEE80211_QOS_CTL_LEN);
	memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len);
}
static void ath12k_dp_rx_h_undecap_raw(struct ath12k *ar, struct sk_buff *msdu,
				       enum hal_encrypt_type enctype,
				       struct ieee80211_rx_status *status,
				       bool decrypted)
{
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	size_t crypto_len;

	if (!rxcb->is_first_msdu ||
	    !(rxcb->is_first_msdu && rxcb->is_last_msdu)) {
		WARN_ON_ONCE(1);
		return;
	}

	skb_trim(msdu, msdu->len - FCS_LEN);

	if (!decrypted)
		return;

	hdr = (void *)msdu->data;

	/* Tail */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		skb_trim(msdu, msdu->len -
			 ath12k_dp_rx_crypto_mic_len(ar, enctype));

		skb_trim(msdu, msdu->len -
			 ath12k_dp_rx_crypto_icv_len(ar, enctype));
	} else {
		/* MIC */
		if (status->flag & RX_FLAG_MIC_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath12k_dp_rx_crypto_mic_len(ar, enctype));

		/* ICV */
		if (status->flag & RX_FLAG_ICV_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath12k_dp_rx_crypto_icv_len(ar, enctype));
	}

	/* MMIC */
	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
		skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);

	/* Head */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);

		memmove(msdu->data + crypto_len, msdu->data, hdr_len);
		skb_pull(msdu, crypto_len);
	}
}
static void ath12k_get_dot11_hdr_from_rx_desc(struct ath12k *ar,
					      struct sk_buff *msdu,
					      struct ath12k_skb_rxcb *rxcb,
					      struct ieee80211_rx_status *status,
					      enum hal_encrypt_type enctype)
{
	struct hal_rx_desc *rx_desc = rxcb->rx_desc;
	struct ath12k_base *ab = ar->ab;
	size_t hdr_len, crypto_len;
	struct ieee80211_hdr *hdr;
	u16 qos_ctl;
	__le16 fc;
	u8 *crypto_hdr;

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
		crypto_hdr = skb_push(msdu, crypto_len);
		ath12k_dp_rx_desc_get_crypto_header(ab, rx_desc, crypto_hdr, enctype);
	}

	fc = cpu_to_le16(ath12k_dp_rxdesc_get_mpdu_frame_ctrl(ab, rx_desc));
	hdr_len = ieee80211_hdrlen(fc);
	skb_push(msdu, hdr_len);
	hdr = (struct ieee80211_hdr *)msdu->data;
	hdr->frame_control = fc;

	/* Get wifi header from rx_desc */
	ath12k_dp_rx_desc_get_dot11_hdr(ab, rx_desc, hdr);

	if (rxcb->is_mcbc)
		status->flag &= ~RX_FLAG_PN_VALIDATED;

	/* Add QOS header */
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		qos_ctl = rxcb->tid;
		if (ath12k_dp_rx_h_mesh_ctl_present(ab, rx_desc))
			qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;

		/* TODO: Add other QoS ctl fields when required */
		memcpy(msdu->data + (hdr_len - IEEE80211_QOS_CTL_LEN),
		       &qos_ctl, IEEE80211_QOS_CTL_LEN);
	}
}
static void ath12k_dp_rx_h_undecap_eth(struct ath12k *ar,
				       struct sk_buff *msdu,
				       enum hal_encrypt_type enctype,
				       struct ieee80211_rx_status *status)
{
	struct ieee80211_hdr *hdr;
	struct ethhdr *eth;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	struct ath12k_dp_rx_rfc1042_hdr rfc = {0xaa, 0xaa, 0x03, {0x00, 0x00, 0x00}};

	eth = (struct ethhdr *)msdu->data;
	ether_addr_copy(da, eth->h_dest);
	ether_addr_copy(sa, eth->h_source);
	rfc.snap_type = eth->h_proto;
	skb_pull(msdu, sizeof(*eth));
	memcpy(skb_push(msdu, sizeof(rfc)), &rfc,
	       sizeof(rfc));
	ath12k_get_dot11_hdr_from_rx_desc(ar, msdu, rxcb, status, enctype);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}
static void ath12k_dp_rx_h_undecap(struct ath12k *ar, struct sk_buff *msdu,
				   struct hal_rx_desc *rx_desc,
				   enum hal_encrypt_type enctype,
				   struct ieee80211_rx_status *status,
				   bool decrypted)
{
	struct ath12k_base *ab = ar->ab;
	u8 decap;
	struct ethhdr *ehdr;

	decap = ath12k_dp_rx_h_decap_type(ab, rx_desc);

	switch (decap) {
	case DP_RX_DECAP_TYPE_NATIVE_WIFI:
		ath12k_dp_rx_h_undecap_nwifi(ar, msdu, enctype, status);
		break;
	case DP_RX_DECAP_TYPE_RAW:
		ath12k_dp_rx_h_undecap_raw(ar, msdu, enctype, status,
					   decrypted);
		break;
	case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
		ehdr = (struct ethhdr *)msdu->data;

		/* mac80211 allows fast path only for authorized STA */
		if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) {
			ATH12K_SKB_RXCB(msdu)->is_eapol = true;
			ath12k_dp_rx_h_undecap_eth(ar, msdu, enctype, status);
			break;
		}

		/* PN for mcast packets will be validated in mac80211;
		 * remove eth header and add 802.11 header.
		 */
		if (ATH12K_SKB_RXCB(msdu)->is_mcbc && decrypted)
			ath12k_dp_rx_h_undecap_eth(ar, msdu, enctype, status);
		break;
	case DP_RX_DECAP_TYPE_8023:
		/* TODO: Handle undecap for these formats */
		break;
	}
}
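
/* Look up the peer for an MSDU, first by the peer id stored in the rx
 * control block and then by the MPDU start address2 from the descriptor.
 * Caller must hold ab->base_lock.
 */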
struct ath12k_peer *
ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu)
{
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	struct hal_rx_desc *rx_desc = rxcb->rx_desc;
	struct ath12k_peer *peer = NULL;

	lockdep_assert_held(&ab->base_lock);

	if (rxcb->peer_id)
		peer = ath12k_peer_find_by_id(ab, rxcb->peer_id);

	if (peer)
		return peer;

	if (!rx_desc || !(ath12k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)))
		return NULL;

	peer = ath12k_peer_find_by_addr(ab,
					ath12k_dp_rxdesc_get_mpdu_start_addr2(ab,
									      rx_desc));
	return peer;
}
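
/* Per-MPDU rx handling: derive the encryption type from the peer, update
 * the mac80211 rx status flags from the descriptor error bitmap and the
 * decryption state, offload checksum results and undecap the frame.
 */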
static void ath12k_dp_rx_h_mpdu(struct ath12k *ar,
				struct sk_buff *msdu,
				struct hal_rx_desc *rx_desc,
				struct ieee80211_rx_status *rx_status)
{
	bool fill_crypto_hdr;
	struct ath12k_base *ab = ar->ab;
	struct ath12k_skb_rxcb *rxcb;
	enum hal_encrypt_type enctype;
	bool is_decrypted = false;
	struct ieee80211_hdr *hdr;
	struct ath12k_peer *peer;
	u32 err_bitmap;

	/* PN for multicast packets will be checked in mac80211 */
	rxcb = ATH12K_SKB_RXCB(msdu);
	fill_crypto_hdr = ath12k_dp_rx_h_is_da_mcbc(ar->ab, rx_desc);
	rxcb->is_mcbc = fill_crypto_hdr;

	if (rxcb->is_mcbc)
		rxcb->peer_id = ath12k_dp_rx_h_peer_id(ar->ab, rx_desc);

	spin_lock_bh(&ar->ab->base_lock);
	peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu);
	if (peer) {
		if (rxcb->is_mcbc)
			enctype = peer->sec_type_grp;
		else
			enctype = peer->sec_type;
	} else {
		enctype = HAL_ENCRYPT_TYPE_OPEN;
	}
	spin_unlock_bh(&ar->ab->base_lock);

	err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
	if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap)
		is_decrypted = ath12k_dp_rx_h_is_decrypted(ab, rx_desc);

	/* Clear per-MPDU flags while leaving per-PPDU flags intact */
	rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
			     RX_FLAG_MMIC_ERROR |
			     RX_FLAG_DECRYPTED |
			     RX_FLAG_IV_STRIPPED |
			     RX_FLAG_MMIC_STRIPPED);

	if (err_bitmap & HAL_RX_MPDU_ERR_FCS)
		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
	if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC)
		rx_status->flag |= RX_FLAG_MMIC_ERROR;

	if (is_decrypted) {
		rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;

		if (fill_crypto_hdr)
			rx_status->flag |= RX_FLAG_MIC_STRIPPED |
					   RX_FLAG_ICV_STRIPPED;
		else
			rx_status->flag |= RX_FLAG_IV_STRIPPED |
					   RX_FLAG_PN_VALIDATED;
	}

	ath12k_dp_rx_h_csum_offload(ar, msdu);
	ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
			       enctype, rx_status, is_decrypted);

	if (!is_decrypted || fill_crypto_hdr)
		return;

	if (ath12k_dp_rx_h_decap_type(ar->ab, rx_desc) !=
	    DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
		hdr = (void *)msdu->data;
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	}
}
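
/* Translate the PHY fields from the rx descriptor (packet type, MCS, NSS,
 * GI and bandwidth) into the mac80211 rx status rate information.
 */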
2279 static void ath12k_dp_rx_h_rate(struct ath12k
*ar
, struct hal_rx_desc
*rx_desc
,
2280 struct ieee80211_rx_status
*rx_status
)
2282 struct ath12k_base
*ab
= ar
->ab
;
2283 struct ieee80211_supported_band
*sband
;
2284 enum rx_msdu_start_pkt_type pkt_type
;
2290 pkt_type
= ath12k_dp_rx_h_pkt_type(ab
, rx_desc
);
2291 bw
= ath12k_dp_rx_h_rx_bw(ab
, rx_desc
);
2292 rate_mcs
= ath12k_dp_rx_h_rate_mcs(ab
, rx_desc
);
2293 nss
= ath12k_dp_rx_h_nss(ab
, rx_desc
);
2294 sgi
= ath12k_dp_rx_h_sgi(ab
, rx_desc
);
2297 case RX_MSDU_START_PKT_TYPE_11A
:
2298 case RX_MSDU_START_PKT_TYPE_11B
:
2299 is_cck
= (pkt_type
== RX_MSDU_START_PKT_TYPE_11B
);
2300 sband
= &ar
->mac
.sbands
[rx_status
->band
];
2301 rx_status
->rate_idx
= ath12k_mac_hw_rate_to_idx(sband
, rate_mcs
,
2304 case RX_MSDU_START_PKT_TYPE_11N
:
2305 rx_status
->encoding
= RX_ENC_HT
;
2306 if (rate_mcs
> ATH12K_HT_MCS_MAX
) {
2308 "Received with invalid mcs in HT mode %d\n",
2312 rx_status
->rate_idx
= rate_mcs
+ (8 * (nss
- 1));
2314 rx_status
->enc_flags
|= RX_ENC_FLAG_SHORT_GI
;
2315 rx_status
->bw
= ath12k_mac_bw_to_mac80211_bw(bw
);
2317 case RX_MSDU_START_PKT_TYPE_11AC
:
2318 rx_status
->encoding
= RX_ENC_VHT
;
2319 rx_status
->rate_idx
= rate_mcs
;
2320 if (rate_mcs
> ATH12K_VHT_MCS_MAX
) {
2322 "Received with invalid mcs in VHT mode %d\n",
2326 rx_status
->nss
= nss
;
2328 rx_status
->enc_flags
|= RX_ENC_FLAG_SHORT_GI
;
2329 rx_status
->bw
= ath12k_mac_bw_to_mac80211_bw(bw
);
2331 case RX_MSDU_START_PKT_TYPE_11AX
:
2332 rx_status
->rate_idx
= rate_mcs
;
2333 if (rate_mcs
> ATH12K_HE_MCS_MAX
) {
2335 "Received with invalid mcs in HE mode %d\n",
2339 rx_status
->encoding
= RX_ENC_HE
;
2340 rx_status
->nss
= nss
;
2341 rx_status
->he_gi
= ath12k_he_gi_to_nl80211_he_gi(sgi
);
2342 rx_status
->bw
= ath12k_mac_bw_to_mac80211_bw(bw
);
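
/* Fill the per-PPDU rx status: band and frequency derived from the
 * descriptor metadata (falling back to the current rx channel), followed
 * by the rate information.
 */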
2347 void ath12k_dp_rx_h_ppdu(struct ath12k
*ar
, struct hal_rx_desc
*rx_desc
,
2348 struct ieee80211_rx_status
*rx_status
)
2350 struct ath12k_base
*ab
= ar
->ab
;
2352 u32 center_freq
, meta_data
;
2353 struct ieee80211_channel
*channel
;
2355 rx_status
->freq
= 0;
2356 rx_status
->rate_idx
= 0;
2358 rx_status
->encoding
= RX_ENC_LEGACY
;
2359 rx_status
->bw
= RATE_INFO_BW_20
;
2360 rx_status
->enc_flags
= 0;
2362 rx_status
->flag
|= RX_FLAG_NO_SIGNAL_VAL
;
2364 meta_data
= ath12k_dp_rx_h_freq(ab
, rx_desc
);
2365 channel_num
= meta_data
;
2366 center_freq
= meta_data
>> 16;
2368 if (center_freq
>= ATH12K_MIN_6G_FREQ
&&
2369 center_freq
<= ATH12K_MAX_6G_FREQ
) {
2370 rx_status
->band
= NL80211_BAND_6GHZ
;
2371 rx_status
->freq
= center_freq
;
2372 } else if (channel_num
>= 1 && channel_num
<= 14) {
2373 rx_status
->band
= NL80211_BAND_2GHZ
;
2374 } else if (channel_num
>= 36 && channel_num
<= 173) {
2375 rx_status
->band
= NL80211_BAND_5GHZ
;
2377 spin_lock_bh(&ar
->data_lock
);
2378 channel
= ar
->rx_channel
;
2380 rx_status
->band
= channel
->band
;
2382 ieee80211_frequency_to_channel(channel
->center_freq
);
2384 spin_unlock_bh(&ar
->data_lock
);
2385 ath12k_dbg_dump(ar
->ab
, ATH12K_DBG_DATA
, NULL
, "rx_desc: ",
2386 rx_desc
, sizeof(*rx_desc
));
2389 if (rx_status
->band
!= NL80211_BAND_6GHZ
)
2390 rx_status
->freq
= ieee80211_channel_to_frequency(channel_num
,
2393 ath12k_dp_rx_h_rate(ar
, rx_desc
, rx_status
);
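
/* Hand a fully prepared MSDU to mac80211, optionally prepending a
 * radiotap HE header and selecting the 802.3 fast path when allowed.
 */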
2396 static void ath12k_dp_rx_deliver_msdu(struct ath12k
*ar
, struct napi_struct
*napi
,
2397 struct sk_buff
*msdu
,
2398 struct ieee80211_rx_status
*status
)
2400 struct ath12k_base
*ab
= ar
->ab
;
2401 static const struct ieee80211_radiotap_he known
= {
2402 .data1
= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN
|
2403 IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN
),
2404 .data2
= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN
),
2406 struct ieee80211_radiotap_he
*he
;
2407 struct ieee80211_rx_status
*rx_status
;
2408 struct ieee80211_sta
*pubsta
;
2409 struct ath12k_peer
*peer
;
2410 struct ath12k_skb_rxcb
*rxcb
= ATH12K_SKB_RXCB(msdu
);
2411 u8 decap
= DP_RX_DECAP_TYPE_RAW
;
2412 bool is_mcbc
= rxcb
->is_mcbc
;
2413 bool is_eapol
= rxcb
->is_eapol
;
2415 if (status
->encoding
== RX_ENC_HE
&& !(status
->flag
& RX_FLAG_RADIOTAP_HE
) &&
2416 !(status
->flag
& RX_FLAG_SKIP_MONITOR
)) {
2417 he
= skb_push(msdu
, sizeof(known
));
2418 memcpy(he
, &known
, sizeof(known
));
2419 status
->flag
|= RX_FLAG_RADIOTAP_HE
;
2422 if (!(status
->flag
& RX_FLAG_ONLY_MONITOR
))
2423 decap
= ath12k_dp_rx_h_decap_type(ab
, rxcb
->rx_desc
);
2425 spin_lock_bh(&ab
->base_lock
);
2426 peer
= ath12k_dp_rx_h_find_peer(ab
, msdu
);
2428 pubsta
= peer
? peer
->sta
: NULL
;
2430 spin_unlock_bh(&ab
->base_lock
);
2432 ath12k_dbg(ab
, ATH12K_DBG_DATA
,
2433 "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
2436 peer
? peer
->addr
: NULL
,
2438 is_mcbc
? "mcast" : "ucast",
2439 ath12k_dp_rx_h_seq_no(ab
, rxcb
->rx_desc
),
2440 (status
->encoding
== RX_ENC_LEGACY
) ? "legacy" : "",
2441 (status
->encoding
== RX_ENC_HT
) ? "ht" : "",
2442 (status
->encoding
== RX_ENC_VHT
) ? "vht" : "",
2443 (status
->encoding
== RX_ENC_HE
) ? "he" : "",
2444 (status
->bw
== RATE_INFO_BW_40
) ? "40" : "",
2445 (status
->bw
== RATE_INFO_BW_80
) ? "80" : "",
2446 (status
->bw
== RATE_INFO_BW_160
) ? "160" : "",
2447 (status
->bw
== RATE_INFO_BW_320
) ? "320" : "",
2448 status
->enc_flags
& RX_ENC_FLAG_SHORT_GI
? "sgi " : "",
2452 status
->band
, status
->flag
,
2453 !!(status
->flag
& RX_FLAG_FAILED_FCS_CRC
),
2454 !!(status
->flag
& RX_FLAG_MMIC_ERROR
),
2455 !!(status
->flag
& RX_FLAG_AMSDU_MORE
));
2457 ath12k_dbg_dump(ab
, ATH12K_DBG_DP_RX
, NULL
, "dp rx msdu: ",
2458 msdu
->data
, msdu
->len
);
2460 rx_status
= IEEE80211_SKB_RXCB(msdu
);
2461 *rx_status
= *status
;
2463 /* TODO: trace rx packet */
2465 /* PN for multicast packets are not validate in HW,
2466 * so skip 802.3 rx path
2467 * Also, fast_rx expects the STA to be authorized, hence
2468 * eapol packets are sent in slow path.
2470 if (decap
== DP_RX_DECAP_TYPE_ETHERNET2_DIX
&& !is_eapol
&&
2471 !(is_mcbc
&& rx_status
->flag
& RX_FLAG_DECRYPTED
))
2472 rx_status
->flag
|= RX_FLAG_8023
;
2474 ieee80211_rx_napi(ath12k_ar_to_hw(ar
), pubsta
, msdu
, napi
);
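
/* Validate a reaped MSDU (msdu_done, length, continuation), strip the HAL
 * descriptor and L3 padding or coalesce multi-buffer MSDUs, then fill the
 * PPDU and MPDU rx status.
 */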
2477 static int ath12k_dp_rx_process_msdu(struct ath12k
*ar
,
2478 struct sk_buff
*msdu
,
2479 struct sk_buff_head
*msdu_list
,
2480 struct ieee80211_rx_status
*rx_status
)
2482 struct ath12k_base
*ab
= ar
->ab
;
2483 struct hal_rx_desc
*rx_desc
, *lrx_desc
;
2484 struct ath12k_skb_rxcb
*rxcb
;
2485 struct sk_buff
*last_buf
;
2489 u32 hal_rx_desc_sz
= ar
->ab
->hal
.hal_desc_sz
;
2491 last_buf
= ath12k_dp_rx_get_msdu_last_buf(msdu_list
, msdu
);
2494 "No valid Rx buffer to access MSDU_END tlv\n");
2499 rx_desc
= (struct hal_rx_desc
*)msdu
->data
;
2500 lrx_desc
= (struct hal_rx_desc
*)last_buf
->data
;
2501 if (!ath12k_dp_rx_h_msdu_done(ab
, lrx_desc
)) {
2502 ath12k_warn(ab
, "msdu_done bit in msdu_end is not set\n");
2507 rxcb
= ATH12K_SKB_RXCB(msdu
);
2508 rxcb
->rx_desc
= rx_desc
;
2509 msdu_len
= ath12k_dp_rx_h_msdu_len(ab
, lrx_desc
);
2510 l3_pad_bytes
= ath12k_dp_rx_h_l3pad(ab
, lrx_desc
);
2512 if (rxcb
->is_frag
) {
2513 skb_pull(msdu
, hal_rx_desc_sz
);
2514 } else if (!rxcb
->is_continuation
) {
2515 if ((msdu_len
+ hal_rx_desc_sz
) > DP_RX_BUFFER_SIZE
) {
2517 ath12k_warn(ab
, "invalid msdu len %u\n", msdu_len
);
2518 ath12k_dbg_dump(ab
, ATH12K_DBG_DATA
, NULL
, "", rx_desc
,
2522 skb_put(msdu
, hal_rx_desc_sz
+ l3_pad_bytes
+ msdu_len
);
2523 skb_pull(msdu
, hal_rx_desc_sz
+ l3_pad_bytes
);
2525 ret
= ath12k_dp_rx_msdu_coalesce(ar
, msdu_list
,
2527 l3_pad_bytes
, msdu_len
);
2530 "failed to coalesce msdu rx buffer%d\n", ret
);
2535 ath12k_dp_rx_h_ppdu(ar
, rx_desc
, rx_status
);
2536 ath12k_dp_rx_h_mpdu(ar
, msdu
, rx_desc
, rx_status
);
2538 rx_status
->flag
|= RX_FLAG_SKIP_MONITOR
| RX_FLAG_DUP_VALIDATED
;
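
/* Deliver every MSDU reaped from the REO destination ring to mac80211,
 * dropping frames for inactive pdevs or while CAC is running.
 */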
2546 static void ath12k_dp_rx_process_received_packets(struct ath12k_base
*ab
,
2547 struct napi_struct
*napi
,
2548 struct sk_buff_head
*msdu_list
,
2551 struct ieee80211_rx_status rx_status
= {0};
2552 struct ath12k_skb_rxcb
*rxcb
;
2553 struct sk_buff
*msdu
;
2558 if (skb_queue_empty(msdu_list
))
2563 while ((msdu
= __skb_dequeue(msdu_list
))) {
2564 rxcb
= ATH12K_SKB_RXCB(msdu
);
2565 mac_id
= rxcb
->mac_id
;
2566 pdev_id
= ath12k_hw_mac_id_to_pdev_id(ab
->hw_params
, mac_id
);
2567 ar
= ab
->pdevs
[pdev_id
].ar
;
2568 if (!rcu_dereference(ab
->pdevs_active
[pdev_id
])) {
2569 dev_kfree_skb_any(msdu
);
2573 if (test_bit(ATH12K_CAC_RUNNING
, &ar
->dev_flags
)) {
2574 dev_kfree_skb_any(msdu
);
2578 ret
= ath12k_dp_rx_process_msdu(ar
, msdu
, msdu_list
, &rx_status
);
2580 ath12k_dbg(ab
, ATH12K_DBG_DATA
,
2581 "Unable to process msdu %d", ret
);
2582 dev_kfree_skb_any(msdu
);
2586 ath12k_dp_rx_deliver_msdu(ar
, napi
, msdu
, &rx_status
);
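
/* Extract the peer id from the MPDU descriptor metadata according to the
 * peer metadata version negotiated with firmware.
 */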
static u16 ath12k_dp_rx_get_peer_id(struct ath12k_base *ab,
				    enum ath12k_peer_metadata_version ver,
				    __le32 peer_metadata)
{
	switch (ver) {
	default:
		ath12k_warn(ab, "Unknown peer metadata version: %d", ver);
		fallthrough;
	case ATH12K_PEER_METADATA_V0:
		return le32_get_bits(peer_metadata,
				     RX_MPDU_DESC_META_DATA_V0_PEER_ID);
	case ATH12K_PEER_METADATA_V1:
		return le32_get_bits(peer_metadata,
				     RX_MPDU_DESC_META_DATA_V1_PEER_ID);
	case ATH12K_PEER_METADATA_V1A:
		return le32_get_bits(peer_metadata,
				     RX_MPDU_DESC_META_DATA_V1A_PEER_ID);
	case ATH12K_PEER_METADATA_V1B:
		return le32_get_bits(peer_metadata,
				     RX_MPDU_DESC_META_DATA_V1B_PEER_ID);
	}
}
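
/* NAPI handler for a REO destination ring: reap up to the budget of rx
 * descriptors, unmap the buffers, queue the MSDUs for processing and
 * replenish the ring.
 */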
2615 int ath12k_dp_rx_process(struct ath12k_base
*ab
, int ring_id
,
2616 struct napi_struct
*napi
, int budget
)
2618 LIST_HEAD(rx_desc_used_list
);
2619 struct ath12k_rx_desc_info
*desc_info
;
2620 struct ath12k_dp
*dp
= &ab
->dp
;
2621 struct dp_rxdma_ring
*rx_ring
= &dp
->rx_refill_buf_ring
;
2622 struct hal_reo_dest_ring
*desc
;
2623 int num_buffs_reaped
= 0;
2624 struct sk_buff_head msdu_list
;
2625 struct ath12k_skb_rxcb
*rxcb
;
2626 int total_msdu_reaped
= 0;
2627 struct hal_srng
*srng
;
2628 struct sk_buff
*msdu
;
2633 __skb_queue_head_init(&msdu_list
);
2635 srng
= &ab
->hal
.srng_list
[dp
->reo_dst_ring
[ring_id
].ring_id
];
2637 spin_lock_bh(&srng
->lock
);
2640 ath12k_hal_srng_access_begin(ab
, srng
);
2642 while ((desc
= ath12k_hal_srng_dst_get_next_entry(ab
, srng
))) {
2643 struct rx_mpdu_desc
*mpdu_info
;
2644 struct rx_msdu_desc
*msdu_info
;
2645 enum hal_reo_dest_ring_push_reason push_reason
;
2648 cookie
= le32_get_bits(desc
->buf_addr_info
.info1
,
2649 BUFFER_ADDR_INFO1_SW_COOKIE
);
2651 mac_id
= le32_get_bits(desc
->info0
,
2652 HAL_REO_DEST_RING_INFO0_SRC_LINK_ID
);
2654 desc_va
= ((u64
)le32_to_cpu(desc
->buf_va_hi
) << 32 |
2655 le32_to_cpu(desc
->buf_va_lo
));
2656 desc_info
= (struct ath12k_rx_desc_info
*)((unsigned long)desc_va
);
2658 /* retry manual desc retrieval */
2660 desc_info
= ath12k_dp_get_rx_desc(ab
, cookie
);
2662 ath12k_warn(ab
, "Invalid cookie in manual descriptor retrieval: 0x%x\n",
2668 if (desc_info
->magic
!= ATH12K_DP_RX_DESC_MAGIC
)
2669 ath12k_warn(ab
, "Check HW CC implementation");
2671 msdu
= desc_info
->skb
;
2672 desc_info
->skb
= NULL
;
2674 list_add_tail(&desc_info
->list
, &rx_desc_used_list
);
2676 rxcb
= ATH12K_SKB_RXCB(msdu
);
2677 dma_unmap_single(ab
->dev
, rxcb
->paddr
,
2678 msdu
->len
+ skb_tailroom(msdu
),
2683 push_reason
= le32_get_bits(desc
->info0
,
2684 HAL_REO_DEST_RING_INFO0_PUSH_REASON
);
2686 HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION
) {
2687 dev_kfree_skb_any(msdu
);
2688 ab
->soc_stats
.hal_reo_error
[ring_id
]++;
2692 msdu_info
= &desc
->rx_msdu_info
;
2693 mpdu_info
= &desc
->rx_mpdu_info
;
2695 rxcb
->is_first_msdu
= !!(le32_to_cpu(msdu_info
->info0
) &
2696 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU
);
2697 rxcb
->is_last_msdu
= !!(le32_to_cpu(msdu_info
->info0
) &
2698 RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU
);
2699 rxcb
->is_continuation
= !!(le32_to_cpu(msdu_info
->info0
) &
2700 RX_MSDU_DESC_INFO0_MSDU_CONTINUATION
);
2701 rxcb
->mac_id
= mac_id
;
2702 rxcb
->peer_id
= ath12k_dp_rx_get_peer_id(ab
, dp
->peer_metadata_ver
,
2703 mpdu_info
->peer_meta_data
);
2704 rxcb
->tid
= le32_get_bits(mpdu_info
->info0
,
2705 RX_MPDU_DESC_INFO0_TID
);
2707 __skb_queue_tail(&msdu_list
, msdu
);
2709 if (!rxcb
->is_continuation
) {
2710 total_msdu_reaped
++;
2716 if (total_msdu_reaped
>= budget
)
2720 /* Hw might have updated the head pointer after we cached it.
2721 * In this case, even though there are entries in the ring we'll
2722 * get rx_desc NULL. Give the read another try with updated cached
2723 * head pointer so that we can reap complete MPDU in the current
2726 if (!done
&& ath12k_hal_srng_dst_num_free(ab
, srng
, true)) {
2727 ath12k_hal_srng_access_end(ab
, srng
);
2731 ath12k_hal_srng_access_end(ab
, srng
);
2733 spin_unlock_bh(&srng
->lock
);
2735 if (!total_msdu_reaped
)
2738 ath12k_dp_rx_bufs_replenish(ab
, rx_ring
, &rx_desc_used_list
,
2741 ath12k_dp_rx_process_received_packets(ab
, napi
, &msdu_list
,
2745 return total_msdu_reaped
;
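
/* Defragmentation timeout: drop the partially assembled fragments unless
 * the sequence completed while the timer was firing.
 */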
static void ath12k_dp_rx_frag_timer(struct timer_list *timer)
{
	struct ath12k_dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer);

	spin_lock_bh(&rx_tid->ab->base_lock);
	if (rx_tid->last_frag_no &&
	    rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) {
		spin_unlock_bh(&rx_tid->ab->base_lock);
		return;
	}
	ath12k_dp_rx_frags_cleanup(rx_tid, true);
	spin_unlock_bh(&rx_tid->ab->base_lock);
}
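
/* Set up per-TID defragmentation state for a peer and allocate the
 * Michael MIC shash used for TKIP verification.
 */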
int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id)
{
	struct ath12k_base *ab = ar->ab;
	struct crypto_shash *tfm;
	struct ath12k_peer *peer;
	struct ath12k_dp_rx_tid *rx_tid;
	int i;

	tfm = crypto_alloc_shash("michael_mic", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	spin_lock_bh(&ab->base_lock);

	peer = ath12k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		spin_unlock_bh(&ab->base_lock);
		crypto_free_shash(tfm);
		ath12k_warn(ab, "failed to find the peer to set up fragment info\n");
		return -ENOENT;
	}

	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
		rx_tid = &peer->rx_tid[i];
		rx_tid->ab = ab;
		timer_setup(&rx_tid->frag_timer, ath12k_dp_rx_frag_timer, 0);
		skb_queue_head_init(&rx_tid->rx_frags);
	}

	peer->tfm_mmic = tfm;
	peer->dp_setup_done = true;
	spin_unlock_bh(&ab->base_lock);

	return 0;
}
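
/* Compute the TKIP Michael MIC over the DA/SA/priority header and the
 * payload using the peer's "michael_mic" shash.
 */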
2798 static int ath12k_dp_rx_h_michael_mic(struct crypto_shash
*tfm
, u8
*key
,
2799 struct ieee80211_hdr
*hdr
, u8
*data
,
2800 size_t data_len
, u8
*mic
)
2802 SHASH_DESC_ON_STACK(desc
, tfm
);
2803 u8 mic_hdr
[16] = {0};
2812 ret
= crypto_shash_setkey(tfm
, key
, 8);
2816 ret
= crypto_shash_init(desc
);
2820 /* TKIP MIC header */
2821 memcpy(mic_hdr
, ieee80211_get_DA(hdr
), ETH_ALEN
);
2822 memcpy(mic_hdr
+ ETH_ALEN
, ieee80211_get_SA(hdr
), ETH_ALEN
);
2823 if (ieee80211_is_data_qos(hdr
->frame_control
))
2824 tid
= ieee80211_get_tid(hdr
);
2827 ret
= crypto_shash_update(desc
, mic_hdr
, 16);
2830 ret
= crypto_shash_update(desc
, data
, data_len
);
2833 ret
= crypto_shash_final(desc
, mic
);
2835 shash_desc_zero(desc
);
2839 static int ath12k_dp_rx_h_verify_tkip_mic(struct ath12k
*ar
, struct ath12k_peer
*peer
,
2840 struct sk_buff
*msdu
)
2842 struct ath12k_base
*ab
= ar
->ab
;
2843 struct hal_rx_desc
*rx_desc
= (struct hal_rx_desc
*)msdu
->data
;
2844 struct ieee80211_rx_status
*rxs
= IEEE80211_SKB_RXCB(msdu
);
2845 struct ieee80211_key_conf
*key_conf
;
2846 struct ieee80211_hdr
*hdr
;
2847 u8 mic
[IEEE80211_CCMP_MIC_LEN
];
2848 int head_len
, tail_len
, ret
;
2850 u32 hdr_len
, hal_rx_desc_sz
= ar
->ab
->hal
.hal_desc_sz
;
2854 if (ath12k_dp_rx_h_enctype(ab
, rx_desc
) != HAL_ENCRYPT_TYPE_TKIP_MIC
)
2857 hdr
= (struct ieee80211_hdr
*)(msdu
->data
+ hal_rx_desc_sz
);
2858 hdr_len
= ieee80211_hdrlen(hdr
->frame_control
);
2859 head_len
= hdr_len
+ hal_rx_desc_sz
+ IEEE80211_TKIP_IV_LEN
;
2860 tail_len
= IEEE80211_CCMP_MIC_LEN
+ IEEE80211_TKIP_ICV_LEN
+ FCS_LEN
;
2862 if (!is_multicast_ether_addr(hdr
->addr1
))
2863 key_idx
= peer
->ucast_keyidx
;
2865 key_idx
= peer
->mcast_keyidx
;
2867 key_conf
= peer
->keys
[key_idx
];
2869 data
= msdu
->data
+ head_len
;
2870 data_len
= msdu
->len
- head_len
- tail_len
;
2871 key
= &key_conf
->key
[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY
];
2873 ret
= ath12k_dp_rx_h_michael_mic(peer
->tfm_mmic
, key
, hdr
, data
, data_len
, mic
);
2874 if (ret
|| memcmp(mic
, data
+ data_len
, IEEE80211_CCMP_MIC_LEN
))
2880 (ATH12K_SKB_RXCB(msdu
))->is_first_msdu
= true;
2881 (ATH12K_SKB_RXCB(msdu
))->is_last_msdu
= true;
2883 rxs
->flag
|= RX_FLAG_MMIC_ERROR
| RX_FLAG_MMIC_STRIPPED
|
2884 RX_FLAG_IV_STRIPPED
| RX_FLAG_DECRYPTED
;
2885 skb_pull(msdu
, hal_rx_desc_sz
);
2887 ath12k_dp_rx_h_ppdu(ar
, rx_desc
, rxs
);
2888 ath12k_dp_rx_h_undecap(ar
, msdu
, rx_desc
,
2889 HAL_ENCRYPT_TYPE_TKIP_MIC
, rxs
, true);
2890 ieee80211_rx(ath12k_ar_to_hw(ar
), msdu
);
2894 static void ath12k_dp_rx_h_undecap_frag(struct ath12k
*ar
, struct sk_buff
*msdu
,
2895 enum hal_encrypt_type enctype
, u32 flags
)
2897 struct ieee80211_hdr
*hdr
;
2900 u32 hal_rx_desc_sz
= ar
->ab
->hal
.hal_desc_sz
;
2905 hdr
= (struct ieee80211_hdr
*)(msdu
->data
+ hal_rx_desc_sz
);
2907 if (flags
& RX_FLAG_MIC_STRIPPED
)
2908 skb_trim(msdu
, msdu
->len
-
2909 ath12k_dp_rx_crypto_mic_len(ar
, enctype
));
2911 if (flags
& RX_FLAG_ICV_STRIPPED
)
2912 skb_trim(msdu
, msdu
->len
-
2913 ath12k_dp_rx_crypto_icv_len(ar
, enctype
));
2915 if (flags
& RX_FLAG_IV_STRIPPED
) {
2916 hdr_len
= ieee80211_hdrlen(hdr
->frame_control
);
2917 crypto_len
= ath12k_dp_rx_crypto_param_len(ar
, enctype
);
2919 memmove(msdu
->data
+ hal_rx_desc_sz
+ crypto_len
,
2920 msdu
->data
+ hal_rx_desc_sz
, hdr_len
);
2921 skb_pull(msdu
, crypto_len
);
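
/* Stitch the queued fragments into the first fragment's skb: strip the
 * per-fragment crypto trailers/headers, clear the morefrags bit and
 * verify the TKIP MIC on the reassembled frame.
 */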
2925 static int ath12k_dp_rx_h_defrag(struct ath12k
*ar
,
2926 struct ath12k_peer
*peer
,
2927 struct ath12k_dp_rx_tid
*rx_tid
,
2928 struct sk_buff
**defrag_skb
)
2930 struct ath12k_base
*ab
= ar
->ab
;
2931 struct hal_rx_desc
*rx_desc
;
2932 struct sk_buff
*skb
, *first_frag
, *last_frag
;
2933 struct ieee80211_hdr
*hdr
;
2934 enum hal_encrypt_type enctype
;
2935 bool is_decrypted
= false;
2938 u32 flags
, hal_rx_desc_sz
= ar
->ab
->hal
.hal_desc_sz
;
2940 first_frag
= skb_peek(&rx_tid
->rx_frags
);
2941 last_frag
= skb_peek_tail(&rx_tid
->rx_frags
);
2943 skb_queue_walk(&rx_tid
->rx_frags
, skb
) {
2945 rx_desc
= (struct hal_rx_desc
*)skb
->data
;
2946 hdr
= (struct ieee80211_hdr
*)(skb
->data
+ hal_rx_desc_sz
);
2948 enctype
= ath12k_dp_rx_h_enctype(ab
, rx_desc
);
2949 if (enctype
!= HAL_ENCRYPT_TYPE_OPEN
)
2950 is_decrypted
= ath12k_dp_rx_h_is_decrypted(ab
,
2954 if (skb
!= first_frag
)
2955 flags
|= RX_FLAG_IV_STRIPPED
;
2956 if (skb
!= last_frag
)
2957 flags
|= RX_FLAG_ICV_STRIPPED
|
2958 RX_FLAG_MIC_STRIPPED
;
2961 /* RX fragments are always raw packets */
2962 if (skb
!= last_frag
)
2963 skb_trim(skb
, skb
->len
- FCS_LEN
);
2964 ath12k_dp_rx_h_undecap_frag(ar
, skb
, enctype
, flags
);
2966 if (skb
!= first_frag
)
2967 skb_pull(skb
, hal_rx_desc_sz
+
2968 ieee80211_hdrlen(hdr
->frame_control
));
2969 msdu_len
+= skb
->len
;
2972 extra_space
= msdu_len
- (DP_RX_BUFFER_SIZE
+ skb_tailroom(first_frag
));
2973 if (extra_space
> 0 &&
2974 (pskb_expand_head(first_frag
, 0, extra_space
, GFP_ATOMIC
) < 0))
2977 __skb_unlink(first_frag
, &rx_tid
->rx_frags
);
2978 while ((skb
= __skb_dequeue(&rx_tid
->rx_frags
))) {
2979 skb_put_data(first_frag
, skb
->data
, skb
->len
);
2980 dev_kfree_skb_any(skb
);
2983 hdr
= (struct ieee80211_hdr
*)(first_frag
->data
+ hal_rx_desc_sz
);
2984 hdr
->frame_control
&= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS
);
2985 ATH12K_SKB_RXCB(first_frag
)->is_frag
= 1;
2987 if (ath12k_dp_rx_h_verify_tkip_mic(ar
, peer
, first_frag
))
2990 *defrag_skb
= first_frag
;
2994 static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k
*ar
,
2995 struct ath12k_dp_rx_tid
*rx_tid
,
2996 struct sk_buff
*defrag_skb
)
2998 struct ath12k_base
*ab
= ar
->ab
;
2999 struct ath12k_dp
*dp
= &ab
->dp
;
3000 struct hal_rx_desc
*rx_desc
= (struct hal_rx_desc
*)defrag_skb
->data
;
3001 struct hal_reo_entrance_ring
*reo_ent_ring
;
3002 struct hal_reo_dest_ring
*reo_dest_ring
;
3003 struct dp_link_desc_bank
*link_desc_banks
;
3004 struct hal_rx_msdu_link
*msdu_link
;
3005 struct hal_rx_msdu_details
*msdu0
;
3006 struct hal_srng
*srng
;
3007 dma_addr_t link_paddr
, buf_paddr
;
3008 u32 desc_bank
, msdu_info
, msdu_ext_info
, mpdu_info
;
3009 u32 cookie
, hal_rx_desc_sz
, dest_ring_info0
, queue_addr_hi
;
3011 struct ath12k_rx_desc_info
*desc_info
;
3012 enum hal_rx_buf_return_buf_manager idle_link_rbm
= dp
->idle_link_rbm
;
3015 hal_rx_desc_sz
= ab
->hal
.hal_desc_sz
;
3016 link_desc_banks
= dp
->link_desc_banks
;
3017 reo_dest_ring
= rx_tid
->dst_ring_desc
;
3019 ath12k_hal_rx_reo_ent_paddr_get(ab
, &reo_dest_ring
->buf_addr_info
,
3020 &link_paddr
, &cookie
);
3021 desc_bank
= u32_get_bits(cookie
, DP_LINK_DESC_BANK_MASK
);
3023 msdu_link
= (struct hal_rx_msdu_link
*)(link_desc_banks
[desc_bank
].vaddr
+
3024 (link_paddr
- link_desc_banks
[desc_bank
].paddr
));
3025 msdu0
= &msdu_link
->msdu_link
[0];
3026 msdu_ext_info
= le32_to_cpu(msdu0
->rx_msdu_ext_info
.info0
);
3027 dst_ind
= u32_get_bits(msdu_ext_info
, RX_MSDU_EXT_DESC_INFO0_REO_DEST_IND
);
3029 memset(msdu0
, 0, sizeof(*msdu0
));
3031 msdu_info
= u32_encode_bits(1, RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU
) |
3032 u32_encode_bits(1, RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU
) |
3033 u32_encode_bits(0, RX_MSDU_DESC_INFO0_MSDU_CONTINUATION
) |
3034 u32_encode_bits(defrag_skb
->len
- hal_rx_desc_sz
,
3035 RX_MSDU_DESC_INFO0_MSDU_LENGTH
) |
3036 u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_SA
) |
3037 u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_DA
);
3038 msdu0
->rx_msdu_info
.info0
= cpu_to_le32(msdu_info
);
3039 msdu0
->rx_msdu_ext_info
.info0
= cpu_to_le32(msdu_ext_info
);
3041 /* change msdu len in hal rx desc */
3042 ath12k_dp_rxdesc_set_msdu_len(ab
, rx_desc
, defrag_skb
->len
- hal_rx_desc_sz
);
3044 buf_paddr
= dma_map_single(ab
->dev
, defrag_skb
->data
,
3045 defrag_skb
->len
+ skb_tailroom(defrag_skb
),
3047 if (dma_mapping_error(ab
->dev
, buf_paddr
))
3050 spin_lock_bh(&dp
->rx_desc_lock
);
3051 desc_info
= list_first_entry_or_null(&dp
->rx_desc_free_list
,
3052 struct ath12k_rx_desc_info
,
3055 spin_unlock_bh(&dp
->rx_desc_lock
);
3056 ath12k_warn(ab
, "failed to find rx desc for reinject\n");
3061 desc_info
->skb
= defrag_skb
;
3062 desc_info
->in_use
= true;
3064 list_del(&desc_info
->list
);
3065 spin_unlock_bh(&dp
->rx_desc_lock
);
3067 ATH12K_SKB_RXCB(defrag_skb
)->paddr
= buf_paddr
;
3069 ath12k_hal_rx_buf_addr_info_set(&msdu0
->buf_addr_info
, buf_paddr
,
3071 HAL_RX_BUF_RBM_SW3_BM
);
3073 /* Fill mpdu details into reo entrance ring */
3074 srng
= &ab
->hal
.srng_list
[dp
->reo_reinject_ring
.ring_id
];
3076 spin_lock_bh(&srng
->lock
);
3077 ath12k_hal_srng_access_begin(ab
, srng
);
3079 reo_ent_ring
= ath12k_hal_srng_src_get_next_entry(ab
, srng
);
3080 if (!reo_ent_ring
) {
3081 ath12k_hal_srng_access_end(ab
, srng
);
3082 spin_unlock_bh(&srng
->lock
);
3086 memset(reo_ent_ring
, 0, sizeof(*reo_ent_ring
));
3088 ath12k_hal_rx_buf_addr_info_set(&reo_ent_ring
->buf_addr_info
, link_paddr
,
3092 mpdu_info
= u32_encode_bits(1, RX_MPDU_DESC_INFO0_MSDU_COUNT
) |
3093 u32_encode_bits(0, RX_MPDU_DESC_INFO0_FRAG_FLAG
) |
3094 u32_encode_bits(1, RX_MPDU_DESC_INFO0_RAW_MPDU
) |
3095 u32_encode_bits(1, RX_MPDU_DESC_INFO0_VALID_PN
) |
3096 u32_encode_bits(rx_tid
->tid
, RX_MPDU_DESC_INFO0_TID
);
3098 reo_ent_ring
->rx_mpdu_info
.info0
= cpu_to_le32(mpdu_info
);
3099 reo_ent_ring
->rx_mpdu_info
.peer_meta_data
=
3100 reo_dest_ring
->rx_mpdu_info
.peer_meta_data
;
3102 reo_ent_ring
->queue_addr_lo
= cpu_to_le32(lower_32_bits(rx_tid
->paddr
));
3103 queue_addr_hi
= upper_32_bits(rx_tid
->paddr
);
3104 reo_ent_ring
->info0
= le32_encode_bits(queue_addr_hi
,
3105 HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI
) |
3106 le32_encode_bits(dst_ind
,
3107 HAL_REO_ENTR_RING_INFO0_DEST_IND
);
3109 reo_ent_ring
->info1
= le32_encode_bits(rx_tid
->cur_sn
,
3110 HAL_REO_ENTR_RING_INFO1_MPDU_SEQ_NUM
);
3111 dest_ring_info0
= le32_get_bits(reo_dest_ring
->info0
,
3112 HAL_REO_DEST_RING_INFO0_SRC_LINK_ID
);
3113 reo_ent_ring
->info2
=
3114 cpu_to_le32(u32_get_bits(dest_ring_info0
,
3115 HAL_REO_ENTR_RING_INFO2_SRC_LINK_ID
));
3117 ath12k_hal_srng_access_end(ab
, srng
);
3118 spin_unlock_bh(&srng
->lock
);
3123 spin_lock_bh(&dp
->rx_desc_lock
);
3124 desc_info
->in_use
= false;
3125 desc_info
->skb
= NULL
;
3126 list_add_tail(&desc_info
->list
, &dp
->rx_desc_free_list
);
3127 spin_unlock_bh(&dp
->rx_desc_lock
);
3129 dma_unmap_single(ab
->dev
, buf_paddr
, defrag_skb
->len
+ skb_tailroom(defrag_skb
),
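
/* Keep the fragment list ordered by fragment number so reassembly can
 * simply concatenate the queued skbs.
 */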
static int ath12k_dp_rx_h_cmp_frags(struct ath12k_base *ab,
				    struct sk_buff *a, struct sk_buff *b)
{
	int frag1, frag2;

	frag1 = ath12k_dp_rx_h_frag_no(ab, a);
	frag2 = ath12k_dp_rx_h_frag_no(ab, b);

	return frag1 - frag2;
}

static void ath12k_dp_rx_h_sort_frags(struct ath12k_base *ab,
				      struct sk_buff_head *frag_list,
				      struct sk_buff *cur_frag)
{
	struct sk_buff *skb;
	int cmp;

	skb_queue_walk(frag_list, skb) {
		cmp = ath12k_dp_rx_h_cmp_frags(ab, skb, cur_frag);
		if (cmp < 0)
			continue;

		__skb_queue_before(frag_list, skb, cur_frag);
		return;
	}

	__skb_queue_tail(frag_list, cur_frag);
}
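
/* Read the 48-bit CCMP/GCMP packet number from the IV that follows the
 * 802.11 header of a fragment.
 */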
static u64 ath12k_dp_rx_h_get_pn(struct ath12k *ar, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	u64 pn = 0;
	u8 *ehdr;
	u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;

	hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
	ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control);

	pn = ehdr[0];
	pn |= (u64)ehdr[1] << 8;
	pn |= (u64)ehdr[4] << 16;
	pn |= (u64)ehdr[5] << 24;
	pn |= (u64)ehdr[6] << 32;
	pn |= (u64)ehdr[7] << 40;

	return pn;
}
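
/* For CCMP/GCMP ciphers, require strictly incrementing packet numbers
 * across the queued fragments before reassembly is attempted.
 */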
static bool
ath12k_dp_rx_h_defrag_validate_incr_pn(struct ath12k *ar, struct ath12k_dp_rx_tid *rx_tid)
{
	struct ath12k_base *ab = ar->ab;
	enum hal_encrypt_type encrypt_type;
	struct sk_buff *first_frag, *skb;
	struct hal_rx_desc *desc;
	u64 last_pn;
	u64 cur_pn;

	first_frag = skb_peek(&rx_tid->rx_frags);
	desc = (struct hal_rx_desc *)first_frag->data;

	encrypt_type = ath12k_dp_rx_h_enctype(ab, desc);
	if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 &&
	    encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 &&
	    encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 &&
	    encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256)
		return true;

	last_pn = ath12k_dp_rx_h_get_pn(ar, first_frag);
	skb_queue_walk(&rx_tid->rx_frags, skb) {
		if (skb == first_frag)
			continue;

		cur_pn = ath12k_dp_rx_h_get_pn(ar, skb);
		if (cur_pn != last_pn + 1)
			return false;
		last_pn = cur_pn;
	}
	return true;
}
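
/* Per-fragment handler for the REO exception path: queue the fragment in
 * order, and once all fragments have arrived defragment, validate the PN
 * and reinject the reassembled MPDU through the REO entrance ring.
 */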
3215 static int ath12k_dp_rx_frag_h_mpdu(struct ath12k
*ar
,
3216 struct sk_buff
*msdu
,
3217 struct hal_reo_dest_ring
*ring_desc
)
3219 struct ath12k_base
*ab
= ar
->ab
;
3220 struct hal_rx_desc
*rx_desc
;
3221 struct ath12k_peer
*peer
;
3222 struct ath12k_dp_rx_tid
*rx_tid
;
3223 struct sk_buff
*defrag_skb
= NULL
;
3230 rx_desc
= (struct hal_rx_desc
*)msdu
->data
;
3231 peer_id
= ath12k_dp_rx_h_peer_id(ab
, rx_desc
);
3232 tid
= ath12k_dp_rx_h_tid(ab
, rx_desc
);
3233 seqno
= ath12k_dp_rx_h_seq_no(ab
, rx_desc
);
3234 frag_no
= ath12k_dp_rx_h_frag_no(ab
, msdu
);
3235 more_frags
= ath12k_dp_rx_h_more_frags(ab
, msdu
);
3237 if (!ath12k_dp_rx_h_seq_ctrl_valid(ab
, rx_desc
) ||
3238 !ath12k_dp_rx_h_fc_valid(ab
, rx_desc
) ||
3239 tid
> IEEE80211_NUM_TIDS
)
3242 /* received unfragmented packet in reo
3243 * exception ring, this shouldn't happen
3244 * as these packets typically come from
3247 if (WARN_ON_ONCE(!frag_no
&& !more_frags
))
3250 spin_lock_bh(&ab
->base_lock
);
3251 peer
= ath12k_peer_find_by_id(ab
, peer_id
);
3253 ath12k_warn(ab
, "failed to find the peer to de-fragment received fragment peer_id %d\n",
3259 if (!peer
->dp_setup_done
) {
3260 ath12k_warn(ab
, "The peer %pM [%d] has uninitialized datapath\n",
3261 peer
->addr
, peer_id
);
3266 rx_tid
= &peer
->rx_tid
[tid
];
3268 if ((!skb_queue_empty(&rx_tid
->rx_frags
) && seqno
!= rx_tid
->cur_sn
) ||
3269 skb_queue_empty(&rx_tid
->rx_frags
)) {
3270 /* Flush stored fragments and start a new sequence */
3271 ath12k_dp_rx_frags_cleanup(rx_tid
, true);
3272 rx_tid
->cur_sn
= seqno
;
3275 if (rx_tid
->rx_frag_bitmap
& BIT(frag_no
)) {
3276 /* Fragment already present */
3281 if ((!rx_tid
->rx_frag_bitmap
|| frag_no
> __fls(rx_tid
->rx_frag_bitmap
)))
3282 __skb_queue_tail(&rx_tid
->rx_frags
, msdu
);
3284 ath12k_dp_rx_h_sort_frags(ab
, &rx_tid
->rx_frags
, msdu
);
3286 rx_tid
->rx_frag_bitmap
|= BIT(frag_no
);
3288 rx_tid
->last_frag_no
= frag_no
;
3291 rx_tid
->dst_ring_desc
= kmemdup(ring_desc
,
3292 sizeof(*rx_tid
->dst_ring_desc
),
3294 if (!rx_tid
->dst_ring_desc
) {
3299 ath12k_dp_rx_link_desc_return(ab
, ring_desc
,
3300 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE
);
3303 if (!rx_tid
->last_frag_no
||
3304 rx_tid
->rx_frag_bitmap
!= GENMASK(rx_tid
->last_frag_no
, 0)) {
3305 mod_timer(&rx_tid
->frag_timer
, jiffies
+
3306 ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS
);
3310 spin_unlock_bh(&ab
->base_lock
);
3311 del_timer_sync(&rx_tid
->frag_timer
);
3312 spin_lock_bh(&ab
->base_lock
);
3314 peer
= ath12k_peer_find_by_id(ab
, peer_id
);
3316 goto err_frags_cleanup
;
3318 if (!ath12k_dp_rx_h_defrag_validate_incr_pn(ar
, rx_tid
))
3319 goto err_frags_cleanup
;
3321 if (ath12k_dp_rx_h_defrag(ar
, peer
, rx_tid
, &defrag_skb
))
3322 goto err_frags_cleanup
;
3325 goto err_frags_cleanup
;
3327 if (ath12k_dp_rx_h_defrag_reo_reinject(ar
, rx_tid
, defrag_skb
))
3328 goto err_frags_cleanup
;
3330 ath12k_dp_rx_frags_cleanup(rx_tid
, false);
3334 dev_kfree_skb_any(defrag_skb
);
3335 ath12k_dp_rx_frags_cleanup(rx_tid
, true);
3337 spin_unlock_bh(&ab
->base_lock
);
3342 ath12k_dp_process_rx_err_buf(struct ath12k
*ar
, struct hal_reo_dest_ring
*desc
,
3343 struct list_head
*used_list
,
3344 bool drop
, u32 cookie
)
3346 struct ath12k_base
*ab
= ar
->ab
;
3347 struct sk_buff
*msdu
;
3348 struct ath12k_skb_rxcb
*rxcb
;
3349 struct hal_rx_desc
*rx_desc
;
3351 u32 hal_rx_desc_sz
= ab
->hal
.hal_desc_sz
;
3352 struct ath12k_rx_desc_info
*desc_info
;
3355 desc_va
= ((u64
)le32_to_cpu(desc
->buf_va_hi
) << 32 |
3356 le32_to_cpu(desc
->buf_va_lo
));
3357 desc_info
= (struct ath12k_rx_desc_info
*)((unsigned long)desc_va
);
3359 /* retry manual desc retrieval */
3361 desc_info
= ath12k_dp_get_rx_desc(ab
, cookie
);
3363 ath12k_warn(ab
, "Invalid cookie in DP rx error descriptor retrieval: 0x%x\n",
3369 if (desc_info
->magic
!= ATH12K_DP_RX_DESC_MAGIC
)
3370 ath12k_warn(ab
, " RX Exception, Check HW CC implementation");
3372 msdu
= desc_info
->skb
;
3373 desc_info
->skb
= NULL
;
3375 list_add_tail(&desc_info
->list
, used_list
);
3377 rxcb
= ATH12K_SKB_RXCB(msdu
);
3378 dma_unmap_single(ar
->ab
->dev
, rxcb
->paddr
,
3379 msdu
->len
+ skb_tailroom(msdu
),
3383 dev_kfree_skb_any(msdu
);
3388 if (!rcu_dereference(ar
->ab
->pdevs_active
[ar
->pdev_idx
])) {
3389 dev_kfree_skb_any(msdu
);
3393 if (test_bit(ATH12K_CAC_RUNNING
, &ar
->dev_flags
)) {
3394 dev_kfree_skb_any(msdu
);
3398 rx_desc
= (struct hal_rx_desc
*)msdu
->data
;
3399 msdu_len
= ath12k_dp_rx_h_msdu_len(ar
->ab
, rx_desc
);
3400 if ((msdu_len
+ hal_rx_desc_sz
) > DP_RX_BUFFER_SIZE
) {
3401 ath12k_warn(ar
->ab
, "invalid msdu leng %u", msdu_len
);
3402 ath12k_dbg_dump(ar
->ab
, ATH12K_DBG_DATA
, NULL
, "", rx_desc
,
3404 dev_kfree_skb_any(msdu
);
3408 skb_put(msdu
, hal_rx_desc_sz
+ msdu_len
);
3410 if (ath12k_dp_rx_frag_h_mpdu(ar
, msdu
, desc
)) {
3411 dev_kfree_skb_any(msdu
);
3412 ath12k_dp_rx_link_desc_return(ar
->ab
, desc
,
3413 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE
);
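
/* NAPI handler for the REO exception ring: parse the error descriptors,
 * hand rx fragments to the defragmentation path and replenish the reaped
 * buffers.
 */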
3420 int ath12k_dp_rx_process_err(struct ath12k_base
*ab
, struct napi_struct
*napi
,
3423 u32 msdu_cookies
[HAL_NUM_RX_MSDUS_PER_LINK_DESC
];
3424 struct dp_link_desc_bank
*link_desc_banks
;
3425 enum hal_rx_buf_return_buf_manager rbm
;
3426 struct hal_rx_msdu_link
*link_desc_va
;
3427 int tot_n_bufs_reaped
, quota
, ret
, i
;
3428 struct hal_reo_dest_ring
*reo_desc
;
3429 struct dp_rxdma_ring
*rx_ring
;
3430 struct dp_srng
*reo_except
;
3431 LIST_HEAD(rx_desc_used_list
);
3432 u32 desc_bank
, num_msdus
;
3433 struct hal_srng
*srng
;
3434 struct ath12k_dp
*dp
;
3442 tot_n_bufs_reaped
= 0;
3446 reo_except
= &dp
->reo_except_ring
;
3447 link_desc_banks
= dp
->link_desc_banks
;
3449 srng
= &ab
->hal
.srng_list
[reo_except
->ring_id
];
3451 spin_lock_bh(&srng
->lock
);
3453 ath12k_hal_srng_access_begin(ab
, srng
);
3456 (reo_desc
= ath12k_hal_srng_dst_get_next_entry(ab
, srng
))) {
3458 ab
->soc_stats
.err_ring_pkts
++;
3460 ret
= ath12k_hal_desc_reo_parse_err(ab
, reo_desc
, &paddr
,
3463 ath12k_warn(ab
, "failed to parse error reo desc %d\n",
3467 link_desc_va
= link_desc_banks
[desc_bank
].vaddr
+
3468 (paddr
- link_desc_banks
[desc_bank
].paddr
);
3469 ath12k_hal_rx_msdu_link_info_get(link_desc_va
, &num_msdus
, msdu_cookies
,
3471 if (rbm
!= dp
->idle_link_rbm
&&
3472 rbm
!= HAL_RX_BUF_RBM_SW3_BM
&&
3473 rbm
!= ab
->hw_params
->hal_params
->rx_buf_rbm
) {
3474 ab
->soc_stats
.invalid_rbm
++;
3475 ath12k_warn(ab
, "invalid return buffer manager %d\n", rbm
);
3476 ath12k_dp_rx_link_desc_return(ab
, reo_desc
,
3477 HAL_WBM_REL_BM_ACT_REL_MSDU
);
3481 is_frag
= !!(le32_to_cpu(reo_desc
->rx_mpdu_info
.info0
) &
3482 RX_MPDU_DESC_INFO0_FRAG_FLAG
);
3484 /* Process only rx fragments with one msdu per link desc below, and drop
3485 * msdu's indicated due to error reasons.
3487 if (!is_frag
|| num_msdus
> 1) {
3489 /* Return the link desc back to wbm idle list */
3490 ath12k_dp_rx_link_desc_return(ab
, reo_desc
,
3491 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE
);
3494 for (i
= 0; i
< num_msdus
; i
++) {
3495 mac_id
= le32_get_bits(reo_desc
->info0
,
3496 HAL_REO_DEST_RING_INFO0_SRC_LINK_ID
);
3498 pdev_id
= ath12k_hw_mac_id_to_pdev_id(ab
->hw_params
, mac_id
);
3499 ar
= ab
->pdevs
[pdev_id
].ar
;
3501 if (!ath12k_dp_process_rx_err_buf(ar
, reo_desc
,
3505 tot_n_bufs_reaped
++;
3508 if (tot_n_bufs_reaped
>= quota
) {
3509 tot_n_bufs_reaped
= quota
;
3513 budget
= quota
- tot_n_bufs_reaped
;
3517 ath12k_hal_srng_access_end(ab
, srng
);
3519 spin_unlock_bh(&srng
->lock
);
3521 rx_ring
= &dp
->rx_refill_buf_ring
;
3523 ath12k_dp_rx_bufs_replenish(ab
, rx_ring
, &rx_desc_used_list
,
3526 return tot_n_bufs_reaped
;
3529 static void ath12k_dp_rx_null_q_desc_sg_drop(struct ath12k
*ar
,
3531 struct sk_buff_head
*msdu_list
)
3533 struct sk_buff
*skb
, *tmp
;
3534 struct ath12k_skb_rxcb
*rxcb
;
3537 n_buffs
= DIV_ROUND_UP(msdu_len
,
3538 (DP_RX_BUFFER_SIZE
- ar
->ab
->hal
.hal_desc_sz
));
3540 skb_queue_walk_safe(msdu_list
, skb
, tmp
) {
3541 rxcb
= ATH12K_SKB_RXCB(skb
);
3542 if (rxcb
->err_rel_src
== HAL_WBM_REL_SRC_MODULE_REO
&&
3543 rxcb
->err_code
== HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO
) {
3546 __skb_unlink(skb
, msdu_list
);
3547 dev_kfree_skb_any(skb
);
3553 static int ath12k_dp_rx_h_null_q_desc(struct ath12k
*ar
, struct sk_buff
*msdu
,
3554 struct ieee80211_rx_status
*status
,
3555 struct sk_buff_head
*msdu_list
)
3557 struct ath12k_base
*ab
= ar
->ab
;
3559 struct hal_rx_desc
*desc
= (struct hal_rx_desc
*)msdu
->data
;
3561 struct ath12k_skb_rxcb
*rxcb
= ATH12K_SKB_RXCB(msdu
);
3562 u32 hal_rx_desc_sz
= ar
->ab
->hal
.hal_desc_sz
;
3564 msdu_len
= ath12k_dp_rx_h_msdu_len(ab
, desc
);
3566 if (!rxcb
->is_frag
&& ((msdu_len
+ hal_rx_desc_sz
) > DP_RX_BUFFER_SIZE
)) {
3567 /* First buffer will be freed by the caller, so deduct it's length */
3568 msdu_len
= msdu_len
- (DP_RX_BUFFER_SIZE
- hal_rx_desc_sz
);
3569 ath12k_dp_rx_null_q_desc_sg_drop(ar
, msdu_len
, msdu_list
);
3573 /* Even after cleaning up the sg buffers in the msdu list with above check
3574 * any msdu received with continuation flag needs to be dropped as invalid.
3575 * This protects against some random err frame with continuation flag.
3577 if (rxcb
->is_continuation
)
3580 if (!ath12k_dp_rx_h_msdu_done(ab
, desc
)) {
3582 "msdu_done bit not set in null_q_des processing\n");
3583 __skb_queue_purge(msdu_list
);
3587 /* Handle NULL queue descriptor violations arising out a missing
3588 * REO queue for a given peer or a given TID. This typically
3589 * may happen if a packet is received on a QOS enabled TID before the
3590 * ADDBA negotiation for that TID, when the TID queue is setup. Or
3591 * it may also happen for MC/BC frames if they are not routed to the
3592 * non-QOS TID queue, in the absence of any other default TID queue.
3593 * This error can show up both in a REO destination or WBM release ring.
3596 if (rxcb
->is_frag
) {
3597 skb_pull(msdu
, hal_rx_desc_sz
);
3599 l3pad_bytes
= ath12k_dp_rx_h_l3pad(ab
, desc
);
3601 if ((hal_rx_desc_sz
+ l3pad_bytes
+ msdu_len
) > DP_RX_BUFFER_SIZE
)
3604 skb_put(msdu
, hal_rx_desc_sz
+ l3pad_bytes
+ msdu_len
);
3605 skb_pull(msdu
, hal_rx_desc_sz
+ l3pad_bytes
);
3607 ath12k_dp_rx_h_ppdu(ar
, desc
, status
);
3609 ath12k_dp_rx_h_mpdu(ar
, msdu
, desc
, status
);
3611 rxcb
->tid
= ath12k_dp_rx_h_tid(ab
, desc
);
	/* Please note that the caller will have access to the msdu and will
	 * complete rx with mac80211. Need not worry about cleaning up amsdu_list.
	 */

	return 0;
}
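
/* Handle REO error codes for a frame released via WBM; returns true when
 * the frame must be dropped.
 */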
static bool ath12k_dp_rx_h_reo_err(struct ath12k *ar, struct sk_buff *msdu,
				   struct ieee80211_rx_status *status,
				   struct sk_buff_head *msdu_list)
{
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	bool drop = false;

	ar->ab->soc_stats.reo_error[rxcb->err_code]++;

	switch (rxcb->err_code) {
	case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
		if (ath12k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
			drop = true;
		break;
	case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
		/* TODO: Do not drop PN failed packets in the driver;
		 * instead, it is good to drop such packets in mac80211
		 * after incrementing the replay counters.
		 */
		fallthrough;
	default:
		/* TODO: Review other errors and process them to mac80211
		 * as well.
		 */
		drop = true;
		break;
	}

	return drop;
}
3651 static void ath12k_dp_rx_h_tkip_mic_err(struct ath12k
*ar
, struct sk_buff
*msdu
,
3652 struct ieee80211_rx_status
*status
)
3654 struct ath12k_base
*ab
= ar
->ab
;
3656 struct hal_rx_desc
*desc
= (struct hal_rx_desc
*)msdu
->data
;
3658 struct ath12k_skb_rxcb
*rxcb
= ATH12K_SKB_RXCB(msdu
);
3659 u32 hal_rx_desc_sz
= ar
->ab
->hal
.hal_desc_sz
;
3661 rxcb
->is_first_msdu
= ath12k_dp_rx_h_first_msdu(ab
, desc
);
3662 rxcb
->is_last_msdu
= ath12k_dp_rx_h_last_msdu(ab
, desc
);
3664 l3pad_bytes
= ath12k_dp_rx_h_l3pad(ab
, desc
);
3665 msdu_len
= ath12k_dp_rx_h_msdu_len(ab
, desc
);
3666 skb_put(msdu
, hal_rx_desc_sz
+ l3pad_bytes
+ msdu_len
);
3667 skb_pull(msdu
, hal_rx_desc_sz
+ l3pad_bytes
);
3669 ath12k_dp_rx_h_ppdu(ar
, desc
, status
);
3671 status
->flag
|= (RX_FLAG_MMIC_STRIPPED
| RX_FLAG_MMIC_ERROR
|
3674 ath12k_dp_rx_h_undecap(ar
, msdu
, desc
,
3675 HAL_ENCRYPT_TYPE_TKIP_MIC
, status
, false);
static bool ath12k_dp_rx_h_rxdma_err(struct ath12k *ar, struct sk_buff *msdu,
				     struct ieee80211_rx_status *status)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
	bool drop = false;
	u32 err_bitmap;

	ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;

	switch (rxcb->err_code) {
	case HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR:
	case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
		err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
		if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC) {
			ath12k_dp_rx_h_tkip_mic_err(ar, msdu, status);
			break;
		}
		fallthrough;
	default:
		/* TODO: Review other rxdma error code to check if anything is
		 * worth reporting to mac80211
		 */
		drop = true;
		break;
	}

	return drop;
}
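
/* Dispatch a WBM error frame to the REO or RXDMA error handler and either
 * drop it or deliver it to mac80211.
 */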
static void ath12k_dp_rx_wbm_err(struct ath12k *ar,
				 struct napi_struct *napi,
				 struct sk_buff *msdu,
				 struct sk_buff_head *msdu_list)
{
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	struct ieee80211_rx_status rxs = {0};
	bool drop = true;

	switch (rxcb->err_rel_src) {
	case HAL_WBM_REL_SRC_MODULE_REO:
		drop = ath12k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
		break;
	case HAL_WBM_REL_SRC_MODULE_RXDMA:
		drop = ath12k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
		break;
	default:
		/* msdu will get freed */
		break;
	}

	if (drop) {
		dev_kfree_skb_any(msdu);
		return;
	}

	ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rxs);
}
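
/* NAPI handler for the WBM error release ring: reap the erroneous MSDUs,
 * reassemble scattered buffers, replenish the ring and run the per-frame
 * error handlers.
 */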
3738 int ath12k_dp_rx_process_wbm_err(struct ath12k_base
*ab
,
3739 struct napi_struct
*napi
, int budget
)
3741 LIST_HEAD(rx_desc_used_list
);
3743 struct ath12k_dp
*dp
= &ab
->dp
;
3744 struct dp_rxdma_ring
*rx_ring
;
3745 struct hal_rx_wbm_rel_info err_info
;
3746 struct hal_srng
*srng
;
3747 struct sk_buff
*msdu
;
3748 struct sk_buff_head msdu_list
, scatter_msdu_list
;
3749 struct ath12k_skb_rxcb
*rxcb
;
3752 int num_buffs_reaped
= 0;
3753 struct ath12k_rx_desc_info
*desc_info
;
3755 struct hal_rx_desc
*msdu_data
;
3757 __skb_queue_head_init(&msdu_list
);
3758 __skb_queue_head_init(&scatter_msdu_list
);
3760 srng
= &ab
->hal
.srng_list
[dp
->rx_rel_ring
.ring_id
];
3761 rx_ring
= &dp
->rx_refill_buf_ring
;
3762 spin_lock_bh(&srng
->lock
);
3764 ath12k_hal_srng_access_begin(ab
, srng
);
3767 rx_desc
= ath12k_hal_srng_dst_get_next_entry(ab
, srng
);
3771 ret
= ath12k_hal_wbm_desc_parse_err(ab
, rx_desc
, &err_info
);
3774 "failed to parse rx error in wbm_rel ring desc %d\n",
3779 desc_info
= err_info
.rx_desc
;
3781 /* retry manual desc retrieval if hw cc is not done */
3783 desc_info
= ath12k_dp_get_rx_desc(ab
, err_info
.cookie
);
3785 ath12k_warn(ab
, "Invalid cookie in DP WBM rx error descriptor retrieval: 0x%x\n",
3791 if (desc_info
->magic
!= ATH12K_DP_RX_DESC_MAGIC
)
3792 ath12k_warn(ab
, "WBM RX err, Check HW CC implementation");
3794 msdu
= desc_info
->skb
;
3795 desc_info
->skb
= NULL
;
3797 list_add_tail(&desc_info
->list
, &rx_desc_used_list
);
3799 rxcb
= ATH12K_SKB_RXCB(msdu
);
3800 dma_unmap_single(ab
->dev
, rxcb
->paddr
,
3801 msdu
->len
+ skb_tailroom(msdu
),
3806 if (!err_info
.continuation
)
3809 if (err_info
.push_reason
!=
3810 HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED
) {
3811 dev_kfree_skb_any(msdu
);
3815 msdu_data
= (struct hal_rx_desc
*)msdu
->data
;
3816 rxcb
->err_rel_src
= err_info
.err_rel_src
;
3817 rxcb
->err_code
= err_info
.err_code
;
3818 rxcb
->is_first_msdu
= err_info
.first_msdu
;
3819 rxcb
->is_last_msdu
= err_info
.last_msdu
;
3820 rxcb
->is_continuation
= err_info
.continuation
;
3821 rxcb
->rx_desc
= msdu_data
;
3823 if (err_info
.continuation
) {
3824 __skb_queue_tail(&scatter_msdu_list
, msdu
);
3828 mac_id
= ath12k_dp_rx_get_msdu_src_link(ab
,
3830 if (mac_id
>= MAX_RADIOS
) {
3831 dev_kfree_skb_any(msdu
);
3833 /* In any case continuation bit is set
3834 * in the previous record, cleanup scatter_msdu_list
3836 ath12k_dp_clean_up_skb_list(&scatter_msdu_list
);
3840 if (!skb_queue_empty(&scatter_msdu_list
)) {
3841 struct sk_buff
*msdu
;
3843 skb_queue_walk(&scatter_msdu_list
, msdu
) {
3844 rxcb
= ATH12K_SKB_RXCB(msdu
);
3845 rxcb
->mac_id
= mac_id
;
3848 skb_queue_splice_tail_init(&scatter_msdu_list
,
3852 rxcb
= ATH12K_SKB_RXCB(msdu
);
3853 rxcb
->mac_id
= mac_id
;
3854 __skb_queue_tail(&msdu_list
, msdu
);
3857 /* In any case continuation bit is set in the
3858 * last record, cleanup scatter_msdu_list
3860 ath12k_dp_clean_up_skb_list(&scatter_msdu_list
);
3862 ath12k_hal_srng_access_end(ab
, srng
);
3864 spin_unlock_bh(&srng
->lock
);
3866 if (!num_buffs_reaped
)
3869 ath12k_dp_rx_bufs_replenish(ab
, rx_ring
, &rx_desc_used_list
,
3873 while ((msdu
= __skb_dequeue(&msdu_list
))) {
3874 rxcb
= ATH12K_SKB_RXCB(msdu
);
3875 mac_id
= rxcb
->mac_id
;
3877 pdev_id
= ath12k_hw_mac_id_to_pdev_id(ab
->hw_params
, mac_id
);
3878 ar
= ab
->pdevs
[pdev_id
].ar
;
3880 if (!ar
|| !rcu_dereference(ar
->ab
->pdevs_active
[mac_id
])) {
3881 dev_kfree_skb_any(msdu
);
3885 if (test_bit(ATH12K_CAC_RUNNING
, &ar
->dev_flags
)) {
3886 dev_kfree_skb_any(msdu
);
3889 ath12k_dp_rx_wbm_err(ar
, napi
, msdu
, &msdu_list
);
3893 return num_buffs_reaped
;
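
/* Drain the REO status ring and complete any pending REO commands whose
 * status has arrived.
 */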
3896 void ath12k_dp_rx_process_reo_status(struct ath12k_base
*ab
)
3898 struct ath12k_dp
*dp
= &ab
->dp
;
3899 struct hal_tlv_64_hdr
*hdr
;
3900 struct hal_srng
*srng
;
3901 struct ath12k_dp_rx_reo_cmd
*cmd
, *tmp
;
3904 struct hal_reo_status reo_status
;
3906 srng
= &ab
->hal
.srng_list
[dp
->reo_status_ring
.ring_id
];
3908 memset(&reo_status
, 0, sizeof(reo_status
));
3910 spin_lock_bh(&srng
->lock
);
3912 ath12k_hal_srng_access_begin(ab
, srng
);
3914 while ((hdr
= ath12k_hal_srng_dst_get_next_entry(ab
, srng
))) {
3915 tag
= u64_get_bits(hdr
->tl
, HAL_SRNG_TLV_HDR_TAG
);
3918 case HAL_REO_GET_QUEUE_STATS_STATUS
:
3919 ath12k_hal_reo_status_queue_stats(ab
, hdr
,
3922 case HAL_REO_FLUSH_QUEUE_STATUS
:
3923 ath12k_hal_reo_flush_queue_status(ab
, hdr
,
3926 case HAL_REO_FLUSH_CACHE_STATUS
:
3927 ath12k_hal_reo_flush_cache_status(ab
, hdr
,
3930 case HAL_REO_UNBLOCK_CACHE_STATUS
:
3931 ath12k_hal_reo_unblk_cache_status(ab
, hdr
,
3934 case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS
:
3935 ath12k_hal_reo_flush_timeout_list_status(ab
, hdr
,
3938 case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS
:
3939 ath12k_hal_reo_desc_thresh_reached_status(ab
, hdr
,
3942 case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS
:
3943 ath12k_hal_reo_update_rx_reo_queue_status(ab
, hdr
,
3947 ath12k_warn(ab
, "Unknown reo status type %d\n", tag
);
3951 spin_lock_bh(&dp
->reo_cmd_lock
);
3952 list_for_each_entry_safe(cmd
, tmp
, &dp
->reo_cmd_list
, list
) {
3953 if (reo_status
.uniform_hdr
.cmd_num
== cmd
->cmd_num
) {
3955 list_del(&cmd
->list
);
3959 spin_unlock_bh(&dp
->reo_cmd_lock
);
3962 cmd
->handler(dp
, (void *)&cmd
->data
,
3963 reo_status
.uniform_hdr
.cmd_status
);
3970 ath12k_hal_srng_access_end(ab
, srng
);
3972 spin_unlock_bh(&srng
->lock
);
void ath12k_dp_rx_free(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	int i;

	ath12k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);

	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
		if (ab->hw_params->rx_mac_buf_ring)
			ath12k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);
	}

	for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++)
		ath12k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);

	ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);

	ath12k_dp_rxdma_buf_free(ab);
}

void ath12k_dp_rx_pdev_free(struct ath12k_base *ab, int mac_id)
{
	struct ath12k *ar = ab->pdevs[mac_id].ar;

	ath12k_dp_rx_pdev_srng_free(ar);
}
4002 int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base
*ab
)
4004 struct ath12k_dp
*dp
= &ab
->dp
;
4005 struct htt_rx_ring_tlv_filter tlv_filter
= {0};
4008 u32 hal_rx_desc_sz
= ab
->hal
.hal_desc_sz
;
4010 ring_id
= dp
->rx_refill_buf_ring
.refill_buf_ring
.ring_id
;
4012 tlv_filter
.rx_filter
= HTT_RX_TLV_FLAGS_RXDMA_RING
;
4013 tlv_filter
.pkt_filter_flags2
= HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR
;
4014 tlv_filter
.pkt_filter_flags3
= HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST
|
4015 HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST
|
4016 HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA
;
4017 tlv_filter
.offset_valid
= true;
4018 tlv_filter
.rx_packet_offset
= hal_rx_desc_sz
;
4020 tlv_filter
.rx_mpdu_start_offset
=
4021 ab
->hal_rx_ops
->rx_desc_get_mpdu_start_offset();
4022 tlv_filter
.rx_msdu_end_offset
=
4023 ab
->hal_rx_ops
->rx_desc_get_msdu_end_offset();
4025 if (ath12k_dp_wmask_compaction_rx_tlv_supported(ab
)) {
4026 tlv_filter
.rx_mpdu_start_wmask
=
4027 ab
->hw_params
->hal_ops
->rxdma_ring_wmask_rx_mpdu_start();
4028 tlv_filter
.rx_msdu_end_wmask
=
4029 ab
->hw_params
->hal_ops
->rxdma_ring_wmask_rx_msdu_end();
4030 ath12k_dbg(ab
, ATH12K_DBG_DATA
,
4031 "Configuring compact tlv masks rx_mpdu_start_wmask 0x%x rx_msdu_end_wmask 0x%x\n",
4032 tlv_filter
.rx_mpdu_start_wmask
, tlv_filter
.rx_msdu_end_wmask
);
4035 ret
= ath12k_dp_tx_htt_rx_filter_setup(ab
, ring_id
, 0,
4037 DP_RXDMA_REFILL_RING_SIZE
,
4043 int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base
*ab
)
4045 struct ath12k_dp
*dp
= &ab
->dp
;
4046 struct htt_rx_ring_tlv_filter tlv_filter
= {0};
4049 u32 hal_rx_desc_sz
= ab
->hal
.hal_desc_sz
;
4052 ring_id
= dp
->rx_refill_buf_ring
.refill_buf_ring
.ring_id
;
4054 tlv_filter
.rx_filter
= HTT_RX_TLV_FLAGS_RXDMA_RING
;
4055 tlv_filter
.pkt_filter_flags2
= HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR
;
4056 tlv_filter
.pkt_filter_flags3
= HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST
|
4057 HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST
|
4058 HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA
;
4059 tlv_filter
.offset_valid
= true;
4060 tlv_filter
.rx_packet_offset
= hal_rx_desc_sz
;
4062 tlv_filter
.rx_header_offset
= offsetof(struct hal_rx_desc_wcn7850
, pkt_hdr_tlv
);
4064 tlv_filter
.rx_mpdu_start_offset
=
4065 ab
->hal_rx_ops
->rx_desc_get_mpdu_start_offset();
4066 tlv_filter
.rx_msdu_end_offset
=
4067 ab
->hal_rx_ops
->rx_desc_get_msdu_end_offset();
4069 /* TODO: Selectively subscribe to required qwords within msdu_end
4070 * and mpdu_start and setup the mask in below msg
4071 * and modify the rx_desc struct
4074 for (i
= 0; i
< ab
->hw_params
->num_rxdma_per_pdev
; i
++) {
4075 ring_id
= dp
->rx_mac_buf_ring
[i
].ring_id
;
4076 ret
= ath12k_dp_tx_htt_rx_filter_setup(ab
, ring_id
, i
,
4078 DP_RXDMA_REFILL_RING_SIZE
,
4085 int ath12k_dp_rx_htt_setup(struct ath12k_base
*ab
)
4087 struct ath12k_dp
*dp
= &ab
->dp
;
4091 /* TODO: Need to verify the HTT setup for QCN9224 */
4092 ring_id
= dp
->rx_refill_buf_ring
.refill_buf_ring
.ring_id
;
4093 ret
= ath12k_dp_tx_htt_srng_setup(ab
, ring_id
, 0, HAL_RXDMA_BUF
);
4095 ath12k_warn(ab
, "failed to configure rx_refill_buf_ring %d\n",
4100 if (ab
->hw_params
->rx_mac_buf_ring
) {
4101 for (i
= 0; i
< ab
->hw_params
->num_rxdma_per_pdev
; i
++) {
4102 ring_id
= dp
->rx_mac_buf_ring
[i
].ring_id
;
4103 ret
= ath12k_dp_tx_htt_srng_setup(ab
, ring_id
,
4106 ath12k_warn(ab
, "failed to configure rx_mac_buf_ring%d %d\n",
4113 for (i
= 0; i
< ab
->hw_params
->num_rxdma_dst_ring
; i
++) {
4114 ring_id
= dp
->rxdma_err_dst_ring
[i
].ring_id
;
4115 ret
= ath12k_dp_tx_htt_srng_setup(ab
, ring_id
,
4118 ath12k_warn(ab
, "failed to configure rxdma_err_dest_ring%d %d\n",
4124 if (ab
->hw_params
->rxdma1_enable
) {
4125 ring_id
= dp
->rxdma_mon_buf_ring
.refill_buf_ring
.ring_id
;
4126 ret
= ath12k_dp_tx_htt_srng_setup(ab
, ring_id
,
4127 0, HAL_RXDMA_MONITOR_BUF
);
4129 ath12k_warn(ab
, "failed to configure rxdma_mon_buf_ring %d\n",
4135 ret
= ab
->hw_params
->hw_ops
->rxdma_ring_sel_config(ab
);
4137 ath12k_warn(ab
, "failed to setup rxdma ring selection config\n");
4144 int ath12k_dp_rx_alloc(struct ath12k_base
*ab
)
4146 struct ath12k_dp
*dp
= &ab
->dp
;
4149 idr_init(&dp
->rxdma_mon_buf_ring
.bufs_idr
);
4150 spin_lock_init(&dp
->rxdma_mon_buf_ring
.idr_lock
);
4152 ret
= ath12k_dp_srng_setup(ab
,
4153 &dp
->rx_refill_buf_ring
.refill_buf_ring
,
4154 HAL_RXDMA_BUF
, 0, 0,
4155 DP_RXDMA_BUF_RING_SIZE
);
4157 ath12k_warn(ab
, "failed to setup rx_refill_buf_ring\n");
4161 if (ab
->hw_params
->rx_mac_buf_ring
) {
4162 for (i
= 0; i
< ab
->hw_params
->num_rxdma_per_pdev
; i
++) {
4163 ret
= ath12k_dp_srng_setup(ab
,
4164 &dp
->rx_mac_buf_ring
[i
],
4166 i
, DP_RX_MAC_BUF_RING_SIZE
);
4168 ath12k_warn(ab
, "failed to setup rx_mac_buf_ring %d\n",
4175 for (i
= 0; i
< ab
->hw_params
->num_rxdma_dst_ring
; i
++) {
4176 ret
= ath12k_dp_srng_setup(ab
, &dp
->rxdma_err_dst_ring
[i
],
4177 HAL_RXDMA_DST
, 0, i
,
4178 DP_RXDMA_ERR_DST_RING_SIZE
);
4180 ath12k_warn(ab
, "failed to setup rxdma_err_dst_ring %d\n", i
);
4185 if (ab
->hw_params
->rxdma1_enable
) {
4186 ret
= ath12k_dp_srng_setup(ab
,
4187 &dp
->rxdma_mon_buf_ring
.refill_buf_ring
,
4188 HAL_RXDMA_MONITOR_BUF
, 0, 0,
4189 DP_RXDMA_MONITOR_BUF_RING_SIZE
);
4191 ath12k_warn(ab
, "failed to setup HAL_RXDMA_MONITOR_BUF\n");
4196 ret
= ath12k_dp_rxdma_buf_setup(ab
);
4198 ath12k_warn(ab
, "failed to setup rxdma ring\n");
4205 int ath12k_dp_rx_pdev_alloc(struct ath12k_base
*ab
, int mac_id
)
4207 struct ath12k
*ar
= ab
->pdevs
[mac_id
].ar
;
4208 struct ath12k_pdev_dp
*dp
= &ar
->dp
;
4213 if (!ab
->hw_params
->rxdma1_enable
)
4216 ret
= ath12k_dp_rx_pdev_srng_alloc(ar
);
4218 ath12k_warn(ab
, "failed to setup rx srngs\n");
4222 for (i
= 0; i
< ab
->hw_params
->num_rxdma_per_pdev
; i
++) {
4223 ring_id
= dp
->rxdma_mon_dst_ring
[i
].ring_id
;
4224 ret
= ath12k_dp_tx_htt_srng_setup(ab
, ring_id
,
4226 HAL_RXDMA_MONITOR_DST
);
4229 "failed to configure rxdma_mon_dst_ring %d %d\n",
static int ath12k_dp_rx_pdev_mon_status_attach(struct ath12k *ar)
{
	struct ath12k_pdev_dp *dp = &ar->dp;
	struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&dp->mon_data;

	skb_queue_head_init(&pmon->rx_status_q);

	pmon->mon_ppdu_status = DP_PPDU_STATUS_START;

	memset(&pmon->rx_mon_stats, 0,
	       sizeof(pmon->rx_mon_stats));
	return 0;
}

int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar)
{
	struct ath12k_pdev_dp *dp = &ar->dp;
	struct ath12k_mon_data *pmon = &dp->mon_data;
	int ret;

	ret = ath12k_dp_rx_pdev_mon_status_attach(ar);
	if (ret) {
		ath12k_warn(ar->ab, "pdev_mon_status_attach() failed");
		return ret;
	}

	/* if rxdma1_enable is false, no need to setup
	 * rxdma_mon_desc_ring.
	 */
	if (!ar->ab->hw_params->rxdma1_enable)
		return 0;

	pmon->mon_last_linkdesc_paddr = 0;
	pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
	spin_lock_init(&pmon->mon_lock);