// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"
#include "mac.h"

#include <linux/log2.h>
#include <linux/bitfield.h>

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

#define HTT_RX_RING_REFILL_RESCHED_MS 5

/* shortcut to interpret a raw memory buffer as a rx descriptor */
#define HTT_RX_BUF_TO_RX_DESC(hw, buf) ath10k_htt_rx_desc_from_raw_buffer(hw, buf)

static int ath10k_htt_rx_get_csum_state(struct ath10k_hw_params *hw, struct sk_buff *skb);

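/* Look up the sk_buff that was DMA-mapped at the given physical address.
 * Used by the in-order RX path, where firmware refers to RX buffers by
 * physical address rather than by ring index.
 */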
static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr)
{
	struct ath10k_skb_rxcb *rxcb;

	hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
		if (rxcb->paddr == paddr)
			return ATH10K_RXCB_SKB(rxcb);

	WARN_ON_ONCE(1);
	return NULL;
}

static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct ath10k_skb_rxcb *rxcb;
	struct hlist_node *n;
	int i;

	if (htt->rx_ring.in_ord_rx) {
		hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
			skb = ATH10K_RXCB_SKB(rxcb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			hash_del(&rxcb->hlist);
			dev_kfree_skb_any(skb);
		}
	} else {
		for (i = 0; i < htt->rx_ring.size; i++) {
			skb = htt->rx_ring.netbufs_ring[i];
			if (!skb)
				continue;

			rxcb = ATH10K_SKB_RXCB(skb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
		}
	}

	htt->rx_ring.fill_cnt = 0;
	hash_init(htt->rx_ring.skb_table);
	memset(htt->rx_ring.netbufs_ring, 0,
	       htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
}

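/* The RX paddr ring holds either 32-bit or 64-bit DMA addresses depending on
 * the target, so the ring accessors below come in _32/_64 pairs and the
 * matching variant is selected per target.
 */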
static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt)
{
	return htt->rx_ring.size * sizeof(*htt->rx_ring.paddrs_ring_32);
}

static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt)
{
	return htt->rx_ring.size * sizeof(*htt->rx_ring.paddrs_ring_64);
}

static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt,
					     void *vaddr)
{
	htt->rx_ring.paddrs_ring_32 = vaddr;
}

static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt,
					     void *vaddr)
{
	htt->rx_ring.paddrs_ring_64 = vaddr;
}

static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt,
					  dma_addr_t paddr, int idx)
{
	htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr);
}

static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt,
					  dma_addr_t paddr, int idx)
{
	htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr);
}

static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx)
{
	htt->rx_ring.paddrs_ring_32[idx] = 0;
}

static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx)
{
	htt->rx_ring.paddrs_ring_64[idx] = 0;
}

static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt *htt)
{
	return (void *)htt->rx_ring.paddrs_ring_32;
}

static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt)
{
	return (void *)htt->rx_ring.paddrs_ring_64;
}

static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct ath10k_hw_params *hw = &htt->ar->hw_params;
	struct htt_rx_desc *rx_desc;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	/* The Full Rx Reorder firmware has no way of telling the host
	 * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
	 * To keep things simple make sure ring is always half empty. This
	 * guarantees there'll be no replenishment overruns possible.
	 */
	BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);

	idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	if (idx < 0 || idx >= htt->rx_ring.size) {
		ath10k_err(htt->ar, "rx ring index is not valid, firmware malfunctioning?\n");
		idx &= htt->rx_ring.size_mask;
	}

	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = HTT_RX_BUF_TO_RX_DESC(hw, skb->data);
		ath10k_htt_rx_desc_get_attention(hw, rx_desc)->flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		rxcb = ATH10K_SKB_RXCB(skb);
		rxcb->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		ath10k_htt_set_paddrs_ring(htt, paddr, idx);
		htt->rx_ring.fill_cnt++;

		if (htt->rx_ring.in_ord_rx) {
			hash_add(htt->rx_ring.skb_table,
				 &ATH10K_SKB_RXCB(skb)->hlist,
				 paddr);
		}

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	/*
	 * Make sure the rx buffer is updated before available buffer
	 * index to avoid any potential rx ring corruption.
	 */
	mb();
	*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);

	return ret;
}

static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_deficit, num_to_fill;

	/* Refilling the whole RX ring buffer proves to be a bad idea. The
	 * reason is RX may take up significant amount of CPU cycles and starve
	 * other tasks, e.g. TX on an ethernet device while acting as a bridge
	 * with ath10k wlan interface. This ended up with very poor performance
	 * once the host system's CPU was overwhelmed with RX on ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turn makes use of the fact tasklets are
	 * processed in FIFO order. This means actual RX processing can starve
	 * out refilling. If there's not enough buffers on RX ring FW will not
	 * report RX until it is refilled with enough buffers. This
	 * automatically balances load with respect to CPU power.
	 *
	 * This probably comes at a cost of lower maximum throughput but
	 * improves the average and stability.
	 */
	spin_lock_bh(&htt->rx_ring.lock);
	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
	num_deficit -= num_to_fill;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	} else if (num_deficit > 0) {
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}

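/* Timer callback armed by ath10k_htt_rx_msdu_buff_replenish() when a refill
 * attempt could not reach the desired fill level; it simply retries the
 * replenish later.
 */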
static void ath10k_htt_rx_ring_refill_retry(struct timer_list *t)
{
	struct ath10k_htt *htt = from_timer(htt, t, rx_ring.refill_retry_timer);

	ath10k_htt_rx_msdu_buff_replenish(htt);
}

int ath10k_htt_rx_ring_refill(struct ath10k *ar)
{
	struct ath10k_htt *htt = &ar->htt;
	int ret;

	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		return 0;

	spin_lock_bh(&htt->rx_ring.lock);
	ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
					      htt->rx_ring.fill_cnt));

	if (ret)
		ath10k_htt_rx_ring_free(htt);

	spin_unlock_bh(&htt->rx_ring.lock);

	return ret;
}

void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
	if (htt->ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		return;

	del_timer_sync(&htt->rx_ring.refill_retry_timer);

	skb_queue_purge(&htt->rx_msdus_q);
	skb_queue_purge(&htt->rx_in_ord_compl_q);
	skb_queue_purge(&htt->tx_fetch_ind_q);

	spin_lock_bh(&htt->rx_ring.lock);
	ath10k_htt_rx_ring_free(htt);
	spin_unlock_bh(&htt->rx_ring.lock);

	dma_free_coherent(htt->ar->dev,
			  ath10k_htt_get_rx_ring_size(htt),
			  ath10k_htt_get_vaddr_ring(htt),
			  htt->rx_ring.base_paddr);

	ath10k_htt_config_paddrs_ring(htt, NULL);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);
	htt->rx_ring.alloc_idx.vaddr = NULL;

	kfree(htt->rx_ring.netbufs_ring);
	htt->rx_ring.netbufs_ring = NULL;
}

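/* Pop the oldest filled buffer from the RX ring (non in-order path), unmap it
 * and hand ownership of the sk_buff to the caller. Must be called with
 * rx_ring.lock held.
 */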
static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int idx;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_ring.fill_cnt == 0) {
		ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
		return NULL;
	}

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];
	htt->rx_ring.netbufs_ring[idx] = NULL;
	ath10k_htt_reset_paddrs_ring(htt, idx);

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev,
			 ATH10K_SKB_RXCB(msdu)->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   struct sk_buff_head *amsdu)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_hw_params *hw = &ar->hw_params;
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;
	struct rx_attention *rx_desc_attention;
	struct rx_frag_info_common *rx_desc_frag_info_common;
	struct rx_msdu_start_common *rx_desc_msdu_start_common;
	struct rx_msdu_end_common *rx_desc_msdu_end_common;

	lockdep_assert_held(&htt->rx_ring.lock);

	for (;;) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		msdu = ath10k_htt_rx_netbuf_pop(htt);
		if (!msdu) {
			__skb_queue_purge(amsdu);
			return -ENOENT;
		}

		__skb_queue_tail(amsdu, msdu);

		rx_desc = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
		rx_desc_attention = ath10k_htt_rx_desc_get_attention(hw, rx_desc);
		rx_desc_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw,
									       rx_desc);
		rx_desc_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rx_desc);
		rx_desc_frag_info_common = ath10k_htt_rx_desc_get_frag_info(hw, rx_desc);

		/* FIXME: we must report msdu payload since this is what caller
		 * expects now
		 */
		skb_put(msdu, hw->rx_desc_ops->rx_desc_msdu_payload_offset);
		skb_pull(msdu, hw->rx_desc_ops->rx_desc_msdu_payload_offset);

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc_attention->flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			__skb_queue_purge(amsdu);
			return -EIO;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc_attention->flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc_msdu_start_common->info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc_frag_info_common->ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_trim(msdu, 0);
		skb_put(msdu, min(msdu_len, ath10k_htt_rx_msdu_size(hw)));
		msdu_len -= msdu->len;

		/* Note: Chained buffers do not contain rx descriptor */
		while (msdu_chained--) {
			msdu = ath10k_htt_rx_netbuf_pop(htt);
			if (!msdu) {
				__skb_queue_purge(amsdu);
				return -ENOENT;
			}

			__skb_queue_tail(amsdu, msdu);
			skb_trim(msdu, 0);
			skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= msdu->len;
			msdu_chaining = 1;
		}

		last_msdu = __le32_to_cpu(rx_desc_msdu_end_common->info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		/* FIXME: why are we skipping the first part of the rx_desc? */
		trace_ath10k_htt_rx_desc(ar, (void *)rx_desc + sizeof(u32),
					 hw->rx_desc_ops->rx_desc_size - sizeof(u32));

		if (last_msdu)
			break;
	}

	if (skb_queue_empty(amsdu))
		msdu_chaining = -1;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */
	return msdu_chaining;
}

static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
					       u64 paddr)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
	if (!msdu)
		return NULL;

	rxcb = ATH10K_SKB_RXCB(msdu);
	hash_del(&rxcb->hlist);
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev, rxcb->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

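/* Chain a follow-up RX buffer onto the head sk_buff via its frag_list so an
 * MSDU that spans several ring buffers is delivered as a single sk_buff.
 */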
static inline void ath10k_htt_append_frag_list(struct sk_buff *skb_head,
					       struct sk_buff *frag_list,
					       unsigned int frag_len)
{
	skb_shinfo(skb_head)->frag_list = frag_list;
	skb_head->data_len = frag_len;
	skb_head->len += skb_head->data_len;
}

static int ath10k_htt_rx_handle_amsdu_mon_32(struct ath10k_htt *htt,
					     struct sk_buff *msdu,
					     struct htt_rx_in_ord_msdu_desc **msdu_desc)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_hw_params *hw = &ar->hw_params;
	u32 paddr;
	struct sk_buff *frag_buf;
	struct sk_buff *prev_frag_buf;
	u8 last_frag;
	struct htt_rx_in_ord_msdu_desc *ind_desc = *msdu_desc;
	struct htt_rx_desc *rxd;
	int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);

	rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
	trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);

	skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
	skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
	skb_put(msdu, min(amsdu_len, ath10k_htt_rx_msdu_size(hw)));
	amsdu_len -= msdu->len;

	last_frag = ind_desc->reserved;
	if (last_frag) {
		if (amsdu_len) {
			ath10k_warn(ar, "invalid amsdu len %u, left %d",
				    __le16_to_cpu(ind_desc->msdu_len),
				    amsdu_len);
		}
		return 0;
	}

	ind_desc++;
	paddr = __le32_to_cpu(ind_desc->msdu_paddr);
	frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
	if (!frag_buf) {
		ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%x", paddr);
		return -ENOENT;
	}

	skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
	ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len);

	amsdu_len -= frag_buf->len;
	prev_frag_buf = frag_buf;
	last_frag = ind_desc->reserved;
	while (!last_frag && amsdu_len) {
		ind_desc++;
		paddr = __le32_to_cpu(ind_desc->msdu_paddr);
		frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!frag_buf) {
			ath10k_warn(ar, "failed to pop frag-n paddr: 0x%x",
				    paddr);
			prev_frag_buf->next = NULL;
			return -ENOENT;
		}

		skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
		last_frag = ind_desc->reserved;
		amsdu_len -= frag_buf->len;

		prev_frag_buf->next = frag_buf;
		prev_frag_buf = frag_buf;
	}

	if (amsdu_len) {
		ath10k_warn(ar, "invalid amsdu len %u, left %d",
			    __le16_to_cpu(ind_desc->msdu_len), amsdu_len);
	}

	*msdu_desc = ind_desc;

	prev_frag_buf->next = NULL;
	return 0;
}

static int
ath10k_htt_rx_handle_amsdu_mon_64(struct ath10k_htt *htt,
				  struct sk_buff *msdu,
				  struct htt_rx_in_ord_msdu_desc_ext **msdu_desc)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_hw_params *hw = &ar->hw_params;
	u64 paddr;
	struct sk_buff *frag_buf;
	struct sk_buff *prev_frag_buf;
	u8 last_frag;
	struct htt_rx_in_ord_msdu_desc_ext *ind_desc = *msdu_desc;
	struct htt_rx_desc *rxd;
	int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);

	rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
	trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);

	skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
	skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
	skb_put(msdu, min(amsdu_len, ath10k_htt_rx_msdu_size(hw)));
	amsdu_len -= msdu->len;

	last_frag = ind_desc->reserved;
	if (last_frag) {
		if (amsdu_len) {
			ath10k_warn(ar, "invalid amsdu len %u, left %d",
				    __le16_to_cpu(ind_desc->msdu_len),
				    amsdu_len);
		}
		return 0;
	}

	ind_desc++;
	paddr = __le64_to_cpu(ind_desc->msdu_paddr);
	frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
	if (!frag_buf) {
		ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%llx", paddr);
		return -ENOENT;
	}

	skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
	ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len);

	amsdu_len -= frag_buf->len;
	prev_frag_buf = frag_buf;
	last_frag = ind_desc->reserved;
	while (!last_frag && amsdu_len) {
		ind_desc++;
		paddr = __le64_to_cpu(ind_desc->msdu_paddr);
		frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!frag_buf) {
			ath10k_warn(ar, "failed to pop frag-n paddr: 0x%llx",
				    paddr);
			prev_frag_buf->next = NULL;
			return -ENOENT;
		}

		skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
		last_frag = ind_desc->reserved;
		amsdu_len -= frag_buf->len;

		prev_frag_buf->next = frag_buf;
		prev_frag_buf = frag_buf;
	}

	if (amsdu_len) {
		ath10k_warn(ar, "invalid amsdu len %u, left %d",
			    __le16_to_cpu(ind_desc->msdu_len), amsdu_len);
	}

	*msdu_desc = ind_desc;

	prev_frag_buf->next = NULL;
	return 0;
}

static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
					  struct htt_rx_in_ord_ind *ev,
					  struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32;
	struct htt_rx_desc *rxd;
	struct rx_attention *rxd_attention;
	struct sk_buff *msdu;
	int msdu_count, ret;
	bool is_offload;
	u32 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le32_to_cpu(msdu_desc->msdu_paddr);

		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		if (!is_offload && ar->monitor_arvif) {
			ret = ath10k_htt_rx_handle_amsdu_mon_32(htt, msdu,
								&msdu_desc);
			if (ret) {
				__skb_queue_purge(list);
				return ret;
			}
			__skb_queue_tail(list, msdu);
			msdu_desc++;
			continue;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
			rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);

			trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);

			skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
			skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			if (!(__le32_to_cpu(rxd_attention->flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}

static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
					  struct htt_rx_in_ord_ind *ev,
					  struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64;
	struct htt_rx_desc *rxd;
	struct rx_attention *rxd_attention;
	struct sk_buff *msdu;
	int msdu_count, ret;
	bool is_offload;
	u64 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le64_to_cpu(msdu_desc->msdu_paddr);
		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		if (!is_offload && ar->monitor_arvif) {
			ret = ath10k_htt_rx_handle_amsdu_mon_64(htt, msdu,
								&msdu_desc);
			if (ret) {
				__skb_queue_purge(list);
				return ret;
			}
			__skb_queue_tail(list, msdu);
			msdu_desc++;
			continue;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
			rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);

			trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);

			skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
			skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			if (!(__le32_to_cpu(rxd_attention->flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}

int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	dma_addr_t paddr;
	void *vaddr, *vaddr_ring;
	size_t size;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		return 0;

	htt->rx_confused = false;

	/* XXX: The fill level could be changed during runtime in response to
	 * the host processing latency. Is this really worth it?
	 */
	htt->rx_ring.size = HTT_RX_RING_SIZE;
	htt->rx_ring.size_mask = htt->rx_ring.size - 1;
	htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level;

	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn(ar, "htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.netbufs_ring =
		kcalloc(htt->rx_ring.size, sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	size = ath10k_htt_get_rx_ring_size(htt);

	vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
	if (!vaddr_ring)
		goto err_dma_ring;

	ath10k_htt_config_paddrs_ring(htt, vaddr_ring);
	htt->rx_ring.base_paddr = paddr;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_KERNEL);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	timer_setup(timer, ath10k_htt_rx_ring_refill_retry, 0);

	spin_lock_init(&htt->rx_ring.lock);

	htt->rx_ring.fill_cnt = 0;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	hash_init(htt->rx_ring.skb_table);

	skb_queue_head_init(&htt->rx_msdus_q);
	skb_queue_head_init(&htt->rx_in_ord_compl_q);
	skb_queue_head_init(&htt->tx_fetch_ind_q);
	atomic_set(&htt->num_mpdus_ready, 0);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  ath10k_htt_get_rx_ring_size(htt),
			  vaddr_ring,
			  htt->rx_ring.base_paddr);
	ath10k_htt_config_paddrs_ring(htt, NULL);
err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);
	htt->rx_ring.netbufs_ring = NULL;
err_netbuf:
	return -ENOMEM;
}

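/* Per-frame crypto header (IV/PN) length in bytes for the given HTT cipher
 * type, i.e. the bytes that sit between the 802.11 header and the payload.
 */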
static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
					  enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
		return IEEE80211_CCMP_256_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return IEEE80211_GCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

#define MICHAEL_MIC_LEN 8

static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar,
					enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
		return IEEE80211_CCMP_256_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return IEEE80211_GCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar,
					enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

struct amsdu_subframe_hdr {
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	__be16 len;
} __packed;

#define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)

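/* Map the bandwidth field from the PPDU start descriptor (0..3) to the
 * mac80211 RATE_INFO_BW_* encoding.
 */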
static inline u8 ath10k_bw_to_mac80211_bw(u8 bw)
{
	u8 ret = 0;

	switch (bw) {
	case 0:
		ret = RATE_INFO_BW_20;
		break;
	case 1:
		ret = RATE_INFO_BW_40;
		break;
	case 2:
		ret = RATE_INFO_BW_80;
		break;
	case 3:
		ret = RATE_INFO_BW_160;
		break;
	}

	return ret;
}

static void ath10k_htt_rx_h_rates(struct ath10k *ar,
				  struct ieee80211_rx_status *status,
				  struct htt_rx_desc *rxd)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct rx_attention *rxd_attention;
	struct rx_mpdu_start *rxd_mpdu_start;
	struct rx_mpdu_end *rxd_mpdu_end;
	struct rx_msdu_start_common *rxd_msdu_start_common;
	struct rx_msdu_end_common *rxd_msdu_end_common;
	struct rx_ppdu_start *rxd_ppdu_start;
	struct ieee80211_supported_band *sband;
	u8 cck, rate, bw, sgi, mcs, nss;
	u8 *rxd_msdu_payload;
	u8 preamble = 0;
	u8 group_id;
	u32 info1, info2, info3;
	u32 stbc, nsts_su;

	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
	rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
	rxd_mpdu_end = ath10k_htt_rx_desc_get_mpdu_end(hw, rxd);
	rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
	rxd_ppdu_start = ath10k_htt_rx_desc_get_ppdu_start(hw, rxd);
	rxd_msdu_payload = ath10k_htt_rx_desc_get_msdu_payload(hw, rxd);

	info1 = __le32_to_cpu(rxd_ppdu_start->info1);
	info2 = __le32_to_cpu(rxd_ppdu_start->info2);
	info3 = __le32_to_cpu(rxd_ppdu_start->info3);

	preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);

	switch (preamble) {
	case HTT_RX_LEGACY:
		/* To get legacy rate index band is required. Since band can't
		 * be undefined check if freq is non-zero.
		 */
		if (!status->freq)
			return;

		cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
		rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
		rate &= ~RX_PPDU_START_RATE_FLAG;

		sband = &ar->mac.sbands[status->band];
		status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
		break;
	case HTT_RX_HT:
	case HTT_RX_HT_WITH_TXBF:
		/* HT-SIG - Table 20-11 in info2 and info3 */
		mcs = info2 & 0x1F;
		nss = mcs >> 3;
		bw = (info2 >> 7) & 1;
		sgi = (info3 >> 7) & 1;

		status->rate_idx = mcs;
		status->encoding = RX_ENC_HT;
		if (sgi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (bw)
			status->bw = RATE_INFO_BW_40;
		break;
	case HTT_RX_VHT:
	case HTT_RX_VHT_WITH_TXBF:
		/* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3 */
		bw = info2 & 3;
		sgi = info3 & 1;
		stbc = (info2 >> 3) & 1;
		group_id = (info2 >> 4) & 0x3F;

		if (GROUP_ID_IS_SU_MIMO(group_id)) {
			mcs = (info3 >> 4) & 0x0F;
			nsts_su = ((info2 >> 10) & 0x07);
			if (stbc)
				nss = (nsts_su >> 2) + 1;
			else
				nss = (nsts_su + 1);
		} else {
			/* Hardware doesn't decode VHT-SIG-B into Rx descriptor
			 * so it's impossible to decode MCS. Also since
			 * firmware consumes Group Id Management frames host
			 * has no knowledge regarding group/user position
			 * mapping so it's impossible to pick the correct Nsts
			 * from VHT-SIG-A1.
			 *
			 * Bandwidth and SGI are valid so report the rateinfo
			 * on best-effort basis.
			 */
			mcs = 0;
			nss = 1;
		}

		if (mcs > 0x09) {
			ath10k_warn(ar, "invalid MCS received %u\n", mcs);
			ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
				    __le32_to_cpu(rxd_attention->flags),
				    __le32_to_cpu(rxd_mpdu_start->info0),
				    __le32_to_cpu(rxd_mpdu_start->info1),
				    __le32_to_cpu(rxd_msdu_start_common->info0),
				    __le32_to_cpu(rxd_msdu_start_common->info1),
				    rxd_ppdu_start->info0,
				    __le32_to_cpu(rxd_ppdu_start->info1),
				    __le32_to_cpu(rxd_ppdu_start->info2),
				    __le32_to_cpu(rxd_ppdu_start->info3),
				    __le32_to_cpu(rxd_ppdu_start->info4));

			ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
				    __le32_to_cpu(rxd_msdu_end_common->info0),
				    __le32_to_cpu(rxd_mpdu_end->info0));

			ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
					"rx desc msdu payload: ",
					rxd_msdu_payload, 50);
		}

		status->rate_idx = mcs;
		status->nss = nss;

		if (sgi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

		status->bw = ath10k_bw_to_mac80211_bw(bw);
		status->encoding = RX_ENC_VHT;
		break;
	default:
		break;
	}
}

static struct ieee80211_channel *
ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct rx_attention *rxd_attention;
	struct rx_msdu_end_common *rxd_msdu_end_common;
	struct rx_mpdu_start *rxd_mpdu_start;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;
	u16 peer_id;

	lockdep_assert_held(&ar->data_lock);

	if (!rxd)
		return NULL;

	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
	rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);

	if (rxd_attention->flags &
	    __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
		return NULL;

	if (!(rxd_msdu_end_common->info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
		return NULL;

	peer_id = MS(__le32_to_cpu(rxd_mpdu_start->info0),
		     RX_MPDU_START_INFO0_PEER_IDX);

	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer)
		return NULL;

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (WARN_ON_ONCE(!arvif))
		return NULL;

	if (ath10k_mac_vif_chan(arvif->vif, &def))
		return NULL;

	return def.chan;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
{
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;

	lockdep_assert_held(&ar->data_lock);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->vdev_id == vdev_id &&
		    ath10k_mac_vif_chan(arvif->vif, &def) == 0)
			return def.chan;
	}

	return NULL;
}

static void
ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
			      struct ieee80211_chanctx_conf *conf,
			      void *data)
{
	struct cfg80211_chan_def *def = data;

	*def = conf->def;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_any_channel(struct ath10k *ar)
{
	struct cfg80211_chan_def def = {};

	ieee80211_iter_chan_contexts_atomic(ar->hw,
					    ath10k_htt_rx_h_any_chan_iter,
					    &def);

	return def.chan;
}

static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd,
				    u32 vdev_id)
{
	struct ieee80211_channel *ch;

	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;
	if (!ch)
		ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
	if (!ch)
		ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
	if (!ch)
		ch = ath10k_htt_rx_h_any_channel(ar);
	if (!ch)
		ch = ar->tgt_oper_chan;
	spin_unlock_bh(&ar->data_lock);

	if (!ch)
		return false;

	status->band = ch->band;
	status->freq = ch->center_freq;

	return true;
}

static void ath10k_htt_rx_h_signal(struct ath10k *ar,
				   struct ieee80211_rx_status *status,
				   struct htt_rx_desc *rxd)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct rx_ppdu_start *rxd_ppdu_start = ath10k_htt_rx_desc_get_ppdu_start(hw, rxd);
	int i;

	for (i = 0; i < IEEE80211_MAX_CHAINS; i++) {
		status->chains &= ~BIT(i);

		if (rxd_ppdu_start->rssi_chains[i].pri20_mhz != 0x80) {
			status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR +
				rxd_ppdu_start->rssi_chains[i].pri20_mhz;

			status->chains |= BIT(i);
		}
	}

	/* FIXME: Get real NF */
	status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
			 rxd_ppdu_start->rssi_comb;
	status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}

static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct rx_ppdu_end_common *rxd_ppdu_end_common;

	rxd_ppdu_end_common = ath10k_htt_rx_desc_get_ppdu_end(hw, rxd);

	/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
	 * means all prior MSDUs in a PPDU are reported to mac80211 without the
	 * TSF. Is it worth holding frames until end of PPDU is known?
	 *
	 * FIXME: Can we get/compute 64bit TSF?
	 */
	status->mactime = __le32_to_cpu(rxd_ppdu_end_common->tsf_timestamp);
	status->flag |= RX_FLAG_MACTIME_END;
}

static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 u32 vdev_id)
{
	struct sk_buff *first;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_desc *rxd;
	struct rx_attention *rxd_attention;
	bool is_first_ppdu;
	bool is_last_ppdu;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
				    (void *)first->data - hw->rx_desc_ops->rx_desc_size);

	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);

	is_first_ppdu = !!(rxd_attention->flags &
			   __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
	is_last_ppdu = !!(rxd_attention->flags &
			  __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));

	if (is_first_ppdu) {
		/* New PPDU starts so clear out the old per-PPDU status. */
		status->freq = 0;
		status->rate_idx = 0;
		status->nss = 0;
		status->encoding = RX_ENC_LEGACY;
		status->bw = RATE_INFO_BW_20;

		status->flag &= ~RX_FLAG_MACTIME;
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		status->flag &= ~(RX_FLAG_AMPDU_IS_LAST);
		status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
		status->ampdu_reference = ar->ampdu_reference;

		ath10k_htt_rx_h_signal(ar, status, rxd);
		ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
		ath10k_htt_rx_h_rates(ar, status, rxd);
	}

	if (is_last_ppdu) {
		ath10k_htt_rx_h_mactime(ar, status, rxd);

		/* set ampdu last segment flag */
		status->flag |= RX_FLAG_AMPDU_IS_LAST;
		ar->ampdu_reference++;
	}
}

static const char * const tid_to_ac[] = {
	"BE",
	"BK",
	"BK",
	"BE",
	"VI",
	"VI",
	"VO",
	"VO",
};

static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
{
	u8 *qc;
	int tid;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return "";

	qc = ieee80211_get_qos_ctl(hdr);
	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
	if (tid < 8)
		snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
	else
		snprintf(out, size, "tid %d", tid);

	return out;
}

static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar,
				       struct ieee80211_rx_status *rx_status,
				       struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;

	status = IEEE80211_SKB_RXCB(skb);
	*status = *rx_status;

	skb_queue_tail(&ar->htt.rx_msdus_q, skb);
}

static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	char tid[32];

	status = IEEE80211_SKB_RXCB(skb);

	if (!(ar->filter_flags & FIF_FCSFAIL) &&
	    status->flag & RX_FLAG_FAILED_FCS_CRC) {
		ar->stats.rx_crc_err_drop++;
		dev_kfree_skb_any(skb);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_DATA,
		   "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
		   skb,
		   skb->len,
		   ieee80211_get_SA(hdr),
		   ath10k_get_tid(hdr, tid, sizeof(tid)),
		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
							"mcast" : "ucast",
		   IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl)),
		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
		   (status->encoding == RX_ENC_HT) ? "ht" : "",
		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
			skb->data, skb->len);
	trace_ath10k_rx_hdr(ar, skb->data, skb->len);
	trace_ath10k_rx_payload(ar, skb->data, skb->len);

	ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
}

static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
				      struct ieee80211_hdr *hdr)
{
	int len = ieee80211_hdrlen(hdr->frame_control);

	if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
		      ar->running_fw->fw_file.fw_features))
		len = round_up(len, 4);

	return len;
}

static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					enum htt_rx_mpdu_encrypt_type enctype,
					bool is_decrypted,
					const u8 first_hdr[64])
{
	struct ieee80211_hdr *hdr;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_desc *rxd;
	struct rx_msdu_end_common *rxd_msdu_end_common;
	size_t hdr_len;
	size_t crypto_len;
	bool is_first;
	bool is_last;
	bool msdu_limit_err;
	int bytes_aligned = ar->hw_params.decap_align_bytes;
	u8 *qos;

	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);

	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
	is_first = !!(rxd_msdu_end_common->info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd_msdu_end_common->info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));

	/* Delivered decapped frame:
	 * [802.11 header]
	 * [crypto param] <-- can be trimmed if !fcs_err &&
	 *                    !decrypt_err && !peer_idx_invalid
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 * [payload]
	 * [FCS] <-- at end, needs to be trimmed
	 */

	/* Some hardware (QCA99x0 variants) limits the number of msdus in an
	 * a-msdu when deaggregating, so that unwanted MSDU-deaggregation is
	 * avoided for error packets. If the limit is exceeded, hw sends all
	 * remaining MSDUs as a single last MSDU with this msdu limit error set.
	 */
	msdu_limit_err = ath10k_htt_rx_desc_msdu_limit_error(hw, rxd);

	/* If MSDU limit error happens, then don't warn; a partial raw MSDU
	 * without first MSDU is expected in that case, and handled later here.
	 */
	/* This probably shouldn't happen but warn just in case */
	if (WARN_ON_ONCE(!is_first && !msdu_limit_err))
		return;

	/* This probably shouldn't happen but warn just in case */
	if (WARN_ON_ONCE(!(is_first && is_last) && !msdu_limit_err))
		return;

	skb_trim(msdu, msdu->len - FCS_LEN);

	/* Push original 80211 header */
	if (unlikely(msdu_limit_err)) {
		hdr = (struct ieee80211_hdr *)first_hdr;
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		if (ieee80211_is_data_qos(hdr->frame_control)) {
			qos = ieee80211_get_qos_ctl(hdr);
			qos[0] |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
		}

		memcpy(skb_push(msdu, crypto_len),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       crypto_len);

		memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
	}

	/* In most cases this will be true for sniffed frames. It makes sense
	 * to deliver them as-is without stripping the crypto param. This is
	 * necessary for software based decryption.
	 *
	 * If there's no error then the frame is decrypted. At least that is
	 * the case for frames that come in via fragmented rx indication.
	 */
	if (!is_decrypted)
		return;

	/* The payload is decrypted so strip crypto params. Start from tail
	 * since hdr is used to compute some stuff.
	 */

	hdr = (void *)msdu->data;

	/* Tail */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_mic_len(ar, enctype));

		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_icv_len(ar, enctype));
	} else {
		/* MIC */
		if (status->flag & RX_FLAG_MIC_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath10k_htt_rx_crypto_mic_len(ar, enctype));

		/* ICV */
		if (status->flag & RX_FLAG_ICV_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath10k_htt_rx_crypto_icv_len(ar, enctype));
	}

	/* MMIC */
	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN);

	/* Head */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		memmove((void *)msdu->data + crypto_len,
			(void *)msdu->data, hdr_len);
		skb_pull(msdu, crypto_len);
	}
}

static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
					  struct sk_buff *msdu,
					  struct ieee80211_rx_status *status,
					  const u8 first_hdr[64],
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int l3_pad_bytes;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 *
	 * Note: The nwifi header doesn't have QoS Control and is
	 * (always?) a 3addr frame.
	 *
	 * Note2: There's no A-MSDU subframe header. Even if it's part
	 * of an A-MSDU.
	 */

	/* pull decapped header and copy SA & DA */
	rxd = HTT_RX_BUF_TO_RX_DESC(hw, (void *)msdu->data -
				    hw->rx_desc_ops->rx_desc_size);

	l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);

	hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);

	hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
	skb_pull(msdu, hdr_len);

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
					   struct sk_buff *msdu,
					   enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_desc *rxd;
	struct rx_msdu_end_common *rxd_msdu_end_common;
	u8 *rxd_rx_hdr_status;
	size_t hdr_len, crypto_len;
	void *rfc1042;
	bool is_first, is_last, is_amsdu;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);

	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
	rxd_rx_hdr_status = ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);
	hdr = (void *)rxd_rx_hdr_status;

	is_first = !!(rxd_msdu_end_common->info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd_msdu_end_common->info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
	is_amsdu = !(is_first && is_last);

	rfc1042 = hdr;

	if (is_first) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		rfc1042 += round_up(hdr_len, bytes_aligned) +
			   round_up(crypto_len, bytes_aligned);
	}

	if (is_amsdu)
		rfc1042 += sizeof(struct amsdu_subframe_hdr);

	return rfc1042;
}

static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					const u8 first_hdr[64],
					enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct ieee80211_hdr *hdr;
	struct ethhdr *eth;
	size_t hdr_len;
	void *rfc1042;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int l3_pad_bytes;
	struct htt_rx_desc *rxd;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
	 * [payload]
	 */

	rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
	if (WARN_ON_ONCE(!rfc1042))
		return;

	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);

	l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);
	skb_pull(msdu, l3_pad_bytes);

	/* pull decapped header and copy SA & DA */
	eth = (struct ethhdr *)msdu->data;
	ether_addr_copy(da, eth->h_dest);
	ether_addr_copy(sa, eth->h_source);
	skb_pull(msdu, sizeof(struct ethhdr));

	/* push rfc1042/llc/snap */
	memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
	       sizeof(struct rfc1042_hdr));

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
					 struct sk_buff *msdu,
					 struct ieee80211_rx_status *status,
					 const u8 first_hdr[64],
					 enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	int l3_pad_bytes;
	struct htt_rx_desc *rxd;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [amsdu header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 * [payload]
	 */

	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);

	l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);

	skb_put(msdu, l3_pad_bytes);
	skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);

	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
}

static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
				    struct sk_buff *msdu,
				    struct ieee80211_rx_status *status,
				    const u8 first_hdr[64],
				    enum htt_rx_mpdu_encrypt_type enctype,
				    bool is_decrypted)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_desc *rxd;
	struct rx_msdu_start_common *rxd_msdu_start_common;
	enum rx_msdu_decap_format decap;

	/* First msdu's decapped header:
	 * [802.11 header] <-- padded to 4 bytes long
	 * [crypto param] <-- padded to 4 bytes long
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 *
	 * Other (2nd, 3rd, ..) msdu's decapped header:
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 */

	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);

	rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
	decap = MS(__le32_to_cpu(rxd_msdu_start_common->info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	switch (decap) {
	case RX_MSDU_DECAP_RAW:
		ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
					    is_decrypted, first_hdr);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr,
					      enctype);
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr,
					     enctype);
		break;
	}
}

static int ath10k_htt_rx_get_csum_state(struct ath10k_hw_params *hw, struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	struct rx_attention *rxd_attention;
	struct rx_msdu_start_common *rxd_msdu_start_common;
	u32 flags, info;
	bool is_ip4, is_ip6;
	bool is_tcp, is_udp;
	bool ip_csum_ok, tcpudp_csum_ok;

	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
				    (void *)skb->data - hw->rx_desc_ops->rx_desc_size);

	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
	rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
	flags = __le32_to_cpu(rxd_attention->flags);
	info = __le32_to_cpu(rxd_msdu_start_common->info1);

	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

	if (!is_ip4 && !is_ip6)
		return CHECKSUM_NONE;
	if (!is_tcp && !is_udp)
		return CHECKSUM_NONE;
	if (!ip_csum_ok)
		return CHECKSUM_NONE;
	if (!tcpudp_csum_ok)
		return CHECKSUM_NONE;

	return CHECKSUM_UNNECESSARY;
}

static void ath10k_htt_rx_h_csum_offload(struct ath10k_hw_params *hw,
					 struct sk_buff *msdu)
{
	msdu->ip_summed = ath10k_htt_rx_get_csum_state(hw, msdu);
}

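/* Extract the 48-bit CCMP packet number from the crypto header that follows
 * the 802.11 header; used below for fragment PN sanity checking.
 */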
static u64 ath10k_htt_rx_h_get_pn(struct ath10k *ar, struct sk_buff *skb,
				  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	u64 pn = 0;
	u8 *ehdr;

	hdr = (struct ieee80211_hdr *)skb->data;
	ehdr = skb->data + ieee80211_hdrlen(hdr->frame_control);

	if (enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2) {
		pn = ehdr[0];
		pn |= (u64)ehdr[1] << 8;
		pn |= (u64)ehdr[4] << 16;
		pn |= (u64)ehdr[5] << 24;
		pn |= (u64)ehdr[6] << 32;
		pn |= (u64)ehdr[7] << 40;
	}
	return pn;
}

static bool ath10k_htt_rx_h_frag_multicast_check(struct ath10k *ar,
						 struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)skb->data;
	return !is_multicast_ether_addr(hdr->addr1);
}

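/* Verify that a non-first fragment belongs to the same MPDU as the previous
 * one (same sequence number) and that its CCMP packet number increments by
 * exactly one.
 */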
static bool ath10k_htt_rx_h_frag_pn_check(struct ath10k *ar,
					  struct sk_buff *skb,
					  u16 peer_id,
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ath10k_peer *peer;
	union htt_rx_pn_t *last_pn, new_pn = {0};
	struct ieee80211_hdr *hdr;
	u8 tid, frag_number;
	u32 seq;

	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer for frag pn check\n");
		return false;
	}

	hdr = (struct ieee80211_hdr *)skb->data;
	if (ieee80211_is_data_qos(hdr->frame_control))
		tid = ieee80211_get_tid(hdr);
	else
		tid = ATH10K_TXRX_NON_QOS_TID;

	last_pn = &peer->frag_tids_last_pn[tid];
	new_pn.pn48 = ath10k_htt_rx_h_get_pn(ar, skb, enctype);
	frag_number = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
	seq = IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl));

	if (frag_number == 0) {
		last_pn->pn48 = new_pn.pn48;
		peer->frag_tids_seq[tid] = seq;
	} else {
		if (seq != peer->frag_tids_seq[tid])
			return false;

		if (new_pn.pn48 != last_pn->pn48 + 1)
			return false;

		last_pn->pn48 = new_pn.pn48;
	}

	return true;
}

static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 bool fill_crypt_header,
				 u8 *rx_hdr,
				 enum ath10k_pkt_rx_err *err,
				 u16 peer_id,
				 bool frag)
{
	struct sk_buff *first;
	struct sk_buff *last;
	struct sk_buff *msdu, *temp;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_desc *rxd;
	struct rx_attention *rxd_attention;
	struct rx_mpdu_start *rxd_mpdu_start;
	struct ieee80211_hdr *hdr;
	enum htt_rx_mpdu_encrypt_type enctype;
	u8 first_hdr[64];
	u8 *qos;
	bool has_fcs_err;
	bool has_crypto_err;
	bool has_tkip_err;
	bool has_peer_idx_invalid;
	bool is_decrypted;
	bool is_mgmt;
	u32 attention;
	bool frag_pn_check = true, multicast_check = true;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
				    (void *)first->data - hw->rx_desc_ops->rx_desc_size);

	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
	rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);

	is_mgmt = !!(rxd_attention->flags &
		     __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));

	enctype = MS(__le32_to_cpu(rxd_mpdu_start->info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	/* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
	 * decapped header. It'll be used for undecapping of each MSDU.
	 */
	hdr = (void *)ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);
	memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);

	if (rx_hdr)
		memcpy(rx_hdr, hdr, RX_HTT_HDR_STATUS_LEN);

	/* Each A-MSDU subframe will use the original header as the base and be
	 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
	 */
	hdr = (void *)first_hdr;

	if (ieee80211_is_data_qos(hdr->frame_control)) {
		qos = ieee80211_get_qos_ctl(hdr);
		qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
	}

	/* Some attention flags are valid only in the last MSDU. */
	last = skb_peek_tail(amsdu);
	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
				    (void *)last->data - hw->rx_desc_ops->rx_desc_size);

	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
	attention = __le32_to_cpu(rxd_attention->flags);

	has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
	has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
	has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);

	/* Note: If hardware captures an encrypted frame that it can't decrypt,
	 * e.g. due to fcs error, missing peer or invalid key data it will
	 * report the frame as raw.
	 */
	is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
			!has_fcs_err &&
			!has_crypto_err &&
			!has_peer_idx_invalid);

	/* Clear per-MPDU flags while leaving per-PPDU flags intact. */
	status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
			  RX_FLAG_MMIC_ERROR |
			  RX_FLAG_DECRYPTED |
			  RX_FLAG_IV_STRIPPED |
			  RX_FLAG_ONLY_MONITOR |
			  RX_FLAG_MMIC_STRIPPED);

	if (has_fcs_err)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (has_tkip_err)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (err) {
		if (has_fcs_err)
			*err = ATH10K_PKT_RX_ERR_FCS;
		else if (has_tkip_err)
			*err = ATH10K_PKT_RX_ERR_TKIP;
		else if (has_crypto_err)
			*err = ATH10K_PKT_RX_ERR_CRYPT;
		else if (has_peer_idx_invalid)
			*err = ATH10K_PKT_RX_ERR_PEER_IDX_INVAL;
	}

	/* Firmware reports all necessary management frames via WMI already.
	 * They are not reported to monitor interfaces at all so pass the ones
	 * coming via HTT to monitor interfaces instead. This simplifies
	 * matters a lot.
	 */
	if (is_mgmt)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	if (is_decrypted) {
		status->flag |= RX_FLAG_DECRYPTED;

		if (likely(!is_mgmt))
			status->flag |= RX_FLAG_MMIC_STRIPPED;

		if (fill_crypt_header)
			status->flag |= RX_FLAG_MIC_STRIPPED |
					RX_FLAG_ICV_STRIPPED;
		else
			status->flag |= RX_FLAG_IV_STRIPPED;
	}

	skb_queue_walk(amsdu, msdu) {
		if (frag && !fill_crypt_header && is_decrypted &&
		    enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
			frag_pn_check = ath10k_htt_rx_h_frag_pn_check(ar,
								      msdu,
								      peer_id,
								      enctype);

		if (frag)
			multicast_check = ath10k_htt_rx_h_frag_multicast_check(ar,
										msdu);

		if (!frag_pn_check || !multicast_check) {
			/* Discard the fragment with invalid PN or multicast DA
			 */
			temp = msdu->prev;
			__skb_unlink(msdu, amsdu);
			dev_kfree_skb_any(msdu);
			msdu = temp;
			frag_pn_check = true;
			multicast_check = true;
			continue;
		}

		ath10k_htt_rx_h_csum_offload(&ar->hw_params, msdu);

		if (frag && !fill_crypt_header &&
		    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
			status->flag &= ~RX_FLAG_MMIC_STRIPPED;

		ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
					is_decrypted);

		/* Undecapping involves copying the original 802.11 header back
		 * to sk_buff. If frame is protected and hardware has decrypted
		 * it then remove the protected bit.
		 */
		if (!is_decrypted)
			continue;
		if (is_mgmt)
			continue;

		if (fill_crypt_header)
			continue;

		hdr = (void *)msdu->data;
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);

		if (frag && !fill_crypt_header &&
		    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
			status->flag &= ~RX_FLAG_IV_STRIPPED &
					~RX_FLAG_MMIC_STRIPPED;
	}
}

static void ath10k_htt_rx_h_enqueue(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    struct ieee80211_rx_status *status)
{
	struct sk_buff *msdu;
	struct sk_buff *first_subframe;

	first_subframe = skb_peek(amsdu);

	while ((msdu = __skb_dequeue(amsdu))) {
		/* Setup per-MSDU flags */
		if (skb_queue_empty(amsdu))
			status->flag &= ~RX_FLAG_AMSDU_MORE;
		else
			status->flag |= RX_FLAG_AMSDU_MORE;

		if (msdu == first_subframe) {
			first_subframe = NULL;
			status->flag &= ~RX_FLAG_ALLOW_SAME_PN;
		} else {
			status->flag |= RX_FLAG_ALLOW_SAME_PN;
		}

		ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
	}
}
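
/* Editor's illustrative sketch, not part of the original driver: how the
 * per-MSDU rx flags computed in ath10k_htt_rx_h_enqueue() above evolve across
 * an A-MSDU with 'total' subframes. The helper name and parameters are
 * hypothetical; it only restates the loop's flag logic for clarity.
 */
static u32 __maybe_unused
ath10k_example_amsdu_subframe_flags(int idx, int total)
{
	u32 flag = 0;

	/* every subframe except the last advertises more A-MSDU data */
	if (idx != total - 1)
		flag |= RX_FLAG_AMSDU_MORE;

	/* subframes after the first may legitimately reuse the same PN */
	if (idx != 0)
		flag |= RX_FLAG_ALLOW_SAME_PN;

	return flag;
}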
static int ath10k_unchain_msdu(struct sk_buff_head *amsdu,
			       unsigned long *unchain_cnt)
{
	struct sk_buff *skb, *first;
	int space;
	int total_len = 0;
	int amsdu_len = skb_queue_len(amsdu);

	/* TODO: Might could optimize this by using
	 * skb_try_coalesce or similar method to
	 * decrease copying, or maybe get mac80211 to
	 * provide a way to just receive a list of
	 * skb?
	 */

	first = __skb_dequeue(amsdu);

	/* Allocate total length all at once. */
	skb_queue_walk(amsdu, skb)
		total_len += skb->len;

	space = total_len - skb_tailroom(first);
	if ((space > 0) &&
	    (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
		/* TODO: bump some rx-oom error stat */
		/* put it back together so we can free the
		 * whole list at once.
		 */
		__skb_queue_head(amsdu, first);
		return -1;
	}

	/* Walk list again, copying contents into
	 * msdu-head
	 */
	while ((skb = __skb_dequeue(amsdu))) {
		skb_copy_from_linear_data(skb, skb_put(first, skb->len),
					  skb->len);
		dev_kfree_skb_any(skb);
	}

	__skb_queue_head(amsdu, first);

	*unchain_cnt += amsdu_len - 1;

	return 0;
}
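
/* Editor's illustrative sketch (hypothetical helper, not in the driver): the
 * tailroom math behind ath10k_unchain_msdu() above. pskb_expand_head() only
 * has to grow the first buffer when the chained payload does not already fit
 * in its tailroom.
 */
static int __maybe_unused
ath10k_example_unchain_space_needed(int total_chained_len, int first_tailroom)
{
	int space = total_chained_len - first_tailroom;

	return space > 0 ? space : 0;
}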
static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    unsigned long *drop_cnt,
				    unsigned long *unchain_cnt)
{
	struct sk_buff *first;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_desc *rxd;
	struct rx_msdu_start_common *rxd_msdu_start_common;
	struct rx_frag_info_common *rxd_frag_info;
	enum rx_msdu_decap_format decap;

	first = skb_peek(amsdu);

	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
				    (void *)first->data - hw->rx_desc_ops->rx_desc_size);

	rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
	rxd_frag_info = ath10k_htt_rx_desc_get_frag_info(hw, rxd);
	decap = MS(__le32_to_cpu(rxd_msdu_start_common->info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	/* FIXME: Current unchaining logic can only handle simple case of raw
	 * msdu chaining. If decapping is other than raw the chaining may be
	 * more complex and this isn't handled by the current code. Don't even
	 * try re-constructing such frames - it'll be pretty much garbage.
	 */
	if (decap != RX_MSDU_DECAP_RAW ||
	    skb_queue_len(amsdu) != 1 + rxd_frag_info->ring2_more_count) {
		*drop_cnt += skb_queue_len(amsdu);
		__skb_queue_purge(amsdu);
		return;
	}

	ath10k_unchain_msdu(amsdu, unchain_cnt);
}
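
/* Editor's illustrative sketch (hypothetical helper): the sanity check used by
 * ath10k_htt_rx_h_unchain() above. A chained delivery is only reassembled when
 * the number of queued buffers matches what the rx descriptor advertised, i.e.
 * the head buffer plus ring2_more_count continuations.
 */
static bool __maybe_unused
ath10k_example_chain_len_ok(u32 queue_len, u8 ring2_more_count)
{
	return queue_len == 1 + (u32)ring2_more_count;
}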
static bool ath10k_htt_rx_validate_amsdu(struct ath10k *ar,
					 struct sk_buff_head *amsdu)
{
	u8 *subframe_hdr;
	struct sk_buff *first;
	bool is_first, is_last;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_desc *rxd;
	struct rx_msdu_end_common *rxd_msdu_end_common;
	struct rx_mpdu_start *rxd_mpdu_start;
	struct ieee80211_hdr *hdr;
	size_t hdr_len, crypto_len;
	enum htt_rx_mpdu_encrypt_type enctype;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	first = skb_peek(amsdu);

	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
				    (void *)first->data - hw->rx_desc_ops->rx_desc_size);

	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
	rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
	hdr = (void *)ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);

	is_first = !!(rxd_msdu_end_common->info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd_msdu_end_common->info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));

	/* Return in case of non-aggregated msdu */
	if (is_first && is_last)
		return true;

	/* First msdu flag is not set for the first msdu of the list */
	if (!is_first)
		return false;

	enctype = MS(__le32_to_cpu(rxd_mpdu_start->info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

	subframe_hdr = (u8 *)hdr + round_up(hdr_len, bytes_aligned) +
		       crypto_len;

	/* Validate if the amsdu has a proper first subframe.
	 * There are chances a single msdu can be received as amsdu when
	 * the unauthenticated amsdu flag of a QoS header
	 * gets flipped in non-SPP AMSDU's, in such cases the first
	 * subframe has llc/snap header in place of a valid da.
	 * return false if the da matches rfc1042 pattern
	 */
	if (ether_addr_equal(subframe_hdr, rfc1042_header))
		return false;

	return true;
}
static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
					struct sk_buff_head *amsdu,
					struct ieee80211_rx_status *rx_status)
{
	if (!rx_status->freq) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n");
		return false;
	}

	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
		return false;
	}

	if (!ath10k_htt_rx_validate_amsdu(ar, amsdu)) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid amsdu received\n");
		return false;
	}

	return true;
}

static void ath10k_htt_rx_h_filter(struct ath10k *ar,
				   struct sk_buff_head *amsdu,
				   struct ieee80211_rx_status *rx_status,
				   unsigned long *drop_cnt)
{
	if (skb_queue_empty(amsdu))
		return;

	if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
		return;

	if (drop_cnt)
		*drop_cnt += skb_queue_len(amsdu);

	__skb_queue_purge(amsdu);
}
static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct ieee80211_rx_status *rx_status = &htt->rx_status;
	struct sk_buff_head amsdu;
	int ret;
	unsigned long drop_cnt = 0;
	unsigned long unchain_cnt = 0;
	unsigned long drop_cnt_filter = 0;
	unsigned long msdus_to_queue, num_msdus;
	enum ath10k_pkt_rx_err err = ATH10K_PKT_RX_ERR_MAX;
	u8 first_hdr[RX_HTT_HDR_STATUS_LEN];

	__skb_queue_head_init(&amsdu);

	spin_lock_bh(&htt->rx_ring.lock);
	if (htt->rx_confused) {
		spin_unlock_bh(&htt->rx_ring.lock);
		return -EIO;
	}
	ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
	spin_unlock_bh(&htt->rx_ring.lock);

	if (ret < 0) {
		ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
		__skb_queue_purge(&amsdu);
		/* FIXME: It's probably a good idea to reboot the
		 * device instead of leaving it inoperable.
		 */
		htt->rx_confused = true;
		return ret;
	}

	num_msdus = skb_queue_len(&amsdu);

	ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);

	/* only for ret = 1 indicates chained msdus */
	if (ret > 0)
		ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt);

	ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter);
	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err, 0,
			     false);
	msdus_to_queue = skb_queue_len(&amsdu);
	ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status);

	ath10k_sta_update_rx_tid_stats(ar, first_hdr, num_msdus, err,
				       unchain_cnt, drop_cnt, drop_cnt_filter,
				       msdus_to_queue);

	return 0;
}
static void ath10k_htt_rx_mpdu_desc_pn_hl(struct htt_hl_rx_desc *rx_desc,
					  union htt_rx_pn_t *pn,
					  int pn_len_bits)
{
	switch (pn_len_bits) {
	case 48:
		pn->pn48 = __le32_to_cpu(rx_desc->pn_31_0) +
			   ((u64)(__le32_to_cpu(rx_desc->u0.pn_63_32) & 0xFFFF) << 32);
		break;
	case 24:
		pn->pn24 = __le32_to_cpu(rx_desc->pn_31_0);
		break;
	}
}

static bool ath10k_htt_rx_pn_cmp48(union htt_rx_pn_t *new_pn,
				   union htt_rx_pn_t *old_pn)
{
	return ((new_pn->pn48 & 0xffffffffffffULL) <=
		(old_pn->pn48 & 0xffffffffffffULL));
}
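
/* Editor's illustrative sketch (hypothetical helper): the 48-bit PN replay
 * rule implemented by ath10k_htt_rx_pn_cmp48() above. A frame counts as a
 * replay when its PN is not strictly greater than the last accepted PN, e.g.
 * new=0x1000 vs old=0x1000 or new=0x0fff vs old=0x1000 are replays, while
 * new=0x1001 vs old=0x1000 is accepted.
 */
static bool __maybe_unused
ath10k_example_pn48_is_replay(u64 new_pn, u64 old_pn)
{
	return (new_pn & 0xffffffffffffULL) <= (old_pn & 0xffffffffffffULL);
}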
static bool ath10k_htt_rx_pn_check_replay_hl(struct ath10k *ar,
					     struct ath10k_peer *peer,
					     struct htt_rx_indication_hl *rx)
{
	bool last_pn_valid, pn_invalid = false;
	enum htt_txrx_sec_cast_type sec_index;
	enum htt_security_types sec_type;
	union htt_rx_pn_t new_pn = {0};
	struct htt_hl_rx_desc *rx_desc;
	union htt_rx_pn_t *last_pn;
	u32 rx_desc_info, tid;
	int num_mpdu_ranges;

	lockdep_assert_held(&ar->data_lock);

	if (!peer)
		return false;

	if (!(rx->fw_desc.flags & FW_RX_DESC_FLAGS_FIRST_MSDU))
		return false;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);

	rx_desc = (struct htt_hl_rx_desc *)&rx->mpdu_ranges[num_mpdu_ranges];
	rx_desc_info = __le32_to_cpu(rx_desc->info);

	if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED))
		return false;

	tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
	last_pn_valid = peer->tids_last_pn_valid[tid];
	last_pn = &peer->tids_last_pn[tid];

	if (MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST))
		sec_index = HTT_TXRX_SEC_MCAST;
	else
		sec_index = HTT_TXRX_SEC_UCAST;

	sec_type = peer->rx_pn[sec_index].sec_type;
	ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);

	if (sec_type != HTT_SECURITY_AES_CCMP &&
	    sec_type != HTT_SECURITY_TKIP &&
	    sec_type != HTT_SECURITY_TKIP_NOMIC)
		return false;

	if (last_pn_valid)
		pn_invalid = ath10k_htt_rx_pn_cmp48(&new_pn, last_pn);
	else
		peer->tids_last_pn_valid[tid] = true;

	if (!pn_invalid)
		last_pn->pn48 = new_pn.pn48;

	return pn_invalid;
}
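
/* Editor's illustrative sketch (hypothetical helper): the ciphers for which
 * the host-side replay check above is meaningful. Frames protected with any
 * other cipher are passed through without a PN comparison.
 */
static bool __maybe_unused
ath10k_example_sec_type_has_pn_check(enum htt_security_types sec_type)
{
	return sec_type == HTT_SECURITY_AES_CCMP ||
	       sec_type == HTT_SECURITY_TKIP ||
	       sec_type == HTT_SECURITY_TKIP_NOMIC;
}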
2452 static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt
*htt
,
2453 struct htt_rx_indication_hl
*rx
,
2454 struct sk_buff
*skb
,
2455 enum htt_rx_pn_check_type check_pn_type
,
2456 enum htt_rx_tkip_demic_type tkip_mic_type
)
2458 struct ath10k
*ar
= htt
->ar
;
2459 struct ath10k_peer
*peer
;
2460 struct htt_rx_indication_mpdu_range
*mpdu_ranges
;
2461 struct fw_rx_desc_hl
*fw_desc
;
2462 enum htt_txrx_sec_cast_type sec_index
;
2463 enum htt_security_types sec_type
;
2464 union htt_rx_pn_t new_pn
= {0};
2465 struct htt_hl_rx_desc
*rx_desc
;
2466 struct ieee80211_hdr
*hdr
;
2467 struct ieee80211_rx_status
*rx_status
;
2470 int num_mpdu_ranges
;
2472 struct ieee80211_channel
*ch
;
2473 bool pn_invalid
, qos
, first_msdu
;
2474 u32 tid
, rx_desc_info
;
2476 peer_id
= __le16_to_cpu(rx
->hdr
.peer_id
);
2477 tid
= MS(rx
->hdr
.info0
, HTT_RX_INDICATION_INFO0_EXT_TID
);
2479 spin_lock_bh(&ar
->data_lock
);
2480 peer
= ath10k_peer_find_by_id(ar
, peer_id
);
2481 spin_unlock_bh(&ar
->data_lock
);
2482 if (!peer
&& peer_id
!= HTT_INVALID_PEERID
)
2483 ath10k_warn(ar
, "Got RX ind from invalid peer: %u\n", peer_id
);
2488 num_mpdu_ranges
= MS(__le32_to_cpu(rx
->hdr
.info1
),
2489 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES
);
2490 mpdu_ranges
= htt_rx_ind_get_mpdu_ranges_hl(rx
);
2491 fw_desc
= &rx
->fw_desc
;
2492 rx_desc_len
= fw_desc
->len
;
2494 if (fw_desc
->u
.bits
.discard
) {
2495 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "htt discard mpdu\n");
2499 /* I have not yet seen any case where num_mpdu_ranges > 1.
2500 * qcacld does not seem handle that case either, so we introduce the
2501 * same limitation here as well.
2503 if (num_mpdu_ranges
> 1)
2505 "Unsupported number of MPDU ranges: %d, ignoring all but the first\n",
2508 if (mpdu_ranges
->mpdu_range_status
!=
2509 HTT_RX_IND_MPDU_STATUS_OK
&&
2510 mpdu_ranges
->mpdu_range_status
!=
2511 HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR
) {
2512 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "htt mpdu_range_status %d\n",
2513 mpdu_ranges
->mpdu_range_status
);
2517 rx_desc
= (struct htt_hl_rx_desc
*)&rx
->mpdu_ranges
[num_mpdu_ranges
];
2518 rx_desc_info
= __le32_to_cpu(rx_desc
->info
);
2520 if (MS(rx_desc_info
, HTT_RX_DESC_HL_INFO_MCAST_BCAST
))
2521 sec_index
= HTT_TXRX_SEC_MCAST
;
2523 sec_index
= HTT_TXRX_SEC_UCAST
;
2525 sec_type
= peer
->rx_pn
[sec_index
].sec_type
;
2526 first_msdu
= rx
->fw_desc
.flags
& FW_RX_DESC_FLAGS_FIRST_MSDU
;
2528 ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc
, &new_pn
, peer
->rx_pn
[sec_index
].pn_len
);
2530 if (check_pn_type
== HTT_RX_PN_CHECK
&& tid
>= IEEE80211_NUM_TIDS
) {
2531 spin_lock_bh(&ar
->data_lock
);
2532 pn_invalid
= ath10k_htt_rx_pn_check_replay_hl(ar
, peer
, rx
);
2533 spin_unlock_bh(&ar
->data_lock
);
2539 /* Strip off all headers before the MAC header before delivery to
2542 tot_hdr_len
= sizeof(struct htt_resp_hdr
) + sizeof(rx
->hdr
) +
2543 sizeof(rx
->ppdu
) + sizeof(rx
->prefix
) +
2544 sizeof(rx
->fw_desc
) +
2545 sizeof(*mpdu_ranges
) * num_mpdu_ranges
+ rx_desc_len
;
2547 skb_pull(skb
, tot_hdr_len
);
2549 hdr
= (struct ieee80211_hdr
*)skb
->data
;
2550 qos
= ieee80211_is_data_qos(hdr
->frame_control
);
2552 rx_status
= IEEE80211_SKB_RXCB(skb
);
2553 memset(rx_status
, 0, sizeof(*rx_status
));
2555 if (rx
->ppdu
.combined_rssi
== 0) {
2556 /* SDIO firmware does not provide signal */
2557 rx_status
->signal
= 0;
2558 rx_status
->flag
|= RX_FLAG_NO_SIGNAL_VAL
;
2560 rx_status
->signal
= ATH10K_DEFAULT_NOISE_FLOOR
+
2561 rx
->ppdu
.combined_rssi
;
2562 rx_status
->flag
&= ~RX_FLAG_NO_SIGNAL_VAL
;
2565 spin_lock_bh(&ar
->data_lock
);
2566 ch
= ar
->scan_channel
;
2568 ch
= ar
->rx_channel
;
2570 ch
= ath10k_htt_rx_h_any_channel(ar
);
2572 ch
= ar
->tgt_oper_chan
;
2573 spin_unlock_bh(&ar
->data_lock
);
2576 rx_status
->band
= ch
->band
;
2577 rx_status
->freq
= ch
->center_freq
;
2579 if (rx
->fw_desc
.flags
& FW_RX_DESC_FLAGS_LAST_MSDU
)
2580 rx_status
->flag
&= ~RX_FLAG_AMSDU_MORE
;
2582 rx_status
->flag
|= RX_FLAG_AMSDU_MORE
;
2584 /* Not entirely sure about this, but all frames from the chipset has
2585 * the protected flag set even though they have already been decrypted.
2586 * Unmasking this flag is necessary in order for mac80211 not to drop
2588 * TODO: Verify this is always the case or find out a way to check
2589 * if there has been hw decryption.
2591 if (ieee80211_has_protected(hdr
->frame_control
)) {
2592 hdr
->frame_control
&= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED
);
2593 rx_status
->flag
|= RX_FLAG_DECRYPTED
|
2594 RX_FLAG_IV_STRIPPED
|
2595 RX_FLAG_MMIC_STRIPPED
;
2597 if (tid
< IEEE80211_NUM_TIDS
&&
2599 check_pn_type
== HTT_RX_PN_CHECK
&&
2600 (sec_type
== HTT_SECURITY_AES_CCMP
||
2601 sec_type
== HTT_SECURITY_TKIP
||
2602 sec_type
== HTT_SECURITY_TKIP_NOMIC
)) {
2605 __le64 pn48
= cpu_to_le64(new_pn
.pn48
);
2607 hdr
= (struct ieee80211_hdr
*)skb
->data
;
2608 offset
= ieee80211_hdrlen(hdr
->frame_control
);
2609 hdr
->frame_control
|= __cpu_to_le16(IEEE80211_FCTL_PROTECTED
);
2610 rx_status
->flag
&= ~RX_FLAG_IV_STRIPPED
;
2612 memmove(skb
->data
- IEEE80211_CCMP_HDR_LEN
,
2614 skb_push(skb
, IEEE80211_CCMP_HDR_LEN
);
2615 ivp
= skb
->data
+ offset
;
2616 memset(skb
->data
+ offset
, 0, IEEE80211_CCMP_HDR_LEN
);
2618 ivp
[IEEE80211_WEP_IV_LEN
- 1] |= ATH10K_IEEE80211_EXTIV
;
2620 for (i
= 0; i
< ARRAY_SIZE(peer
->keys
); i
++) {
2621 if (peer
->keys
[i
] &&
2622 peer
->keys
[i
]->flags
& IEEE80211_KEY_FLAG_PAIRWISE
)
2623 keyidx
= peer
->keys
[i
]->keyidx
;
2627 ivp
[IEEE80211_WEP_IV_LEN
- 1] |= keyidx
<< 6;
2629 if (sec_type
== HTT_SECURITY_AES_CCMP
) {
2630 rx_status
->flag
|= RX_FLAG_MIC_STRIPPED
;
2632 memcpy(skb
->data
+ offset
, &pn48
, 2);
2633 /* pn 1, pn 3 , pn 34 , pn 5 */
2634 memcpy(skb
->data
+ offset
+ 4, ((u8
*)&pn48
) + 2, 4);
2636 rx_status
->flag
|= RX_FLAG_ICV_STRIPPED
;
2638 memcpy(skb
->data
+ offset
+ 2, &pn48
, 1);
2640 memcpy(skb
->data
+ offset
, ((u8
*)&pn48
) + 1, 1);
2641 /* TSC 2 , TSC 3 , TSC 4 , TSC 5*/
2642 memcpy(skb
->data
+ offset
+ 4, ((u8
*)&pn48
) + 2, 4);
2647 if (tkip_mic_type
== HTT_RX_TKIP_MIC
)
2648 rx_status
->flag
&= ~RX_FLAG_IV_STRIPPED
&
2649 ~RX_FLAG_MMIC_STRIPPED
;
2651 if (mpdu_ranges
->mpdu_range_status
== HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR
)
2652 rx_status
->flag
|= RX_FLAG_MMIC_ERROR
;
2654 if (!qos
&& tid
< IEEE80211_NUM_TIDS
) {
2656 __le16 qos_ctrl
= 0;
2658 hdr
= (struct ieee80211_hdr
*)skb
->data
;
2659 offset
= ieee80211_hdrlen(hdr
->frame_control
);
2661 hdr
->frame_control
|= cpu_to_le16(IEEE80211_STYPE_QOS_DATA
);
2662 memmove(skb
->data
- IEEE80211_QOS_CTL_LEN
, skb
->data
, offset
);
2663 skb_push(skb
, IEEE80211_QOS_CTL_LEN
);
2664 qos_ctrl
= cpu_to_le16(tid
);
2665 memcpy(skb
->data
+ offset
, &qos_ctrl
, IEEE80211_QOS_CTL_LEN
);
2669 ieee80211_rx_napi(ar
->hw
, NULL
, skb
, &ar
->napi
);
2671 ieee80211_rx_ni(ar
->hw
, skb
);
2673 /* We have delivered the skb to the upper layers (mac80211) so we
2678 /* Tell the caller that it must free the skb since we have not
static int ath10k_htt_rx_frag_tkip_decap_nomic(struct sk_buff *skb,
					       u16 head_len,
					       u16 hdr_len)
{
	u8 *ivp, *orig_hdr;

	orig_hdr = skb->data;
	ivp = orig_hdr + hdr_len + head_len;

	/* the ExtIV bit is always set to 1 for TKIP */
	if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
		return -EINVAL;

	memmove(orig_hdr + IEEE80211_TKIP_IV_LEN, orig_hdr, head_len + hdr_len);
	skb_pull(skb, IEEE80211_TKIP_IV_LEN);
	skb_trim(skb, skb->len - ATH10K_IEEE80211_TKIP_MICLEN);

	return 0;
}

static int ath10k_htt_rx_frag_tkip_decap_withmic(struct sk_buff *skb,
						 u16 head_len,
						 u16 hdr_len)
{
	u8 *ivp, *orig_hdr;

	orig_hdr = skb->data;
	ivp = orig_hdr + hdr_len + head_len;

	/* the ExtIV bit is always set to 1 for TKIP */
	if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
		return -EINVAL;

	memmove(orig_hdr + IEEE80211_TKIP_IV_LEN, orig_hdr, head_len + hdr_len);
	skb_pull(skb, IEEE80211_TKIP_IV_LEN);
	skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN);

	return 0;
}

static int ath10k_htt_rx_frag_ccmp_decap(struct sk_buff *skb,
					 u16 head_len,
					 u16 hdr_len)
{
	u8 *ivp, *orig_hdr;

	orig_hdr = skb->data;
	ivp = orig_hdr + hdr_len + head_len;

	/* the ExtIV bit is always set to 1 for CCMP */
	if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
		return -EINVAL;

	skb_trim(skb, skb->len - IEEE80211_CCMP_MIC_LEN);
	memmove(orig_hdr + IEEE80211_CCMP_HDR_LEN, orig_hdr, head_len + hdr_len);
	skb_pull(skb, IEEE80211_CCMP_HDR_LEN);

	return 0;
}

static int ath10k_htt_rx_frag_wep_decap(struct sk_buff *skb,
					u16 head_len,
					u16 hdr_len)
{
	u8 *orig_hdr;

	orig_hdr = skb->data;

	memmove(orig_hdr + IEEE80211_WEP_IV_LEN,
		orig_hdr, head_len + hdr_len);
	skb_pull(skb, IEEE80211_WEP_IV_LEN);
	skb_trim(skb, skb->len - IEEE80211_WEP_ICV_LEN);

	return 0;
}
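
/* Editor's illustrative sketch (hypothetical helper): how many bytes the frag
 * decap helpers above remove per cipher. Head bytes (IV or CCMP header) are
 * stripped from the front via memmove()+skb_pull(), tail bytes (MIC or ICV)
 * are trimmed from the end via skb_trim().
 */
static void __maybe_unused
ath10k_example_frag_decap_cost(enum htt_security_types sec_type,
			       int *head_bytes, int *tail_bytes)
{
	switch (sec_type) {
	case HTT_SECURITY_TKIP:
		*head_bytes = IEEE80211_TKIP_IV_LEN;
		*tail_bytes = IEEE80211_TKIP_ICV_LEN;
		break;
	case HTT_SECURITY_TKIP_NOMIC:
		*head_bytes = IEEE80211_TKIP_IV_LEN;
		*tail_bytes = ATH10K_IEEE80211_TKIP_MICLEN;
		break;
	case HTT_SECURITY_AES_CCMP:
		*head_bytes = IEEE80211_CCMP_HDR_LEN;
		*tail_bytes = IEEE80211_CCMP_MIC_LEN;
		break;
	default:
		/* WEP40/WEP104/WEP128 */
		*head_bytes = IEEE80211_WEP_IV_LEN;
		*tail_bytes = IEEE80211_WEP_ICV_LEN;
		break;
	}
}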
2756 static bool ath10k_htt_rx_proc_rx_frag_ind_hl(struct ath10k_htt
*htt
,
2757 struct htt_rx_fragment_indication
*rx
,
2758 struct sk_buff
*skb
)
2760 struct ath10k
*ar
= htt
->ar
;
2761 enum htt_rx_tkip_demic_type tkip_mic
= HTT_RX_NON_TKIP_MIC
;
2762 enum htt_txrx_sec_cast_type sec_index
;
2763 struct htt_rx_indication_hl
*rx_hl
;
2764 enum htt_security_types sec_type
;
2765 u32 tid
, frag
, seq
, rx_desc_info
;
2766 union htt_rx_pn_t new_pn
= {0};
2767 struct htt_hl_rx_desc
*rx_desc
;
2768 u16 peer_id
, sc
, hdr_space
;
2769 union htt_rx_pn_t
*last_pn
;
2770 struct ieee80211_hdr
*hdr
;
2771 int ret
, num_mpdu_ranges
;
2772 struct ath10k_peer
*peer
;
2773 struct htt_resp
*resp
;
2776 resp
= (struct htt_resp
*)(skb
->data
+ HTT_RX_FRAG_IND_INFO0_HEADER_LEN
);
2777 skb_pull(skb
, HTT_RX_FRAG_IND_INFO0_HEADER_LEN
);
2778 skb_trim(skb
, skb
->len
- FCS_LEN
);
2780 peer_id
= __le16_to_cpu(rx
->peer_id
);
2781 rx_hl
= (struct htt_rx_indication_hl
*)(&resp
->rx_ind_hl
);
2783 spin_lock_bh(&ar
->data_lock
);
2784 peer
= ath10k_peer_find_by_id(ar
, peer_id
);
2786 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "invalid peer: %u\n", peer_id
);
2790 num_mpdu_ranges
= MS(__le32_to_cpu(rx_hl
->hdr
.info1
),
2791 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES
);
2793 tot_hdr_len
= sizeof(struct htt_resp_hdr
) +
2794 sizeof(rx_hl
->hdr
) +
2795 sizeof(rx_hl
->ppdu
) +
2796 sizeof(rx_hl
->prefix
) +
2797 sizeof(rx_hl
->fw_desc
) +
2798 sizeof(struct htt_rx_indication_mpdu_range
) * num_mpdu_ranges
;
2800 tid
= MS(rx_hl
->hdr
.info0
, HTT_RX_INDICATION_INFO0_EXT_TID
);
2801 rx_desc
= (struct htt_hl_rx_desc
*)(skb
->data
+ tot_hdr_len
);
2802 rx_desc_info
= __le32_to_cpu(rx_desc
->info
);
2804 hdr
= (struct ieee80211_hdr
*)((u8
*)rx_desc
+ rx_hl
->fw_desc
.len
);
2806 if (is_multicast_ether_addr(hdr
->addr1
)) {
2807 /* Discard the fragment with multicast DA */
2811 if (!MS(rx_desc_info
, HTT_RX_DESC_HL_INFO_ENCRYPTED
)) {
2812 spin_unlock_bh(&ar
->data_lock
);
2813 return ath10k_htt_rx_proc_rx_ind_hl(htt
, &resp
->rx_ind_hl
, skb
,
2814 HTT_RX_NON_PN_CHECK
,
2815 HTT_RX_NON_TKIP_MIC
);
2818 if (ieee80211_has_retry(hdr
->frame_control
))
2821 hdr_space
= ieee80211_hdrlen(hdr
->frame_control
);
2822 sc
= __le16_to_cpu(hdr
->seq_ctrl
);
2823 seq
= IEEE80211_SEQ_TO_SN(sc
);
2824 frag
= sc
& IEEE80211_SCTL_FRAG
;
2826 sec_index
= MS(rx_desc_info
, HTT_RX_DESC_HL_INFO_MCAST_BCAST
) ?
2827 HTT_TXRX_SEC_MCAST
: HTT_TXRX_SEC_UCAST
;
2828 sec_type
= peer
->rx_pn
[sec_index
].sec_type
;
2829 ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc
, &new_pn
, peer
->rx_pn
[sec_index
].pn_len
);
2832 case HTT_SECURITY_TKIP
:
2833 tkip_mic
= HTT_RX_TKIP_MIC
;
2834 ret
= ath10k_htt_rx_frag_tkip_decap_withmic(skb
,
2841 case HTT_SECURITY_TKIP_NOMIC
:
2842 ret
= ath10k_htt_rx_frag_tkip_decap_nomic(skb
,
2849 case HTT_SECURITY_AES_CCMP
:
2850 ret
= ath10k_htt_rx_frag_ccmp_decap(skb
,
2851 tot_hdr_len
+ rx_hl
->fw_desc
.len
,
2856 case HTT_SECURITY_WEP128
:
2857 case HTT_SECURITY_WEP104
:
2858 case HTT_SECURITY_WEP40
:
2859 ret
= ath10k_htt_rx_frag_wep_decap(skb
,
2860 tot_hdr_len
+ rx_hl
->fw_desc
.len
,
2869 resp
= (struct htt_resp
*)(skb
->data
);
2871 if (sec_type
!= HTT_SECURITY_AES_CCMP
&&
2872 sec_type
!= HTT_SECURITY_TKIP
&&
2873 sec_type
!= HTT_SECURITY_TKIP_NOMIC
) {
2874 spin_unlock_bh(&ar
->data_lock
);
2875 return ath10k_htt_rx_proc_rx_ind_hl(htt
, &resp
->rx_ind_hl
, skb
,
2876 HTT_RX_NON_PN_CHECK
,
2877 HTT_RX_NON_TKIP_MIC
);
2880 last_pn
= &peer
->frag_tids_last_pn
[tid
];
2883 if (ath10k_htt_rx_pn_check_replay_hl(ar
, peer
, &resp
->rx_ind_hl
))
2886 last_pn
->pn48
= new_pn
.pn48
;
2887 peer
->frag_tids_seq
[tid
] = seq
;
2888 } else if (sec_type
== HTT_SECURITY_AES_CCMP
) {
2889 if (seq
!= peer
->frag_tids_seq
[tid
])
2892 if (new_pn
.pn48
!= last_pn
->pn48
+ 1)
2895 last_pn
->pn48
= new_pn
.pn48
;
2896 last_pn
= &peer
->tids_last_pn
[tid
];
2897 last_pn
->pn48
= new_pn
.pn48
;
2900 spin_unlock_bh(&ar
->data_lock
);
2902 return ath10k_htt_rx_proc_rx_ind_hl(htt
, &resp
->rx_ind_hl
, skb
,
2903 HTT_RX_NON_PN_CHECK
, tkip_mic
);
2906 spin_unlock_bh(&ar
->data_lock
);
2908 /* Tell the caller that it must free the skb since we have not
static void ath10k_htt_rx_proc_rx_ind_ll(struct ath10k_htt *htt,
					 struct htt_rx_indication *rx)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	int num_mpdu_ranges;
	int i, mpdu_count = 0;
	u16 peer_id;
	u8 tid;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	peer_id = __le16_to_cpu(rx->hdr.peer_id);
	tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);

	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
			rx, struct_size(rx, mpdu_ranges, num_mpdu_ranges));

	for (i = 0; i < num_mpdu_ranges; i++)
		mpdu_count += mpdu_ranges[i].mpdu_count;

	atomic_add(mpdu_count, &htt->num_mpdus_ready);

	ath10k_sta_update_rx_tid_stats_ampdu(ar, peer_id, tid, mpdu_ranges,
					     num_mpdu_ranges);
}
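
/* Editor's illustrative sketch (hypothetical mask and helper): the MS()-style
 * bitfield extraction used throughout this file, expressed with the
 * linux/bitfield.h FIELD_GET() helper. GENMASK(23, 16) here is a made-up
 * example mask, not the real HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES layout.
 */
static u32 __maybe_unused
ath10k_example_extract_field(u32 info)
{
	return FIELD_GET(GENMASK(23, 16), info);
}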
2943 static void ath10k_htt_rx_tx_compl_ind(struct ath10k
*ar
,
2944 struct sk_buff
*skb
)
2946 struct ath10k_htt
*htt
= &ar
->htt
;
2947 struct htt_resp
*resp
= (struct htt_resp
*)skb
->data
;
2948 struct htt_tx_done tx_done
= {};
2949 int status
= MS(resp
->data_tx_completion
.flags
, HTT_DATA_TX_STATUS
);
2950 __le16 msdu_id
, *msdus
;
2951 bool rssi_enabled
= false;
2952 u8 msdu_count
= 0, num_airtime_records
, tid
;
2954 struct htt_data_tx_compl_ppdu_dur
*ppdu_info
;
2955 struct ath10k_peer
*peer
;
2956 u16 ppdu_info_offset
= 0, peer_id
;
2960 case HTT_DATA_TX_STATUS_NO_ACK
:
2961 tx_done
.status
= HTT_TX_COMPL_STATE_NOACK
;
2963 case HTT_DATA_TX_STATUS_OK
:
2964 tx_done
.status
= HTT_TX_COMPL_STATE_ACK
;
2966 case HTT_DATA_TX_STATUS_DISCARD
:
2967 case HTT_DATA_TX_STATUS_POSTPONE
:
2968 tx_done
.status
= HTT_TX_COMPL_STATE_DISCARD
;
2971 ath10k_warn(ar
, "unhandled tx completion status %d\n", status
);
2972 tx_done
.status
= HTT_TX_COMPL_STATE_DISCARD
;
2976 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "htt tx completion num_msdus %d\n",
2977 resp
->data_tx_completion
.num_msdus
);
2979 msdu_count
= resp
->data_tx_completion
.num_msdus
;
2980 msdus
= resp
->data_tx_completion
.msdus
;
2981 rssi_enabled
= ath10k_is_rssi_enable(&ar
->hw_params
, resp
);
2984 htt_pad
= ath10k_tx_data_rssi_get_pad_bytes(&ar
->hw_params
,
2987 for (i
= 0; i
< msdu_count
; i
++) {
2989 tx_done
.msdu_id
= __le16_to_cpu(msdu_id
);
2992 /* Total no of MSDUs should be even,
2993 * if odd MSDUs are sent firmware fills
2994 * last msdu id with 0xffff
2996 if (msdu_count
& 0x01) {
2997 msdu_id
= msdus
[msdu_count
+ i
+ 1 + htt_pad
];
2998 tx_done
.ack_rssi
= __le16_to_cpu(msdu_id
);
3000 msdu_id
= msdus
[msdu_count
+ i
+ htt_pad
];
3001 tx_done
.ack_rssi
= __le16_to_cpu(msdu_id
);
3005 /* kfifo_put: In practice firmware shouldn't fire off per-CE
3006 * interrupt and main interrupt (MSI/-X range case) for the same
3007 * HTC service so it should be safe to use kfifo_put w/o lock.
3009 * From kfifo_put() documentation:
3010 * Note that with only one concurrent reader and one concurrent
3011 * writer, you don't need extra locking to use these macro.
3013 if (ar
->bus_param
.dev_type
== ATH10K_DEV_TYPE_HL
) {
3014 ath10k_txrx_tx_unref(htt
, &tx_done
);
3015 } else if (!kfifo_put(&htt
->txdone_fifo
, tx_done
)) {
3016 ath10k_warn(ar
, "txdone fifo overrun, msdu_id %d status %d\n",
3017 tx_done
.msdu_id
, tx_done
.status
);
3018 ath10k_txrx_tx_unref(htt
, &tx_done
);
3022 if (!(resp
->data_tx_completion
.flags2
& HTT_TX_CMPL_FLAG_PPDU_DURATION_PRESENT
))
3025 ppdu_info_offset
= (msdu_count
& 0x01) ? msdu_count
+ 1 : msdu_count
;
3028 ppdu_info_offset
+= ppdu_info_offset
;
3030 if (resp
->data_tx_completion
.flags2
&
3031 (HTT_TX_CMPL_FLAG_PPID_PRESENT
| HTT_TX_CMPL_FLAG_PA_PRESENT
))
3032 ppdu_info_offset
+= 2;
3034 ppdu_info
= (struct htt_data_tx_compl_ppdu_dur
*)&msdus
[ppdu_info_offset
];
3035 num_airtime_records
= FIELD_GET(HTT_TX_COMPL_PPDU_DUR_INFO0_NUM_ENTRIES_MASK
,
3036 __le32_to_cpu(ppdu_info
->info0
));
3038 for (i
= 0; i
< num_airtime_records
; i
++) {
3039 struct htt_data_tx_ppdu_dur
*ppdu_dur
;
3042 ppdu_dur
= &ppdu_info
->ppdu_dur
[i
];
3043 info0
= __le32_to_cpu(ppdu_dur
->info0
);
3045 peer_id
= FIELD_GET(HTT_TX_PPDU_DUR_INFO0_PEER_ID_MASK
,
3048 spin_lock_bh(&ar
->data_lock
);
3050 peer
= ath10k_peer_find_by_id(ar
, peer_id
);
3051 if (!peer
|| !peer
->sta
) {
3052 spin_unlock_bh(&ar
->data_lock
);
3057 tid
= FIELD_GET(HTT_TX_PPDU_DUR_INFO0_TID_MASK
, info0
) &
3058 IEEE80211_QOS_CTL_TID_MASK
;
3059 tx_duration
= __le32_to_cpu(ppdu_dur
->tx_duration
);
3061 ieee80211_sta_register_airtime(peer
->sta
, tid
, tx_duration
, 0);
3063 spin_unlock_bh(&ar
->data_lock
);
static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_addba *ev = &resp->rx_addba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx addba tid %u peer_id %u size %u\n",
		   tid, peer_id, ev->window_size);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received addba event for invalid peer_id: %u\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx start rx ba session sta %pM tid %u size %u\n",
		   peer->addr, tid, ev->window_size);

	ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}

static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_delba *ev = &resp->rx_delba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx delba tid %u peer_id %u\n",
		   tid, peer_id);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received delba event for invalid peer_id: %u\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx stop rx ba session sta %pM tid %u\n",
		   peer->addr, tid);

	ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}
static int ath10k_htt_rx_extract_amsdu(struct ath10k_hw_params *hw,
				       struct sk_buff_head *list,
				       struct sk_buff_head *amsdu)
{
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;
	struct rx_msdu_end_common *rxd_msdu_end_common;

	if (skb_queue_empty(list))
		return -ENOBUFS;

	if (WARN_ON(!skb_queue_empty(amsdu)))
		return -EINVAL;

	while ((msdu = __skb_dequeue(list))) {
		__skb_queue_tail(amsdu, msdu);

		rxd = HTT_RX_BUF_TO_RX_DESC(hw,
					    (void *)msdu->data -
					    hw->rx_desc_ops->rx_desc_size);

		rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
		if (rxd_msdu_end_common->info0 &
		    __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
			break;
	}

	msdu = skb_peek_tail(amsdu);
	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);

	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
	if (!(rxd_msdu_end_common->info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
		skb_queue_splice_init(amsdu, list);
		return -EAGAIN;
	}

	return 0;
}
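
/* Editor's illustrative sketch (hypothetical helper): the termination test
 * used by ath10k_htt_rx_extract_amsdu() above. An A-MSDU is complete once an
 * MSDU carries the LAST_MSDU bit in its rx descriptor's msdu_end info0 word.
 */
static bool __maybe_unused
ath10k_example_msdu_is_last(__le32 msdu_end_info0)
{
	return !!(msdu_end_info0 & __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
}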
static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
					    struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (!ieee80211_has_protected(hdr->frame_control))
		return;

	/* Offloaded frames are already decrypted but firmware insists they are
	 * protected in the 802.11 header. Strip the flag. Otherwise mac80211
	 * will drop the frame.
	 */

	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	status->flag |= RX_FLAG_DECRYPTED |
			RX_FLAG_IV_STRIPPED |
			RX_FLAG_MMIC_STRIPPED;
}
static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
				       struct sk_buff_head *list)
{
	struct ath10k_htt *htt = &ar->htt;
	struct ieee80211_rx_status *status = &htt->rx_status;
	struct htt_rx_offload_msdu *rx;
	struct sk_buff *msdu;
	size_t offset;

	while ((msdu = __skb_dequeue(list))) {
		/* Offloaded frames don't have Rx descriptor. Instead they have
		 * a short meta information header.
		 */

		rx = (void *)msdu->data;

		skb_put(msdu, sizeof(*rx));
		skb_pull(msdu, sizeof(*rx));

		if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
			ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
			dev_kfree_skb_any(msdu);
			continue;
		}

		skb_put(msdu, __le16_to_cpu(rx->msdu_len));

		/* Offloaded rx header length isn't multiple of 2 nor 4 so the
		 * actual payload is unaligned. Align the frame. Otherwise
		 * mac80211 complains. This shouldn't reduce performance much
		 * because these offloaded frames are rare.
		 */
		offset = 4 - ((unsigned long)msdu->data & 3);
		skb_put(msdu, offset);
		memmove(msdu->data + offset, msdu->data, msdu->len);
		skb_pull(msdu, offset);

		/* FIXME: The frame is NWifi. Re-construct QoS Control
		 * if possible later.
		 */

		memset(status, 0, sizeof(*status));
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		ath10k_htt_rx_h_rx_offload_prot(status, msdu);
		ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
		ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
	}
}
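
/* Editor's illustrative sketch (hypothetical helper): the alignment fix-up
 * applied to offloaded MSDUs above. Note that an already word-aligned payload
 * yields 4, so such frames are still shifted by one full word.
 */
static unsigned long __maybe_unused
ath10k_example_offload_align_offset(unsigned long data_addr)
{
	return 4 - (data_addr & 3);
}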
3258 static int ath10k_htt_rx_in_ord_ind(struct ath10k
*ar
, struct sk_buff
*skb
)
3260 struct ath10k_htt
*htt
= &ar
->htt
;
3261 struct htt_resp
*resp
= (void *)skb
->data
;
3262 struct ieee80211_rx_status
*status
= &htt
->rx_status
;
3263 struct sk_buff_head list
;
3264 struct sk_buff_head amsdu
;
3273 lockdep_assert_held(&htt
->rx_ring
.lock
);
3275 if (htt
->rx_confused
)
3278 skb_pull(skb
, sizeof(resp
->hdr
));
3279 skb_pull(skb
, sizeof(resp
->rx_in_ord_ind
));
3281 peer_id
= __le16_to_cpu(resp
->rx_in_ord_ind
.peer_id
);
3282 msdu_count
= __le16_to_cpu(resp
->rx_in_ord_ind
.msdu_count
);
3283 vdev_id
= resp
->rx_in_ord_ind
.vdev_id
;
3284 tid
= SM(resp
->rx_in_ord_ind
.info
, HTT_RX_IN_ORD_IND_INFO_TID
);
3285 offload
= !!(resp
->rx_in_ord_ind
.info
&
3286 HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK
);
3287 frag
= !!(resp
->rx_in_ord_ind
.info
& HTT_RX_IN_ORD_IND_INFO_FRAG_MASK
);
3289 ath10k_dbg(ar
, ATH10K_DBG_HTT
,
3290 "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
3291 vdev_id
, peer_id
, tid
, offload
, frag
, msdu_count
);
3293 if (skb
->len
< msdu_count
* sizeof(*resp
->rx_in_ord_ind
.msdu_descs32
)) {
3294 ath10k_warn(ar
, "dropping invalid in order rx indication\n");
3298 /* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
3299 * extracted and processed.
3301 __skb_queue_head_init(&list
);
3302 if (ar
->hw_params
.target_64bit
)
3303 ret
= ath10k_htt_rx_pop_paddr64_list(htt
, &resp
->rx_in_ord_ind
,
3306 ret
= ath10k_htt_rx_pop_paddr32_list(htt
, &resp
->rx_in_ord_ind
,
3310 ath10k_warn(ar
, "failed to pop paddr list: %d\n", ret
);
3311 htt
->rx_confused
= true;
3315 /* Offloaded frames are very different and need to be handled
3319 ath10k_htt_rx_h_rx_offload(ar
, &list
);
3321 while (!skb_queue_empty(&list
)) {
3322 __skb_queue_head_init(&amsdu
);
3323 ret
= ath10k_htt_rx_extract_amsdu(&ar
->hw_params
, &list
, &amsdu
);
3326 /* Note: The in-order indication may report interleaved
3327 * frames from different PPDUs meaning reported rx rate
3328 * to mac80211 isn't accurate/reliable. It's still
3329 * better to report something than nothing though. This
3330 * should still give an idea about rx rate to the user.
3332 ath10k_htt_rx_h_ppdu(ar
, &amsdu
, status
, vdev_id
);
3333 ath10k_htt_rx_h_filter(ar
, &amsdu
, status
, NULL
);
3334 ath10k_htt_rx_h_mpdu(ar
, &amsdu
, status
, false, NULL
,
3335 NULL
, peer_id
, frag
);
3336 ath10k_htt_rx_h_enqueue(ar
, &amsdu
, status
);
3341 /* Should not happen. */
3342 ath10k_warn(ar
, "failed to extract amsdu: %d\n", ret
);
3343 htt
->rx_confused
= true;
3344 __skb_queue_purge(&list
);
static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
						   const __le32 *resp_ids,
						   int num_resp_ids)
{
	int i;
	u32 resp_id;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
		   num_resp_ids);

	for (i = 0; i < num_resp_ids; i++) {
		resp_id = le32_to_cpu(resp_ids[i]);

		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
			   resp_id);

		/* TODO: free resp_id */
	}
}
3371 static void ath10k_htt_rx_tx_fetch_ind(struct ath10k
*ar
, struct sk_buff
*skb
)
3373 struct ieee80211_hw
*hw
= ar
->hw
;
3374 struct ieee80211_txq
*txq
;
3375 struct htt_resp
*resp
= (struct htt_resp
*)skb
->data
;
3376 struct htt_tx_fetch_record
*record
;
3378 size_t max_num_bytes
;
3379 size_t max_num_msdus
;
3382 const __le32
*resp_ids
;
3391 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "htt rx tx fetch ind\n");
3393 len
= sizeof(resp
->hdr
) + sizeof(resp
->tx_fetch_ind
);
3394 if (unlikely(skb
->len
< len
)) {
3395 ath10k_warn(ar
, "received corrupted tx_fetch_ind event: buffer too short\n");
3399 num_records
= le16_to_cpu(resp
->tx_fetch_ind
.num_records
);
3400 num_resp_ids
= le16_to_cpu(resp
->tx_fetch_ind
.num_resp_ids
);
3402 len
+= sizeof(resp
->tx_fetch_ind
.records
[0]) * num_records
;
3403 len
+= sizeof(resp
->tx_fetch_ind
.resp_ids
[0]) * num_resp_ids
;
3405 if (unlikely(skb
->len
< len
)) {
3406 ath10k_warn(ar
, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
3410 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "htt rx tx fetch ind num records %u num resps %u seq %u\n",
3411 num_records
, num_resp_ids
,
3412 le16_to_cpu(resp
->tx_fetch_ind
.fetch_seq_num
));
3414 if (!ar
->htt
.tx_q_state
.enabled
) {
3415 ath10k_warn(ar
, "received unexpected tx_fetch_ind event: not enabled\n");
3419 if (ar
->htt
.tx_q_state
.mode
== HTT_TX_MODE_SWITCH_PUSH
) {
3420 ath10k_warn(ar
, "received unexpected tx_fetch_ind event: in push mode\n");
3426 for (i
= 0; i
< num_records
; i
++) {
3427 record
= &resp
->tx_fetch_ind
.records
[i
];
3428 peer_id
= MS(le16_to_cpu(record
->info
),
3429 HTT_TX_FETCH_RECORD_INFO_PEER_ID
);
3430 tid
= MS(le16_to_cpu(record
->info
),
3431 HTT_TX_FETCH_RECORD_INFO_TID
);
3432 max_num_msdus
= le16_to_cpu(record
->num_msdus
);
3433 max_num_bytes
= le32_to_cpu(record
->num_bytes
);
3435 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "htt rx tx fetch record %i peer_id %u tid %u msdus %zu bytes %zu\n",
3436 i
, peer_id
, tid
, max_num_msdus
, max_num_bytes
);
3438 if (unlikely(peer_id
>= ar
->htt
.tx_q_state
.num_peers
) ||
3439 unlikely(tid
>= ar
->htt
.tx_q_state
.num_tids
)) {
3440 ath10k_warn(ar
, "received out of range peer_id %u tid %u\n",
3445 spin_lock_bh(&ar
->data_lock
);
3446 txq
= ath10k_mac_txq_lookup(ar
, peer_id
, tid
);
3447 spin_unlock_bh(&ar
->data_lock
);
3449 /* It is okay to release the lock and use txq because RCU read
3453 if (unlikely(!txq
)) {
3454 ath10k_warn(ar
, "failed to lookup txq for peer_id %u tid %u\n",
3462 ieee80211_txq_schedule_start(hw
, txq
->ac
);
3463 may_tx
= ieee80211_txq_may_transmit(hw
, txq
);
3464 while (num_msdus
< max_num_msdus
&&
3465 num_bytes
< max_num_bytes
) {
3469 ret
= ath10k_mac_tx_push_txq(hw
, txq
);
3476 ieee80211_return_txq(hw
, txq
, false);
3477 ieee80211_txq_schedule_end(hw
, txq
->ac
);
3479 record
->num_msdus
= cpu_to_le16(num_msdus
);
3480 record
->num_bytes
= cpu_to_le32(num_bytes
);
3482 ath10k_htt_tx_txq_recalc(hw
, txq
);
3487 resp_ids
= ath10k_htt_get_tx_fetch_ind_resp_ids(&resp
->tx_fetch_ind
);
3488 ath10k_htt_rx_tx_fetch_resp_id_confirm(ar
, resp_ids
, num_resp_ids
);
3490 ret
= ath10k_htt_tx_fetch_resp(ar
,
3491 resp
->tx_fetch_ind
.token
,
3492 resp
->tx_fetch_ind
.fetch_seq_num
,
3493 resp
->tx_fetch_ind
.records
,
3495 if (unlikely(ret
)) {
3496 ath10k_warn(ar
, "failed to submit tx fetch resp for token 0x%08x: %d\n",
3497 le32_to_cpu(resp
->tx_fetch_ind
.token
), ret
);
3498 /* FIXME: request fw restart */
3501 ath10k_htt_tx_txq_sync(ar
);
static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
					   struct sk_buff *skb)
{
	const struct htt_resp *resp = (void *)skb->data;
	size_t len;
	int num_resp_ids;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
		return;
	}

	num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
	len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
		return;
	}

	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
					       resp->tx_fetch_confirm.resp_ids,
					       num_resp_ids);
3532 static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k
*ar
,
3533 struct sk_buff
*skb
)
3535 const struct htt_resp
*resp
= (void *)skb
->data
;
3536 const struct htt_tx_mode_switch_record
*record
;
3537 struct ieee80211_txq
*txq
;
3538 struct ath10k_txq
*artxq
;
3541 enum htt_tx_mode_switch_mode mode
;
3550 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "htt rx tx mode switch ind\n");
3552 len
= sizeof(resp
->hdr
) + sizeof(resp
->tx_mode_switch_ind
);
3553 if (unlikely(skb
->len
< len
)) {
3554 ath10k_warn(ar
, "received corrupted tx_mode_switch_ind event: buffer too short\n");
3558 info0
= le16_to_cpu(resp
->tx_mode_switch_ind
.info0
);
3559 info1
= le16_to_cpu(resp
->tx_mode_switch_ind
.info1
);
3561 enable
= !!(info0
& HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE
);
3562 num_records
= MS(info0
, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD
);
3563 mode
= MS(info1
, HTT_TX_MODE_SWITCH_IND_INFO1_MODE
);
3564 threshold
= MS(info1
, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD
);
3566 ath10k_dbg(ar
, ATH10K_DBG_HTT
,
3567 "htt rx tx mode switch ind info0 0x%04x info1 0x%04x enable %d num records %zd mode %d threshold %u\n",
3568 info0
, info1
, enable
, num_records
, mode
, threshold
);
3570 len
+= sizeof(resp
->tx_mode_switch_ind
.records
[0]) * num_records
;
3572 if (unlikely(skb
->len
< len
)) {
3573 ath10k_warn(ar
, "received corrupted tx_mode_switch_mode_ind event: too many records\n");
3578 case HTT_TX_MODE_SWITCH_PUSH
:
3579 case HTT_TX_MODE_SWITCH_PUSH_PULL
:
3582 ath10k_warn(ar
, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n",
3590 ar
->htt
.tx_q_state
.enabled
= enable
;
3591 ar
->htt
.tx_q_state
.mode
= mode
;
3592 ar
->htt
.tx_q_state
.num_push_allowed
= threshold
;
3596 for (i
= 0; i
< num_records
; i
++) {
3597 record
= &resp
->tx_mode_switch_ind
.records
[i
];
3598 info0
= le16_to_cpu(record
->info0
);
3599 peer_id
= MS(info0
, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID
);
3600 tid
= MS(info0
, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID
);
3602 if (unlikely(peer_id
>= ar
->htt
.tx_q_state
.num_peers
) ||
3603 unlikely(tid
>= ar
->htt
.tx_q_state
.num_tids
)) {
3604 ath10k_warn(ar
, "received out of range peer_id %u tid %u\n",
3609 spin_lock_bh(&ar
->data_lock
);
3610 txq
= ath10k_mac_txq_lookup(ar
, peer_id
, tid
);
3611 spin_unlock_bh(&ar
->data_lock
);
3613 /* It is okay to release the lock and use txq because RCU read
3617 if (unlikely(!txq
)) {
3618 ath10k_warn(ar
, "failed to lookup txq for peer_id %u tid %u\n",
3623 spin_lock_bh(&ar
->htt
.tx_lock
);
3624 artxq
= (void *)txq
->drv_priv
;
3625 artxq
->num_push_allowed
= le16_to_cpu(record
->num_max_msdus
);
3626 spin_unlock_bh(&ar
->htt
.tx_lock
);
3631 ath10k_mac_tx_push_pending(ar
);
void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	bool release;

	release = ath10k_htt_t2h_msg_handler(ar, skb);

	/* Free the indication buffer */
	if (release)
		dev_kfree_skb_any(skb);
}

static inline s8 ath10k_get_legacy_rate_idx(struct ath10k *ar, u8 rate)
{
	static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12,
					  18, 24, 36, 48, 54};
	int i;

	for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) {
		if (rate == legacy_rates[i])
			return i;
	}

	ath10k_warn(ar, "Invalid legacy rate %d peer stats", rate);
	return -EINVAL;
}
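
/* Editor's illustrative sketch (hypothetical helper): a worked example of the
 * legacy rate lookup above. The table places CCK rates (1, 2, 5, 11 Mbps) at
 * indexes 0-3 and OFDM rates (6-54 Mbps) at indexes 4-11, so a reported rate
 * of 11 resolves to index 3 and 54 resolves to index 11.
 */
static int __maybe_unused
ath10k_example_legacy_rate_idx_for_11mbps(void)
{
	static const u8 rates[] = {1, 2, 5, 11, 6, 9, 12, 18, 24, 36, 48, 54};
	int i;

	for (i = 0; i < ARRAY_SIZE(rates); i++)
		if (rates[i] == 11)
			return i; /* 3 */

	return -EINVAL;
}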
3661 ath10k_accumulate_per_peer_tx_stats(struct ath10k
*ar
,
3662 struct ath10k_sta
*arsta
,
3663 struct ath10k_per_peer_tx_stats
*pstats
,
3666 struct rate_info
*txrate
= &arsta
->txrate
;
3667 struct ath10k_htt_tx_stats
*tx_stats
;
3668 int idx
, ht_idx
, gi
, mcs
, bw
, nss
;
3669 unsigned long flags
;
3671 if (!arsta
->tx_stats
)
3674 tx_stats
= arsta
->tx_stats
;
3675 flags
= txrate
->flags
;
3676 gi
= test_bit(ATH10K_RATE_INFO_FLAGS_SGI_BIT
, &flags
);
3677 mcs
= ATH10K_HW_MCS_RATE(pstats
->ratecode
);
3680 ht_idx
= mcs
+ (nss
- 1) * 8;
3681 idx
= mcs
* 8 + 8 * 10 * (nss
- 1);
3684 #define STATS_OP_FMT(name) tx_stats->stats[ATH10K_STATS_TYPE_##name]
3686 if (txrate
->flags
& RATE_INFO_FLAGS_VHT_MCS
) {
3687 STATS_OP_FMT(SUCC
).vht
[0][mcs
] += pstats
->succ_bytes
;
3688 STATS_OP_FMT(SUCC
).vht
[1][mcs
] += pstats
->succ_pkts
;
3689 STATS_OP_FMT(FAIL
).vht
[0][mcs
] += pstats
->failed_bytes
;
3690 STATS_OP_FMT(FAIL
).vht
[1][mcs
] += pstats
->failed_pkts
;
3691 STATS_OP_FMT(RETRY
).vht
[0][mcs
] += pstats
->retry_bytes
;
3692 STATS_OP_FMT(RETRY
).vht
[1][mcs
] += pstats
->retry_pkts
;
3693 } else if (txrate
->flags
& RATE_INFO_FLAGS_MCS
) {
3694 STATS_OP_FMT(SUCC
).ht
[0][ht_idx
] += pstats
->succ_bytes
;
3695 STATS_OP_FMT(SUCC
).ht
[1][ht_idx
] += pstats
->succ_pkts
;
3696 STATS_OP_FMT(FAIL
).ht
[0][ht_idx
] += pstats
->failed_bytes
;
3697 STATS_OP_FMT(FAIL
).ht
[1][ht_idx
] += pstats
->failed_pkts
;
3698 STATS_OP_FMT(RETRY
).ht
[0][ht_idx
] += pstats
->retry_bytes
;
3699 STATS_OP_FMT(RETRY
).ht
[1][ht_idx
] += pstats
->retry_pkts
;
3701 mcs
= legacy_rate_idx
;
3703 STATS_OP_FMT(SUCC
).legacy
[0][mcs
] += pstats
->succ_bytes
;
3704 STATS_OP_FMT(SUCC
).legacy
[1][mcs
] += pstats
->succ_pkts
;
3705 STATS_OP_FMT(FAIL
).legacy
[0][mcs
] += pstats
->failed_bytes
;
3706 STATS_OP_FMT(FAIL
).legacy
[1][mcs
] += pstats
->failed_pkts
;
3707 STATS_OP_FMT(RETRY
).legacy
[0][mcs
] += pstats
->retry_bytes
;
3708 STATS_OP_FMT(RETRY
).legacy
[1][mcs
] += pstats
->retry_pkts
;
3711 if (ATH10K_HW_AMPDU(pstats
->flags
)) {
3712 tx_stats
->ba_fails
+= ATH10K_HW_BA_FAIL(pstats
->flags
);
3714 if (txrate
->flags
& RATE_INFO_FLAGS_MCS
) {
3715 STATS_OP_FMT(AMPDU
).ht
[0][ht_idx
] +=
3716 pstats
->succ_bytes
+ pstats
->retry_bytes
;
3717 STATS_OP_FMT(AMPDU
).ht
[1][ht_idx
] +=
3718 pstats
->succ_pkts
+ pstats
->retry_pkts
;
3720 STATS_OP_FMT(AMPDU
).vht
[0][mcs
] +=
3721 pstats
->succ_bytes
+ pstats
->retry_bytes
;
3722 STATS_OP_FMT(AMPDU
).vht
[1][mcs
] +=
3723 pstats
->succ_pkts
+ pstats
->retry_pkts
;
3725 STATS_OP_FMT(AMPDU
).bw
[0][bw
] +=
3726 pstats
->succ_bytes
+ pstats
->retry_bytes
;
3727 STATS_OP_FMT(AMPDU
).nss
[0][nss
- 1] +=
3728 pstats
->succ_bytes
+ pstats
->retry_bytes
;
3729 STATS_OP_FMT(AMPDU
).gi
[0][gi
] +=
3730 pstats
->succ_bytes
+ pstats
->retry_bytes
;
3731 STATS_OP_FMT(AMPDU
).rate_table
[0][idx
] +=
3732 pstats
->succ_bytes
+ pstats
->retry_bytes
;
3733 STATS_OP_FMT(AMPDU
).bw
[1][bw
] +=
3734 pstats
->succ_pkts
+ pstats
->retry_pkts
;
3735 STATS_OP_FMT(AMPDU
).nss
[1][nss
- 1] +=
3736 pstats
->succ_pkts
+ pstats
->retry_pkts
;
3737 STATS_OP_FMT(AMPDU
).gi
[1][gi
] +=
3738 pstats
->succ_pkts
+ pstats
->retry_pkts
;
3739 STATS_OP_FMT(AMPDU
).rate_table
[1][idx
] +=
3740 pstats
->succ_pkts
+ pstats
->retry_pkts
;
3742 tx_stats
->ack_fails
+=
3743 ATH10K_HW_BA_FAIL(pstats
->flags
);
3746 STATS_OP_FMT(SUCC
).bw
[0][bw
] += pstats
->succ_bytes
;
3747 STATS_OP_FMT(SUCC
).nss
[0][nss
- 1] += pstats
->succ_bytes
;
3748 STATS_OP_FMT(SUCC
).gi
[0][gi
] += pstats
->succ_bytes
;
3750 STATS_OP_FMT(SUCC
).bw
[1][bw
] += pstats
->succ_pkts
;
3751 STATS_OP_FMT(SUCC
).nss
[1][nss
- 1] += pstats
->succ_pkts
;
3752 STATS_OP_FMT(SUCC
).gi
[1][gi
] += pstats
->succ_pkts
;
3754 STATS_OP_FMT(FAIL
).bw
[0][bw
] += pstats
->failed_bytes
;
3755 STATS_OP_FMT(FAIL
).nss
[0][nss
- 1] += pstats
->failed_bytes
;
3756 STATS_OP_FMT(FAIL
).gi
[0][gi
] += pstats
->failed_bytes
;
3758 STATS_OP_FMT(FAIL
).bw
[1][bw
] += pstats
->failed_pkts
;
3759 STATS_OP_FMT(FAIL
).nss
[1][nss
- 1] += pstats
->failed_pkts
;
3760 STATS_OP_FMT(FAIL
).gi
[1][gi
] += pstats
->failed_pkts
;
3762 STATS_OP_FMT(RETRY
).bw
[0][bw
] += pstats
->retry_bytes
;
3763 STATS_OP_FMT(RETRY
).nss
[0][nss
- 1] += pstats
->retry_bytes
;
3764 STATS_OP_FMT(RETRY
).gi
[0][gi
] += pstats
->retry_bytes
;
3766 STATS_OP_FMT(RETRY
).bw
[1][bw
] += pstats
->retry_pkts
;
3767 STATS_OP_FMT(RETRY
).nss
[1][nss
- 1] += pstats
->retry_pkts
;
3768 STATS_OP_FMT(RETRY
).gi
[1][gi
] += pstats
->retry_pkts
;
3770 if (txrate
->flags
>= RATE_INFO_FLAGS_MCS
) {
3771 STATS_OP_FMT(SUCC
).rate_table
[0][idx
] += pstats
->succ_bytes
;
3772 STATS_OP_FMT(SUCC
).rate_table
[1][idx
] += pstats
->succ_pkts
;
3773 STATS_OP_FMT(FAIL
).rate_table
[0][idx
] += pstats
->failed_bytes
;
3774 STATS_OP_FMT(FAIL
).rate_table
[1][idx
] += pstats
->failed_pkts
;
3775 STATS_OP_FMT(RETRY
).rate_table
[0][idx
] += pstats
->retry_bytes
;
3776 STATS_OP_FMT(RETRY
).rate_table
[1][idx
] += pstats
->retry_pkts
;
3779 tx_stats
->tx_duration
+= pstats
->duration
;
3783 ath10k_update_per_peer_tx_stats(struct ath10k
*ar
,
3784 struct ieee80211_sta
*sta
,
3785 struct ath10k_per_peer_tx_stats
*peer_stats
)
3787 struct ath10k_sta
*arsta
= (struct ath10k_sta
*)sta
->drv_priv
;
3788 struct ieee80211_chanctx_conf
*conf
= NULL
;
3791 bool skip_auto_rate
;
3792 struct rate_info txrate
;
3794 lockdep_assert_held(&ar
->data_lock
);
3796 txrate
.flags
= ATH10K_HW_PREAMBLE(peer_stats
->ratecode
);
3797 txrate
.bw
= ATH10K_HW_BW(peer_stats
->flags
);
3798 txrate
.nss
= ATH10K_HW_NSS(peer_stats
->ratecode
);
3799 txrate
.mcs
= ATH10K_HW_MCS_RATE(peer_stats
->ratecode
);
3800 sgi
= ATH10K_HW_GI(peer_stats
->flags
);
3801 skip_auto_rate
= ATH10K_FW_SKIPPED_RATE_CTRL(peer_stats
->flags
);
3803 /* Firmware's rate control skips broadcast/management frames,
3804 * if host has configure fixed rates and in some other special cases.
3809 if (txrate
.flags
== WMI_RATE_PREAMBLE_VHT
&& txrate
.mcs
> 9) {
3810 ath10k_warn(ar
, "Invalid VHT mcs %d peer stats", txrate
.mcs
);
3814 if (txrate
.flags
== WMI_RATE_PREAMBLE_HT
&&
3815 (txrate
.mcs
> 7 || txrate
.nss
< 1)) {
3816 ath10k_warn(ar
, "Invalid HT mcs %d nss %d peer stats",
3817 txrate
.mcs
, txrate
.nss
);
3821 memset(&arsta
->txrate
, 0, sizeof(arsta
->txrate
));
3822 memset(&arsta
->tx_info
.status
, 0, sizeof(arsta
->tx_info
.status
));
3823 if (txrate
.flags
== WMI_RATE_PREAMBLE_CCK
||
3824 txrate
.flags
== WMI_RATE_PREAMBLE_OFDM
) {
3825 rate
= ATH10K_HW_LEGACY_RATE(peer_stats
->ratecode
);
3826 /* This is hacky, FW sends CCK rate 5.5Mbps as 6 */
3827 if (rate
== 6 && txrate
.flags
== WMI_RATE_PREAMBLE_CCK
)
3829 rate_idx
= ath10k_get_legacy_rate_idx(ar
, rate
);
3832 arsta
->txrate
.legacy
= rate
;
3833 } else if (txrate
.flags
== WMI_RATE_PREAMBLE_HT
) {
3834 arsta
->txrate
.flags
= RATE_INFO_FLAGS_MCS
;
3835 arsta
->txrate
.mcs
= txrate
.mcs
+ 8 * (txrate
.nss
- 1);
3837 arsta
->txrate
.flags
= RATE_INFO_FLAGS_VHT_MCS
;
3838 arsta
->txrate
.mcs
= txrate
.mcs
;
3841 switch (txrate
.flags
) {
3842 case WMI_RATE_PREAMBLE_OFDM
:
3843 if (arsta
->arvif
&& arsta
->arvif
->vif
)
3844 conf
= rcu_dereference(arsta
->arvif
->vif
->bss_conf
.chanctx_conf
);
3845 if (conf
&& conf
->def
.chan
->band
== NL80211_BAND_5GHZ
)
3846 arsta
->tx_info
.status
.rates
[0].idx
= rate_idx
- 4;
3848 case WMI_RATE_PREAMBLE_CCK
:
3849 arsta
->tx_info
.status
.rates
[0].idx
= rate_idx
;
3851 arsta
->tx_info
.status
.rates
[0].flags
|=
3852 (IEEE80211_TX_RC_USE_SHORT_PREAMBLE
|
3853 IEEE80211_TX_RC_SHORT_GI
);
3855 case WMI_RATE_PREAMBLE_HT
:
3856 arsta
->tx_info
.status
.rates
[0].idx
=
3857 txrate
.mcs
+ ((txrate
.nss
- 1) * 8);
3859 arsta
->tx_info
.status
.rates
[0].flags
|=
3860 IEEE80211_TX_RC_SHORT_GI
;
3861 arsta
->tx_info
.status
.rates
[0].flags
|= IEEE80211_TX_RC_MCS
;
3863 case WMI_RATE_PREAMBLE_VHT
:
3864 ieee80211_rate_set_vht(&arsta
->tx_info
.status
.rates
[0],
3865 txrate
.mcs
, txrate
.nss
);
3867 arsta
->tx_info
.status
.rates
[0].flags
|=
3868 IEEE80211_TX_RC_SHORT_GI
;
3869 arsta
->tx_info
.status
.rates
[0].flags
|= IEEE80211_TX_RC_VHT_MCS
;
3873 arsta
->txrate
.nss
= txrate
.nss
;
3874 arsta
->txrate
.bw
= ath10k_bw_to_mac80211_bw(txrate
.bw
);
3875 arsta
->last_tx_bitrate
= cfg80211_calculate_bitrate(&arsta
->txrate
);
3877 arsta
->txrate
.flags
|= RATE_INFO_FLAGS_SHORT_GI
;
3879 switch (arsta
->txrate
.bw
) {
3880 case RATE_INFO_BW_40
:
3881 arsta
->tx_info
.status
.rates
[0].flags
|=
3882 IEEE80211_TX_RC_40_MHZ_WIDTH
;
3884 case RATE_INFO_BW_80
:
3885 arsta
->tx_info
.status
.rates
[0].flags
|=
3886 IEEE80211_TX_RC_80_MHZ_WIDTH
;
3888 case RATE_INFO_BW_160
:
3889 arsta
->tx_info
.status
.rates
[0].flags
|=
3890 IEEE80211_TX_RC_160_MHZ_WIDTH
;
3894 if (peer_stats
->succ_pkts
) {
3895 arsta
->tx_info
.flags
= IEEE80211_TX_STAT_ACK
;
3896 arsta
->tx_info
.status
.rates
[0].count
= 1;
3897 ieee80211_tx_rate_update(ar
->hw
, sta
, &arsta
->tx_info
);
3900 if (ar
->htt
.disable_tx_comp
) {
3901 arsta
->tx_failed
+= peer_stats
->failed_pkts
;
3902 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "tx failed %d\n",
3906 arsta
->tx_retries
+= peer_stats
->retry_pkts
;
3907 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "htt tx retries %d", arsta
->tx_retries
);
3909 if (ath10k_debug_is_extd_tx_stats_enabled(ar
))
3910 ath10k_accumulate_per_peer_tx_stats(ar
, arsta
, peer_stats
,
static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
					struct sk_buff *skb)
{
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
	struct htt_per_peer_tx_stats_ind *tx_stats;
	struct ieee80211_sta *sta;
	struct ath10k_peer *peer;
	int peer_id, i;
	u8 ppdu_len, num_ppdu;

	num_ppdu = resp->peer_tx_stats.num_ppdu;
	ppdu_len = resp->peer_tx_stats.ppdu_len * sizeof(__le32);

	if (skb->len < sizeof(struct htt_resp_hdr) + num_ppdu * ppdu_len) {
		ath10k_warn(ar, "Invalid peer stats buf length %d\n", skb->len);
		return;
	}

	tx_stats = (struct htt_per_peer_tx_stats_ind *)
			(resp->peer_tx_stats.payload);
	peer_id = __le16_to_cpu(tx_stats->peer_id);

	rcu_read_lock();
	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer || !peer->sta) {
		ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n",
			    peer_id);
		goto out;
	}

	sta = peer->sta;
	for (i = 0; i < num_ppdu; i++) {
		tx_stats = (struct htt_per_peer_tx_stats_ind *)
			   (resp->peer_tx_stats.payload + i * ppdu_len);

		p_tx_stats->succ_bytes = __le32_to_cpu(tx_stats->succ_bytes);
		p_tx_stats->retry_bytes = __le32_to_cpu(tx_stats->retry_bytes);
		p_tx_stats->failed_bytes =
				__le32_to_cpu(tx_stats->failed_bytes);
		p_tx_stats->ratecode = tx_stats->ratecode;
		p_tx_stats->flags = tx_stats->flags;
		p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts);
		p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts);
		p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts);
		p_tx_stats->duration = __le16_to_cpu(tx_stats->tx_duration);

		ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
	}

out:
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();
}
static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data)
{
	struct ath10k_pktlog_hdr *hdr = (struct ath10k_pktlog_hdr *)data;
	struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
	struct ath10k_10_2_peer_tx_stats *tx_stats;
	struct ieee80211_sta *sta;
	struct ath10k_peer *peer;
	u16 log_type = __le16_to_cpu(hdr->log_type);
	u32 peer_id = 0, i;

	if (log_type != ATH_PKTLOG_TYPE_TX_STAT)
		return;

	tx_stats = (struct ath10k_10_2_peer_tx_stats *)((hdr->payload) +
		    ATH10K_10_2_TX_STATS_OFFSET);

	if (!tx_stats->tx_ppdu_cnt)
		return;

	peer_id = tx_stats->peer_id;

	rcu_read_lock();
	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer || !peer->sta) {
		ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
			    peer_id);
		goto out;
	}

	sta = peer->sta;
	for (i = 0; i < tx_stats->tx_ppdu_cnt; i++) {
		p_tx_stats->succ_bytes =
			__le16_to_cpu(tx_stats->success_bytes[i]);
		p_tx_stats->retry_bytes =
			__le16_to_cpu(tx_stats->retry_bytes[i]);
		p_tx_stats->failed_bytes =
			__le16_to_cpu(tx_stats->failed_bytes[i]);
		p_tx_stats->ratecode = tx_stats->ratecode[i];
		p_tx_stats->flags = tx_stats->flags[i];
		p_tx_stats->succ_pkts = tx_stats->success_pkts[i];
		p_tx_stats->retry_pkts = tx_stats->retry_pkts[i];
		p_tx_stats->failed_pkts = tx_stats->failed_pkts[i];

		ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
	}
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();

	return;

out:
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();
}
static int ath10k_htt_rx_pn_len(enum htt_security_types sec_type)
{
	switch (sec_type) {
	case HTT_SECURITY_TKIP:
	case HTT_SECURITY_TKIP_NOMIC:
	case HTT_SECURITY_AES_CCMP:
		return 48;
	default:
		return 0;
	}
}

static void ath10k_htt_rx_sec_ind_handler(struct ath10k *ar,
					  struct htt_security_indication *ev)
{
	enum htt_txrx_sec_cast_type sec_index;
	enum htt_security_types sec_type;
	struct ath10k_peer *peer;

	spin_lock_bh(&ar->data_lock);

	peer = ath10k_peer_find_by_id(ar, __le16_to_cpu(ev->peer_id));
	if (!peer) {
		ath10k_warn(ar, "failed to find peer id %d for security indication",
			    __le16_to_cpu(ev->peer_id));
		goto out;
	}

	sec_type = MS(ev->flags, HTT_SECURITY_TYPE);

	if (ev->flags & HTT_SECURITY_IS_UNICAST)
		sec_index = HTT_TXRX_SEC_UCAST;
	else
		sec_index = HTT_TXRX_SEC_MCAST;

	peer->rx_pn[sec_index].sec_type = sec_type;
	peer->rx_pn[sec_index].pn_len = ath10k_htt_rx_pn_len(sec_type);

	memset(peer->tids_last_pn_valid, 0, sizeof(peer->tids_last_pn_valid));
	memset(peer->tids_last_pn, 0, sizeof(peer->tids_last_pn));

out:
	spin_unlock_bh(&ar->data_lock);
}
4071 bool ath10k_htt_t2h_msg_handler(struct ath10k
*ar
, struct sk_buff
*skb
)
4073 struct ath10k_htt
*htt
= &ar
->htt
;
4074 struct htt_resp
*resp
= (struct htt_resp
*)skb
->data
;
4075 enum htt_t2h_msg_type type
;
4077 /* confirm alignment */
4078 if (!IS_ALIGNED((unsigned long)skb
->data
, 4))
4079 ath10k_warn(ar
, "unaligned htt message, expect trouble\n");
4081 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "htt rx, msg_type: 0x%0X\n",
4082 resp
->hdr
.msg_type
);
4084 if (resp
->hdr
.msg_type
>= ar
->htt
.t2h_msg_types_max
) {
4085 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X",
4086 resp
->hdr
.msg_type
, ar
->htt
.t2h_msg_types_max
);
4089 type
= ar
->htt
.t2h_msg_types
[resp
->hdr
.msg_type
];
	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
		htt->target_version_major = resp->ver_resp.major;
		htt->target_version_minor = resp->ver_resp.minor;
		complete(&htt->target_version_received);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IND:
		if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) {
			ath10k_htt_rx_proc_rx_ind_ll(htt, &resp->rx_ind);
		} else {
			skb_queue_tail(&htt->rx_indication_head, skb);
			return false;
		}
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP: {
		struct htt_peer_map_event ev = {
			.vdev_id = resp->peer_map.vdev_id,
			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
		};
		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
		ath10k_peer_map_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
		struct htt_peer_unmap_event ev = {
			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
		};
		ath10k_peer_unmap_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
		struct htt_tx_done tx_done = {};
		struct ath10k_htt *htt = &ar->htt;
		struct ath10k_htc *htc = &ar->htc;
		struct ath10k_htc_ep *ep = &ar->htc.endpoint[htt->eid];
		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
		int info = __le32_to_cpu(resp->mgmt_tx_completion.info);

		tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);

		switch (status) {
		case HTT_MGMT_TX_STATUS_OK:
			tx_done.status = HTT_TX_COMPL_STATE_ACK;
			if (test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
				     ar->wmi.svc_map) &&
			    (resp->mgmt_tx_completion.flags &
			     HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI)) {
				tx_done.ack_rssi =
				FIELD_GET(HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK,
					  info);
			}
			break;
		case HTT_MGMT_TX_STATUS_RETRY:
			tx_done.status = HTT_TX_COMPL_STATE_NOACK;
			break;
		case HTT_MGMT_TX_STATUS_DROP:
			tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
			break;
		}

		if (htt->disable_tx_comp) {
			spin_lock_bh(&htc->tx_lock);
			ep->tx_credits++;
			spin_unlock_bh(&htc->tx_lock);
		}

		status = ath10k_txrx_tx_unref(htt, &tx_done);
		if (!status) {
			spin_lock_bh(&htt->tx_lock);
			ath10k_htt_tx_mgmt_dec_pending(htt);
			spin_unlock_bh(&htt->tx_lock);
		}
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
		ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_SEC_IND: {
		struct ath10k *ar = htt->ar;
		struct htt_security_indication *ev = &resp->security_indication;

		ath10k_htt_rx_sec_ind_handler(ar, ev);
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "sec ind peer_id %d unicast %d type %d\n",
			   __le16_to_cpu(ev->peer_id),
			   !!(ev->flags & HTT_SECURITY_IS_UNICAST),
			   MS(ev->flags, HTT_SECURITY_TYPE));
		complete(&ar->install_key_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		atomic_inc(&htt->num_mpdus_ready);

		return ath10k_htt_rx_proc_rx_frag_ind(htt,
						      &resp->rx_frag_ind,
						      skb);
	}
	case HTT_T2H_MSG_TYPE_TEST:
		break;
	case HTT_T2H_MSG_TYPE_STATS_CONF:
		trace_ath10k_htt_stats(ar, skb->data, skb->len);
		break;
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
		/* Firmware can return tx frames if it's unable to fully
		 * process them and suspects host may be able to fix it. ath10k
		 * sends all tx frames as already inspected so this shouldn't
		 * happen unless fw has a bug.
		 */
		ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
		break;
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
		ath10k_htt_rx_addba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_RX_DELBA:
		ath10k_htt_rx_delba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG: {
		trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
					skb->len -
					offsetof(struct htt_resp,
						 pktlog_msg.payload));

		if (ath10k_peer_stats_enabled(ar))
			ath10k_fetch_10_2_tx_stats(ar,
						   resp->pktlog_msg.payload);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
		/* Ignore this event because mac80211 takes care of Rx
		 * aggregation reordering.
		 */
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
		skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
		return false;
	}
	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND: {
		struct ath10k_htt *htt = &ar->htt;
		struct ath10k_htc *htc = &ar->htc;
		struct ath10k_htc_ep *ep = &ar->htc.endpoint[htt->eid];
		u32 msg_word = __le32_to_cpu(*(__le32 *)resp);
		int htt_credit_delta;

		htt_credit_delta = HTT_TX_CREDIT_DELTA_ABS_GET(msg_word);
		if (HTT_TX_CREDIT_SIGN_BIT_GET(msg_word))
			htt_credit_delta = -htt_credit_delta;

		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt credit update delta %d\n",
			   htt_credit_delta);

		if (htt->disable_tx_comp) {
			spin_lock_bh(&htc->tx_lock);
			ep->tx_credits += htt_credit_delta;
			spin_unlock_bh(&htc->tx_lock);
			ath10k_dbg(ar, ATH10K_DBG_HTT,
				   "htt credit total %d\n",
				   ep->tx_credits);
			ep->ep_ops.ep_tx_credits(htc->ar);
		}
		break;
	}
	case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
		u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
		u32 freq = __le32_to_cpu(resp->chan_change.freq);

		ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq);
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt chan change freq %u phymode %s\n",
			   freq, ath10k_wmi_phymode_str(phymode));
		break;
	}
	case HTT_T2H_MSG_TYPE_AGGR_CONF:
		break;
	case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
		struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);

		if (!tx_fetch_ind) {
			ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
			break;
		}
		skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
		ath10k_htt_rx_tx_fetch_confirm(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
		ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_PEER_STATS:
		ath10k_htt_fetch_peer_stats(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_EN_STATS:
	default:
		ath10k_warn(ar, "htt event (%d) not handled\n",
			    resp->hdr.msg_type);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		break;
	}

	return true;
}
EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
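/*
 * Note on the return value: true tells the caller it is finished with the
 * skb (the message was handled inline), while false means the skb has been
 * queued for deferred processing (e.g. rx_indication_head or
 * rx_in_ord_compl_q) and must not be freed by the caller. A minimal sketch
 * of a caller, assuming a hypothetical completion path:
 *
 *	if (ath10k_htt_t2h_msg_handler(ar, skb))
 *		dev_kfree_skb_any(skb);
 */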
void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
					     struct sk_buff *skb)
{
	trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
{
	struct sk_buff *skb;

	while (quota < budget) {
		if (skb_queue_empty(&ar->htt.rx_msdus_q))
			break;

		skb = skb_dequeue(&ar->htt.rx_msdus_q);
		if (!skb)
			break;
		ath10k_process_rx(ar, skb);
		quota++;
	}

	return quota;
}
int ath10k_htt_rx_hl_indication(struct ath10k *ar, int budget)
{
	struct htt_resp *resp;
	struct ath10k_htt *htt = &ar->htt;
	struct sk_buff *skb;
	bool release;
	int quota;

	for (quota = 0; quota < budget; quota++) {
		skb = skb_dequeue(&htt->rx_indication_head);
		if (!skb)
			break;

		resp = (struct htt_resp *)skb->data;

		release = ath10k_htt_rx_proc_rx_ind_hl(htt,
						       &resp->rx_ind_hl,
						       skb,
						       HTT_RX_PN_CHECK,
						       HTT_RX_NON_TKIP_MIC);

		if (release)
			dev_kfree_skb_any(skb);

		ath10k_dbg(ar, ATH10K_DBG_HTT, "rx indication poll pending count:%d\n",
			   skb_queue_len(&htt->rx_indication_head));
	}
	return quota;
}
EXPORT_SYMBOL(ath10k_htt_rx_hl_indication);
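/*
 * Note: on high-latency (HL) targets the t2h handler does not process
 * HTT_T2H_MSG_TYPE_RX_IND inline; it queues the indication on
 * htt->rx_indication_head and returns false. ath10k_htt_rx_hl_indication()
 * is the NAPI-context consumer of that queue, bounded by the poll budget.
 */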
int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {};
	struct sk_buff_head tx_ind_q;
	struct sk_buff *skb;
	unsigned long flags;
	int quota = 0, done, ret;
	bool resched_napi = false;

	__skb_queue_head_init(&tx_ind_q);

	/* Process pending frames before dequeuing more data
	 * from hardware.
	 */
	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
	if (quota == budget) {
		resched_napi = true;
		goto exit;
	}

	while ((skb = skb_dequeue(&htt->rx_in_ord_compl_q))) {
		spin_lock_bh(&htt->rx_ring.lock);
		ret = ath10k_htt_rx_in_ord_ind(ar, skb);
		spin_unlock_bh(&htt->rx_ring.lock);

		dev_kfree_skb_any(skb);
		if (ret == -EIO) {
			resched_napi = true;
			goto exit;
		}
	}

	while (atomic_read(&htt->num_mpdus_ready)) {
		ret = ath10k_htt_rx_handle_amsdu(htt);
		if (ret == -EIO) {
			resched_napi = true;
			goto exit;
		}
		atomic_dec(&htt->num_mpdus_ready);
	}

	/* Deliver received data after processing data from hardware */
	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);

exit:
	/* From NAPI documentation:
	 * The napi poll() function may also process TX completions, in which
	 * case if it processes the entire TX ring then it should count that
	 * work as the rest of the budget.
	 */
	if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
		quota = budget;

	/* kfifo_get: called only within txrx_tasklet so it's neatly serialized.
	 * From kfifo_get() documentation:
	 * Note that with only one concurrent reader and one concurrent writer,
	 * you don't need extra locking to use these macros.
	 */
	while (kfifo_get(&htt->txdone_fifo, &tx_done))
		ath10k_txrx_tx_unref(htt, &tx_done);

	ath10k_mac_tx_push_pending(ar);

	spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
	skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
	spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);

	while ((skb = __skb_dequeue(&tx_ind_q))) {
		ath10k_htt_rx_tx_fetch_ind(ar, skb);
		dev_kfree_skb_any(skb);
	}

	ath10k_htt_rx_msdu_buff_replenish(htt);

	/* In case of rx failure or more data to read, report budget
	 * to reschedule NAPI poll
	 */
	done = resched_napi ? budget : quota;

	return done;
}
EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);
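/*
 * Note: ath10k_htt_txrx_compl_task() follows the usual NAPI contract: it
 * returns the full budget when more work is pending (or an rx error forces
 * a reschedule) and less than budget once the queues have drained, letting
 * the bus-specific poll routine complete NAPI. A minimal sketch of such a
 * poll function (names outside this file are illustrative assumptions):
 *
 *	static int ath10k_pci_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct ath10k *ar = container_of(napi, struct ath10k, napi);
 *		int done = ath10k_htt_txrx_compl_task(ar, budget);
 *
 *		if (done < budget)
 *			napi_complete_done(napi, done);
 *
 *		return done;
 *	}
 */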
static const struct ath10k_htt_rx_ops htt_rx_ops_32 = {
	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32,
	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32,
	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32,
	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32,
	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32,
};

static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64,
	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64,
	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64,
	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64,
	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
};

static const struct ath10k_htt_rx_ops htt_rx_ops_hl = {
	.htt_rx_proc_rx_frag_ind = ath10k_htt_rx_proc_rx_frag_ind_hl,
};
void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;

	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		htt->rx_ops = &htt_rx_ops_hl;
	else if (ar->hw_params.target_64bit)
		htt->rx_ops = &htt_rx_ops_64;
	else
		htt->rx_ops = &htt_rx_ops_32;
}
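/*
 * Note: the three ops tables differ only in how the rx ring is addressed.
 * htt_rx_ops_32 and htt_rx_ops_64 cover low-latency targets whose firmware
 * expects 32-bit or 64-bit DMA addresses in the rx paddr ring respectively,
 * while htt_rx_ops_hl serves high-latency (e.g. SDIO/USB) targets that have
 * no host-filled rx ring and only need the fragment indication hook.
 */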