1 // SPDX-License-Identifier: ISC
3 * Copyright (c) 2005-2011 Atheros Communications Inc.
4 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
5 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
16 #include <linux/log2.h>
17 #include <linux/bitfield.h>
19 /* when under memory pressure rx ring refill may fail and needs a retry */
20 #define HTT_RX_RING_REFILL_RETRY_MS 50
22 #define HTT_RX_RING_REFILL_RESCHED_MS 5
24 static int ath10k_htt_rx_get_csum_state(struct sk_buff
*skb
);
26 static struct sk_buff
*
27 ath10k_htt_rx_find_skb_paddr(struct ath10k
*ar
, u64 paddr
)
29 struct ath10k_skb_rxcb
*rxcb
;
31 hash_for_each_possible(ar
->htt
.rx_ring
.skb_table
, rxcb
, hlist
, paddr
)
32 if (rxcb
->paddr
== paddr
)
33 return ATH10K_RXCB_SKB(rxcb
);
39 static void ath10k_htt_rx_ring_free(struct ath10k_htt
*htt
)
42 struct ath10k_skb_rxcb
*rxcb
;
46 if (htt
->rx_ring
.in_ord_rx
) {
47 hash_for_each_safe(htt
->rx_ring
.skb_table
, i
, n
, rxcb
, hlist
) {
48 skb
= ATH10K_RXCB_SKB(rxcb
);
49 dma_unmap_single(htt
->ar
->dev
, rxcb
->paddr
,
50 skb
->len
+ skb_tailroom(skb
),
52 hash_del(&rxcb
->hlist
);
53 dev_kfree_skb_any(skb
);
56 for (i
= 0; i
< htt
->rx_ring
.size
; i
++) {
57 skb
= htt
->rx_ring
.netbufs_ring
[i
];
61 rxcb
= ATH10K_SKB_RXCB(skb
);
62 dma_unmap_single(htt
->ar
->dev
, rxcb
->paddr
,
63 skb
->len
+ skb_tailroom(skb
),
65 dev_kfree_skb_any(skb
);
69 htt
->rx_ring
.fill_cnt
= 0;
70 hash_init(htt
->rx_ring
.skb_table
);
71 memset(htt
->rx_ring
.netbufs_ring
, 0,
72 htt
->rx_ring
.size
* sizeof(htt
->rx_ring
.netbufs_ring
[0]));
75 static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt
*htt
)
77 return htt
->rx_ring
.size
* sizeof(htt
->rx_ring
.paddrs_ring_32
);
80 static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt
*htt
)
82 return htt
->rx_ring
.size
* sizeof(htt
->rx_ring
.paddrs_ring_64
);
85 static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt
*htt
,
88 htt
->rx_ring
.paddrs_ring_32
= vaddr
;
91 static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt
*htt
,
94 htt
->rx_ring
.paddrs_ring_64
= vaddr
;
97 static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt
*htt
,
98 dma_addr_t paddr
, int idx
)
100 htt
->rx_ring
.paddrs_ring_32
[idx
] = __cpu_to_le32(paddr
);
103 static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt
*htt
,
104 dma_addr_t paddr
, int idx
)
106 htt
->rx_ring
.paddrs_ring_64
[idx
] = __cpu_to_le64(paddr
);
109 static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt
*htt
, int idx
)
111 htt
->rx_ring
.paddrs_ring_32
[idx
] = 0;
114 static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt
*htt
, int idx
)
116 htt
->rx_ring
.paddrs_ring_64
[idx
] = 0;
119 static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt
*htt
)
121 return (void *)htt
->rx_ring
.paddrs_ring_32
;
124 static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt
*htt
)
126 return (void *)htt
->rx_ring
.paddrs_ring_64
;
129 static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt
*htt
, int num
)
131 struct htt_rx_desc
*rx_desc
;
132 struct ath10k_skb_rxcb
*rxcb
;
137 /* The Full Rx Reorder firmware has no way of telling the host
138 * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
139 * To keep things simple make sure ring is always half empty. This
140 * guarantees there'll be no replenishment overruns possible.
142 BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL
>= HTT_RX_RING_SIZE
/ 2);
144 idx
= __le32_to_cpu(*htt
->rx_ring
.alloc_idx
.vaddr
);
146 skb
= dev_alloc_skb(HTT_RX_BUF_SIZE
+ HTT_RX_DESC_ALIGN
);
152 if (!IS_ALIGNED((unsigned long)skb
->data
, HTT_RX_DESC_ALIGN
))
154 PTR_ALIGN(skb
->data
, HTT_RX_DESC_ALIGN
) -
157 /* Clear rx_desc attention word before posting to Rx ring */
158 rx_desc
= (struct htt_rx_desc
*)skb
->data
;
159 rx_desc
->attention
.flags
= __cpu_to_le32(0);
161 paddr
= dma_map_single(htt
->ar
->dev
, skb
->data
,
162 skb
->len
+ skb_tailroom(skb
),
165 if (unlikely(dma_mapping_error(htt
->ar
->dev
, paddr
))) {
166 dev_kfree_skb_any(skb
);
171 rxcb
= ATH10K_SKB_RXCB(skb
);
173 htt
->rx_ring
.netbufs_ring
[idx
] = skb
;
174 ath10k_htt_set_paddrs_ring(htt
, paddr
, idx
);
175 htt
->rx_ring
.fill_cnt
++;
177 if (htt
->rx_ring
.in_ord_rx
) {
178 hash_add(htt
->rx_ring
.skb_table
,
179 &ATH10K_SKB_RXCB(skb
)->hlist
,
185 idx
&= htt
->rx_ring
.size_mask
;
190 * Make sure the rx buffer is updated before available buffer
191 * index to avoid any potential rx ring corruption.
194 *htt
->rx_ring
.alloc_idx
.vaddr
= __cpu_to_le32(idx
);
198 static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt
*htt
, int num
)
200 lockdep_assert_held(&htt
->rx_ring
.lock
);
201 return __ath10k_htt_rx_ring_fill_n(htt
, num
);
204 static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt
*htt
)
206 int ret
, num_deficit
, num_to_fill
;
208 /* Refilling the whole RX ring buffer proves to be a bad idea. The
209 * reason is RX may take up significant amount of CPU cycles and starve
210 * other tasks, e.g. TX on an ethernet device while acting as a bridge
211 * with ath10k wlan interface. This ended up with very poor performance
212 * once CPU the host system was overwhelmed with RX on ath10k.
214 * By limiting the number of refills the replenishing occurs
215 * progressively. This in turns makes use of the fact tasklets are
216 * processed in FIFO order. This means actual RX processing can starve
217 * out refilling. If there's not enough buffers on RX ring FW will not
218 * report RX until it is refilled with enough buffers. This
219 * automatically balances load wrt to CPU power.
221 * This probably comes at a cost of lower maximum throughput but
222 * improves the average and stability.
224 spin_lock_bh(&htt
->rx_ring
.lock
);
225 num_deficit
= htt
->rx_ring
.fill_level
- htt
->rx_ring
.fill_cnt
;
226 num_to_fill
= min(ATH10K_HTT_MAX_NUM_REFILL
, num_deficit
);
227 num_deficit
-= num_to_fill
;
228 ret
= ath10k_htt_rx_ring_fill_n(htt
, num_to_fill
);
229 if (ret
== -ENOMEM
) {
231 * Failed to fill it to the desired level -
232 * we'll start a timer and try again next time.
233 * As long as enough buffers are left in the ring for
234 * another A-MPDU rx, no special recovery is needed.
236 mod_timer(&htt
->rx_ring
.refill_retry_timer
, jiffies
+
237 msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS
));
238 } else if (num_deficit
> 0) {
239 mod_timer(&htt
->rx_ring
.refill_retry_timer
, jiffies
+
240 msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS
));
242 spin_unlock_bh(&htt
->rx_ring
.lock
);
245 static void ath10k_htt_rx_ring_refill_retry(struct timer_list
*t
)
247 struct ath10k_htt
*htt
= from_timer(htt
, t
, rx_ring
.refill_retry_timer
);
249 ath10k_htt_rx_msdu_buff_replenish(htt
);
252 int ath10k_htt_rx_ring_refill(struct ath10k
*ar
)
254 struct ath10k_htt
*htt
= &ar
->htt
;
257 if (ar
->bus_param
.dev_type
== ATH10K_DEV_TYPE_HL
)
260 spin_lock_bh(&htt
->rx_ring
.lock
);
261 ret
= ath10k_htt_rx_ring_fill_n(htt
, (htt
->rx_ring
.fill_level
-
262 htt
->rx_ring
.fill_cnt
));
265 ath10k_htt_rx_ring_free(htt
);
267 spin_unlock_bh(&htt
->rx_ring
.lock
);
272 void ath10k_htt_rx_free(struct ath10k_htt
*htt
)
274 if (htt
->ar
->bus_param
.dev_type
== ATH10K_DEV_TYPE_HL
)
277 del_timer_sync(&htt
->rx_ring
.refill_retry_timer
);
279 skb_queue_purge(&htt
->rx_msdus_q
);
280 skb_queue_purge(&htt
->rx_in_ord_compl_q
);
281 skb_queue_purge(&htt
->tx_fetch_ind_q
);
283 spin_lock_bh(&htt
->rx_ring
.lock
);
284 ath10k_htt_rx_ring_free(htt
);
285 spin_unlock_bh(&htt
->rx_ring
.lock
);
287 dma_free_coherent(htt
->ar
->dev
,
288 ath10k_htt_get_rx_ring_size(htt
),
289 ath10k_htt_get_vaddr_ring(htt
),
290 htt
->rx_ring
.base_paddr
);
292 dma_free_coherent(htt
->ar
->dev
,
293 sizeof(*htt
->rx_ring
.alloc_idx
.vaddr
),
294 htt
->rx_ring
.alloc_idx
.vaddr
,
295 htt
->rx_ring
.alloc_idx
.paddr
);
297 kfree(htt
->rx_ring
.netbufs_ring
);
300 static inline struct sk_buff
*ath10k_htt_rx_netbuf_pop(struct ath10k_htt
*htt
)
302 struct ath10k
*ar
= htt
->ar
;
304 struct sk_buff
*msdu
;
306 lockdep_assert_held(&htt
->rx_ring
.lock
);
308 if (htt
->rx_ring
.fill_cnt
== 0) {
309 ath10k_warn(ar
, "tried to pop sk_buff from an empty rx ring\n");
313 idx
= htt
->rx_ring
.sw_rd_idx
.msdu_payld
;
314 msdu
= htt
->rx_ring
.netbufs_ring
[idx
];
315 htt
->rx_ring
.netbufs_ring
[idx
] = NULL
;
316 ath10k_htt_reset_paddrs_ring(htt
, idx
);
319 idx
&= htt
->rx_ring
.size_mask
;
320 htt
->rx_ring
.sw_rd_idx
.msdu_payld
= idx
;
321 htt
->rx_ring
.fill_cnt
--;
323 dma_unmap_single(htt
->ar
->dev
,
324 ATH10K_SKB_RXCB(msdu
)->paddr
,
325 msdu
->len
+ skb_tailroom(msdu
),
327 ath10k_dbg_dump(ar
, ATH10K_DBG_HTT_DUMP
, NULL
, "htt rx netbuf pop: ",
328 msdu
->data
, msdu
->len
+ skb_tailroom(msdu
));
333 /* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
334 static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt
*htt
,
335 struct sk_buff_head
*amsdu
)
337 struct ath10k
*ar
= htt
->ar
;
338 int msdu_len
, msdu_chaining
= 0;
339 struct sk_buff
*msdu
;
340 struct htt_rx_desc
*rx_desc
;
342 lockdep_assert_held(&htt
->rx_ring
.lock
);
345 int last_msdu
, msdu_len_invalid
, msdu_chained
;
347 msdu
= ath10k_htt_rx_netbuf_pop(htt
);
349 __skb_queue_purge(amsdu
);
353 __skb_queue_tail(amsdu
, msdu
);
355 rx_desc
= (struct htt_rx_desc
*)msdu
->data
;
357 /* FIXME: we must report msdu payload since this is what caller
360 skb_put(msdu
, offsetof(struct htt_rx_desc
, msdu_payload
));
361 skb_pull(msdu
, offsetof(struct htt_rx_desc
, msdu_payload
));
364 * Sanity check - confirm the HW is finished filling in the
366 * If the HW and SW are working correctly, then it's guaranteed
367 * that the HW's MAC DMA is done before this point in the SW.
368 * To prevent the case that we handle a stale Rx descriptor,
369 * just assert for now until we have a way to recover.
371 if (!(__le32_to_cpu(rx_desc
->attention
.flags
)
372 & RX_ATTENTION_FLAGS_MSDU_DONE
)) {
373 __skb_queue_purge(amsdu
);
377 msdu_len_invalid
= !!(__le32_to_cpu(rx_desc
->attention
.flags
)
378 & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR
|
379 RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR
));
380 msdu_len
= MS(__le32_to_cpu(rx_desc
->msdu_start
.common
.info0
),
381 RX_MSDU_START_INFO0_MSDU_LENGTH
);
382 msdu_chained
= rx_desc
->frag_info
.ring2_more_count
;
384 if (msdu_len_invalid
)
388 skb_put(msdu
, min(msdu_len
, HTT_RX_MSDU_SIZE
));
389 msdu_len
-= msdu
->len
;
391 /* Note: Chained buffers do not contain rx descriptor */
392 while (msdu_chained
--) {
393 msdu
= ath10k_htt_rx_netbuf_pop(htt
);
395 __skb_queue_purge(amsdu
);
399 __skb_queue_tail(amsdu
, msdu
);
401 skb_put(msdu
, min(msdu_len
, HTT_RX_BUF_SIZE
));
402 msdu_len
-= msdu
->len
;
406 last_msdu
= __le32_to_cpu(rx_desc
->msdu_end
.common
.info0
) &
407 RX_MSDU_END_INFO0_LAST_MSDU
;
409 trace_ath10k_htt_rx_desc(ar
, &rx_desc
->attention
,
410 sizeof(*rx_desc
) - sizeof(u32
));
416 if (skb_queue_empty(amsdu
))
420 * Don't refill the ring yet.
422 * First, the elements popped here are still in use - it is not
423 * safe to overwrite them until the matching call to
424 * mpdu_desc_list_next. Second, for efficiency it is preferable to
425 * refill the rx ring with 1 PPDU's worth of rx buffers (something
426 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
427 * (something like 3 buffers). Consequently, we'll rely on the txrx
428 * SW to tell us when it is done pulling all the PPDU's rx buffers
429 * out of the rx ring, and then refill it just once.
432 return msdu_chaining
;
435 static struct sk_buff
*ath10k_htt_rx_pop_paddr(struct ath10k_htt
*htt
,
438 struct ath10k
*ar
= htt
->ar
;
439 struct ath10k_skb_rxcb
*rxcb
;
440 struct sk_buff
*msdu
;
442 lockdep_assert_held(&htt
->rx_ring
.lock
);
444 msdu
= ath10k_htt_rx_find_skb_paddr(ar
, paddr
);
448 rxcb
= ATH10K_SKB_RXCB(msdu
);
449 hash_del(&rxcb
->hlist
);
450 htt
->rx_ring
.fill_cnt
--;
452 dma_unmap_single(htt
->ar
->dev
, rxcb
->paddr
,
453 msdu
->len
+ skb_tailroom(msdu
),
455 ath10k_dbg_dump(ar
, ATH10K_DBG_HTT_DUMP
, NULL
, "htt rx netbuf pop: ",
456 msdu
->data
, msdu
->len
+ skb_tailroom(msdu
));
461 static inline void ath10k_htt_append_frag_list(struct sk_buff
*skb_head
,
462 struct sk_buff
*frag_list
,
463 unsigned int frag_len
)
465 skb_shinfo(skb_head
)->frag_list
= frag_list
;
466 skb_head
->data_len
= frag_len
;
467 skb_head
->len
+= skb_head
->data_len
;
470 static int ath10k_htt_rx_handle_amsdu_mon_32(struct ath10k_htt
*htt
,
471 struct sk_buff
*msdu
,
472 struct htt_rx_in_ord_msdu_desc
**msdu_desc
)
474 struct ath10k
*ar
= htt
->ar
;
476 struct sk_buff
*frag_buf
;
477 struct sk_buff
*prev_frag_buf
;
479 struct htt_rx_in_ord_msdu_desc
*ind_desc
= *msdu_desc
;
480 struct htt_rx_desc
*rxd
;
481 int amsdu_len
= __le16_to_cpu(ind_desc
->msdu_len
);
483 rxd
= (void *)msdu
->data
;
484 trace_ath10k_htt_rx_desc(ar
, rxd
, sizeof(*rxd
));
486 skb_put(msdu
, sizeof(struct htt_rx_desc
));
487 skb_pull(msdu
, sizeof(struct htt_rx_desc
));
488 skb_put(msdu
, min(amsdu_len
, HTT_RX_MSDU_SIZE
));
489 amsdu_len
-= msdu
->len
;
491 last_frag
= ind_desc
->reserved
;
494 ath10k_warn(ar
, "invalid amsdu len %u, left %d",
495 __le16_to_cpu(ind_desc
->msdu_len
),
502 paddr
= __le32_to_cpu(ind_desc
->msdu_paddr
);
503 frag_buf
= ath10k_htt_rx_pop_paddr(htt
, paddr
);
505 ath10k_warn(ar
, "failed to pop frag-1 paddr: 0x%x", paddr
);
509 skb_put(frag_buf
, min(amsdu_len
, HTT_RX_BUF_SIZE
));
510 ath10k_htt_append_frag_list(msdu
, frag_buf
, amsdu_len
);
512 amsdu_len
-= frag_buf
->len
;
513 prev_frag_buf
= frag_buf
;
514 last_frag
= ind_desc
->reserved
;
517 paddr
= __le32_to_cpu(ind_desc
->msdu_paddr
);
518 frag_buf
= ath10k_htt_rx_pop_paddr(htt
, paddr
);
520 ath10k_warn(ar
, "failed to pop frag-n paddr: 0x%x",
522 prev_frag_buf
->next
= NULL
;
526 skb_put(frag_buf
, min(amsdu_len
, HTT_RX_BUF_SIZE
));
527 last_frag
= ind_desc
->reserved
;
528 amsdu_len
-= frag_buf
->len
;
530 prev_frag_buf
->next
= frag_buf
;
531 prev_frag_buf
= frag_buf
;
535 ath10k_warn(ar
, "invalid amsdu len %u, left %d",
536 __le16_to_cpu(ind_desc
->msdu_len
), amsdu_len
);
539 *msdu_desc
= ind_desc
;
541 prev_frag_buf
->next
= NULL
;
546 ath10k_htt_rx_handle_amsdu_mon_64(struct ath10k_htt
*htt
,
547 struct sk_buff
*msdu
,
548 struct htt_rx_in_ord_msdu_desc_ext
**msdu_desc
)
550 struct ath10k
*ar
= htt
->ar
;
552 struct sk_buff
*frag_buf
;
553 struct sk_buff
*prev_frag_buf
;
555 struct htt_rx_in_ord_msdu_desc_ext
*ind_desc
= *msdu_desc
;
556 struct htt_rx_desc
*rxd
;
557 int amsdu_len
= __le16_to_cpu(ind_desc
->msdu_len
);
559 rxd
= (void *)msdu
->data
;
560 trace_ath10k_htt_rx_desc(ar
, rxd
, sizeof(*rxd
));
562 skb_put(msdu
, sizeof(struct htt_rx_desc
));
563 skb_pull(msdu
, sizeof(struct htt_rx_desc
));
564 skb_put(msdu
, min(amsdu_len
, HTT_RX_MSDU_SIZE
));
565 amsdu_len
-= msdu
->len
;
567 last_frag
= ind_desc
->reserved
;
570 ath10k_warn(ar
, "invalid amsdu len %u, left %d",
571 __le16_to_cpu(ind_desc
->msdu_len
),
578 paddr
= __le64_to_cpu(ind_desc
->msdu_paddr
);
579 frag_buf
= ath10k_htt_rx_pop_paddr(htt
, paddr
);
581 ath10k_warn(ar
, "failed to pop frag-1 paddr: 0x%llx", paddr
);
585 skb_put(frag_buf
, min(amsdu_len
, HTT_RX_BUF_SIZE
));
586 ath10k_htt_append_frag_list(msdu
, frag_buf
, amsdu_len
);
588 amsdu_len
-= frag_buf
->len
;
589 prev_frag_buf
= frag_buf
;
590 last_frag
= ind_desc
->reserved
;
593 paddr
= __le64_to_cpu(ind_desc
->msdu_paddr
);
594 frag_buf
= ath10k_htt_rx_pop_paddr(htt
, paddr
);
596 ath10k_warn(ar
, "failed to pop frag-n paddr: 0x%llx",
598 prev_frag_buf
->next
= NULL
;
602 skb_put(frag_buf
, min(amsdu_len
, HTT_RX_BUF_SIZE
));
603 last_frag
= ind_desc
->reserved
;
604 amsdu_len
-= frag_buf
->len
;
606 prev_frag_buf
->next
= frag_buf
;
607 prev_frag_buf
= frag_buf
;
611 ath10k_warn(ar
, "invalid amsdu len %u, left %d",
612 __le16_to_cpu(ind_desc
->msdu_len
), amsdu_len
);
615 *msdu_desc
= ind_desc
;
617 prev_frag_buf
->next
= NULL
;
621 static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt
*htt
,
622 struct htt_rx_in_ord_ind
*ev
,
623 struct sk_buff_head
*list
)
625 struct ath10k
*ar
= htt
->ar
;
626 struct htt_rx_in_ord_msdu_desc
*msdu_desc
= ev
->msdu_descs32
;
627 struct htt_rx_desc
*rxd
;
628 struct sk_buff
*msdu
;
633 lockdep_assert_held(&htt
->rx_ring
.lock
);
635 msdu_count
= __le16_to_cpu(ev
->msdu_count
);
636 is_offload
= !!(ev
->info
& HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK
);
638 while (msdu_count
--) {
639 paddr
= __le32_to_cpu(msdu_desc
->msdu_paddr
);
641 msdu
= ath10k_htt_rx_pop_paddr(htt
, paddr
);
643 __skb_queue_purge(list
);
647 if (!is_offload
&& ar
->monitor_arvif
) {
648 ret
= ath10k_htt_rx_handle_amsdu_mon_32(htt
, msdu
,
651 __skb_queue_purge(list
);
654 __skb_queue_tail(list
, msdu
);
659 __skb_queue_tail(list
, msdu
);
662 rxd
= (void *)msdu
->data
;
664 trace_ath10k_htt_rx_desc(ar
, rxd
, sizeof(*rxd
));
666 skb_put(msdu
, sizeof(*rxd
));
667 skb_pull(msdu
, sizeof(*rxd
));
668 skb_put(msdu
, __le16_to_cpu(msdu_desc
->msdu_len
));
670 if (!(__le32_to_cpu(rxd
->attention
.flags
) &
671 RX_ATTENTION_FLAGS_MSDU_DONE
)) {
672 ath10k_warn(htt
->ar
, "tried to pop an incomplete frame, oops!\n");
683 static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt
*htt
,
684 struct htt_rx_in_ord_ind
*ev
,
685 struct sk_buff_head
*list
)
687 struct ath10k
*ar
= htt
->ar
;
688 struct htt_rx_in_ord_msdu_desc_ext
*msdu_desc
= ev
->msdu_descs64
;
689 struct htt_rx_desc
*rxd
;
690 struct sk_buff
*msdu
;
695 lockdep_assert_held(&htt
->rx_ring
.lock
);
697 msdu_count
= __le16_to_cpu(ev
->msdu_count
);
698 is_offload
= !!(ev
->info
& HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK
);
700 while (msdu_count
--) {
701 paddr
= __le64_to_cpu(msdu_desc
->msdu_paddr
);
702 msdu
= ath10k_htt_rx_pop_paddr(htt
, paddr
);
704 __skb_queue_purge(list
);
708 if (!is_offload
&& ar
->monitor_arvif
) {
709 ret
= ath10k_htt_rx_handle_amsdu_mon_64(htt
, msdu
,
712 __skb_queue_purge(list
);
715 __skb_queue_tail(list
, msdu
);
720 __skb_queue_tail(list
, msdu
);
723 rxd
= (void *)msdu
->data
;
725 trace_ath10k_htt_rx_desc(ar
, rxd
, sizeof(*rxd
));
727 skb_put(msdu
, sizeof(*rxd
));
728 skb_pull(msdu
, sizeof(*rxd
));
729 skb_put(msdu
, __le16_to_cpu(msdu_desc
->msdu_len
));
731 if (!(__le32_to_cpu(rxd
->attention
.flags
) &
732 RX_ATTENTION_FLAGS_MSDU_DONE
)) {
733 ath10k_warn(htt
->ar
, "tried to pop an incomplete frame, oops!\n");
744 int ath10k_htt_rx_alloc(struct ath10k_htt
*htt
)
746 struct ath10k
*ar
= htt
->ar
;
748 void *vaddr
, *vaddr_ring
;
750 struct timer_list
*timer
= &htt
->rx_ring
.refill_retry_timer
;
752 if (ar
->bus_param
.dev_type
== ATH10K_DEV_TYPE_HL
)
755 htt
->rx_confused
= false;
757 /* XXX: The fill level could be changed during runtime in response to
758 * the host processing latency. Is this really worth it?
760 htt
->rx_ring
.size
= HTT_RX_RING_SIZE
;
761 htt
->rx_ring
.size_mask
= htt
->rx_ring
.size
- 1;
762 htt
->rx_ring
.fill_level
= ar
->hw_params
.rx_ring_fill_level
;
764 if (!is_power_of_2(htt
->rx_ring
.size
)) {
765 ath10k_warn(ar
, "htt rx ring size is not power of 2\n");
769 htt
->rx_ring
.netbufs_ring
=
770 kcalloc(htt
->rx_ring
.size
, sizeof(struct sk_buff
*),
772 if (!htt
->rx_ring
.netbufs_ring
)
775 size
= ath10k_htt_get_rx_ring_size(htt
);
777 vaddr_ring
= dma_alloc_coherent(htt
->ar
->dev
, size
, &paddr
, GFP_KERNEL
);
781 ath10k_htt_config_paddrs_ring(htt
, vaddr_ring
);
782 htt
->rx_ring
.base_paddr
= paddr
;
784 vaddr
= dma_alloc_coherent(htt
->ar
->dev
,
785 sizeof(*htt
->rx_ring
.alloc_idx
.vaddr
),
790 htt
->rx_ring
.alloc_idx
.vaddr
= vaddr
;
791 htt
->rx_ring
.alloc_idx
.paddr
= paddr
;
792 htt
->rx_ring
.sw_rd_idx
.msdu_payld
= htt
->rx_ring
.size_mask
;
793 *htt
->rx_ring
.alloc_idx
.vaddr
= 0;
795 /* Initialize the Rx refill retry timer */
796 timer_setup(timer
, ath10k_htt_rx_ring_refill_retry
, 0);
798 spin_lock_init(&htt
->rx_ring
.lock
);
800 htt
->rx_ring
.fill_cnt
= 0;
801 htt
->rx_ring
.sw_rd_idx
.msdu_payld
= 0;
802 hash_init(htt
->rx_ring
.skb_table
);
804 skb_queue_head_init(&htt
->rx_msdus_q
);
805 skb_queue_head_init(&htt
->rx_in_ord_compl_q
);
806 skb_queue_head_init(&htt
->tx_fetch_ind_q
);
807 atomic_set(&htt
->num_mpdus_ready
, 0);
809 ath10k_dbg(ar
, ATH10K_DBG_BOOT
, "htt rx ring size %d fill_level %d\n",
810 htt
->rx_ring
.size
, htt
->rx_ring
.fill_level
);
814 dma_free_coherent(htt
->ar
->dev
,
815 ath10k_htt_get_rx_ring_size(htt
),
817 htt
->rx_ring
.base_paddr
);
819 kfree(htt
->rx_ring
.netbufs_ring
);
824 static int ath10k_htt_rx_crypto_param_len(struct ath10k
*ar
,
825 enum htt_rx_mpdu_encrypt_type type
)
828 case HTT_RX_MPDU_ENCRYPT_NONE
:
830 case HTT_RX_MPDU_ENCRYPT_WEP40
:
831 case HTT_RX_MPDU_ENCRYPT_WEP104
:
832 return IEEE80211_WEP_IV_LEN
;
833 case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC
:
834 case HTT_RX_MPDU_ENCRYPT_TKIP_WPA
:
835 return IEEE80211_TKIP_IV_LEN
;
836 case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2
:
837 return IEEE80211_CCMP_HDR_LEN
;
838 case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2
:
839 return IEEE80211_CCMP_256_HDR_LEN
;
840 case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2
:
841 case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2
:
842 return IEEE80211_GCMP_HDR_LEN
;
843 case HTT_RX_MPDU_ENCRYPT_WEP128
:
844 case HTT_RX_MPDU_ENCRYPT_WAPI
:
848 ath10k_warn(ar
, "unsupported encryption type %d\n", type
);
852 #define MICHAEL_MIC_LEN 8
854 static int ath10k_htt_rx_crypto_mic_len(struct ath10k
*ar
,
855 enum htt_rx_mpdu_encrypt_type type
)
858 case HTT_RX_MPDU_ENCRYPT_NONE
:
859 case HTT_RX_MPDU_ENCRYPT_WEP40
:
860 case HTT_RX_MPDU_ENCRYPT_WEP104
:
861 case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC
:
862 case HTT_RX_MPDU_ENCRYPT_TKIP_WPA
:
864 case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2
:
865 return IEEE80211_CCMP_MIC_LEN
;
866 case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2
:
867 return IEEE80211_CCMP_256_MIC_LEN
;
868 case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2
:
869 case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2
:
870 return IEEE80211_GCMP_MIC_LEN
;
871 case HTT_RX_MPDU_ENCRYPT_WEP128
:
872 case HTT_RX_MPDU_ENCRYPT_WAPI
:
876 ath10k_warn(ar
, "unsupported encryption type %d\n", type
);
880 static int ath10k_htt_rx_crypto_icv_len(struct ath10k
*ar
,
881 enum htt_rx_mpdu_encrypt_type type
)
884 case HTT_RX_MPDU_ENCRYPT_NONE
:
885 case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2
:
886 case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2
:
887 case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2
:
888 case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2
:
890 case HTT_RX_MPDU_ENCRYPT_WEP40
:
891 case HTT_RX_MPDU_ENCRYPT_WEP104
:
892 return IEEE80211_WEP_ICV_LEN
;
893 case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC
:
894 case HTT_RX_MPDU_ENCRYPT_TKIP_WPA
:
895 return IEEE80211_TKIP_ICV_LEN
;
896 case HTT_RX_MPDU_ENCRYPT_WEP128
:
897 case HTT_RX_MPDU_ENCRYPT_WAPI
:
901 ath10k_warn(ar
, "unsupported encryption type %d\n", type
);
905 struct amsdu_subframe_hdr
{
911 #define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)
913 static inline u8
ath10k_bw_to_mac80211_bw(u8 bw
)
919 ret
= RATE_INFO_BW_20
;
922 ret
= RATE_INFO_BW_40
;
925 ret
= RATE_INFO_BW_80
;
928 ret
= RATE_INFO_BW_160
;
935 static void ath10k_htt_rx_h_rates(struct ath10k
*ar
,
936 struct ieee80211_rx_status
*status
,
937 struct htt_rx_desc
*rxd
)
939 struct ieee80211_supported_band
*sband
;
940 u8 cck
, rate
, bw
, sgi
, mcs
, nss
;
943 u32 info1
, info2
, info3
;
945 info1
= __le32_to_cpu(rxd
->ppdu_start
.info1
);
946 info2
= __le32_to_cpu(rxd
->ppdu_start
.info2
);
947 info3
= __le32_to_cpu(rxd
->ppdu_start
.info3
);
949 preamble
= MS(info1
, RX_PPDU_START_INFO1_PREAMBLE_TYPE
);
953 /* To get legacy rate index band is required. Since band can't
954 * be undefined check if freq is non-zero.
959 cck
= info1
& RX_PPDU_START_INFO1_L_SIG_RATE_SELECT
;
960 rate
= MS(info1
, RX_PPDU_START_INFO1_L_SIG_RATE
);
961 rate
&= ~RX_PPDU_START_RATE_FLAG
;
963 sband
= &ar
->mac
.sbands
[status
->band
];
964 status
->rate_idx
= ath10k_mac_hw_rate_to_idx(sband
, rate
, cck
);
967 case HTT_RX_HT_WITH_TXBF
:
968 /* HT-SIG - Table 20-11 in info2 and info3 */
971 bw
= (info2
>> 7) & 1;
972 sgi
= (info3
>> 7) & 1;
974 status
->rate_idx
= mcs
;
975 status
->encoding
= RX_ENC_HT
;
977 status
->enc_flags
|= RX_ENC_FLAG_SHORT_GI
;
979 status
->bw
= RATE_INFO_BW_40
;
982 case HTT_RX_VHT_WITH_TXBF
:
983 /* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
988 group_id
= (info2
>> 4) & 0x3F;
990 if (GROUP_ID_IS_SU_MIMO(group_id
)) {
991 mcs
= (info3
>> 4) & 0x0F;
992 nss
= ((info2
>> 10) & 0x07) + 1;
994 /* Hardware doesn't decode VHT-SIG-B into Rx descriptor
995 * so it's impossible to decode MCS. Also since
996 * firmware consumes Group Id Management frames host
997 * has no knowledge regarding group/user position
998 * mapping so it's impossible to pick the correct Nsts
1001 * Bandwidth and SGI are valid so report the rateinfo
1002 * on best-effort basis.
1009 ath10k_warn(ar
, "invalid MCS received %u\n", mcs
);
1010 ath10k_warn(ar
, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
1011 __le32_to_cpu(rxd
->attention
.flags
),
1012 __le32_to_cpu(rxd
->mpdu_start
.info0
),
1013 __le32_to_cpu(rxd
->mpdu_start
.info1
),
1014 __le32_to_cpu(rxd
->msdu_start
.common
.info0
),
1015 __le32_to_cpu(rxd
->msdu_start
.common
.info1
),
1016 rxd
->ppdu_start
.info0
,
1017 __le32_to_cpu(rxd
->ppdu_start
.info1
),
1018 __le32_to_cpu(rxd
->ppdu_start
.info2
),
1019 __le32_to_cpu(rxd
->ppdu_start
.info3
),
1020 __le32_to_cpu(rxd
->ppdu_start
.info4
));
1022 ath10k_warn(ar
, "msdu end %08x mpdu end %08x\n",
1023 __le32_to_cpu(rxd
->msdu_end
.common
.info0
),
1024 __le32_to_cpu(rxd
->mpdu_end
.info0
));
1026 ath10k_dbg_dump(ar
, ATH10K_DBG_HTT_DUMP
, NULL
,
1027 "rx desc msdu payload: ",
1028 rxd
->msdu_payload
, 50);
1031 status
->rate_idx
= mcs
;
1035 status
->enc_flags
|= RX_ENC_FLAG_SHORT_GI
;
1037 status
->bw
= ath10k_bw_to_mac80211_bw(bw
);
1038 status
->encoding
= RX_ENC_VHT
;
1045 static struct ieee80211_channel
*
1046 ath10k_htt_rx_h_peer_channel(struct ath10k
*ar
, struct htt_rx_desc
*rxd
)
1048 struct ath10k_peer
*peer
;
1049 struct ath10k_vif
*arvif
;
1050 struct cfg80211_chan_def def
;
1053 lockdep_assert_held(&ar
->data_lock
);
1058 if (rxd
->attention
.flags
&
1059 __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID
))
1062 if (!(rxd
->msdu_end
.common
.info0
&
1063 __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU
)))
1066 peer_id
= MS(__le32_to_cpu(rxd
->mpdu_start
.info0
),
1067 RX_MPDU_START_INFO0_PEER_IDX
);
1069 peer
= ath10k_peer_find_by_id(ar
, peer_id
);
1073 arvif
= ath10k_get_arvif(ar
, peer
->vdev_id
);
1074 if (WARN_ON_ONCE(!arvif
))
1077 if (ath10k_mac_vif_chan(arvif
->vif
, &def
))
1083 static struct ieee80211_channel
*
1084 ath10k_htt_rx_h_vdev_channel(struct ath10k
*ar
, u32 vdev_id
)
1086 struct ath10k_vif
*arvif
;
1087 struct cfg80211_chan_def def
;
1089 lockdep_assert_held(&ar
->data_lock
);
1091 list_for_each_entry(arvif
, &ar
->arvifs
, list
) {
1092 if (arvif
->vdev_id
== vdev_id
&&
1093 ath10k_mac_vif_chan(arvif
->vif
, &def
) == 0)
1101 ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw
*hw
,
1102 struct ieee80211_chanctx_conf
*conf
,
1105 struct cfg80211_chan_def
*def
= data
;
1110 static struct ieee80211_channel
*
1111 ath10k_htt_rx_h_any_channel(struct ath10k
*ar
)
1113 struct cfg80211_chan_def def
= {};
1115 ieee80211_iter_chan_contexts_atomic(ar
->hw
,
1116 ath10k_htt_rx_h_any_chan_iter
,
1122 static bool ath10k_htt_rx_h_channel(struct ath10k
*ar
,
1123 struct ieee80211_rx_status
*status
,
1124 struct htt_rx_desc
*rxd
,
1127 struct ieee80211_channel
*ch
;
1129 spin_lock_bh(&ar
->data_lock
);
1130 ch
= ar
->scan_channel
;
1132 ch
= ar
->rx_channel
;
1134 ch
= ath10k_htt_rx_h_peer_channel(ar
, rxd
);
1136 ch
= ath10k_htt_rx_h_vdev_channel(ar
, vdev_id
);
1138 ch
= ath10k_htt_rx_h_any_channel(ar
);
1140 ch
= ar
->tgt_oper_chan
;
1141 spin_unlock_bh(&ar
->data_lock
);
1146 status
->band
= ch
->band
;
1147 status
->freq
= ch
->center_freq
;
1152 static void ath10k_htt_rx_h_signal(struct ath10k
*ar
,
1153 struct ieee80211_rx_status
*status
,
1154 struct htt_rx_desc
*rxd
)
1158 for (i
= 0; i
< IEEE80211_MAX_CHAINS
; i
++) {
1159 status
->chains
&= ~BIT(i
);
1161 if (rxd
->ppdu_start
.rssi_chains
[i
].pri20_mhz
!= 0x80) {
1162 status
->chain_signal
[i
] = ATH10K_DEFAULT_NOISE_FLOOR
+
1163 rxd
->ppdu_start
.rssi_chains
[i
].pri20_mhz
;
1165 status
->chains
|= BIT(i
);
1169 /* FIXME: Get real NF */
1170 status
->signal
= ATH10K_DEFAULT_NOISE_FLOOR
+
1171 rxd
->ppdu_start
.rssi_comb
;
1172 status
->flag
&= ~RX_FLAG_NO_SIGNAL_VAL
;
1175 static void ath10k_htt_rx_h_mactime(struct ath10k
*ar
,
1176 struct ieee80211_rx_status
*status
,
1177 struct htt_rx_desc
*rxd
)
1179 /* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
1180 * means all prior MSDUs in a PPDU are reported to mac80211 without the
1181 * TSF. Is it worth holding frames until end of PPDU is known?
1183 * FIXME: Can we get/compute 64bit TSF?
1185 status
->mactime
= __le32_to_cpu(rxd
->ppdu_end
.common
.tsf_timestamp
);
1186 status
->flag
|= RX_FLAG_MACTIME_END
;
1189 static void ath10k_htt_rx_h_ppdu(struct ath10k
*ar
,
1190 struct sk_buff_head
*amsdu
,
1191 struct ieee80211_rx_status
*status
,
1194 struct sk_buff
*first
;
1195 struct htt_rx_desc
*rxd
;
1199 if (skb_queue_empty(amsdu
))
1202 first
= skb_peek(amsdu
);
1203 rxd
= (void *)first
->data
- sizeof(*rxd
);
1205 is_first_ppdu
= !!(rxd
->attention
.flags
&
1206 __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU
));
1207 is_last_ppdu
= !!(rxd
->attention
.flags
&
1208 __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU
));
1210 if (is_first_ppdu
) {
1211 /* New PPDU starts so clear out the old per-PPDU status. */
1213 status
->rate_idx
= 0;
1215 status
->encoding
= RX_ENC_LEGACY
;
1216 status
->bw
= RATE_INFO_BW_20
;
1218 status
->flag
&= ~RX_FLAG_MACTIME_END
;
1219 status
->flag
|= RX_FLAG_NO_SIGNAL_VAL
;
1221 status
->flag
&= ~(RX_FLAG_AMPDU_IS_LAST
);
1222 status
->flag
|= RX_FLAG_AMPDU_DETAILS
| RX_FLAG_AMPDU_LAST_KNOWN
;
1223 status
->ampdu_reference
= ar
->ampdu_reference
;
1225 ath10k_htt_rx_h_signal(ar
, status
, rxd
);
1226 ath10k_htt_rx_h_channel(ar
, status
, rxd
, vdev_id
);
1227 ath10k_htt_rx_h_rates(ar
, status
, rxd
);
1231 ath10k_htt_rx_h_mactime(ar
, status
, rxd
);
1233 /* set ampdu last segment flag */
1234 status
->flag
|= RX_FLAG_AMPDU_IS_LAST
;
1235 ar
->ampdu_reference
++;
1239 static const char * const tid_to_ac
[] = {
1250 static char *ath10k_get_tid(struct ieee80211_hdr
*hdr
, char *out
, size_t size
)
1255 if (!ieee80211_is_data_qos(hdr
->frame_control
))
1258 qc
= ieee80211_get_qos_ctl(hdr
);
1259 tid
= *qc
& IEEE80211_QOS_CTL_TID_MASK
;
1261 snprintf(out
, size
, "tid %d (%s)", tid
, tid_to_ac
[tid
]);
1263 snprintf(out
, size
, "tid %d", tid
);
1268 static void ath10k_htt_rx_h_queue_msdu(struct ath10k
*ar
,
1269 struct ieee80211_rx_status
*rx_status
,
1270 struct sk_buff
*skb
)
1272 struct ieee80211_rx_status
*status
;
1274 status
= IEEE80211_SKB_RXCB(skb
);
1275 *status
= *rx_status
;
1277 skb_queue_tail(&ar
->htt
.rx_msdus_q
, skb
);
1280 static void ath10k_process_rx(struct ath10k
*ar
, struct sk_buff
*skb
)
1282 struct ieee80211_rx_status
*status
;
1283 struct ieee80211_hdr
*hdr
= (struct ieee80211_hdr
*)skb
->data
;
1286 status
= IEEE80211_SKB_RXCB(skb
);
1288 if (!(ar
->filter_flags
& FIF_FCSFAIL
) &&
1289 status
->flag
& RX_FLAG_FAILED_FCS_CRC
) {
1290 ar
->stats
.rx_crc_err_drop
++;
1291 dev_kfree_skb_any(skb
);
1295 ath10k_dbg(ar
, ATH10K_DBG_DATA
,
1296 "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
1299 ieee80211_get_SA(hdr
),
1300 ath10k_get_tid(hdr
, tid
, sizeof(tid
)),
1301 is_multicast_ether_addr(ieee80211_get_DA(hdr
)) ?
1303 (__le16_to_cpu(hdr
->seq_ctrl
) & IEEE80211_SCTL_SEQ
) >> 4,
1304 (status
->encoding
== RX_ENC_LEGACY
) ? "legacy" : "",
1305 (status
->encoding
== RX_ENC_HT
) ? "ht" : "",
1306 (status
->encoding
== RX_ENC_VHT
) ? "vht" : "",
1307 (status
->bw
== RATE_INFO_BW_40
) ? "40" : "",
1308 (status
->bw
== RATE_INFO_BW_80
) ? "80" : "",
1309 (status
->bw
== RATE_INFO_BW_160
) ? "160" : "",
1310 status
->enc_flags
& RX_ENC_FLAG_SHORT_GI
? "sgi " : "",
1314 status
->band
, status
->flag
,
1315 !!(status
->flag
& RX_FLAG_FAILED_FCS_CRC
),
1316 !!(status
->flag
& RX_FLAG_MMIC_ERROR
),
1317 !!(status
->flag
& RX_FLAG_AMSDU_MORE
));
1318 ath10k_dbg_dump(ar
, ATH10K_DBG_HTT_DUMP
, NULL
, "rx skb: ",
1319 skb
->data
, skb
->len
);
1320 trace_ath10k_rx_hdr(ar
, skb
->data
, skb
->len
);
1321 trace_ath10k_rx_payload(ar
, skb
->data
, skb
->len
);
1323 ieee80211_rx_napi(ar
->hw
, NULL
, skb
, &ar
->napi
);
1326 static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k
*ar
,
1327 struct ieee80211_hdr
*hdr
)
1329 int len
= ieee80211_hdrlen(hdr
->frame_control
);
1331 if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING
,
1332 ar
->running_fw
->fw_file
.fw_features
))
1333 len
= round_up(len
, 4);
1338 static void ath10k_htt_rx_h_undecap_raw(struct ath10k
*ar
,
1339 struct sk_buff
*msdu
,
1340 struct ieee80211_rx_status
*status
,
1341 enum htt_rx_mpdu_encrypt_type enctype
,
1343 const u8 first_hdr
[64])
1345 struct ieee80211_hdr
*hdr
;
1346 struct htt_rx_desc
*rxd
;
1351 bool msdu_limit_err
;
1352 int bytes_aligned
= ar
->hw_params
.decap_align_bytes
;
1355 rxd
= (void *)msdu
->data
- sizeof(*rxd
);
1356 is_first
= !!(rxd
->msdu_end
.common
.info0
&
1357 __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU
));
1358 is_last
= !!(rxd
->msdu_end
.common
.info0
&
1359 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU
));
1361 /* Delivered decapped frame:
1363 * [crypto param] <-- can be trimmed if !fcs_err &&
1364 * !decrypt_err && !peer_idx_invalid
1365 * [amsdu header] <-- only if A-MSDU
1368 * [FCS] <-- at end, needs to be trimmed
1371 /* Some hardwares(QCA99x0 variants) limit number of msdus in a-msdu when
1372 * deaggregate, so that unwanted MSDU-deaggregation is avoided for
1373 * error packets. If limit exceeds, hw sends all remaining MSDUs as
1374 * a single last MSDU with this msdu limit error set.
1376 msdu_limit_err
= ath10k_rx_desc_msdu_limit_error(&ar
->hw_params
, rxd
);
1378 /* If MSDU limit error happens, then don't warn on, the partial raw MSDU
1379 * without first MSDU is expected in that case, and handled later here.
1381 /* This probably shouldn't happen but warn just in case */
1382 if (WARN_ON_ONCE(!is_first
&& !msdu_limit_err
))
1385 /* This probably shouldn't happen but warn just in case */
1386 if (WARN_ON_ONCE(!(is_first
&& is_last
) && !msdu_limit_err
))
1389 skb_trim(msdu
, msdu
->len
- FCS_LEN
);
1391 /* Push original 80211 header */
1392 if (unlikely(msdu_limit_err
)) {
1393 hdr
= (struct ieee80211_hdr
*)first_hdr
;
1394 hdr_len
= ieee80211_hdrlen(hdr
->frame_control
);
1395 crypto_len
= ath10k_htt_rx_crypto_param_len(ar
, enctype
);
1397 if (ieee80211_is_data_qos(hdr
->frame_control
)) {
1398 qos
= ieee80211_get_qos_ctl(hdr
);
1399 qos
[0] |= IEEE80211_QOS_CTL_A_MSDU_PRESENT
;
1403 memcpy(skb_push(msdu
, crypto_len
),
1404 (void *)hdr
+ round_up(hdr_len
, bytes_aligned
),
1407 memcpy(skb_push(msdu
, hdr_len
), hdr
, hdr_len
);
1410 /* In most cases this will be true for sniffed frames. It makes sense
1411 * to deliver them as-is without stripping the crypto param. This is
1412 * necessary for software based decryption.
1414 * If there's no error then the frame is decrypted. At least that is
1415 * the case for frames that come in via fragmented rx indication.
1420 /* The payload is decrypted so strip crypto params. Start from tail
1421 * since hdr is used to compute some stuff.
1424 hdr
= (void *)msdu
->data
;
1427 if (status
->flag
& RX_FLAG_IV_STRIPPED
) {
1428 skb_trim(msdu
, msdu
->len
-
1429 ath10k_htt_rx_crypto_mic_len(ar
, enctype
));
1431 skb_trim(msdu
, msdu
->len
-
1432 ath10k_htt_rx_crypto_icv_len(ar
, enctype
));
1435 if (status
->flag
& RX_FLAG_MIC_STRIPPED
)
1436 skb_trim(msdu
, msdu
->len
-
1437 ath10k_htt_rx_crypto_mic_len(ar
, enctype
));
1440 if (status
->flag
& RX_FLAG_ICV_STRIPPED
)
1441 skb_trim(msdu
, msdu
->len
-
1442 ath10k_htt_rx_crypto_icv_len(ar
, enctype
));
1446 if ((status
->flag
& RX_FLAG_MMIC_STRIPPED
) &&
1447 !ieee80211_has_morefrags(hdr
->frame_control
) &&
1448 enctype
== HTT_RX_MPDU_ENCRYPT_TKIP_WPA
)
1449 skb_trim(msdu
, msdu
->len
- MICHAEL_MIC_LEN
);
1452 if (status
->flag
& RX_FLAG_IV_STRIPPED
) {
1453 hdr_len
= ieee80211_hdrlen(hdr
->frame_control
);
1454 crypto_len
= ath10k_htt_rx_crypto_param_len(ar
, enctype
);
1456 memmove((void *)msdu
->data
+ crypto_len
,
1457 (void *)msdu
->data
, hdr_len
);
1458 skb_pull(msdu
, crypto_len
);
1462 static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k
*ar
,
1463 struct sk_buff
*msdu
,
1464 struct ieee80211_rx_status
*status
,
1465 const u8 first_hdr
[64],
1466 enum htt_rx_mpdu_encrypt_type enctype
)
1468 struct ieee80211_hdr
*hdr
;
1469 struct htt_rx_desc
*rxd
;
1474 int bytes_aligned
= ar
->hw_params
.decap_align_bytes
;
1476 /* Delivered decapped frame:
1477 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
1480 * Note: The nwifi header doesn't have QoS Control and is
1481 * (always?) a 3addr frame.
1483 * Note2: There's no A-MSDU subframe header. Even if it's part
1487 /* pull decapped header and copy SA & DA */
1488 rxd
= (void *)msdu
->data
- sizeof(*rxd
);
1490 l3_pad_bytes
= ath10k_rx_desc_get_l3_pad_bytes(&ar
->hw_params
, rxd
);
1491 skb_put(msdu
, l3_pad_bytes
);
1493 hdr
= (struct ieee80211_hdr
*)(msdu
->data
+ l3_pad_bytes
);
1495 hdr_len
= ath10k_htt_rx_nwifi_hdrlen(ar
, hdr
);
1496 ether_addr_copy(da
, ieee80211_get_DA(hdr
));
1497 ether_addr_copy(sa
, ieee80211_get_SA(hdr
));
1498 skb_pull(msdu
, hdr_len
);
1500 /* push original 802.11 header */
1501 hdr
= (struct ieee80211_hdr
*)first_hdr
;
1502 hdr_len
= ieee80211_hdrlen(hdr
->frame_control
);
1504 if (!(status
->flag
& RX_FLAG_IV_STRIPPED
)) {
1505 memcpy(skb_push(msdu
,
1506 ath10k_htt_rx_crypto_param_len(ar
, enctype
)),
1507 (void *)hdr
+ round_up(hdr_len
, bytes_aligned
),
1508 ath10k_htt_rx_crypto_param_len(ar
, enctype
));
1511 memcpy(skb_push(msdu
, hdr_len
), hdr
, hdr_len
);
1513 /* original 802.11 header has a different DA and in
1514 * case of 4addr it may also have different SA
1516 hdr
= (struct ieee80211_hdr
*)msdu
->data
;
1517 ether_addr_copy(ieee80211_get_DA(hdr
), da
);
1518 ether_addr_copy(ieee80211_get_SA(hdr
), sa
);
1521 static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k
*ar
,
1522 struct sk_buff
*msdu
,
1523 enum htt_rx_mpdu_encrypt_type enctype
)
1525 struct ieee80211_hdr
*hdr
;
1526 struct htt_rx_desc
*rxd
;
1527 size_t hdr_len
, crypto_len
;
1529 bool is_first
, is_last
, is_amsdu
;
1530 int bytes_aligned
= ar
->hw_params
.decap_align_bytes
;
1532 rxd
= (void *)msdu
->data
- sizeof(*rxd
);
1533 hdr
= (void *)rxd
->rx_hdr_status
;
1535 is_first
= !!(rxd
->msdu_end
.common
.info0
&
1536 __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU
));
1537 is_last
= !!(rxd
->msdu_end
.common
.info0
&
1538 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU
));
1539 is_amsdu
= !(is_first
&& is_last
);
1544 hdr_len
= ieee80211_hdrlen(hdr
->frame_control
);
1545 crypto_len
= ath10k_htt_rx_crypto_param_len(ar
, enctype
);
1547 rfc1042
+= round_up(hdr_len
, bytes_aligned
) +
1548 round_up(crypto_len
, bytes_aligned
);
1552 rfc1042
+= sizeof(struct amsdu_subframe_hdr
);
1557 static void ath10k_htt_rx_h_undecap_eth(struct ath10k
*ar
,
1558 struct sk_buff
*msdu
,
1559 struct ieee80211_rx_status
*status
,
1560 const u8 first_hdr
[64],
1561 enum htt_rx_mpdu_encrypt_type enctype
)
1563 struct ieee80211_hdr
*hdr
;
1570 struct htt_rx_desc
*rxd
;
1571 int bytes_aligned
= ar
->hw_params
.decap_align_bytes
;
1573 /* Delivered decapped frame:
1574 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
1578 rfc1042
= ath10k_htt_rx_h_find_rfc1042(ar
, msdu
, enctype
);
1579 if (WARN_ON_ONCE(!rfc1042
))
1582 rxd
= (void *)msdu
->data
- sizeof(*rxd
);
1583 l3_pad_bytes
= ath10k_rx_desc_get_l3_pad_bytes(&ar
->hw_params
, rxd
);
1584 skb_put(msdu
, l3_pad_bytes
);
1585 skb_pull(msdu
, l3_pad_bytes
);
1587 /* pull decapped header and copy SA & DA */
1588 eth
= (struct ethhdr
*)msdu
->data
;
1589 ether_addr_copy(da
, eth
->h_dest
);
1590 ether_addr_copy(sa
, eth
->h_source
);
1591 skb_pull(msdu
, sizeof(struct ethhdr
));
1593 /* push rfc1042/llc/snap */
1594 memcpy(skb_push(msdu
, sizeof(struct rfc1042_hdr
)), rfc1042
,
1595 sizeof(struct rfc1042_hdr
));
1597 /* push original 802.11 header */
1598 hdr
= (struct ieee80211_hdr
*)first_hdr
;
1599 hdr_len
= ieee80211_hdrlen(hdr
->frame_control
);
1601 if (!(status
->flag
& RX_FLAG_IV_STRIPPED
)) {
1602 memcpy(skb_push(msdu
,
1603 ath10k_htt_rx_crypto_param_len(ar
, enctype
)),
1604 (void *)hdr
+ round_up(hdr_len
, bytes_aligned
),
1605 ath10k_htt_rx_crypto_param_len(ar
, enctype
));
1608 memcpy(skb_push(msdu
, hdr_len
), hdr
, hdr_len
);
1610 /* original 802.11 header has a different DA and in
1611 * case of 4addr it may also have different SA
1613 hdr
= (struct ieee80211_hdr
*)msdu
->data
;
1614 ether_addr_copy(ieee80211_get_DA(hdr
), da
);
1615 ether_addr_copy(ieee80211_get_SA(hdr
), sa
);
1618 static void ath10k_htt_rx_h_undecap_snap(struct ath10k
*ar
,
1619 struct sk_buff
*msdu
,
1620 struct ieee80211_rx_status
*status
,
1621 const u8 first_hdr
[64],
1622 enum htt_rx_mpdu_encrypt_type enctype
)
1624 struct ieee80211_hdr
*hdr
;
1627 struct htt_rx_desc
*rxd
;
1628 int bytes_aligned
= ar
->hw_params
.decap_align_bytes
;
1630 /* Delivered decapped frame:
1631 * [amsdu header] <-- replaced with 802.11 hdr
1636 rxd
= (void *)msdu
->data
- sizeof(*rxd
);
1637 l3_pad_bytes
= ath10k_rx_desc_get_l3_pad_bytes(&ar
->hw_params
, rxd
);
1639 skb_put(msdu
, l3_pad_bytes
);
1640 skb_pull(msdu
, sizeof(struct amsdu_subframe_hdr
) + l3_pad_bytes
);
1642 hdr
= (struct ieee80211_hdr
*)first_hdr
;
1643 hdr_len
= ieee80211_hdrlen(hdr
->frame_control
);
1645 if (!(status
->flag
& RX_FLAG_IV_STRIPPED
)) {
1646 memcpy(skb_push(msdu
,
1647 ath10k_htt_rx_crypto_param_len(ar
, enctype
)),
1648 (void *)hdr
+ round_up(hdr_len
, bytes_aligned
),
1649 ath10k_htt_rx_crypto_param_len(ar
, enctype
));
1652 memcpy(skb_push(msdu
, hdr_len
), hdr
, hdr_len
);
1655 static void ath10k_htt_rx_h_undecap(struct ath10k
*ar
,
1656 struct sk_buff
*msdu
,
1657 struct ieee80211_rx_status
*status
,
1659 enum htt_rx_mpdu_encrypt_type enctype
,
1662 struct htt_rx_desc
*rxd
;
1663 enum rx_msdu_decap_format decap
;
1665 /* First msdu's decapped header:
1666 * [802.11 header] <-- padded to 4 bytes long
1667 * [crypto param] <-- padded to 4 bytes long
1668 * [amsdu header] <-- only if A-MSDU
1671 * Other (2nd, 3rd, ..) msdu's decapped header:
1672 * [amsdu header] <-- only if A-MSDU
1676 rxd
= (void *)msdu
->data
- sizeof(*rxd
);
1677 decap
= MS(__le32_to_cpu(rxd
->msdu_start
.common
.info1
),
1678 RX_MSDU_START_INFO1_DECAP_FORMAT
);
1681 case RX_MSDU_DECAP_RAW
:
1682 ath10k_htt_rx_h_undecap_raw(ar
, msdu
, status
, enctype
,
1683 is_decrypted
, first_hdr
);
1685 case RX_MSDU_DECAP_NATIVE_WIFI
:
1686 ath10k_htt_rx_h_undecap_nwifi(ar
, msdu
, status
, first_hdr
,
1689 case RX_MSDU_DECAP_ETHERNET2_DIX
:
1690 ath10k_htt_rx_h_undecap_eth(ar
, msdu
, status
, first_hdr
, enctype
);
1692 case RX_MSDU_DECAP_8023_SNAP_LLC
:
1693 ath10k_htt_rx_h_undecap_snap(ar
, msdu
, status
, first_hdr
,
1699 static int ath10k_htt_rx_get_csum_state(struct sk_buff
*skb
)
1701 struct htt_rx_desc
*rxd
;
1703 bool is_ip4
, is_ip6
;
1704 bool is_tcp
, is_udp
;
1705 bool ip_csum_ok
, tcpudp_csum_ok
;
1707 rxd
= (void *)skb
->data
- sizeof(*rxd
);
1708 flags
= __le32_to_cpu(rxd
->attention
.flags
);
1709 info
= __le32_to_cpu(rxd
->msdu_start
.common
.info1
);
1711 is_ip4
= !!(info
& RX_MSDU_START_INFO1_IPV4_PROTO
);
1712 is_ip6
= !!(info
& RX_MSDU_START_INFO1_IPV6_PROTO
);
1713 is_tcp
= !!(info
& RX_MSDU_START_INFO1_TCP_PROTO
);
1714 is_udp
= !!(info
& RX_MSDU_START_INFO1_UDP_PROTO
);
1715 ip_csum_ok
= !(flags
& RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL
);
1716 tcpudp_csum_ok
= !(flags
& RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL
);
1718 if (!is_ip4
&& !is_ip6
)
1719 return CHECKSUM_NONE
;
1720 if (!is_tcp
&& !is_udp
)
1721 return CHECKSUM_NONE
;
1723 return CHECKSUM_NONE
;
1724 if (!tcpudp_csum_ok
)
1725 return CHECKSUM_NONE
;
1727 return CHECKSUM_UNNECESSARY
;
1730 static void ath10k_htt_rx_h_csum_offload(struct sk_buff
*msdu
)
1732 msdu
->ip_summed
= ath10k_htt_rx_get_csum_state(msdu
);
1735 static void ath10k_htt_rx_h_mpdu(struct ath10k
*ar
,
1736 struct sk_buff_head
*amsdu
,
1737 struct ieee80211_rx_status
*status
,
1738 bool fill_crypt_header
,
1740 enum ath10k_pkt_rx_err
*err
)
1742 struct sk_buff
*first
;
1743 struct sk_buff
*last
;
1744 struct sk_buff
*msdu
;
1745 struct htt_rx_desc
*rxd
;
1746 struct ieee80211_hdr
*hdr
;
1747 enum htt_rx_mpdu_encrypt_type enctype
;
1751 bool has_crypto_err
;
1753 bool has_peer_idx_invalid
;
1758 if (skb_queue_empty(amsdu
))
1761 first
= skb_peek(amsdu
);
1762 rxd
= (void *)first
->data
- sizeof(*rxd
);
1764 is_mgmt
= !!(rxd
->attention
.flags
&
1765 __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE
));
1767 enctype
= MS(__le32_to_cpu(rxd
->mpdu_start
.info0
),
1768 RX_MPDU_START_INFO0_ENCRYPT_TYPE
);
1770 /* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
1771 * decapped header. It'll be used for undecapping of each MSDU.
1773 hdr
= (void *)rxd
->rx_hdr_status
;
1774 memcpy(first_hdr
, hdr
, RX_HTT_HDR_STATUS_LEN
);
1777 memcpy(rx_hdr
, hdr
, RX_HTT_HDR_STATUS_LEN
);
1779 /* Each A-MSDU subframe will use the original header as the base and be
1780 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
1782 hdr
= (void *)first_hdr
;
1784 if (ieee80211_is_data_qos(hdr
->frame_control
)) {
1785 qos
= ieee80211_get_qos_ctl(hdr
);
1786 qos
[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT
;
1789 /* Some attention flags are valid only in the last MSDU. */
1790 last
= skb_peek_tail(amsdu
);
1791 rxd
= (void *)last
->data
- sizeof(*rxd
);
1792 attention
= __le32_to_cpu(rxd
->attention
.flags
);
1794 has_fcs_err
= !!(attention
& RX_ATTENTION_FLAGS_FCS_ERR
);
1795 has_crypto_err
= !!(attention
& RX_ATTENTION_FLAGS_DECRYPT_ERR
);
1796 has_tkip_err
= !!(attention
& RX_ATTENTION_FLAGS_TKIP_MIC_ERR
);
1797 has_peer_idx_invalid
= !!(attention
& RX_ATTENTION_FLAGS_PEER_IDX_INVALID
);
1799 /* Note: If hardware captures an encrypted frame that it can't decrypt,
1800 * e.g. due to fcs error, missing peer or invalid key data it will
1801 * report the frame as raw.
1803 is_decrypted
= (enctype
!= HTT_RX_MPDU_ENCRYPT_NONE
&&
1806 !has_peer_idx_invalid
);
1808 /* Clear per-MPDU flags while leaving per-PPDU flags intact. */
1809 status
->flag
&= ~(RX_FLAG_FAILED_FCS_CRC
|
1810 RX_FLAG_MMIC_ERROR
|
1812 RX_FLAG_IV_STRIPPED
|
1813 RX_FLAG_ONLY_MONITOR
|
1814 RX_FLAG_MMIC_STRIPPED
);
1817 status
->flag
|= RX_FLAG_FAILED_FCS_CRC
;
1820 status
->flag
|= RX_FLAG_MMIC_ERROR
;
1824 *err
= ATH10K_PKT_RX_ERR_FCS
;
1825 else if (has_tkip_err
)
1826 *err
= ATH10K_PKT_RX_ERR_TKIP
;
1827 else if (has_crypto_err
)
1828 *err
= ATH10K_PKT_RX_ERR_CRYPT
;
1829 else if (has_peer_idx_invalid
)
1830 *err
= ATH10K_PKT_RX_ERR_PEER_IDX_INVAL
;
1833 /* Firmware reports all necessary management frames via WMI already.
1834 * They are not reported to monitor interfaces at all so pass the ones
1835 * coming via HTT to monitor interfaces instead. This simplifies
1839 status
->flag
|= RX_FLAG_ONLY_MONITOR
;
1842 status
->flag
|= RX_FLAG_DECRYPTED
;
1844 if (likely(!is_mgmt
))
1845 status
->flag
|= RX_FLAG_MMIC_STRIPPED
;
1847 if (fill_crypt_header
)
1848 status
->flag
|= RX_FLAG_MIC_STRIPPED
|
1849 RX_FLAG_ICV_STRIPPED
;
1851 status
->flag
|= RX_FLAG_IV_STRIPPED
;
1854 skb_queue_walk(amsdu
, msdu
) {
1855 ath10k_htt_rx_h_csum_offload(msdu
);
1856 ath10k_htt_rx_h_undecap(ar
, msdu
, status
, first_hdr
, enctype
,
1859 /* Undecapping involves copying the original 802.11 header back
1860 * to sk_buff. If frame is protected and hardware has decrypted
1861 * it then remove the protected bit.
1868 if (fill_crypt_header
)
1871 hdr
= (void *)msdu
->data
;
1872 hdr
->frame_control
&= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED
);
1876 static void ath10k_htt_rx_h_enqueue(struct ath10k
*ar
,
1877 struct sk_buff_head
*amsdu
,
1878 struct ieee80211_rx_status
*status
)
1880 struct sk_buff
*msdu
;
1881 struct sk_buff
*first_subframe
;
1883 first_subframe
= skb_peek(amsdu
);
1885 while ((msdu
= __skb_dequeue(amsdu
))) {
1886 /* Setup per-MSDU flags */
1887 if (skb_queue_empty(amsdu
))
1888 status
->flag
&= ~RX_FLAG_AMSDU_MORE
;
1890 status
->flag
|= RX_FLAG_AMSDU_MORE
;
1892 if (msdu
== first_subframe
) {
1893 first_subframe
= NULL
;
1894 status
->flag
&= ~RX_FLAG_ALLOW_SAME_PN
;
1896 status
->flag
|= RX_FLAG_ALLOW_SAME_PN
;
1899 ath10k_htt_rx_h_queue_msdu(ar
, status
, msdu
);
1903 static int ath10k_unchain_msdu(struct sk_buff_head
*amsdu
,
1904 unsigned long *unchain_cnt
)
1906 struct sk_buff
*skb
, *first
;
1909 int amsdu_len
= skb_queue_len(amsdu
);
1911 /* TODO: Might could optimize this by using
1912 * skb_try_coalesce or similar method to
1913 * decrease copying, or maybe get mac80211 to
1914 * provide a way to just receive a list of
1918 first
= __skb_dequeue(amsdu
);
1920 /* Allocate total length all at once. */
1921 skb_queue_walk(amsdu
, skb
)
1922 total_len
+= skb
->len
;
1924 space
= total_len
- skb_tailroom(first
);
1926 (pskb_expand_head(first
, 0, space
, GFP_ATOMIC
) < 0)) {
1927 /* TODO: bump some rx-oom error stat */
1928 /* put it back together so we can free the
1929 * whole list at once.
1931 __skb_queue_head(amsdu
, first
);
1935 /* Walk list again, copying contents into
1938 while ((skb
= __skb_dequeue(amsdu
))) {
1939 skb_copy_from_linear_data(skb
, skb_put(first
, skb
->len
),
1941 dev_kfree_skb_any(skb
);
1944 __skb_queue_head(amsdu
, first
);
1946 *unchain_cnt
+= amsdu_len
- 1;
1951 static void ath10k_htt_rx_h_unchain(struct ath10k
*ar
,
1952 struct sk_buff_head
*amsdu
,
1953 unsigned long *drop_cnt
,
1954 unsigned long *unchain_cnt
)
1956 struct sk_buff
*first
;
1957 struct htt_rx_desc
*rxd
;
1958 enum rx_msdu_decap_format decap
;
1960 first
= skb_peek(amsdu
);
1961 rxd
= (void *)first
->data
- sizeof(*rxd
);
1962 decap
= MS(__le32_to_cpu(rxd
->msdu_start
.common
.info1
),
1963 RX_MSDU_START_INFO1_DECAP_FORMAT
);
1965 /* FIXME: Current unchaining logic can only handle simple case of raw
1966 * msdu chaining. If decapping is other than raw the chaining may be
1967 * more complex and this isn't handled by the current code. Don't even
1968 * try re-constructing such frames - it'll be pretty much garbage.
1970 if (decap
!= RX_MSDU_DECAP_RAW
||
1971 skb_queue_len(amsdu
) != 1 + rxd
->frag_info
.ring2_more_count
) {
1972 *drop_cnt
+= skb_queue_len(amsdu
);
1973 __skb_queue_purge(amsdu
);
1977 ath10k_unchain_msdu(amsdu
, unchain_cnt
);
1980 static bool ath10k_htt_rx_amsdu_allowed(struct ath10k
*ar
,
1981 struct sk_buff_head
*amsdu
,
1982 struct ieee80211_rx_status
*rx_status
)
1984 /* FIXME: It might be a good idea to do some fuzzy-testing to drop
1985 * invalid/dangerous frames.
1988 if (!rx_status
->freq
) {
1989 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "no channel configured; ignoring frame(s)!\n");
1993 if (test_bit(ATH10K_CAC_RUNNING
, &ar
->dev_flags
)) {
1994 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "htt rx cac running\n");
2001 static void ath10k_htt_rx_h_filter(struct ath10k
*ar
,
2002 struct sk_buff_head
*amsdu
,
2003 struct ieee80211_rx_status
*rx_status
,
2004 unsigned long *drop_cnt
)
2006 if (skb_queue_empty(amsdu
))
2009 if (ath10k_htt_rx_amsdu_allowed(ar
, amsdu
, rx_status
))
2013 *drop_cnt
+= skb_queue_len(amsdu
);
2015 __skb_queue_purge(amsdu
);
2018 static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt
*htt
)
2020 struct ath10k
*ar
= htt
->ar
;
2021 struct ieee80211_rx_status
*rx_status
= &htt
->rx_status
;
2022 struct sk_buff_head amsdu
;
2024 unsigned long drop_cnt
= 0;
2025 unsigned long unchain_cnt
= 0;
2026 unsigned long drop_cnt_filter
= 0;
2027 unsigned long msdus_to_queue
, num_msdus
;
2028 enum ath10k_pkt_rx_err err
= ATH10K_PKT_RX_ERR_MAX
;
2029 u8 first_hdr
[RX_HTT_HDR_STATUS_LEN
];
2031 __skb_queue_head_init(&amsdu
);
2033 spin_lock_bh(&htt
->rx_ring
.lock
);
2034 if (htt
->rx_confused
) {
2035 spin_unlock_bh(&htt
->rx_ring
.lock
);
2038 ret
= ath10k_htt_rx_amsdu_pop(htt
, &amsdu
);
2039 spin_unlock_bh(&htt
->rx_ring
.lock
);
2042 ath10k_warn(ar
, "rx ring became corrupted: %d\n", ret
);
2043 __skb_queue_purge(&amsdu
);
2044 /* FIXME: It's probably a good idea to reboot the
2045 * device instead of leaving it inoperable.
2047 htt
->rx_confused
= true;
2051 num_msdus
= skb_queue_len(&amsdu
);
2053 ath10k_htt_rx_h_ppdu(ar
, &amsdu
, rx_status
, 0xffff);
2055 /* only for ret = 1 indicates chained msdus */
2057 ath10k_htt_rx_h_unchain(ar
, &amsdu
, &drop_cnt
, &unchain_cnt
);
2059 ath10k_htt_rx_h_filter(ar
, &amsdu
, rx_status
, &drop_cnt_filter
);
2060 ath10k_htt_rx_h_mpdu(ar
, &amsdu
, rx_status
, true, first_hdr
, &err
);
2061 msdus_to_queue
= skb_queue_len(&amsdu
);
2062 ath10k_htt_rx_h_enqueue(ar
, &amsdu
, rx_status
);
2064 ath10k_sta_update_rx_tid_stats(ar
, first_hdr
, num_msdus
, err
,
2065 unchain_cnt
, drop_cnt
, drop_cnt_filter
,
2071 static void ath10k_htt_rx_mpdu_desc_pn_hl(struct htt_hl_rx_desc
*rx_desc
,
2072 union htt_rx_pn_t
*pn
,
2075 switch (pn_len_bits
) {
2077 pn
->pn48
= __le32_to_cpu(rx_desc
->pn_31_0
) +
2078 ((u64
)(__le32_to_cpu(rx_desc
->u0
.pn_63_32
) & 0xFFFF) << 32);
2081 pn
->pn24
= __le32_to_cpu(rx_desc
->pn_31_0
);
2086 static bool ath10k_htt_rx_pn_cmp48(union htt_rx_pn_t
*new_pn
,
2087 union htt_rx_pn_t
*old_pn
)
2089 return ((new_pn
->pn48
& 0xffffffffffffULL
) <=
2090 (old_pn
->pn48
& 0xffffffffffffULL
));
2093 static bool ath10k_htt_rx_pn_check_replay_hl(struct ath10k
*ar
,
2094 struct ath10k_peer
*peer
,
2095 struct htt_rx_indication_hl
*rx
)
2097 bool last_pn_valid
, pn_invalid
= false;
2098 enum htt_txrx_sec_cast_type sec_index
;
2099 enum htt_security_types sec_type
;
2100 union htt_rx_pn_t new_pn
= {0};
2101 struct htt_hl_rx_desc
*rx_desc
;
2102 union htt_rx_pn_t
*last_pn
;
2103 u32 rx_desc_info
, tid
;
2104 int num_mpdu_ranges
;
2106 lockdep_assert_held(&ar
->data_lock
);
2111 if (!(rx
->fw_desc
.flags
& FW_RX_DESC_FLAGS_FIRST_MSDU
))
2114 num_mpdu_ranges
= MS(__le32_to_cpu(rx
->hdr
.info1
),
2115 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES
);
2117 rx_desc
= (struct htt_hl_rx_desc
*)&rx
->mpdu_ranges
[num_mpdu_ranges
];
2118 rx_desc_info
= __le32_to_cpu(rx_desc
->info
);
2120 if (!MS(rx_desc_info
, HTT_RX_DESC_HL_INFO_ENCRYPTED
))
2123 tid
= MS(rx
->hdr
.info0
, HTT_RX_INDICATION_INFO0_EXT_TID
);
2124 last_pn_valid
= peer
->tids_last_pn_valid
[tid
];
2125 last_pn
= &peer
->tids_last_pn
[tid
];
2127 if (MS(rx_desc_info
, HTT_RX_DESC_HL_INFO_MCAST_BCAST
))
2128 sec_index
= HTT_TXRX_SEC_MCAST
;
2130 sec_index
= HTT_TXRX_SEC_UCAST
;
2132 sec_type
= peer
->rx_pn
[sec_index
].sec_type
;
2133 ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc
, &new_pn
, peer
->rx_pn
[sec_index
].pn_len
);
2135 if (sec_type
!= HTT_SECURITY_AES_CCMP
&&
2136 sec_type
!= HTT_SECURITY_TKIP
&&
2137 sec_type
!= HTT_SECURITY_TKIP_NOMIC
)
2141 pn_invalid
= ath10k_htt_rx_pn_cmp48(&new_pn
, last_pn
);
2143 peer
->tids_last_pn_valid
[tid
] = true;
2146 last_pn
->pn48
= new_pn
.pn48
;

static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
					 struct htt_rx_indication_hl *rx,
					 struct sk_buff *skb,
					 enum htt_rx_pn_check_type check_pn_type,
					 enum htt_rx_tkip_demic_type tkip_mic_type)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_peer *peer;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	struct fw_rx_desc_hl *fw_desc;
	enum htt_txrx_sec_cast_type sec_index;
	enum htt_security_types sec_type;
	union htt_rx_pn_t new_pn = {0};
	struct htt_hl_rx_desc *rx_desc;
	struct ieee80211_hdr *hdr;
	struct ieee80211_rx_status *rx_status;
	u16 peer_id;
	u8 rx_desc_len;
	int num_mpdu_ranges;
	size_t tot_hdr_len;
	struct ieee80211_channel *ch;
	bool pn_invalid, qos, first_msdu;
	u32 tid, rx_desc_info;

	peer_id = __le16_to_cpu(rx->hdr.peer_id);
	tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	spin_unlock_bh(&ar->data_lock);
	if (!peer && peer_id != HTT_INVALID_PEERID)
		ath10k_warn(ar, "Got RX ind from invalid peer: %u\n", peer_id);

	if (!peer)
		return true;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	mpdu_ranges = htt_rx_ind_get_mpdu_ranges_hl(rx);
	fw_desc = &rx->fw_desc;
	rx_desc_len = fw_desc->len;

	/* I have not yet seen any case where num_mpdu_ranges > 1.
	 * qcacld does not seem to handle that case either, so we introduce
	 * the same limitation here as well.
	 */
	if (num_mpdu_ranges > 1)
		ath10k_warn(ar,
			    "Unsupported number of MPDU ranges: %d, ignoring all but the first\n",
			    num_mpdu_ranges);

	if (mpdu_ranges->mpdu_range_status !=
	    HTT_RX_IND_MPDU_STATUS_OK &&
	    mpdu_ranges->mpdu_range_status !=
	    HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt mpdu_range_status %d\n",
			   mpdu_ranges->mpdu_range_status);
		goto err;
	}

	rx_desc = (struct htt_hl_rx_desc *)&rx->mpdu_ranges[num_mpdu_ranges];
	rx_desc_info = __le32_to_cpu(rx_desc->info);

	if (MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST))
		sec_index = HTT_TXRX_SEC_MCAST;
	else
		sec_index = HTT_TXRX_SEC_UCAST;

	sec_type = peer->rx_pn[sec_index].sec_type;
	first_msdu = rx->fw_desc.flags & FW_RX_DESC_FLAGS_FIRST_MSDU;

	ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);

	if (check_pn_type == HTT_RX_PN_CHECK && tid >= IEEE80211_NUM_TIDS) {
		spin_lock_bh(&ar->data_lock);
		pn_invalid = ath10k_htt_rx_pn_check_replay_hl(ar, peer, rx);
		spin_unlock_bh(&ar->data_lock);

		if (pn_invalid)
			goto err;
	}

	/* Strip off all headers before the MAC header before delivery to
	 * mac80211
	 */
	tot_hdr_len = sizeof(struct htt_resp_hdr) + sizeof(rx->hdr) +
		      sizeof(rx->ppdu) + sizeof(rx->prefix) +
		      sizeof(rx->fw_desc) +
		      sizeof(*mpdu_ranges) * num_mpdu_ranges + rx_desc_len;

	skb_pull(skb, tot_hdr_len);

	hdr = (struct ieee80211_hdr *)skb->data;
	qos = ieee80211_is_data_qos(hdr->frame_control);

	rx_status = IEEE80211_SKB_RXCB(skb);
	memset(rx_status, 0, sizeof(*rx_status));

	if (rx->ppdu.combined_rssi == 0) {
		/* SDIO firmware does not provide signal */
		rx_status->signal = 0;
		rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
	} else {
		rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
			rx->ppdu.combined_rssi;
		rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
	}

	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;
	if (!ch)
		ch = ath10k_htt_rx_h_any_channel(ar);
	if (!ch)
		ch = ar->tgt_oper_chan;
	spin_unlock_bh(&ar->data_lock);

	if (ch) {
		rx_status->band = ch->band;
		rx_status->freq = ch->center_freq;
	}

	if (rx->fw_desc.flags & FW_RX_DESC_FLAGS_LAST_MSDU)
		rx_status->flag &= ~RX_FLAG_AMSDU_MORE;
	else
		rx_status->flag |= RX_FLAG_AMSDU_MORE;

	/* Not entirely sure about this, but all frames from the chipset have
	 * the protected flag set even though they have already been decrypted.
	 * Unmasking this flag is necessary in order for mac80211 not to drop
	 * the frame.
	 * TODO: Verify this is always the case or find out a way to check
	 * if there has been hw decryption.
	 */
	if (ieee80211_has_protected(hdr->frame_control)) {
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
		rx_status->flag |= RX_FLAG_DECRYPTED |
				   RX_FLAG_IV_STRIPPED |
				   RX_FLAG_MMIC_STRIPPED;

		if (tid < IEEE80211_NUM_TIDS &&
		    first_msdu &&
		    check_pn_type == HTT_RX_PN_CHECK &&
		   (sec_type == HTT_SECURITY_AES_CCMP ||
		    sec_type == HTT_SECURITY_TKIP ||
		    sec_type == HTT_SECURITY_TKIP_NOMIC)) {
			u8 offset, *ivp, i;
			s8 keyidx = 0;
			__le64 pn48 = cpu_to_le64(new_pn.pn48);

			hdr = (struct ieee80211_hdr *)skb->data;
			offset = ieee80211_hdrlen(hdr->frame_control);
			hdr->frame_control |= __cpu_to_le16(IEEE80211_FCTL_PROTECTED);
			rx_status->flag &= ~RX_FLAG_IV_STRIPPED;

			memmove(skb->data - IEEE80211_CCMP_HDR_LEN,
				skb->data, offset);
			skb_push(skb, IEEE80211_CCMP_HDR_LEN);
			ivp = skb->data + offset;
			memset(skb->data + offset, 0, IEEE80211_CCMP_HDR_LEN);
			/* Ext IV */
			ivp[IEEE80211_WEP_IV_LEN - 1] |= ATH10K_IEEE80211_EXTIV;

			for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
				if (peer->keys[i] &&
				    peer->keys[i]->flags & IEEE80211_KEY_FLAG_PAIRWISE)
					keyidx = peer->keys[i]->keyidx;
			}

			/* Key ID */
			ivp[IEEE80211_WEP_IV_LEN - 1] |= keyidx << 6;

			if (sec_type == HTT_SECURITY_AES_CCMP) {
				rx_status->flag |= RX_FLAG_MIC_STRIPPED;
				/* pn 0, pn 1 */
				memcpy(skb->data + offset, &pn48, 2);
				/* pn 2, pn 3, pn 4, pn 5 */
				memcpy(skb->data + offset + 4, ((u8 *)&pn48) + 2, 4);
			} else {
				rx_status->flag |= RX_FLAG_ICV_STRIPPED;
				/* TSC 0 */
				memcpy(skb->data + offset + 2, &pn48, 1);
				/* TSC 1 */
				memcpy(skb->data + offset, ((u8 *)&pn48) + 1, 1);
				/* TSC 2, TSC 3, TSC 4, TSC 5 */
				memcpy(skb->data + offset + 4, ((u8 *)&pn48) + 2, 4);
			}
		}
	}

	if (tkip_mic_type == HTT_RX_TKIP_MIC)
		rx_status->flag &= ~RX_FLAG_IV_STRIPPED &
				   ~RX_FLAG_MMIC_STRIPPED;

	if (mpdu_ranges->mpdu_range_status == HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR)
		rx_status->flag |= RX_FLAG_MMIC_ERROR;

	if (!qos && tid < IEEE80211_NUM_TIDS) {
		u8 offset;
		__le16 qos_ctrl = 0;

		hdr = (struct ieee80211_hdr *)skb->data;
		offset = ieee80211_hdrlen(hdr->frame_control);

		hdr->frame_control |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
		memmove(skb->data - IEEE80211_QOS_CTL_LEN, skb->data, offset);
		skb_push(skb, IEEE80211_QOS_CTL_LEN);
		qos_ctrl = cpu_to_le16(tid);
		memcpy(skb->data + offset, &qos_ctrl, IEEE80211_QOS_CTL_LEN);
	}

	if (ar->napi.dev)
		ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
	else
		ieee80211_rx_ni(ar->hw, skb);

	/* We have delivered the skb to the upper layers (mac80211) so we
	 * must not free it.
	 */
	return false;
err:
	/* Tell the caller that it must free the skb since we have not
	 * consumed it
	 */
	return true;
}
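
/* The helpers below undo the crypto framing on defragmented HL frames:
 * each one strips the per-frame IV in place (and trims the trailing
 * MIC/ICV where present) so that mac80211 receives a plain, already
 * decrypted 802.11 frame.
 */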

static int ath10k_htt_rx_frag_tkip_decap_nomic(struct sk_buff *skb,
					       u16 head_len,
					       u16 hdr_len)
{
	u8 *ivp, *orig_hdr;

	orig_hdr = skb->data;
	ivp = orig_hdr + hdr_len + head_len;

	/* the ExtIV bit is always set to 1 for TKIP */
	if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
		return -EINVAL;

	memmove(orig_hdr + IEEE80211_TKIP_IV_LEN, orig_hdr, head_len + hdr_len);
	skb_pull(skb, IEEE80211_TKIP_IV_LEN);
	skb_trim(skb, skb->len - ATH10K_IEEE80211_TKIP_MICLEN);
	return 0;
}

static int ath10k_htt_rx_frag_tkip_decap_withmic(struct sk_buff *skb,
						 u16 head_len,
						 u16 hdr_len)
{
	u8 *ivp, *orig_hdr;

	orig_hdr = skb->data;
	ivp = orig_hdr + hdr_len + head_len;

	/* the ExtIV bit is always set to 1 for TKIP */
	if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
		return -EINVAL;

	memmove(orig_hdr + IEEE80211_TKIP_IV_LEN, orig_hdr, head_len + hdr_len);
	skb_pull(skb, IEEE80211_TKIP_IV_LEN);
	skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN);
	return 0;
}

static int ath10k_htt_rx_frag_ccmp_decap(struct sk_buff *skb,
					 u16 head_len,
					 u16 hdr_len)
{
	u8 *ivp, *orig_hdr;

	orig_hdr = skb->data;
	ivp = orig_hdr + hdr_len + head_len;

	/* the ExtIV bit is always set to 1 for CCMP */
	if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
		return -EINVAL;

	skb_trim(skb, skb->len - IEEE80211_CCMP_MIC_LEN);
	memmove(orig_hdr + IEEE80211_CCMP_HDR_LEN, orig_hdr, head_len + hdr_len);
	skb_pull(skb, IEEE80211_CCMP_HDR_LEN);
	return 0;
}

static int ath10k_htt_rx_frag_wep_decap(struct sk_buff *skb,
					u16 head_len,
					u16 hdr_len)
{
	u8 *orig_hdr;

	orig_hdr = skb->data;

	memmove(orig_hdr + IEEE80211_WEP_IV_LEN,
		orig_hdr, head_len + hdr_len);
	skb_pull(skb, IEEE80211_WEP_IV_LEN);
	skb_trim(skb, skb->len - IEEE80211_WEP_ICV_LEN);
	return 0;
}

static bool ath10k_htt_rx_proc_rx_frag_ind_hl(struct ath10k_htt *htt,
					      struct htt_rx_fragment_indication *rx,
					      struct sk_buff *skb)
{
	struct ath10k *ar = htt->ar;
	enum htt_rx_tkip_demic_type tkip_mic = HTT_RX_NON_TKIP_MIC;
	enum htt_txrx_sec_cast_type sec_index;
	struct htt_rx_indication_hl *rx_hl;
	enum htt_security_types sec_type;
	u32 tid, frag, seq, rx_desc_info;
	union htt_rx_pn_t new_pn = {0};
	struct htt_hl_rx_desc *rx_desc;
	u16 peer_id, sc, hdr_space;
	union htt_rx_pn_t *last_pn;
	struct ieee80211_hdr *hdr;
	int ret, num_mpdu_ranges;
	struct ath10k_peer *peer;
	struct htt_resp *resp;
	size_t tot_hdr_len;

	resp = (struct htt_resp *)(skb->data + HTT_RX_FRAG_IND_INFO0_HEADER_LEN);
	skb_pull(skb, HTT_RX_FRAG_IND_INFO0_HEADER_LEN);
	skb_trim(skb, skb->len - FCS_LEN);

	peer_id = __le16_to_cpu(rx->peer_id);
	rx_hl = (struct htt_rx_indication_hl *)(&resp->rx_ind_hl);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer: %u\n", peer_id);
		goto err;
	}

	num_mpdu_ranges = MS(__le32_to_cpu(rx_hl->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);

	tot_hdr_len = sizeof(struct htt_resp_hdr) +
		      sizeof(rx_hl->hdr) +
		      sizeof(rx_hl->ppdu) +
		      sizeof(rx_hl->prefix) +
		      sizeof(rx_hl->fw_desc) +
		      sizeof(struct htt_rx_indication_mpdu_range) * num_mpdu_ranges;

	tid = MS(rx_hl->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
	rx_desc = (struct htt_hl_rx_desc *)(skb->data + tot_hdr_len);
	rx_desc_info = __le32_to_cpu(rx_desc->info);

	if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED)) {
		spin_unlock_bh(&ar->data_lock);
		return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
						    HTT_RX_NON_PN_CHECK,
						    HTT_RX_NON_TKIP_MIC);
	}

	hdr = (struct ieee80211_hdr *)((u8 *)rx_desc + rx_hl->fw_desc.len);

	if (ieee80211_has_retry(hdr->frame_control))
		goto err;

	hdr_space = ieee80211_hdrlen(hdr->frame_control);
	sc = __le16_to_cpu(hdr->seq_ctrl);
	seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
	frag = sc & IEEE80211_SCTL_FRAG;

	sec_index = MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST) ?
		    HTT_TXRX_SEC_MCAST : HTT_TXRX_SEC_UCAST;
	sec_type = peer->rx_pn[sec_index].sec_type;
	ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);

	switch (sec_type) {
	case HTT_SECURITY_TKIP:
		tkip_mic = HTT_RX_TKIP_MIC;
		ret = ath10k_htt_rx_frag_tkip_decap_withmic(skb,
							    tot_hdr_len +
							    rx_hl->fw_desc.len,
							    hdr_space);
		if (ret)
			goto err;
		break;
	case HTT_SECURITY_TKIP_NOMIC:
		ret = ath10k_htt_rx_frag_tkip_decap_nomic(skb,
							  tot_hdr_len +
							  rx_hl->fw_desc.len,
							  hdr_space);
		if (ret)
			goto err;
		break;
	case HTT_SECURITY_AES_CCMP:
		ret = ath10k_htt_rx_frag_ccmp_decap(skb,
						    tot_hdr_len + rx_hl->fw_desc.len,
						    hdr_space);
		if (ret)
			goto err;
		break;
	case HTT_SECURITY_WEP128:
	case HTT_SECURITY_WEP104:
	case HTT_SECURITY_WEP40:
		ret = ath10k_htt_rx_frag_wep_decap(skb,
						   tot_hdr_len + rx_hl->fw_desc.len,
						   hdr_space);
		if (ret)
			goto err;
		break;
	default:
		break;
	}

	resp = (struct htt_resp *)(skb->data);

	if (sec_type != HTT_SECURITY_AES_CCMP &&
	    sec_type != HTT_SECURITY_TKIP &&
	    sec_type != HTT_SECURITY_TKIP_NOMIC) {
		spin_unlock_bh(&ar->data_lock);
		return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
						    HTT_RX_NON_PN_CHECK,
						    HTT_RX_NON_TKIP_MIC);
	}

	last_pn = &peer->frag_tids_last_pn[tid];

	if (frag == 0) {
		if (ath10k_htt_rx_pn_check_replay_hl(ar, peer, &resp->rx_ind_hl))
			goto err;

		last_pn->pn48 = new_pn.pn48;
		peer->frag_tids_seq[tid] = seq;
	} else if (sec_type == HTT_SECURITY_AES_CCMP) {
		if (seq != peer->frag_tids_seq[tid])
			goto err;

		if (new_pn.pn48 != last_pn->pn48 + 1)
			goto err;

		last_pn->pn48 = new_pn.pn48;
		last_pn = &peer->tids_last_pn[tid];
		last_pn->pn48 = new_pn.pn48;
	}

	spin_unlock_bh(&ar->data_lock);

	return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
					    HTT_RX_NON_PN_CHECK, tkip_mic);

err:
	spin_unlock_bh(&ar->data_lock);

	/* Tell the caller that it must free the skb since we have not
	 * consumed it
	 */
	return true;
}
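
/* Low latency (LL) rx indications only carry metadata; the MSDUs themselves
 * sit in the host rx ring.  This handler just counts the announced MPDUs so
 * the NAPI poll loop can pop and process them later.
 */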

static void ath10k_htt_rx_proc_rx_ind_ll(struct ath10k_htt *htt,
					 struct htt_rx_indication *rx)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	int num_mpdu_ranges;
	int i, mpdu_count = 0;
	u16 peer_id;
	u8 tid;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	peer_id = __le16_to_cpu(rx->hdr.peer_id);
	tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);

	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
			rx, struct_size(rx, mpdu_ranges, num_mpdu_ranges));

	for (i = 0; i < num_mpdu_ranges; i++)
		mpdu_count += mpdu_ranges[i].mpdu_count;

	atomic_add(mpdu_count, &htt->num_mpdus_ready);

	ath10k_sta_update_rx_tid_stats_ampdu(ar, peer_id, tid, mpdu_ranges,
					     num_mpdu_ranges);
}

static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
				       struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_done tx_done = {};
	int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
	__le16 msdu_id, *msdus;
	bool rssi_enabled = false;
	u8 msdu_count = 0, num_airtime_records, tid;
	int i, htt_pad = 0;
	struct htt_data_tx_compl_ppdu_dur *ppdu_info;
	struct ath10k_peer *peer;
	u16 ppdu_info_offset = 0, peer_id;
	u32 tx_duration;

	switch (status) {
	case HTT_DATA_TX_STATUS_NO_ACK:
		tx_done.status = HTT_TX_COMPL_STATE_NOACK;
		break;
	case HTT_DATA_TX_STATUS_OK:
		tx_done.status = HTT_TX_COMPL_STATE_ACK;
		break;
	case HTT_DATA_TX_STATUS_DISCARD:
	case HTT_DATA_TX_STATUS_POSTPONE:
	case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
		break;
	default:
		ath10k_warn(ar, "unhandled tx completion status %d\n", status);
		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
		break;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
		   resp->data_tx_completion.num_msdus);

	msdu_count = resp->data_tx_completion.num_msdus;
	msdus = resp->data_tx_completion.msdus;
	rssi_enabled = ath10k_is_rssi_enable(&ar->hw_params, resp);

	if (rssi_enabled)
		htt_pad = ath10k_tx_data_rssi_get_pad_bytes(&ar->hw_params,
							    resp);

	for (i = 0; i < msdu_count; i++) {
		msdu_id = msdus[i];
		tx_done.msdu_id = __le16_to_cpu(msdu_id);

		if (rssi_enabled) {
			/* Total no of MSDUs should be even,
			 * if odd MSDUs are sent firmware fills
			 * last msdu id with 0xffff
			 */
			if (msdu_count & 0x01) {
				msdu_id = msdus[msdu_count + i + 1 + htt_pad];
				tx_done.ack_rssi = __le16_to_cpu(msdu_id);
			} else {
				msdu_id = msdus[msdu_count + i + htt_pad];
				tx_done.ack_rssi = __le16_to_cpu(msdu_id);
			}
		}

		/* kfifo_put: In practice firmware shouldn't fire off per-CE
		 * interrupt and main interrupt (MSI/-X range case) for the same
		 * HTC service so it should be safe to use kfifo_put w/o lock.
		 *
		 * From kfifo_put() documentation:
		 *  Note that with only one concurrent reader and one concurrent
		 *  writer, you don't need extra locking to use these macro.
		 */
		if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) {
			ath10k_txrx_tx_unref(htt, &tx_done);
		} else if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
			ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
				    tx_done.msdu_id, tx_done.status);
			ath10k_txrx_tx_unref(htt, &tx_done);
		}
	}

	if (!(resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_PPDU_DURATION_PRESENT))
		return;

	ppdu_info_offset = (msdu_count & 0x01) ? msdu_count + 1 : msdu_count;

	if (rssi_enabled)
		ppdu_info_offset += ppdu_info_offset;

	if (resp->data_tx_completion.flags2 &
	    (HTT_TX_CMPL_FLAG_PPID_PRESENT | HTT_TX_CMPL_FLAG_PA_PRESENT))
		ppdu_info_offset += 2;

	ppdu_info = (struct htt_data_tx_compl_ppdu_dur *)&msdus[ppdu_info_offset];
	num_airtime_records = FIELD_GET(HTT_TX_COMPL_PPDU_DUR_INFO0_NUM_ENTRIES_MASK,
					__le32_to_cpu(ppdu_info->info0));

	for (i = 0; i < num_airtime_records; i++) {
		struct htt_data_tx_ppdu_dur *ppdu_dur;
		u32 info0;

		ppdu_dur = &ppdu_info->ppdu_dur[i];
		info0 = __le32_to_cpu(ppdu_dur->info0);

		peer_id = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_PEER_ID_MASK,
				    info0);
		rcu_read_lock();
		spin_lock_bh(&ar->data_lock);

		peer = ath10k_peer_find_by_id(ar, peer_id);
		if (!peer || !peer->sta) {
			spin_unlock_bh(&ar->data_lock);
			rcu_read_unlock();
			continue;
		}

		tid = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_TID_MASK, info0) &
						IEEE80211_QOS_CTL_TID_MASK;
		tx_duration = __le32_to_cpu(ppdu_dur->tx_duration);

		ieee80211_sta_register_airtime(peer->sta, tid, tx_duration, 0);

		spin_unlock_bh(&ar->data_lock);
		rcu_read_unlock();
	}
}
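
/* Firmware-offloaded block ack session management: ADDBA/DELBA indications
 * are translated into mac80211 rx BA session start/stop calls for the
 * peer/TID they name.
 */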

static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_addba *ev = &resp->rx_addba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx addba tid %hu peer_id %hu size %hhu\n",
		   tid, peer_id, ev->window_size);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
		   peer->addr, tid, ev->window_size);

	ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}

static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_delba *ev = &resp->rx_delba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx delba tid %hu peer_id %hu\n",
		   tid, peer_id);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx stop rx ba session sta %pM tid %hu\n",
		   peer->addr, tid);

	ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}

static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
				       struct sk_buff_head *amsdu)
{
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;

	if (skb_queue_empty(list))
		return -ENOBUFS;

	if (WARN_ON(!skb_queue_empty(amsdu)))
		return -EINVAL;

	while ((msdu = __skb_dequeue(list))) {
		__skb_queue_tail(amsdu, msdu);

		rxd = (void *)msdu->data - sizeof(*rxd);
		if (rxd->msdu_end.common.info0 &
		    __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
			break;
	}

	msdu = skb_peek_tail(amsdu);
	rxd = (void *)msdu->data - sizeof(*rxd);
	if (!(rxd->msdu_end.common.info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
		skb_queue_splice_init(amsdu, list);
		return -EAGAIN;
	}

	return 0;
}
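
/* Offloaded rx frames carry only a short metadata header instead of a full
 * rx descriptor.  The helpers below clear the stale protected flag, realign
 * the payload and queue each frame towards mac80211.
 */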

static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
					    struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (!ieee80211_has_protected(hdr->frame_control))
		return;

	/* Offloaded frames are already decrypted but firmware insists they are
	 * protected in the 802.11 header. Strip the flag. Otherwise mac80211
	 * will drop the frame.
	 */

	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	status->flag |= RX_FLAG_DECRYPTED |
			RX_FLAG_IV_STRIPPED |
			RX_FLAG_MMIC_STRIPPED;
}

static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
				       struct sk_buff_head *list)
{
	struct ath10k_htt *htt = &ar->htt;
	struct ieee80211_rx_status *status = &htt->rx_status;
	struct htt_rx_offload_msdu *rx;
	struct sk_buff *msdu;
	size_t offset;

	while ((msdu = __skb_dequeue(list))) {
		/* Offloaded frames don't have Rx descriptor. Instead they have
		 * a short meta information header.
		 */

		rx = (void *)msdu->data;

		skb_put(msdu, sizeof(*rx));
		skb_pull(msdu, sizeof(*rx));

		if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
			ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
			dev_kfree_skb_any(msdu);
			continue;
		}

		skb_put(msdu, __le16_to_cpu(rx->msdu_len));

		/* Offloaded rx header length isn't multiple of 2 nor 4 so the
		 * actual payload is unaligned. Align the frame. Otherwise
		 * mac80211 complains. This shouldn't reduce performance much
		 * because these offloaded frames are rare.
		 */
		offset = 4 - ((unsigned long)msdu->data & 3);
		skb_put(msdu, offset);
		memmove(msdu->data + offset, msdu->data, msdu->len);
		skb_pull(msdu, offset);

		/* FIXME: The frame is NWifi. Re-construct QoS Control
		 * if possible later.
		 */

		memset(status, 0, sizeof(*status));
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		ath10k_htt_rx_h_rx_offload_prot(status, msdu);
		ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
		ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
	}
}

static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (void *)skb->data;
	struct ieee80211_rx_status *status = &htt->rx_status;
	struct sk_buff_head list;
	struct sk_buff_head amsdu;
	u16 peer_id;
	u16 msdu_count;
	u8 vdev_id;
	u8 tid;
	bool offload;
	bool frag;
	int ret;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_confused)
		return -EIO;

	skb_pull(skb, sizeof(resp->hdr));
	skb_pull(skb, sizeof(resp->rx_in_ord_ind));

	peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
	msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
	vdev_id = resp->rx_in_ord_ind.vdev_id;
	tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
	offload = !!(resp->rx_in_ord_ind.info &
			HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
	frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
		   vdev_id, peer_id, tid, offload, frag, msdu_count);

	if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs32)) {
		ath10k_warn(ar, "dropping invalid in order rx indication\n");
		return -EINVAL;
	}

	/* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
	 * extracted and processed.
	 */
	__skb_queue_head_init(&list);
	if (ar->hw_params.target_64bit)
		ret = ath10k_htt_rx_pop_paddr64_list(htt, &resp->rx_in_ord_ind,
						     &list);
	else
		ret = ath10k_htt_rx_pop_paddr32_list(htt, &resp->rx_in_ord_ind,
						     &list);

	if (ret < 0) {
		ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
		htt->rx_confused = true;
		return -EIO;
	}

	/* Offloaded frames are very different and need to be handled
	 * separately.
	 */
	if (offload)
		ath10k_htt_rx_h_rx_offload(ar, &list);

	while (!skb_queue_empty(&list)) {
		__skb_queue_head_init(&amsdu);
		ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
		switch (ret) {
		case 0:
			/* Note: The in-order indication may report interleaved
			 * frames from different PPDUs meaning reported rx rate
			 * to mac80211 isn't accurate/reliable. It's still
			 * better to report something than nothing though. This
			 * should still give an idea about rx rate to the user.
			 */
			ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
			ath10k_htt_rx_h_filter(ar, &amsdu, status, NULL);
			ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false, NULL,
					     NULL, peer_id, frag);
			ath10k_htt_rx_h_enqueue(ar, &amsdu, status);
			break;
		case -EAGAIN:
		default:
			/* Should not happen. */
			ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
			htt->rx_confused = true;
			__skb_queue_purge(&list);
			return -EIO;
		}
	}
	return ret;
}

static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
						   const __le32 *resp_ids,
						   int num_resp_ids)
{
	int i;
	u32 resp_id;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
		   num_resp_ids);

	for (i = 0; i < num_resp_ids; i++) {
		resp_id = le32_to_cpu(resp_ids[i]);

		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
			   resp_id);

		/* TODO: free resp_id */
	}
}

static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_hw *hw = ar->hw;
	struct ieee80211_txq *txq;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_fetch_record *record;
	size_t len;
	size_t max_num_bytes;
	size_t max_num_msdus;
	size_t num_bytes;
	size_t num_msdus;
	const __le32 *resp_ids;
	u16 num_records;
	u16 num_resp_ids;
	u16 peer_id;
	u8 tid;
	int ret;
	int i;
	bool may_tx;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
		return;
	}

	num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
	num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);

	len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
	len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n",
		   num_records, num_resp_ids,
		   le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));

	if (!ar->htt.tx_q_state.enabled) {
		ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
		return;
	}

	if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
		ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
		return;
	}

	rcu_read_lock();

	for (i = 0; i < num_records; i++) {
		record = &resp->tx_fetch_ind.records[i];
		peer_id = MS(le16_to_cpu(record->info),
			     HTT_TX_FETCH_RECORD_INFO_PEER_ID);
		tid = MS(le16_to_cpu(record->info),
			 HTT_TX_FETCH_RECORD_INFO_TID);
		max_num_msdus = le16_to_cpu(record->num_msdus);
		max_num_bytes = le32_to_cpu(record->num_bytes);

		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n",
			   i, peer_id, tid, max_num_msdus, max_num_bytes);

		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
			ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->data_lock);
		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
		spin_unlock_bh(&ar->data_lock);

		/* It is okay to release the lock and use txq because RCU read
		 * lock is held.
		 */

		if (unlikely(!txq)) {
			ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		num_msdus = 0;
		num_bytes = 0;

		ieee80211_txq_schedule_start(hw, txq->ac);
		may_tx = ieee80211_txq_may_transmit(hw, txq);
		while (num_msdus < max_num_msdus &&
		       num_bytes < max_num_bytes) {
			if (!may_tx)
				break;

			ret = ath10k_mac_tx_push_txq(hw, txq);
			if (ret < 0)
				break;

			num_msdus++;
			num_bytes += ret;
		}
		ieee80211_return_txq(hw, txq, false);
		ieee80211_txq_schedule_end(hw, txq->ac);

		record->num_msdus = cpu_to_le16(num_msdus);
		record->num_bytes = cpu_to_le32(num_bytes);

		ath10k_htt_tx_txq_recalc(hw, txq);
	}

	rcu_read_unlock();

	resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);

	ret = ath10k_htt_tx_fetch_resp(ar,
				       resp->tx_fetch_ind.token,
				       resp->tx_fetch_ind.fetch_seq_num,
				       resp->tx_fetch_ind.records,
				       num_records);
	if (unlikely(ret)) {
		ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
			    le32_to_cpu(resp->tx_fetch_ind.token), ret);
		/* FIXME: request fw restart */
	}

	ath10k_htt_tx_txq_sync(ar);
}
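
/* In pull-push mode the firmware may also confirm response ids it received
 * earlier; the handler below only validates the buffer length and forwards
 * the ids to the confirm helper above (freeing them is still a TODO there).
 */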

static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
					   struct sk_buff *skb)
{
	const struct htt_resp *resp = (void *)skb->data;
	size_t len;
	int num_resp_ids;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
		return;
	}

	num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
	len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
		return;
	}

	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
					       resp->tx_fetch_confirm.resp_ids,
					       num_resp_ids);
}

static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
					     struct sk_buff *skb)
{
	const struct htt_resp *resp = (void *)skb->data;
	const struct htt_tx_mode_switch_record *record;
	struct ieee80211_txq *txq;
	struct ath10k_txq *artxq;
	size_t len;
	size_t num_records;
	enum htt_tx_mode_switch_mode mode;
	bool enable;
	u16 info0;
	u16 info1;
	u16 threshold;
	u16 peer_id;
	u8 tid;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
		return;
	}

	info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
	info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);

	enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
	num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);
	mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
	threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n",
		   info0, info1, enable, num_records, mode, threshold);

	len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n");
		return;
	}

	switch (mode) {
	case HTT_TX_MODE_SWITCH_PUSH:
	case HTT_TX_MODE_SWITCH_PUSH_PULL:
		break;
	default:
		ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n",
			    mode);
		return;
	}

	if (!enable)
		return;

	ar->htt.tx_q_state.enabled = enable;
	ar->htt.tx_q_state.mode = mode;
	ar->htt.tx_q_state.num_push_allowed = threshold;

	rcu_read_lock();

	for (i = 0; i < num_records; i++) {
		record = &resp->tx_mode_switch_ind.records[i];
		info0 = le16_to_cpu(record->info0);
		peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
		tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);

		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
			ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->data_lock);
		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
		spin_unlock_bh(&ar->data_lock);

		/* It is okay to release the lock and use txq because RCU read
		 * lock is held.
		 */

		if (unlikely(!txq)) {
			ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->htt.tx_lock);
		artxq = (void *)txq->drv_priv;
		artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
		spin_unlock_bh(&ar->htt.tx_lock);
	}

	rcu_read_unlock();

	ath10k_mac_tx_push_pending(ar);
}

void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	bool release;

	release = ath10k_htt_t2h_msg_handler(ar, skb);

	/* Free the indication buffer */
	if (release)
		dev_kfree_skb_any(skb);
}

static inline s8 ath10k_get_legacy_rate_idx(struct ath10k *ar, u8 rate)
{
	static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12,
					  18, 24, 36, 48, 54};
	int i;

	for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) {
		if (rate == legacy_rates[i])
			return i;
	}

	ath10k_warn(ar, "Invalid legacy rate %hhd peer stats", rate);
	return -EINVAL;
}
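
/* Rates in the legacy table above are in Mbps, with 5 standing for the
 * 5.5 Mbps CCK rate.  The helper below folds one reported tx PPDU into the
 * per-station extended tx statistics, bucketed by rate type (legacy/HT/VHT),
 * bandwidth, NSS, guard interval and an overall rate table index.
 */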

static void
ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
				    struct ath10k_sta *arsta,
				    struct ath10k_per_peer_tx_stats *pstats,
				    s8 legacy_rate_idx)
{
	struct rate_info *txrate = &arsta->txrate;
	struct ath10k_htt_tx_stats *tx_stats;
	int idx, ht_idx, gi, mcs, bw, nss;
	unsigned long flags;

	if (!arsta->tx_stats)
		return;

	tx_stats = arsta->tx_stats;
	flags = txrate->flags;
	gi = test_bit(ATH10K_RATE_INFO_FLAGS_SGI_BIT, &flags);
	mcs = ATH10K_HW_MCS_RATE(pstats->ratecode);
	bw = txrate->bw;
	nss = txrate->nss;
	ht_idx = mcs + (nss - 1) * 8;
	idx = mcs * 8 + 8 * 10 * (nss - 1);
	idx += bw * 2 + gi;

#define STATS_OP_FMT(name) tx_stats->stats[ATH10K_STATS_TYPE_##name]

	if (txrate->flags & RATE_INFO_FLAGS_VHT_MCS) {
		STATS_OP_FMT(SUCC).vht[0][mcs] += pstats->succ_bytes;
		STATS_OP_FMT(SUCC).vht[1][mcs] += pstats->succ_pkts;
		STATS_OP_FMT(FAIL).vht[0][mcs] += pstats->failed_bytes;
		STATS_OP_FMT(FAIL).vht[1][mcs] += pstats->failed_pkts;
		STATS_OP_FMT(RETRY).vht[0][mcs] += pstats->retry_bytes;
		STATS_OP_FMT(RETRY).vht[1][mcs] += pstats->retry_pkts;
	} else if (txrate->flags & RATE_INFO_FLAGS_MCS) {
		STATS_OP_FMT(SUCC).ht[0][ht_idx] += pstats->succ_bytes;
		STATS_OP_FMT(SUCC).ht[1][ht_idx] += pstats->succ_pkts;
		STATS_OP_FMT(FAIL).ht[0][ht_idx] += pstats->failed_bytes;
		STATS_OP_FMT(FAIL).ht[1][ht_idx] += pstats->failed_pkts;
		STATS_OP_FMT(RETRY).ht[0][ht_idx] += pstats->retry_bytes;
		STATS_OP_FMT(RETRY).ht[1][ht_idx] += pstats->retry_pkts;
	} else {
		mcs = legacy_rate_idx;

		STATS_OP_FMT(SUCC).legacy[0][mcs] += pstats->succ_bytes;
		STATS_OP_FMT(SUCC).legacy[1][mcs] += pstats->succ_pkts;
		STATS_OP_FMT(FAIL).legacy[0][mcs] += pstats->failed_bytes;
		STATS_OP_FMT(FAIL).legacy[1][mcs] += pstats->failed_pkts;
		STATS_OP_FMT(RETRY).legacy[0][mcs] += pstats->retry_bytes;
		STATS_OP_FMT(RETRY).legacy[1][mcs] += pstats->retry_pkts;
	}

	if (ATH10K_HW_AMPDU(pstats->flags)) {
		tx_stats->ba_fails += ATH10K_HW_BA_FAIL(pstats->flags);

		if (txrate->flags & RATE_INFO_FLAGS_MCS) {
			STATS_OP_FMT(AMPDU).ht[0][ht_idx] +=
				pstats->succ_bytes + pstats->retry_bytes;
			STATS_OP_FMT(AMPDU).ht[1][ht_idx] +=
				pstats->succ_pkts + pstats->retry_pkts;
		} else {
			STATS_OP_FMT(AMPDU).vht[0][mcs] +=
				pstats->succ_bytes + pstats->retry_bytes;
			STATS_OP_FMT(AMPDU).vht[1][mcs] +=
				pstats->succ_pkts + pstats->retry_pkts;
		}
		STATS_OP_FMT(AMPDU).bw[0][bw] +=
			pstats->succ_bytes + pstats->retry_bytes;
		STATS_OP_FMT(AMPDU).nss[0][nss - 1] +=
			pstats->succ_bytes + pstats->retry_bytes;
		STATS_OP_FMT(AMPDU).gi[0][gi] +=
			pstats->succ_bytes + pstats->retry_bytes;
		STATS_OP_FMT(AMPDU).rate_table[0][idx] +=
			pstats->succ_bytes + pstats->retry_bytes;
		STATS_OP_FMT(AMPDU).bw[1][bw] +=
			pstats->succ_pkts + pstats->retry_pkts;
		STATS_OP_FMT(AMPDU).nss[1][nss - 1] +=
			pstats->succ_pkts + pstats->retry_pkts;
		STATS_OP_FMT(AMPDU).gi[1][gi] +=
			pstats->succ_pkts + pstats->retry_pkts;
		STATS_OP_FMT(AMPDU).rate_table[1][idx] +=
			pstats->succ_pkts + pstats->retry_pkts;
	} else {
		tx_stats->ack_fails +=
				ATH10K_HW_BA_FAIL(pstats->flags);
	}

	STATS_OP_FMT(SUCC).bw[0][bw] += pstats->succ_bytes;
	STATS_OP_FMT(SUCC).nss[0][nss - 1] += pstats->succ_bytes;
	STATS_OP_FMT(SUCC).gi[0][gi] += pstats->succ_bytes;

	STATS_OP_FMT(SUCC).bw[1][bw] += pstats->succ_pkts;
	STATS_OP_FMT(SUCC).nss[1][nss - 1] += pstats->succ_pkts;
	STATS_OP_FMT(SUCC).gi[1][gi] += pstats->succ_pkts;

	STATS_OP_FMT(FAIL).bw[0][bw] += pstats->failed_bytes;
	STATS_OP_FMT(FAIL).nss[0][nss - 1] += pstats->failed_bytes;
	STATS_OP_FMT(FAIL).gi[0][gi] += pstats->failed_bytes;

	STATS_OP_FMT(FAIL).bw[1][bw] += pstats->failed_pkts;
	STATS_OP_FMT(FAIL).nss[1][nss - 1] += pstats->failed_pkts;
	STATS_OP_FMT(FAIL).gi[1][gi] += pstats->failed_pkts;

	STATS_OP_FMT(RETRY).bw[0][bw] += pstats->retry_bytes;
	STATS_OP_FMT(RETRY).nss[0][nss - 1] += pstats->retry_bytes;
	STATS_OP_FMT(RETRY).gi[0][gi] += pstats->retry_bytes;

	STATS_OP_FMT(RETRY).bw[1][bw] += pstats->retry_pkts;
	STATS_OP_FMT(RETRY).nss[1][nss - 1] += pstats->retry_pkts;
	STATS_OP_FMT(RETRY).gi[1][gi] += pstats->retry_pkts;

	if (txrate->flags >= RATE_INFO_FLAGS_MCS) {
		STATS_OP_FMT(SUCC).rate_table[0][idx] += pstats->succ_bytes;
		STATS_OP_FMT(SUCC).rate_table[1][idx] += pstats->succ_pkts;
		STATS_OP_FMT(FAIL).rate_table[0][idx] += pstats->failed_bytes;
		STATS_OP_FMT(FAIL).rate_table[1][idx] += pstats->failed_pkts;
		STATS_OP_FMT(RETRY).rate_table[0][idx] += pstats->retry_bytes;
		STATS_OP_FMT(RETRY).rate_table[1][idx] += pstats->retry_pkts;
	}

	tx_stats->tx_duration += pstats->duration;
}

static void
ath10k_update_per_peer_tx_stats(struct ath10k *ar,
				struct ieee80211_sta *sta,
				struct ath10k_per_peer_tx_stats *peer_stats)
{
	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
	struct ieee80211_chanctx_conf *conf = NULL;
	u8 rate = 0, sgi;
	s8 rate_idx = 0;
	bool skip_auto_rate;
	struct rate_info txrate;

	lockdep_assert_held(&ar->data_lock);

	txrate.flags = ATH10K_HW_PREAMBLE(peer_stats->ratecode);
	txrate.bw = ATH10K_HW_BW(peer_stats->flags);
	txrate.nss = ATH10K_HW_NSS(peer_stats->ratecode);
	txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode);
	sgi = ATH10K_HW_GI(peer_stats->flags);
	skip_auto_rate = ATH10K_FW_SKIPPED_RATE_CTRL(peer_stats->flags);

	/* Firmware's rate control skips broadcast/management frames,
	 * if host has configure fixed rates and in some other special cases.
	 */
	if (skip_auto_rate)
		return;

	if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) {
		ath10k_warn(ar, "Invalid VHT mcs %hhd peer stats", txrate.mcs);
		return;
	}

	if (txrate.flags == WMI_RATE_PREAMBLE_HT &&
	    (txrate.mcs > 7 || txrate.nss < 1)) {
		ath10k_warn(ar, "Invalid HT mcs %hhd nss %hhd peer stats",
			    txrate.mcs, txrate.nss);
		return;
	}

	memset(&arsta->txrate, 0, sizeof(arsta->txrate));
	memset(&arsta->tx_info.status, 0, sizeof(arsta->tx_info.status));
	if (txrate.flags == WMI_RATE_PREAMBLE_CCK ||
	    txrate.flags == WMI_RATE_PREAMBLE_OFDM) {
		rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode);
		/* This is hacky, FW sends CCK rate 5.5Mbps as 6 */
		if (rate == 6 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
			rate = 5;
		rate_idx = ath10k_get_legacy_rate_idx(ar, rate);
		if (rate_idx < 0)
			return;
		arsta->txrate.legacy = rate;
	} else if (txrate.flags == WMI_RATE_PREAMBLE_HT) {
		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
		arsta->txrate.mcs = txrate.mcs + 8 * (txrate.nss - 1);
	} else {
		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
		arsta->txrate.mcs = txrate.mcs;
	}

	switch (txrate.flags) {
	case WMI_RATE_PREAMBLE_OFDM:
		if (arsta->arvif && arsta->arvif->vif)
			conf = rcu_dereference(arsta->arvif->vif->chanctx_conf);
		if (conf && conf->def.chan->band == NL80211_BAND_5GHZ)
			arsta->tx_info.status.rates[0].idx = rate_idx - 4;
		break;
	case WMI_RATE_PREAMBLE_CCK:
		arsta->tx_info.status.rates[0].idx = rate_idx;
		if (sgi)
			arsta->tx_info.status.rates[0].flags |=
				(IEEE80211_TX_RC_USE_SHORT_PREAMBLE |
				 IEEE80211_TX_RC_SHORT_GI);
		break;
	case WMI_RATE_PREAMBLE_HT:
		arsta->tx_info.status.rates[0].idx =
				txrate.mcs + ((txrate.nss - 1) * 8);
		if (sgi)
			arsta->tx_info.status.rates[0].flags |=
					IEEE80211_TX_RC_SHORT_GI;
		arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_MCS;
		break;
	case WMI_RATE_PREAMBLE_VHT:
		ieee80211_rate_set_vht(&arsta->tx_info.status.rates[0],
				       txrate.mcs, txrate.nss);
		if (sgi)
			arsta->tx_info.status.rates[0].flags |=
						IEEE80211_TX_RC_SHORT_GI;
		arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_VHT_MCS;
		break;
	}

	arsta->txrate.nss = txrate.nss;
	arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw);
	arsta->last_tx_bitrate = cfg80211_calculate_bitrate(&arsta->txrate);
	if (sgi)
		arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;

	switch (arsta->txrate.bw) {
	case RATE_INFO_BW_40:
		arsta->tx_info.status.rates[0].flags |=
				IEEE80211_TX_RC_40_MHZ_WIDTH;
		break;
	case RATE_INFO_BW_80:
		arsta->tx_info.status.rates[0].flags |=
				IEEE80211_TX_RC_80_MHZ_WIDTH;
		break;
	}

	if (peer_stats->succ_pkts) {
		arsta->tx_info.flags = IEEE80211_TX_STAT_ACK;
		arsta->tx_info.status.rates[0].count = 1;
		ieee80211_tx_rate_update(ar->hw, sta, &arsta->tx_info);
	}

	if (ath10k_debug_is_extd_tx_stats_enabled(ar))
		ath10k_accumulate_per_peer_tx_stats(ar, arsta, peer_stats,
						    rate_idx);
}

static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
					struct sk_buff *skb)
{
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
	struct htt_per_peer_tx_stats_ind *tx_stats;
	struct ieee80211_sta *sta;
	struct ath10k_peer *peer;
	int peer_id, i;
	u8 ppdu_len, num_ppdu;

	num_ppdu = resp->peer_tx_stats.num_ppdu;
	ppdu_len = resp->peer_tx_stats.ppdu_len * sizeof(__le32);

	if (skb->len < sizeof(struct htt_resp_hdr) + num_ppdu * ppdu_len) {
		ath10k_warn(ar, "Invalid peer stats buf length %d\n", skb->len);
		return;
	}

	tx_stats = (struct htt_per_peer_tx_stats_ind *)
			(resp->peer_tx_stats.payload);
	peer_id = __le16_to_cpu(tx_stats->peer_id);

	rcu_read_lock();
	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer || !peer->sta) {
		ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n",
			    peer_id);
		goto out;
	}

	sta = peer->sta;
	for (i = 0; i < num_ppdu; i++) {
		tx_stats = (struct htt_per_peer_tx_stats_ind *)
			   (resp->peer_tx_stats.payload + i * ppdu_len);

		p_tx_stats->succ_bytes = __le32_to_cpu(tx_stats->succ_bytes);
		p_tx_stats->retry_bytes = __le32_to_cpu(tx_stats->retry_bytes);
		p_tx_stats->failed_bytes =
				__le32_to_cpu(tx_stats->failed_bytes);
		p_tx_stats->ratecode = tx_stats->ratecode;
		p_tx_stats->flags = tx_stats->flags;
		p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts);
		p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts);
		p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts);
		p_tx_stats->duration = __le16_to_cpu(tx_stats->tx_duration);

		ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
	}

out:
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();
}

static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data)
{
	struct ath10k_pktlog_hdr *hdr = (struct ath10k_pktlog_hdr *)data;
	struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
	struct ath10k_10_2_peer_tx_stats *tx_stats;
	struct ieee80211_sta *sta;
	struct ath10k_peer *peer;
	u16 log_type = __le16_to_cpu(hdr->log_type);
	u32 peer_id = 0, i;

	if (log_type != ATH_PKTLOG_TYPE_TX_STAT)
		return;

	tx_stats = (struct ath10k_10_2_peer_tx_stats *)((hdr->payload) +
		    ATH10K_10_2_TX_STATS_OFFSET);

	if (!tx_stats->tx_ppdu_cnt)
		return;

	peer_id = tx_stats->peer_id;

	rcu_read_lock();
	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer || !peer->sta) {
		ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
			    peer_id);
		goto out;
	}

	sta = peer->sta;
	for (i = 0; i < tx_stats->tx_ppdu_cnt; i++) {
		p_tx_stats->succ_bytes =
			__le16_to_cpu(tx_stats->success_bytes[i]);
		p_tx_stats->retry_bytes =
			__le16_to_cpu(tx_stats->retry_bytes[i]);
		p_tx_stats->failed_bytes =
			__le16_to_cpu(tx_stats->failed_bytes[i]);
		p_tx_stats->ratecode = tx_stats->ratecode[i];
		p_tx_stats->flags = tx_stats->flags[i];
		p_tx_stats->succ_pkts = tx_stats->success_pkts[i];
		p_tx_stats->retry_pkts = tx_stats->retry_pkts[i];
		p_tx_stats->failed_pkts = tx_stats->failed_pkts[i];

		ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
	}
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();

	return;

out:
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();
}

static int ath10k_htt_rx_pn_len(enum htt_security_types sec_type)
{
	switch (sec_type) {
	case HTT_SECURITY_TKIP:
	case HTT_SECURITY_TKIP_NOMIC:
	case HTT_SECURITY_AES_CCMP:
		return 48;
	default:
		return 0;
	}
}

static void ath10k_htt_rx_sec_ind_handler(struct ath10k *ar,
					  struct htt_security_indication *ev)
{
	enum htt_txrx_sec_cast_type sec_index;
	enum htt_security_types sec_type;
	struct ath10k_peer *peer;

	spin_lock_bh(&ar->data_lock);

	peer = ath10k_peer_find_by_id(ar, __le16_to_cpu(ev->peer_id));
	if (!peer) {
		ath10k_warn(ar, "failed to find peer id %d for security indication",
			    __le16_to_cpu(ev->peer_id));
		goto out;
	}

	sec_type = MS(ev->flags, HTT_SECURITY_TYPE);

	if (ev->flags & HTT_SECURITY_IS_UNICAST)
		sec_index = HTT_TXRX_SEC_UCAST;
	else
		sec_index = HTT_TXRX_SEC_MCAST;

	peer->rx_pn[sec_index].sec_type = sec_type;
	peer->rx_pn[sec_index].pn_len = ath10k_htt_rx_pn_len(sec_type);

	memset(peer->tids_last_pn_valid, 0, sizeof(peer->tids_last_pn_valid));
	memset(peer->tids_last_pn, 0, sizeof(peer->tids_last_pn));

out:
	spin_unlock_bh(&ar->data_lock);
}

bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	enum htt_t2h_msg_type type;

	/* confirm alignment */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn(ar, "unaligned htt message, expect trouble\n");

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
		   resp->hdr.msg_type);

	if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X",
			   resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
		return true;
	}
	type = ar->htt.t2h_msg_types[resp->hdr.msg_type];

	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
		htt->target_version_major = resp->ver_resp.major;
		htt->target_version_minor = resp->ver_resp.minor;
		complete(&htt->target_version_received);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IND:
		if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) {
			ath10k_htt_rx_proc_rx_ind_ll(htt, &resp->rx_ind);
		} else {
			skb_queue_tail(&htt->rx_indication_head, skb);
			return false;
		}
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP: {
		struct htt_peer_map_event ev = {
			.vdev_id = resp->peer_map.vdev_id,
			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
		};
		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
		ath10k_peer_map_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
		struct htt_peer_unmap_event ev = {
			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
		};
		ath10k_peer_unmap_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
		struct htt_tx_done tx_done = {};
		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
		int info = __le32_to_cpu(resp->mgmt_tx_completion.info);

		tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);

		switch (status) {
		case HTT_MGMT_TX_STATUS_OK:
			tx_done.status = HTT_TX_COMPL_STATE_ACK;
			if (test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
				     ar->wmi.svc_map) &&
			    (resp->mgmt_tx_completion.flags &
			     HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI)) {
				tx_done.ack_rssi =
				FIELD_GET(HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK,
					  info);
			}
			break;
		case HTT_MGMT_TX_STATUS_RETRY:
			tx_done.status = HTT_TX_COMPL_STATE_NOACK;
			break;
		case HTT_MGMT_TX_STATUS_DROP:
			tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
			break;
		}

		status = ath10k_txrx_tx_unref(htt, &tx_done);
		if (!status) {
			spin_lock_bh(&htt->tx_lock);
			ath10k_htt_tx_mgmt_dec_pending(htt);
			spin_unlock_bh(&htt->tx_lock);
		}
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
		ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_SEC_IND: {
		struct ath10k *ar = htt->ar;
		struct htt_security_indication *ev = &resp->security_indication;

		ath10k_htt_rx_sec_ind_handler(ar, ev);
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "sec ind peer_id %d unicast %d type %d\n",
			   __le16_to_cpu(ev->peer_id),
			   !!(ev->flags & HTT_SECURITY_IS_UNICAST),
			   MS(ev->flags, HTT_SECURITY_TYPE));
		complete(&ar->install_key_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		atomic_inc(&htt->num_mpdus_ready);

		return ath10k_htt_rx_proc_rx_frag_ind(htt,
						      &resp->rx_frag_ind,
						      skb);
	}
	case HTT_T2H_MSG_TYPE_TEST:
		break;
	case HTT_T2H_MSG_TYPE_STATS_CONF:
		trace_ath10k_htt_stats(ar, skb->data, skb->len);
		break;
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
		/* Firmware can return tx frames if it's unable to fully
		 * process them and suspects host may be able to fix it. ath10k
		 * sends all tx frames as already inspected so this shouldn't
		 * happen unless fw has a bug.
		 */
		ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
		break;
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
		ath10k_htt_rx_addba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_RX_DELBA:
		ath10k_htt_rx_delba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG: {
		trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
					skb->len -
					offsetof(struct htt_resp,
						 pktlog_msg.payload));

		if (ath10k_peer_stats_enabled(ar))
			ath10k_fetch_10_2_tx_stats(ar,
						   resp->pktlog_msg.payload);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
		/* Ignore this event because mac80211 takes care of Rx
		 * aggregation reordering.
		 */
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
		skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
		return false;
	}
	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
		break;
	case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
		u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
		u32 freq = __le32_to_cpu(resp->chan_change.freq);

		ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq);
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt chan change freq %u phymode %s\n",
			   freq, ath10k_wmi_phymode_str(phymode));
		break;
	}
	case HTT_T2H_MSG_TYPE_AGGR_CONF:
		break;
	case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
		struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);

		if (!tx_fetch_ind) {
			ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
			break;
		}
		skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
		ath10k_htt_rx_tx_fetch_confirm(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
		ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_PEER_STATS:
		ath10k_htt_fetch_peer_stats(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_EN_STATS:
	default:
		ath10k_warn(ar, "htt event (%d) not handled\n",
			    resp->hdr.msg_type);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		break;
	}

	return true;
}
EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);

void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
					     struct sk_buff *skb)
{
	trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);

static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
{
	struct sk_buff *skb;

	while (quota < budget) {
		if (skb_queue_empty(&ar->htt.rx_msdus_q))
			break;

		skb = skb_dequeue(&ar->htt.rx_msdus_q);
		if (!skb)
			break;
		ath10k_process_rx(ar, skb);
		quota++;
	}

	return quota;
}

int ath10k_htt_rx_hl_indication(struct ath10k *ar, int budget)
{
	struct htt_resp *resp;
	struct ath10k_htt *htt = &ar->htt;
	struct sk_buff *skb;
	bool release;
	int quota;

	for (quota = 0; quota < budget; quota++) {
		skb = skb_dequeue(&htt->rx_indication_head);
		if (!skb)
			break;

		resp = (struct htt_resp *)skb->data;

		release = ath10k_htt_rx_proc_rx_ind_hl(htt,
						       &resp->rx_ind_hl,
						       skb,
						       HTT_RX_PN_CHECK,
						       HTT_RX_NON_TKIP_MIC);

		if (release)
			dev_kfree_skb_any(skb);

		ath10k_dbg(ar, ATH10K_DBG_HTT, "rx indication poll pending count:%d\n",
			   skb_queue_len(&htt->rx_indication_head));
	}
	return quota;
}
EXPORT_SYMBOL(ath10k_htt_rx_hl_indication);

int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {};
	struct sk_buff_head tx_ind_q;
	struct sk_buff *skb;
	unsigned long flags;
	int quota = 0, done, ret;
	bool resched_napi = false;

	__skb_queue_head_init(&tx_ind_q);

	/* Process pending frames before dequeuing more data
	 * from hardware.
	 */
	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
	if (quota == budget) {
		resched_napi = true;
		goto exit;
	}

	while ((skb = skb_dequeue(&htt->rx_in_ord_compl_q))) {
		spin_lock_bh(&htt->rx_ring.lock);
		ret = ath10k_htt_rx_in_ord_ind(ar, skb);
		spin_unlock_bh(&htt->rx_ring.lock);

		dev_kfree_skb_any(skb);
		if (ret == -EIO) {
			resched_napi = true;
			goto exit;
		}
	}

	while (atomic_read(&htt->num_mpdus_ready)) {
		ret = ath10k_htt_rx_handle_amsdu(htt);
		if (ret == -EIO) {
			resched_napi = true;
			goto exit;
		}
		atomic_dec(&htt->num_mpdus_ready);
	}

	/* Deliver received data after processing data from hardware */
	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);

	/* From NAPI documentation:
	 *  The napi poll() function may also process TX completions, in which
	 *  case if it processes the entire TX ring then it should count that
	 *  work as the rest of the budget.
	 */
	if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
		quota = budget;

	/* kfifo_get: called only within txrx_tasklet so it's neatly serialized.
	 * From kfifo_get() documentation:
	 *  Note that with only one concurrent reader and one concurrent writer,
	 *  you don't need extra locking to use these macro.
	 */
	while (kfifo_get(&htt->txdone_fifo, &tx_done))
		ath10k_txrx_tx_unref(htt, &tx_done);

	ath10k_mac_tx_push_pending(ar);

	spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
	skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
	spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);

	while ((skb = __skb_dequeue(&tx_ind_q))) {
		ath10k_htt_rx_tx_fetch_ind(ar, skb);
		dev_kfree_skb_any(skb);
	}

exit:
	ath10k_htt_rx_msdu_buff_replenish(htt);
	/* In case of rx failure or more data to read, report budget
	 * to reschedule NAPI poll
	 */
	done = resched_napi ? budget : quota;

	return done;
}
EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);

static const struct ath10k_htt_rx_ops htt_rx_ops_32 = {
	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32,
	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32,
	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32,
	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32,
	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32,
};

static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64,
	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64,
	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64,
	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64,
	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
};

static const struct ath10k_htt_rx_ops htt_rx_ops_hl = {
	.htt_rx_proc_rx_frag_ind = ath10k_htt_rx_proc_rx_frag_ind_hl,
};

void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;

	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		htt->rx_ops = &htt_rx_ops_hl;
	else if (ar->hw_params.target_64bit)
		htt->rx_ops = &htt_rx_ops_64;
	else
		htt->rx_ops = &htt_rx_ops_32;
}