// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */

#include <linux/log2.h>
#include <linux/bitfield.h>

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

#define HTT_RX_RING_REFILL_RESCHED_MS 5
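/* Note: HTT_RX_RING_REFILL_RETRY_MS is the long retry interval used by
 * ath10k_htt_rx_msdu_buff_replenish() after a refill fails with -ENOMEM,
 * while HTT_RX_RING_REFILL_RESCHED_MS is the short interval used to spread
 * a large refill over several timer runs while a deficit remains.
 */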
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr)
{
	struct ath10k_skb_rxcb *rxcb;

	hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
		if (rxcb->paddr == paddr)
			return ATH10K_RXCB_SKB(rxcb);

	WARN_ON_ONCE(1);
	return NULL;
}
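/* Unmap and free every buffer currently posted to the rx ring. In-order
 * (full rx reorder) firmware tracks posted buffers in the paddr hash table,
 * otherwise they live in the netbufs_ring array.
 */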
static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct ath10k_skb_rxcb *rxcb;
	struct hlist_node *n;
	int i;

	if (htt->rx_ring.in_ord_rx) {
		hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
			skb = ATH10K_RXCB_SKB(rxcb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			hash_del(&rxcb->hlist);
			dev_kfree_skb_any(skb);
		}
	} else {
		for (i = 0; i < htt->rx_ring.size; i++) {
			skb = htt->rx_ring.netbufs_ring[i];
			if (!skb)
				continue;

			rxcb = ATH10K_SKB_RXCB(skb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
		}
	}

	htt->rx_ring.fill_cnt = 0;
	hash_init(htt->rx_ring.skb_table);
	memset(htt->rx_ring.netbufs_ring, 0,
	       htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
}
static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt)
{
	return htt->rx_ring.size * sizeof(*htt->rx_ring.paddrs_ring_32);
}

static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt)
{
	return htt->rx_ring.size * sizeof(*htt->rx_ring.paddrs_ring_64);
}
static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt,
					     void *vaddr)
{
	htt->rx_ring.paddrs_ring_32 = vaddr;
}

static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt,
					     void *vaddr)
{
	htt->rx_ring.paddrs_ring_64 = vaddr;
}

static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt,
					  dma_addr_t paddr, int idx)
{
	htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr);
}

static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt,
					  dma_addr_t paddr, int idx)
{
	htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr);
}

static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx)
{
	htt->rx_ring.paddrs_ring_32[idx] = 0;
}

static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx)
{
	htt->rx_ring.paddrs_ring_64[idx] = 0;
}

static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt *htt)
{
	return (void *)htt->rx_ring.paddrs_ring_32;
}

static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt)
{
	return (void *)htt->rx_ring.paddrs_ring_64;
}
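/* Note: the _32/_64 ring accessors above exist because targets may use
 * either 32-bit or 64-bit rx buffer physical addresses; the generic
 * ath10k_htt_set_paddrs_ring()/ath10k_htt_reset_paddrs_ring() wrappers used
 * below are expected to dispatch to the matching variant for the target.
 */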
static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct htt_rx_desc *rx_desc;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	/* The Full Rx Reorder firmware has no way of telling the host
	 * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
	 * To keep things simple make sure ring is always half empty. This
	 * guarantees there'll be no replenishment overruns possible.
	 */
	BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);

	idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	if (idx < 0 || idx >= htt->rx_ring.size) {
		ath10k_err(htt->ar, "rx ring index is not valid, firmware malfunctioning?\n");
		idx &= htt->rx_ring.size_mask;
		ret = -ENOMEM;
		goto fail;
	}

	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = (struct htt_rx_desc *)skb->data;
		rx_desc->attention.flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		rxcb = ATH10K_SKB_RXCB(skb);
		rxcb->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		ath10k_htt_set_paddrs_ring(htt, paddr, idx);
		htt->rx_ring.fill_cnt++;

		if (htt->rx_ring.in_ord_rx) {
			hash_add(htt->rx_ring.skb_table,
				 &ATH10K_SKB_RXCB(skb)->hlist,
				 paddr);
		}

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	/*
	 * Make sure the rx buffer is updated before available buffer
	 * index to avoid any potential rx ring corruption.
	 */
	mb();
	*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
	return ret;
}
static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}
static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_deficit, num_to_fill;

	/* Refilling the whole RX ring buffer proves to be a bad idea. The
	 * reason is RX may take up significant amount of CPU cycles and starve
	 * other tasks, e.g. TX on an ethernet device while acting as a bridge
	 * with ath10k wlan interface. This ended up with very poor performance
	 * once the host system CPU was overwhelmed with RX on ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turn makes use of the fact tasklets are
	 * processed in FIFO order. This means actual RX processing can starve
	 * out refilling. If there aren't enough buffers on the RX ring FW will
	 * not report RX until it is refilled with enough buffers. This
	 * automatically balances load wrt to CPU power.
	 *
	 * This probably comes at a cost of lower maximum throughput but
	 * improves the average and stability.
	 */
	spin_lock_bh(&htt->rx_ring.lock);
	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
	num_deficit -= num_to_fill;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	} else if (num_deficit > 0) {
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}
static void ath10k_htt_rx_ring_refill_retry(struct timer_list *t)
{
	struct ath10k_htt *htt = from_timer(htt, t, rx_ring.refill_retry_timer);

	ath10k_htt_rx_msdu_buff_replenish(htt);
}
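/* Fully populate the rx ring during HTT setup. High-latency (HL) targets do
 * not use a host rx ring, so this is a no-op for them. On failure the
 * partially filled ring is torn down again.
 */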
int ath10k_htt_rx_ring_refill(struct ath10k *ar)
{
	struct ath10k_htt *htt = &ar->htt;
	int ret;

	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		return 0;

	spin_lock_bh(&htt->rx_ring.lock);
	ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
					      htt->rx_ring.fill_cnt));

	if (ret)
		ath10k_htt_rx_ring_free(htt);

	spin_unlock_bh(&htt->rx_ring.lock);

	return ret;
}
void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
	if (htt->ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		return;

	del_timer_sync(&htt->rx_ring.refill_retry_timer);

	skb_queue_purge(&htt->rx_msdus_q);
	skb_queue_purge(&htt->rx_in_ord_compl_q);
	skb_queue_purge(&htt->tx_fetch_ind_q);

	spin_lock_bh(&htt->rx_ring.lock);
	ath10k_htt_rx_ring_free(htt);
	spin_unlock_bh(&htt->rx_ring.lock);

	dma_free_coherent(htt->ar->dev,
			  ath10k_htt_get_rx_ring_size(htt),
			  ath10k_htt_get_vaddr_ring(htt),
			  htt->rx_ring.base_paddr);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);

	kfree(htt->rx_ring.netbufs_ring);
}
static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int idx;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_ring.fill_cnt == 0) {
		ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
		return NULL;
	}

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];
	htt->rx_ring.netbufs_ring[idx] = NULL;
	ath10k_htt_reset_paddrs_ring(htt, idx);

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev,
			 ATH10K_SKB_RXCB(msdu)->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}
/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   struct sk_buff_head *amsdu)
{
	struct ath10k *ar = htt->ar;
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;

	lockdep_assert_held(&htt->rx_ring.lock);

	for (;;) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		msdu = ath10k_htt_rx_netbuf_pop(htt);
		if (!msdu) {
			__skb_queue_purge(amsdu);
			return -ENOENT;
		}

		__skb_queue_tail(amsdu, msdu);

		rx_desc = (struct htt_rx_desc *)msdu->data;

		/* FIXME: we must report msdu payload since this is what caller
		 * expects now
		 */
		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc->attention.flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			__skb_queue_purge(amsdu);
			return -EIO;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc->frag_info.ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_trim(msdu, 0);
		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
		msdu_len -= msdu->len;

		/* Note: Chained buffers do not contain rx descriptor */
		while (msdu_chained--) {
			msdu = ath10k_htt_rx_netbuf_pop(htt);
			if (!msdu) {
				__skb_queue_purge(amsdu);
				return -ENOENT;
			}

			__skb_queue_tail(amsdu, msdu);
			skb_trim(msdu, 0);
			skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= msdu->len;
			msdu_chaining = 1;
		}

		last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
					 sizeof(*rx_desc) - sizeof(u32));

		if (last_msdu)
			break;
	}

	if (skb_queue_empty(amsdu))
		msdu_chaining = -1;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */
	return msdu_chaining;
}
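/* In-order rx: look up and unmap the skb that was posted at the given
 * physical address. The rxcb hash table entry is removed here.
 */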
static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
					       u64 paddr)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
	if (!msdu)
		return NULL;

	rxcb = ATH10K_SKB_RXCB(msdu);
	hash_del(&rxcb->hlist);
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev, rxcb->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}
static inline void ath10k_htt_append_frag_list(struct sk_buff *skb_head,
					       struct sk_buff *frag_list,
					       unsigned int frag_len)
{
	skb_shinfo(skb_head)->frag_list = frag_list;
	skb_head->data_len = frag_len;
	skb_head->len += skb_head->data_len;
}
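/* Monitor mode helper for in-order rx: an MSDU that spans multiple rx
 * buffers is reassembled by chaining the follow-up buffers onto the first
 * skb's frag_list. The _32 and _64 variants below differ only in the
 * in-order descriptor layout and physical address width.
 */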
static int ath10k_htt_rx_handle_amsdu_mon_32(struct ath10k_htt *htt,
					     struct sk_buff *msdu,
					     struct htt_rx_in_ord_msdu_desc **msdu_desc)
{
	struct ath10k *ar = htt->ar;
	u32 paddr;
	struct sk_buff *frag_buf;
	struct sk_buff *prev_frag_buf;
	u8 last_frag;
	struct htt_rx_in_ord_msdu_desc *ind_desc = *msdu_desc;
	struct htt_rx_desc *rxd;
	int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);

	rxd = (void *)msdu->data;
	trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

	skb_put(msdu, sizeof(struct htt_rx_desc));
	skb_pull(msdu, sizeof(struct htt_rx_desc));
	skb_put(msdu, min(amsdu_len, HTT_RX_MSDU_SIZE));
	amsdu_len -= msdu->len;

	last_frag = ind_desc->reserved;
	if (last_frag) {
		if (amsdu_len) {
			ath10k_warn(ar, "invalid amsdu len %u, left %d",
				    __le16_to_cpu(ind_desc->msdu_len),
				    amsdu_len);
		}
		return 0;
	}

	ind_desc++;
	paddr = __le32_to_cpu(ind_desc->msdu_paddr);
	frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
	if (!frag_buf) {
		ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%x", paddr);
		return -ENOENT;
	}

	skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
	ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len);

	amsdu_len -= frag_buf->len;
	prev_frag_buf = frag_buf;
	last_frag = ind_desc->reserved;
	while (!last_frag && amsdu_len) {
		ind_desc++;
		paddr = __le32_to_cpu(ind_desc->msdu_paddr);
		frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!frag_buf) {
			ath10k_warn(ar, "failed to pop frag-n paddr: 0x%x",
				    paddr);
			prev_frag_buf->next = NULL;
			return -ENOENT;
		}

		skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
		last_frag = ind_desc->reserved;
		amsdu_len -= frag_buf->len;

		prev_frag_buf->next = frag_buf;
		prev_frag_buf = frag_buf;
	}

	if (amsdu_len) {
		ath10k_warn(ar, "invalid amsdu len %u, left %d",
			    __le16_to_cpu(ind_desc->msdu_len), amsdu_len);
	}

	*msdu_desc = ind_desc;

	prev_frag_buf->next = NULL;
	return 0;
}
static int
ath10k_htt_rx_handle_amsdu_mon_64(struct ath10k_htt *htt,
				  struct sk_buff *msdu,
				  struct htt_rx_in_ord_msdu_desc_ext **msdu_desc)
{
	struct ath10k *ar = htt->ar;
	u64 paddr;
	struct sk_buff *frag_buf;
	struct sk_buff *prev_frag_buf;
	u8 last_frag;
	struct htt_rx_in_ord_msdu_desc_ext *ind_desc = *msdu_desc;
	struct htt_rx_desc *rxd;
	int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);

	rxd = (void *)msdu->data;
	trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

	skb_put(msdu, sizeof(struct htt_rx_desc));
	skb_pull(msdu, sizeof(struct htt_rx_desc));
	skb_put(msdu, min(amsdu_len, HTT_RX_MSDU_SIZE));
	amsdu_len -= msdu->len;

	last_frag = ind_desc->reserved;
	if (last_frag) {
		if (amsdu_len) {
			ath10k_warn(ar, "invalid amsdu len %u, left %d",
				    __le16_to_cpu(ind_desc->msdu_len),
				    amsdu_len);
		}
		return 0;
	}

	ind_desc++;
	paddr = __le64_to_cpu(ind_desc->msdu_paddr);
	frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
	if (!frag_buf) {
		ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%llx", paddr);
		return -ENOENT;
	}

	skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
	ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len);

	amsdu_len -= frag_buf->len;
	prev_frag_buf = frag_buf;
	last_frag = ind_desc->reserved;
	while (!last_frag && amsdu_len) {
		ind_desc++;
		paddr = __le64_to_cpu(ind_desc->msdu_paddr);
		frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!frag_buf) {
			ath10k_warn(ar, "failed to pop frag-n paddr: 0x%llx",
				    paddr);
			prev_frag_buf->next = NULL;
			return -ENOENT;
		}

		skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
		last_frag = ind_desc->reserved;
		amsdu_len -= frag_buf->len;

		prev_frag_buf->next = frag_buf;
		prev_frag_buf = frag_buf;
	}

	if (amsdu_len) {
		ath10k_warn(ar, "invalid amsdu len %u, left %d",
			    __le16_to_cpu(ind_desc->msdu_len), amsdu_len);
	}

	*msdu_desc = ind_desc;

	prev_frag_buf->next = NULL;
	return 0;
}
static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
					  struct htt_rx_in_ord_ind *ev,
					  struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32;
	struct htt_rx_desc *rxd;
	struct sk_buff *msdu;
	int msdu_count, ret;
	bool is_offload;
	u32 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le32_to_cpu(msdu_desc->msdu_paddr);

		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		if (!is_offload && ar->monitor_arvif) {
			ret = ath10k_htt_rx_handle_amsdu_mon_32(htt, msdu,
								&msdu_desc);
			if (ret) {
				__skb_queue_purge(list);
				return ret;
			}
			__skb_queue_tail(list, msdu);
			msdu_desc++;
			continue;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = (void *)msdu->data;

			trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

			skb_put(msdu, sizeof(*rxd));
			skb_pull(msdu, sizeof(*rxd));
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			if (!(__le32_to_cpu(rxd->attention.flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}
static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
					  struct htt_rx_in_ord_ind *ev,
					  struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64;
	struct htt_rx_desc *rxd;
	struct sk_buff *msdu;
	int msdu_count, ret;
	bool is_offload;
	u64 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le64_to_cpu(msdu_desc->msdu_paddr);
		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		if (!is_offload && ar->monitor_arvif) {
			ret = ath10k_htt_rx_handle_amsdu_mon_64(htt, msdu,
								&msdu_desc);
			if (ret) {
				__skb_queue_purge(list);
				return ret;
			}
			__skb_queue_tail(list, msdu);
			msdu_desc++;
			continue;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = (void *)msdu->data;

			trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

			skb_put(msdu, sizeof(*rxd));
			skb_pull(msdu, sizeof(*rxd));
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			if (!(__le32_to_cpu(rxd->attention.flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}
int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	dma_addr_t paddr;
	void *vaddr, *vaddr_ring;
	size_t size;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		return 0;

	htt->rx_confused = false;

	/* XXX: The fill level could be changed during runtime in response to
	 * the host processing latency. Is this really worth it?
	 */
	htt->rx_ring.size = HTT_RX_RING_SIZE;
	htt->rx_ring.size_mask = htt->rx_ring.size - 1;
	htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level;

	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn(ar, "htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.netbufs_ring =
		kcalloc(htt->rx_ring.size, sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	size = ath10k_htt_get_rx_ring_size(htt);

	vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
	if (!vaddr_ring)
		goto err_dma_ring;

	ath10k_htt_config_paddrs_ring(htt, vaddr_ring);
	htt->rx_ring.base_paddr = paddr;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_KERNEL);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	timer_setup(timer, ath10k_htt_rx_ring_refill_retry, 0);

	spin_lock_init(&htt->rx_ring.lock);

	htt->rx_ring.fill_cnt = 0;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	hash_init(htt->rx_ring.skb_table);

	skb_queue_head_init(&htt->rx_msdus_q);
	skb_queue_head_init(&htt->rx_in_ord_compl_q);
	skb_queue_head_init(&htt->tx_fetch_ind_q);
	atomic_set(&htt->num_mpdus_ready, 0);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  ath10k_htt_get_rx_ring_size(htt),
			  vaddr_ring,
			  htt->rx_ring.base_paddr);

err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);

err_netbuf:
	return -ENOMEM;
}
static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
					  enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
		return IEEE80211_CCMP_256_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return IEEE80211_GCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

#define MICHAEL_MIC_LEN 8

static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar,
					enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
		return IEEE80211_CCMP_256_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return IEEE80211_GCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar,
					enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}
struct amsdu_subframe_hdr {
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	__be16 len;
} __packed;

#define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)
static inline u8 ath10k_bw_to_mac80211_bw(u8 bw)
{
	u8 ret = 0;

	switch (bw) {
	case 0:
		ret = RATE_INFO_BW_20;
		break;
	case 1:
		ret = RATE_INFO_BW_40;
		break;
	case 2:
		ret = RATE_INFO_BW_80;
		break;
	case 3:
		ret = RATE_INFO_BW_160;
		break;
	}

	return ret;
}
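/* Decode the rate information from the ppdu_start words of the rx
 * descriptor into mac80211's rx_status for legacy, HT and VHT preambles.
 */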
static void ath10k_htt_rx_h_rates(struct ath10k *ar,
				  struct ieee80211_rx_status *status,
				  struct htt_rx_desc *rxd)
{
	struct ieee80211_supported_band *sband;
	u8 cck, rate, bw, sgi, mcs, nss;
	u8 preamble = 0;
	u8 group_id;
	u32 info1, info2, info3;
	u32 stbc, nsts_su;

	info1 = __le32_to_cpu(rxd->ppdu_start.info1);
	info2 = __le32_to_cpu(rxd->ppdu_start.info2);
	info3 = __le32_to_cpu(rxd->ppdu_start.info3);

	preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);

	switch (preamble) {
	case HTT_RX_LEGACY:
		/* To get legacy rate index band is required. Since band can't
		 * be undefined check if freq is non-zero.
		 */
		if (!status->freq)
			return;

		cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
		rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
		rate &= ~RX_PPDU_START_RATE_FLAG;

		sband = &ar->mac.sbands[status->band];
		status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
		break;
	case HTT_RX_HT:
	case HTT_RX_HT_WITH_TXBF:
		/* HT-SIG - Table 20-11 in info2 and info3 */
		mcs = info2 & 0x1F;
		nss = mcs >> 3;
		bw = (info2 >> 7) & 1;
		sgi = (info3 >> 7) & 1;

		status->rate_idx = mcs;
		status->encoding = RX_ENC_HT;
		if (sgi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (bw)
			status->bw = RATE_INFO_BW_40;
		break;
	case HTT_RX_VHT:
	case HTT_RX_VHT_WITH_TXBF:
		/* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
		 * TODO check this
		 */
		bw = info2 & 3;
		sgi = info3 & 1;
		stbc = (info2 >> 3) & 1;
		group_id = (info2 >> 4) & 0x3F;

		if (GROUP_ID_IS_SU_MIMO(group_id)) {
			mcs = (info3 >> 4) & 0x0F;
			nsts_su = ((info2 >> 10) & 0x07);
			if (stbc)
				nss = (nsts_su >> 2) + 1;
			else
				nss = (nsts_su + 1);
		} else {
			/* Hardware doesn't decode VHT-SIG-B into Rx descriptor
			 * so it's impossible to decode MCS. Also since
			 * firmware consumes Group Id Management frames host
			 * has no knowledge regarding group/user position
			 * mapping so it's impossible to pick the correct Nsts
			 * from VHT-SIG-A1.
			 *
			 * Bandwidth and SGI are valid so report the rateinfo
			 * on best-effort basis.
			 */
			mcs = 0;
			nss = 1;
		}

		if (mcs > 0x09) {
			ath10k_warn(ar, "invalid MCS received %u\n", mcs);
			ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
				    __le32_to_cpu(rxd->attention.flags),
				    __le32_to_cpu(rxd->mpdu_start.info0),
				    __le32_to_cpu(rxd->mpdu_start.info1),
				    __le32_to_cpu(rxd->msdu_start.common.info0),
				    __le32_to_cpu(rxd->msdu_start.common.info1),
				    rxd->ppdu_start.info0,
				    __le32_to_cpu(rxd->ppdu_start.info1),
				    __le32_to_cpu(rxd->ppdu_start.info2),
				    __le32_to_cpu(rxd->ppdu_start.info3),
				    __le32_to_cpu(rxd->ppdu_start.info4));

			ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
				    __le32_to_cpu(rxd->msdu_end.common.info0),
				    __le32_to_cpu(rxd->mpdu_end.info0));

			ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
					"rx desc msdu payload: ",
					rxd->msdu_payload, 50);
			break;
		}

		status->rate_idx = mcs;
		status->nss = nss;

		if (sgi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

		status->bw = ath10k_bw_to_mac80211_bw(bw);

		status->encoding = RX_ENC_VHT;
		break;
	default:
		break;
	}
}
static struct ieee80211_channel *
ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
{
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;
	u16 peer_id;

	lockdep_assert_held(&ar->data_lock);

	if (!rxd)
		return NULL;

	if (rxd->attention.flags &
	    __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
		return NULL;

	if (!(rxd->msdu_end.common.info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
		return NULL;

	peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_PEER_IDX);

	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer)
		return NULL;

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (WARN_ON_ONCE(!arvif))
		return NULL;

	if (ath10k_mac_vif_chan(arvif->vif, &def))
		return NULL;

	return def.chan;
}
static struct ieee80211_channel *
ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
{
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;

	lockdep_assert_held(&ar->data_lock);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->vdev_id == vdev_id &&
		    ath10k_mac_vif_chan(arvif->vif, &def) == 0)
			return def.chan;
	}

	return NULL;
}
static void
ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
			      struct ieee80211_chanctx_conf *conf,
			      void *data)
{
	struct cfg80211_chan_def *def = data;

	*def = conf->def;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_any_channel(struct ath10k *ar)
{
	struct cfg80211_chan_def def = {};

	ieee80211_iter_chan_contexts_atomic(ar->hw,
					    ath10k_htt_rx_h_any_chan_iter,
					    &def);

	return def.chan;
}
static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd,
				    u32 vdev_id)
{
	struct ieee80211_channel *ch;

	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;
	if (!ch)
		ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
	if (!ch)
		ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
	if (!ch)
		ch = ath10k_htt_rx_h_any_channel(ar);
	if (!ch)
		ch = ar->tgt_oper_chan;
	spin_unlock_bh(&ar->data_lock);

	if (!ch)
		return false;

	status->band = ch->band;
	status->freq = ch->center_freq;

	return true;
}
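/* Per-chain RSSI: a pri20 value of 0x80 marks an unused/invalid chain.
 * Reported signal levels are the RSSI added to the (fixed) default noise
 * floor used by the driver.
 */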
static void ath10k_htt_rx_h_signal(struct ath10k *ar,
				   struct ieee80211_rx_status *status,
				   struct htt_rx_desc *rxd)
{
	int i;

	for (i = 0; i < IEEE80211_MAX_CHAINS; i++) {
		status->chains &= ~BIT(i);

		if (rxd->ppdu_start.rssi_chains[i].pri20_mhz != 0x80) {
			status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR +
				rxd->ppdu_start.rssi_chains[i].pri20_mhz;

			status->chains |= BIT(i);
		}
	}

	/* FIXME: Get real NF */
	status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
			 rxd->ppdu_start.rssi_comb;
	status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}
static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd)
{
	/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
	 * means all prior MSDUs in a PPDU are reported to mac80211 without the
	 * TSF. Is it worth holding frames until end of PPDU is known?
	 *
	 * FIXME: Can we get/compute 64bit TSF?
	 */
	status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
	status->flag |= RX_FLAG_MACTIME_END;
}
static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 u32 vdev_id)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	bool is_first_ppdu;
	bool is_last_ppdu;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_first_ppdu = !!(rxd->attention.flags &
			   __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
	is_last_ppdu = !!(rxd->attention.flags &
			  __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));

	if (is_first_ppdu) {
		/* New PPDU starts so clear out the old per-PPDU status. */
		status->freq = 0;
		status->rate_idx = 0;
		status->nss = 0;
		status->encoding = RX_ENC_LEGACY;
		status->bw = RATE_INFO_BW_20;

		status->flag &= ~RX_FLAG_MACTIME_END;
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		status->flag &= ~(RX_FLAG_AMPDU_IS_LAST);
		status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
		status->ampdu_reference = ar->ampdu_reference;

		ath10k_htt_rx_h_signal(ar, status, rxd);
		ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
		ath10k_htt_rx_h_rates(ar, status, rxd);
	}

	if (is_last_ppdu) {
		ath10k_htt_rx_h_mactime(ar, status, rxd);

		/* set ampdu last segment flag */
		status->flag |= RX_FLAG_AMPDU_IS_LAST;
		ar->ampdu_reference++;
	}
}
static const char * const tid_to_ac[] = {
	"BE",
	"BK",
	"BK",
	"BE",
	"VI",
	"VI",
	"VO",
	"VO",
};

static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
{
	u8 *qc;
	int tid;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return "";

	qc = ieee80211_get_qos_ctl(hdr);
	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
	if (tid < 8)
		snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
	else
		snprintf(out, size, "tid %d", tid);

	return out;
}
static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar,
				       struct ieee80211_rx_status *rx_status,
				       struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;

	status = IEEE80211_SKB_RXCB(skb);
	*status = *rx_status;

	skb_queue_tail(&ar->htt.rx_msdus_q, skb);
}
static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	char tid[32];

	status = IEEE80211_SKB_RXCB(skb);

	if (!(ar->filter_flags & FIF_FCSFAIL) &&
	    status->flag & RX_FLAG_FAILED_FCS_CRC) {
		ar->stats.rx_crc_err_drop++;
		dev_kfree_skb_any(skb);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_DATA,
		   "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
		   skb,
		   skb->len,
		   ieee80211_get_SA(hdr),
		   ath10k_get_tid(hdr, tid, sizeof(tid)),
		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
							"mcast" : "ucast",
		   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
		   (status->encoding == RX_ENC_HT) ? "ht" : "",
		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
			skb->data, skb->len);
	trace_ath10k_rx_hdr(ar, skb->data, skb->len);
	trace_ath10k_rx_payload(ar, skb->data, skb->len);

	ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
}
static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
				      struct ieee80211_hdr *hdr)
{
	int len = ieee80211_hdrlen(hdr->frame_control);

	if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
		      ar->running_fw->fw_file.fw_features))
		len = round_up(len, 4);

	return len;
}
static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					enum htt_rx_mpdu_encrypt_type enctype,
					bool is_decrypted,
					const u8 first_hdr[64])
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len;
	size_t crypto_len;
	bool is_first;
	bool is_last;
	bool msdu_limit_err;
	int bytes_aligned = ar->hw_params.decap_align_bytes;
	u8 *qos;

	rxd = (void *)msdu->data - sizeof(*rxd);
	is_first = !!(rxd->msdu_end.common.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.common.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));

	/* Delivered decapped frame:
	 * [802.11 header]
	 * [crypto param] <-- can be trimmed if !fcs_err &&
	 *                    !decrypt_err && !peer_idx_invalid
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 * [payload]
	 * [FCS] <-- at end, needs to be trimmed
	 */

	/* Some hardware (QCA99x0 variants) limits the number of MSDUs in an
	 * A-MSDU when deaggregating, so that unwanted MSDU-deaggregation is
	 * avoided for error packets. If the limit is exceeded, hw sends all
	 * remaining MSDUs as a single last MSDU with this msdu limit error set.
	 */
	msdu_limit_err = ath10k_rx_desc_msdu_limit_error(&ar->hw_params, rxd);

	/* If MSDU limit error happens, then don't warn on, the partial raw MSDU
	 * without first MSDU is expected in that case, and handled later here.
	 */
	/* This probably shouldn't happen but warn just in case */
	if (WARN_ON_ONCE(!is_first && !msdu_limit_err))
		return;

	/* This probably shouldn't happen but warn just in case */
	if (WARN_ON_ONCE(!(is_first && is_last) && !msdu_limit_err))
		return;

	if (is_last)
		skb_trim(msdu, msdu->len - FCS_LEN);

	/* Push original 80211 header */
	if (unlikely(msdu_limit_err)) {
		hdr = (struct ieee80211_hdr *)first_hdr;
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		if (ieee80211_is_data_qos(hdr->frame_control)) {
			qos = ieee80211_get_qos_ctl(hdr);
			qos[0] |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
		}

		if (crypto_len)
			memcpy(skb_push(msdu, crypto_len),
			       (void *)hdr + round_up(hdr_len, bytes_aligned),
			       crypto_len);

		memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
	}

	/* In most cases this will be true for sniffed frames. It makes sense
	 * to deliver them as-is without stripping the crypto param. This is
	 * necessary for software based decryption.
	 *
	 * If there's no error then the frame is decrypted. At least that is
	 * the case for frames that come in via fragmented rx indication.
	 */
	if (!is_decrypted)
		return;

	/* The payload is decrypted so strip crypto params. Start from tail
	 * since hdr is used to compute some stuff.
	 */

	hdr = (void *)msdu->data;

	/* Tail */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_mic_len(ar, enctype));

		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_icv_len(ar, enctype));
	} else {
		/* MIC */
		if (status->flag & RX_FLAG_MIC_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath10k_htt_rx_crypto_mic_len(ar, enctype));

		/* ICV */
		if (status->flag & RX_FLAG_ICV_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath10k_htt_rx_crypto_icv_len(ar, enctype));
	}

	/* MMIC */
	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN);

	/* Head */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		memmove((void *)msdu->data + crypto_len,
			(void *)msdu->data, hdr_len);
		skb_pull(msdu, crypto_len);
	}
}
static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
					  struct sk_buff *msdu,
					  struct ieee80211_rx_status *status,
					  const u8 first_hdr[64],
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int l3_pad_bytes;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 * [payload]
	 *
	 * Note: The nwifi header doesn't have QoS Control and is
	 * (always?) a 3addr frame.
	 *
	 * Note2: There's no A-MSDU subframe header. Even if it's part
	 * of an A-MSDU.
	 */

	/* pull decapped header and copy SA & DA */
	rxd = (void *)msdu->data - sizeof(*rxd);

	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);

	hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);

	hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
	skb_pull(msdu, hdr_len);

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}
static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
					   struct sk_buff *msdu,
					   enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len, crypto_len;
	void *rfc1042;
	bool is_first, is_last, is_amsdu;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	rxd = (void *)msdu->data - sizeof(*rxd);
	hdr = (void *)rxd->rx_hdr_status;

	is_first = !!(rxd->msdu_end.common.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.common.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
	is_amsdu = !(is_first && is_last);

	rfc1042 = hdr;

	if (is_first) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		rfc1042 += round_up(hdr_len, bytes_aligned) +
			   round_up(crypto_len, bytes_aligned);
	}

	if (is_amsdu)
		rfc1042 += sizeof(struct amsdu_subframe_hdr);

	return rfc1042;
}
static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					const u8 first_hdr[64],
					enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct ethhdr *eth;
	size_t hdr_len;
	void *rfc1042;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int l3_pad_bytes;
	struct htt_rx_desc *rxd;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
	 * [payload]
	 */

	rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
	if (WARN_ON_ONCE(!rfc1042))
		return;

	rxd = (void *)msdu->data - sizeof(*rxd);
	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);
	skb_pull(msdu, l3_pad_bytes);

	/* pull decapped header and copy SA & DA */
	eth = (struct ethhdr *)msdu->data;
	ether_addr_copy(da, eth->h_dest);
	ether_addr_copy(sa, eth->h_source);
	skb_pull(msdu, sizeof(struct ethhdr));

	/* push rfc1042/llc/snap */
	memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
	       sizeof(struct rfc1042_hdr));

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}
static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
					 struct sk_buff *msdu,
					 struct ieee80211_rx_status *status,
					 const u8 first_hdr[64],
					 enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	int l3_pad_bytes;
	struct htt_rx_desc *rxd;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [amsdu header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 * [payload]
	 */

	rxd = (void *)msdu->data - sizeof(*rxd);
	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);

	skb_put(msdu, l3_pad_bytes);
	skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);

	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
}
static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
				    struct sk_buff *msdu,
				    struct ieee80211_rx_status *status,
				    u8 first_hdr[64],
				    enum htt_rx_mpdu_encrypt_type enctype,
				    bool is_decrypted)
{
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	/* First msdu's decapped header:
	 * [802.11 header] <-- padded to 4 bytes long
	 * [crypto param] <-- padded to 4 bytes long
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 *
	 * Other (2nd, 3rd, ..) msdu's decapped header:
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 */

	rxd = (void *)msdu->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	switch (decap) {
	case RX_MSDU_DECAP_RAW:
		ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
					    is_decrypted, first_hdr);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr,
					      enctype);
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr,
					     enctype);
		break;
	}
}
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags, info;
	bool is_ip4, is_ip6;
	bool is_tcp, is_udp;
	bool ip_csum_ok, tcpudp_csum_ok;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);
	info = __le32_to_cpu(rxd->msdu_start.common.info1);

	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

	if (!is_ip4 && !is_ip6)
		return CHECKSUM_NONE;
	if (!is_tcp && !is_udp)
		return CHECKSUM_NONE;
	if (!ip_csum_ok)
		return CHECKSUM_NONE;
	if (!tcpudp_csum_ok)
		return CHECKSUM_NONE;

	return CHECKSUM_UNNECESSARY;
}

static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
{
	msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
}
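/* Per-MPDU processing: derive the encryption type and error bits from the
 * first and last MSDU rx descriptors of the A-MSDU, update rx_status flags
 * accordingly and then undecap every MSDU in the list.
 */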
static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 bool fill_crypt_header,
				 u8 *rx_hdr,
				 enum ath10k_pkt_rx_err *err)
{
	struct sk_buff *first;
	struct sk_buff *last;
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;
	struct ieee80211_hdr *hdr;
	enum htt_rx_mpdu_encrypt_type enctype;
	u8 first_hdr[64];
	u8 *qos;
	bool has_fcs_err;
	bool has_crypto_err;
	bool has_tkip_err;
	bool has_peer_idx_invalid;
	bool is_decrypted;
	bool is_mgmt;
	u32 attention;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_mgmt = !!(rxd->attention.flags &
		     __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));

	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	/* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
	 * decapped header. It'll be used for undecapping of each MSDU.
	 */
	hdr = (void *)rxd->rx_hdr_status;
	memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);

	if (rx_hdr)
		memcpy(rx_hdr, hdr, RX_HTT_HDR_STATUS_LEN);

	/* Each A-MSDU subframe will use the original header as the base and be
	 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
	 */
	hdr = (void *)first_hdr;

	if (ieee80211_is_data_qos(hdr->frame_control)) {
		qos = ieee80211_get_qos_ctl(hdr);
		qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
	}

	/* Some attention flags are valid only in the last MSDU. */
	last = skb_peek_tail(amsdu);
	rxd = (void *)last->data - sizeof(*rxd);
	attention = __le32_to_cpu(rxd->attention.flags);

	has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
	has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
	has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);

	/* Note: If hardware captures an encrypted frame that it can't decrypt,
	 * e.g. due to fcs error, missing peer or invalid key data it will
	 * report the frame as raw.
	 */
	is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
			!has_fcs_err &&
			!has_crypto_err &&
			!has_peer_idx_invalid);

	/* Clear per-MPDU flags while leaving per-PPDU flags intact. */
	status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
			  RX_FLAG_MMIC_ERROR |
			  RX_FLAG_DECRYPTED |
			  RX_FLAG_IV_STRIPPED |
			  RX_FLAG_ONLY_MONITOR |
			  RX_FLAG_MMIC_STRIPPED);

	if (has_fcs_err)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (has_tkip_err)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (err) {
		if (has_fcs_err)
			*err = ATH10K_PKT_RX_ERR_FCS;
		else if (has_tkip_err)
			*err = ATH10K_PKT_RX_ERR_TKIP;
		else if (has_crypto_err)
			*err = ATH10K_PKT_RX_ERR_CRYPT;
		else if (has_peer_idx_invalid)
			*err = ATH10K_PKT_RX_ERR_PEER_IDX_INVAL;
	}

	/* Firmware reports all necessary management frames via WMI already.
	 * They are not reported to monitor interfaces at all so pass the ones
	 * coming via HTT to monitor interfaces instead. This simplifies
	 * matters quite a bit.
	 */
	if (is_mgmt)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	if (is_decrypted) {
		status->flag |= RX_FLAG_DECRYPTED;

		if (likely(!is_mgmt))
			status->flag |= RX_FLAG_MMIC_STRIPPED;

		if (fill_crypt_header)
			status->flag |= RX_FLAG_MIC_STRIPPED |
					RX_FLAG_ICV_STRIPPED;
		else
			status->flag |= RX_FLAG_IV_STRIPPED;
	}

	skb_queue_walk(amsdu, msdu) {
		ath10k_htt_rx_h_csum_offload(msdu);
		ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
					is_decrypted);

		/* Undecapping involves copying the original 802.11 header back
		 * to sk_buff. If frame is protected and hardware has decrypted
		 * it then remove the protected bit.
		 */
		if (!is_decrypted)
			continue;
		if (is_mgmt)
			continue;

		if (fill_crypt_header)
			continue;

		hdr = (void *)msdu->data;
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	}
}
static void ath10k_htt_rx_h_enqueue(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    struct ieee80211_rx_status *status)
{
	struct sk_buff *msdu;
	struct sk_buff *first_subframe;

	first_subframe = skb_peek(amsdu);

	while ((msdu = __skb_dequeue(amsdu))) {
		/* Setup per-MSDU flags */
		if (skb_queue_empty(amsdu))
			status->flag &= ~RX_FLAG_AMSDU_MORE;
		else
			status->flag |= RX_FLAG_AMSDU_MORE;

		if (msdu == first_subframe) {
			first_subframe = NULL;
			status->flag &= ~RX_FLAG_ALLOW_SAME_PN;
		} else {
			status->flag |= RX_FLAG_ALLOW_SAME_PN;
		}

		ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
	}
}
static int ath10k_unchain_msdu(struct sk_buff_head *amsdu,
			       unsigned long *unchain_cnt)
{
	struct sk_buff *skb, *first;
	int space;
	int total_len = 0;
	int amsdu_len = skb_queue_len(amsdu);

	/* TODO: We might be able to optimize this by using
	 * skb_try_coalesce or similar method to
	 * decrease copying, or maybe get mac80211 to
	 * provide a way to just receive a list of
	 * skbs?
	 */

	first = __skb_dequeue(amsdu);

	/* Allocate total length all at once. */
	skb_queue_walk(amsdu, skb)
		total_len += skb->len;

	space = total_len - skb_tailroom(first);
	if ((space > 0) &&
	    (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
		/* TODO: bump some rx-oom error stat */
		/* put it back together so we can free the
		 * whole list at once.
		 */
		__skb_queue_head(amsdu, first);
		return -1;
	}

	/* Walk list again, copying contents into
	 * msdu head
	 */
	while ((skb = __skb_dequeue(amsdu))) {
		skb_copy_from_linear_data(skb, skb_put(first, skb->len),
					  skb->len);
		dev_kfree_skb_any(skb);
	}

	__skb_queue_head(amsdu, first);

	*unchain_cnt += amsdu_len - 1;

	return 0;
}
static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    unsigned long *drop_cnt,
				    unsigned long *unchain_cnt)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	/* FIXME: Current unchaining logic can only handle simple case of raw
	 * msdu chaining. If decapping is other than raw the chaining may be
	 * more complex and this isn't handled by the current code. Don't even
	 * try re-constructing such frames - it'll be pretty much garbage.
	 */
	if (decap != RX_MSDU_DECAP_RAW ||
	    skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
		*drop_cnt += skb_queue_len(amsdu);
		__skb_queue_purge(amsdu);
		return;
	}

	ath10k_unchain_msdu(amsdu, unchain_cnt);
}
static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
					struct sk_buff_head *amsdu,
					struct ieee80211_rx_status *rx_status)
{
	/* FIXME: It might be a good idea to do some fuzzy-testing to drop
	 * invalid/dangerous frames.
	 */

	if (!rx_status->freq) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n");
		return false;
	}

	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
		return false;
	}

	return true;
}
static void ath10k_htt_rx_h_filter(struct ath10k *ar,
				   struct sk_buff_head *amsdu,
				   struct ieee80211_rx_status *rx_status,
				   unsigned long *drop_cnt)
{
	if (skb_queue_empty(amsdu))
		return;

	if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
		return;

	if (drop_cnt)
		*drop_cnt += skb_queue_len(amsdu);

	__skb_queue_purge(amsdu);
}
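/* Pop one (A-)MSDU from the rx ring and push it through the rx path:
 * PPDU status, unchaining (for chained raw MSDUs), filtering, per-MPDU
 * processing and finally queueing towards mac80211, with per-TID stats.
 */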
static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct ieee80211_rx_status *rx_status = &htt->rx_status;
	struct sk_buff_head amsdu;
	int ret;
	unsigned long drop_cnt = 0;
	unsigned long unchain_cnt = 0;
	unsigned long drop_cnt_filter = 0;
	unsigned long msdus_to_queue, num_msdus;
	enum ath10k_pkt_rx_err err = ATH10K_PKT_RX_ERR_MAX;
	u8 first_hdr[RX_HTT_HDR_STATUS_LEN];

	__skb_queue_head_init(&amsdu);

	spin_lock_bh(&htt->rx_ring.lock);
	if (htt->rx_confused) {
		spin_unlock_bh(&htt->rx_ring.lock);
		return -EIO;
	}
	ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
	spin_unlock_bh(&htt->rx_ring.lock);

	if (ret < 0) {
		ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
		__skb_queue_purge(&amsdu);
		/* FIXME: It's probably a good idea to reboot the
		 * device instead of leaving it inoperable.
		 */
		htt->rx_confused = true;
		return ret;
	}

	num_msdus = skb_queue_len(&amsdu);

	ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);

	/* only for ret = 1 indicates chained msdus */
	if (ret > 0)
		ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt);

	ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter);
	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err);
	msdus_to_queue = skb_queue_len(&amsdu);
	ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status);

	ath10k_sta_update_rx_tid_stats(ar, first_hdr, num_msdus, err,
				       unchain_cnt, drop_cnt, drop_cnt_filter,
				       msdus_to_queue);

	return 0;
}
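/* Extract the 24- or 48-bit packet number from a high-latency rx
 * descriptor, depending on the cipher's PN length.
 */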
static void ath10k_htt_rx_mpdu_desc_pn_hl(struct htt_hl_rx_desc *rx_desc,
					  union htt_rx_pn_t *pn,
					  int pn_len_bits)
{
	switch (pn_len_bits) {
	case 48:
		pn->pn48 = __le32_to_cpu(rx_desc->pn_31_0) +
			   ((u64)(__le32_to_cpu(rx_desc->u0.pn_63_32) & 0xFFFF) << 32);
		break;
	case 24:
		pn->pn24 = __le32_to_cpu(rx_desc->pn_31_0);
		break;
	}
}
static bool ath10k_htt_rx_pn_cmp48(union htt_rx_pn_t *new_pn,
				   union htt_rx_pn_t *old_pn)
{
	return ((new_pn->pn48 & 0xffffffffffffULL) <=
		(old_pn->pn48 & 0xffffffffffffULL));
}
static bool ath10k_htt_rx_pn_check_replay_hl(struct ath10k *ar,
					     struct ath10k_peer *peer,
					     struct htt_rx_indication_hl *rx)
{
	bool last_pn_valid, pn_invalid = false;
	enum htt_txrx_sec_cast_type sec_index;
	enum htt_security_types sec_type;
	union htt_rx_pn_t new_pn = {0};
	struct htt_hl_rx_desc *rx_desc;
	union htt_rx_pn_t *last_pn;
	u32 rx_desc_info, tid;
	int num_mpdu_ranges;

	lockdep_assert_held(&ar->data_lock);

	if (!peer)
		return false;

	if (!(rx->fw_desc.flags & FW_RX_DESC_FLAGS_FIRST_MSDU))
		return false;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);

	rx_desc = (struct htt_hl_rx_desc *)&rx->mpdu_ranges[num_mpdu_ranges];
	rx_desc_info = __le32_to_cpu(rx_desc->info);

	if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED))
		return false;

	tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
	last_pn_valid = peer->tids_last_pn_valid[tid];
	last_pn = &peer->tids_last_pn[tid];

	if (MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST))
		sec_index = HTT_TXRX_SEC_MCAST;
	else
		sec_index = HTT_TXRX_SEC_UCAST;

	sec_type = peer->rx_pn[sec_index].sec_type;
	ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);

	if (sec_type != HTT_SECURITY_AES_CCMP &&
	    sec_type != HTT_SECURITY_TKIP &&
	    sec_type != HTT_SECURITY_TKIP_NOMIC)
		return false;

	if (last_pn_valid)
		pn_invalid = ath10k_htt_rx_pn_cmp48(&new_pn, last_pn);
	else
		peer->tids_last_pn_valid[tid] = true;

	if (!pn_invalid)
		last_pn->pn48 = new_pn.pn48;

	return pn_invalid;
}
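/* Handle a high-latency (HL) rx indication: optionally run the PN replay
 * check, strip the HTT headers in front of the 802.11 frame, fill in
 * rx_status from the indication itself and, where PN checking is done on
 * the host, rebuild a CCMP-style IV so mac80211 can verify the PN.
 */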
static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
					 struct htt_rx_indication_hl *rx,
					 struct sk_buff *skb,
					 enum htt_rx_pn_check_type check_pn_type,
					 enum htt_rx_tkip_demic_type tkip_mic_type)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_peer *peer;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	struct fw_rx_desc_hl *fw_desc;
	enum htt_txrx_sec_cast_type sec_index;
	enum htt_security_types sec_type;
	union htt_rx_pn_t new_pn = {0};
	struct htt_hl_rx_desc *rx_desc;
	struct ieee80211_hdr *hdr;
	struct ieee80211_rx_status *rx_status;
	u16 peer_id;
	u8 rx_desc_len;
	int num_mpdu_ranges;
	size_t tot_hdr_len;
	struct ieee80211_channel *ch;
	bool pn_invalid, qos, first_msdu;
	u32 tid, rx_desc_info;

	peer_id = __le16_to_cpu(rx->hdr.peer_id);
	tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	spin_unlock_bh(&ar->data_lock);
	if (!peer && peer_id != HTT_INVALID_PEERID)
		ath10k_warn(ar, "Got RX ind from invalid peer: %u\n", peer_id);

	if (!peer)
		return true;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	mpdu_ranges = htt_rx_ind_get_mpdu_ranges_hl(rx);
	fw_desc = &rx->fw_desc;
	rx_desc_len = fw_desc->len;

	/* I have not yet seen any case where num_mpdu_ranges > 1.
	 * qcacld does not seem to handle that case either, so we introduce the
	 * same limitation here as well.
	 */
	if (num_mpdu_ranges > 1)
		ath10k_warn(ar,
			    "Unsupported number of MPDU ranges: %d, ignoring all but the first\n",
			    num_mpdu_ranges);

	if (mpdu_ranges->mpdu_range_status !=
	    HTT_RX_IND_MPDU_STATUS_OK &&
	    mpdu_ranges->mpdu_range_status !=
	    HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt mpdu_range_status %d\n",
			   mpdu_ranges->mpdu_range_status);
		goto err;
	}

	rx_desc = (struct htt_hl_rx_desc *)&rx->mpdu_ranges[num_mpdu_ranges];
	rx_desc_info = __le32_to_cpu(rx_desc->info);

	if (MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST))
		sec_index = HTT_TXRX_SEC_MCAST;
	else
		sec_index = HTT_TXRX_SEC_UCAST;

	sec_type = peer->rx_pn[sec_index].sec_type;
	first_msdu = rx->fw_desc.flags & FW_RX_DESC_FLAGS_FIRST_MSDU;

	ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);

	if (check_pn_type == HTT_RX_PN_CHECK && tid >= IEEE80211_NUM_TIDS) {
		spin_lock_bh(&ar->data_lock);
		pn_invalid = ath10k_htt_rx_pn_check_replay_hl(ar, peer, rx);
		spin_unlock_bh(&ar->data_lock);

		if (pn_invalid)
			goto err;
	}

	/* Strip off all headers before the MAC header before delivery to
	 * mac80211
	 */
	tot_hdr_len = sizeof(struct htt_resp_hdr) + sizeof(rx->hdr) +
		      sizeof(rx->ppdu) + sizeof(rx->prefix) +
		      sizeof(rx->fw_desc) +
		      sizeof(*mpdu_ranges) * num_mpdu_ranges + rx_desc_len;

	skb_pull(skb, tot_hdr_len);

	hdr = (struct ieee80211_hdr *)skb->data;
	qos = ieee80211_is_data_qos(hdr->frame_control);

	rx_status = IEEE80211_SKB_RXCB(skb);
	memset(rx_status, 0, sizeof(*rx_status));

	if (rx->ppdu.combined_rssi == 0) {
		/* SDIO firmware does not provide signal */
		rx_status->signal = 0;
		rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
	} else {
		rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
			rx->ppdu.combined_rssi;
		rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
	}

	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;
	if (!ch)
		ch = ath10k_htt_rx_h_any_channel(ar);
	if (!ch)
		ch = ar->tgt_oper_chan;
	spin_unlock_bh(&ar->data_lock);

	if (ch) {
		rx_status->band = ch->band;
		rx_status->freq = ch->center_freq;
	}
	if (rx->fw_desc.flags & FW_RX_DESC_FLAGS_LAST_MSDU)
		rx_status->flag &= ~RX_FLAG_AMSDU_MORE;
	else
		rx_status->flag |= RX_FLAG_AMSDU_MORE;

	/* Not entirely sure about this, but all frames from the chipset have
	 * the protected flag set even though they have already been decrypted.
	 * Unmasking this flag is necessary in order for mac80211 not to drop
	 * the frame.
	 * TODO: Verify this is always the case or find out a way to check
	 * if there has been hw decryption.
	 */
	if (ieee80211_has_protected(hdr->frame_control)) {
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
		rx_status->flag |= RX_FLAG_DECRYPTED |
				   RX_FLAG_IV_STRIPPED |
				   RX_FLAG_MMIC_STRIPPED;

		if (tid < IEEE80211_NUM_TIDS &&
		    first_msdu &&
		    check_pn_type == HTT_RX_PN_CHECK &&
		   (sec_type == HTT_SECURITY_AES_CCMP ||
		    sec_type == HTT_SECURITY_TKIP ||
		    sec_type == HTT_SECURITY_TKIP_NOMIC)) {
			u8 offset, *ivp, i;
			__le64 pn48 = cpu_to_le64(new_pn.pn48);

			hdr = (struct ieee80211_hdr *)skb->data;
			offset = ieee80211_hdrlen(hdr->frame_control);
			hdr->frame_control |= __cpu_to_le16(IEEE80211_FCTL_PROTECTED);
			rx_status->flag &= ~RX_FLAG_IV_STRIPPED;

			memmove(skb->data - IEEE80211_CCMP_HDR_LEN,
				skb->data, offset);
			skb_push(skb, IEEE80211_CCMP_HDR_LEN);
			ivp = skb->data + offset;
			memset(skb->data + offset, 0, IEEE80211_CCMP_HDR_LEN);
			ivp[IEEE80211_WEP_IV_LEN - 1] |= ATH10K_IEEE80211_EXTIV;
2328 for (i
= 0; i
< ARRAY_SIZE(peer
->keys
); i
++) {
2329 if (peer
->keys
[i
] &&
2330 peer
->keys
[i
]->flags
& IEEE80211_KEY_FLAG_PAIRWISE
)
2331 keyidx
= peer
->keys
[i
]->keyidx
;
2335 ivp
[IEEE80211_WEP_IV_LEN
- 1] |= keyidx
<< 6;
2337 if (sec_type
== HTT_SECURITY_AES_CCMP
) {
2338 rx_status
->flag
|= RX_FLAG_MIC_STRIPPED
;
2340 memcpy(skb
->data
+ offset
, &pn48
, 2);
2341 /* pn 1, pn 3 , pn 34 , pn 5 */
2342 memcpy(skb
->data
+ offset
+ 4, ((u8
*)&pn48
) + 2, 4);
2344 rx_status
->flag
|= RX_FLAG_ICV_STRIPPED
;
2346 memcpy(skb
->data
+ offset
+ 2, &pn48
, 1);
2348 memcpy(skb
->data
+ offset
, ((u8
*)&pn48
) + 1, 1);
2349 /* TSC 2 , TSC 3 , TSC 4 , TSC 5*/
2350 memcpy(skb
->data
+ offset
+ 4, ((u8
*)&pn48
) + 2, 4);
2355 if (tkip_mic_type
== HTT_RX_TKIP_MIC
)
2356 rx_status
->flag
&= ~RX_FLAG_IV_STRIPPED
&
2357 ~RX_FLAG_MMIC_STRIPPED
;
2359 if (mpdu_ranges
->mpdu_range_status
== HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR
)
2360 rx_status
->flag
|= RX_FLAG_MMIC_ERROR
;
2362 if (!qos
&& tid
< IEEE80211_NUM_TIDS
) {
2364 __le16 qos_ctrl
= 0;
2366 hdr
= (struct ieee80211_hdr
*)skb
->data
;
2367 offset
= ieee80211_hdrlen(hdr
->frame_control
);
2369 hdr
->frame_control
|= cpu_to_le16(IEEE80211_STYPE_QOS_DATA
);
2370 memmove(skb
->data
- IEEE80211_QOS_CTL_LEN
, skb
->data
, offset
);
2371 skb_push(skb
, IEEE80211_QOS_CTL_LEN
);
2372 qos_ctrl
= cpu_to_le16(tid
);
2373 memcpy(skb
->data
+ offset
, &qos_ctrl
, IEEE80211_QOS_CTL_LEN
);
2377 ieee80211_rx_napi(ar
->hw
, NULL
, skb
, &ar
->napi
);
2379 ieee80211_rx_ni(ar
->hw
, skb
);
2381 /* We have delivered the skb to the upper layers (mac80211) so we
2386 /* Tell the caller that it must free the skb since we have not
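/* Fragmented frames on HL interfaces arrive with their security header and
 * trailer still in place. The helpers below strip the IV in front of the
 * payload and trim the MIC/ICV at the tail so the reassembled frame can be
 * handed to mac80211 as a plain decrypted frame.
 */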
static int ath10k_htt_rx_frag_tkip_decap_nomic(struct sk_buff *skb,
					       u16 head_len,
					       u16 hdr_len)
{
	u8 *ivp, *orig_hdr;

	orig_hdr = skb->data;
	ivp = orig_hdr + hdr_len + head_len;

	/* the ExtIV bit is always set to 1 for TKIP */
	if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
		return -EINVAL;

	memmove(orig_hdr + IEEE80211_TKIP_IV_LEN, orig_hdr, head_len + hdr_len);
	skb_pull(skb, IEEE80211_TKIP_IV_LEN);
	skb_trim(skb, skb->len - ATH10K_IEEE80211_TKIP_MICLEN);

	return 0;
}

static int ath10k_htt_rx_frag_tkip_decap_withmic(struct sk_buff *skb,
						 u16 head_len,
						 u16 hdr_len)
{
	u8 *ivp, *orig_hdr;

	orig_hdr = skb->data;
	ivp = orig_hdr + hdr_len + head_len;

	/* the ExtIV bit is always set to 1 for TKIP */
	if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
		return -EINVAL;

	memmove(orig_hdr + IEEE80211_TKIP_IV_LEN, orig_hdr, head_len + hdr_len);
	skb_pull(skb, IEEE80211_TKIP_IV_LEN);
	skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN);

	return 0;
}

static int ath10k_htt_rx_frag_ccmp_decap(struct sk_buff *skb,
					 u16 head_len,
					 u16 hdr_len)
{
	u8 *ivp, *orig_hdr;

	orig_hdr = skb->data;
	ivp = orig_hdr + hdr_len + head_len;

	/* the ExtIV bit is always set to 1 for CCMP */
	if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
		return -EINVAL;

	skb_trim(skb, skb->len - IEEE80211_CCMP_MIC_LEN);
	memmove(orig_hdr + IEEE80211_CCMP_HDR_LEN, orig_hdr, head_len + hdr_len);
	skb_pull(skb, IEEE80211_CCMP_HDR_LEN);

	return 0;
}

static int ath10k_htt_rx_frag_wep_decap(struct sk_buff *skb,
					u16 head_len,
					u16 hdr_len)
{
	u8 *orig_hdr;

	orig_hdr = skb->data;

	memmove(orig_hdr + IEEE80211_WEP_IV_LEN,
		orig_hdr, head_len + hdr_len);
	skb_pull(skb, IEEE80211_WEP_IV_LEN);
	skb_trim(skb, skb->len - IEEE80211_WEP_ICV_LEN);

	return 0;
}
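/* Fragment rx indication (HL): look up the peer, strip the per-fragment
 * security header according to the negotiated cipher, and check that the
 * sequence number and PN advance consistently across fragments before the
 * frame is passed on to the regular HL rx indication path.
 */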
2464 static bool ath10k_htt_rx_proc_rx_frag_ind_hl(struct ath10k_htt
*htt
,
2465 struct htt_rx_fragment_indication
*rx
,
2466 struct sk_buff
*skb
)
2468 struct ath10k
*ar
= htt
->ar
;
2469 enum htt_rx_tkip_demic_type tkip_mic
= HTT_RX_NON_TKIP_MIC
;
2470 enum htt_txrx_sec_cast_type sec_index
;
2471 struct htt_rx_indication_hl
*rx_hl
;
2472 enum htt_security_types sec_type
;
2473 u32 tid
, frag
, seq
, rx_desc_info
;
2474 union htt_rx_pn_t new_pn
= {0};
2475 struct htt_hl_rx_desc
*rx_desc
;
2476 u16 peer_id
, sc
, hdr_space
;
2477 union htt_rx_pn_t
*last_pn
;
2478 struct ieee80211_hdr
*hdr
;
2479 int ret
, num_mpdu_ranges
;
2480 struct ath10k_peer
*peer
;
2481 struct htt_resp
*resp
;
2484 resp
= (struct htt_resp
*)(skb
->data
+ HTT_RX_FRAG_IND_INFO0_HEADER_LEN
);
2485 skb_pull(skb
, HTT_RX_FRAG_IND_INFO0_HEADER_LEN
);
2486 skb_trim(skb
, skb
->len
- FCS_LEN
);
2488 peer_id
= __le16_to_cpu(rx
->peer_id
);
2489 rx_hl
= (struct htt_rx_indication_hl
*)(&resp
->rx_ind_hl
);
2491 spin_lock_bh(&ar
->data_lock
);
2492 peer
= ath10k_peer_find_by_id(ar
, peer_id
);
2494 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "invalid peer: %u\n", peer_id
);
2498 num_mpdu_ranges
= MS(__le32_to_cpu(rx_hl
->hdr
.info1
),
2499 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES
);
2501 tot_hdr_len
= sizeof(struct htt_resp_hdr
) +
2502 sizeof(rx_hl
->hdr
) +
2503 sizeof(rx_hl
->ppdu
) +
2504 sizeof(rx_hl
->prefix
) +
2505 sizeof(rx_hl
->fw_desc
) +
2506 sizeof(struct htt_rx_indication_mpdu_range
) * num_mpdu_ranges
;
2508 tid
= MS(rx_hl
->hdr
.info0
, HTT_RX_INDICATION_INFO0_EXT_TID
);
2509 rx_desc
= (struct htt_hl_rx_desc
*)(skb
->data
+ tot_hdr_len
);
2510 rx_desc_info
= __le32_to_cpu(rx_desc
->info
);
2512 if (!MS(rx_desc_info
, HTT_RX_DESC_HL_INFO_ENCRYPTED
)) {
2513 spin_unlock_bh(&ar
->data_lock
);
2514 return ath10k_htt_rx_proc_rx_ind_hl(htt
, &resp
->rx_ind_hl
, skb
,
2515 HTT_RX_NON_PN_CHECK
,
2516 HTT_RX_NON_TKIP_MIC
);
2519 hdr
= (struct ieee80211_hdr
*)((u8
*)rx_desc
+ rx_hl
->fw_desc
.len
);
2521 if (ieee80211_has_retry(hdr
->frame_control
))
2524 hdr_space
= ieee80211_hdrlen(hdr
->frame_control
);
2525 sc
= __le16_to_cpu(hdr
->seq_ctrl
);
2526 seq
= (sc
& IEEE80211_SCTL_SEQ
) >> 4;
2527 frag
= sc
& IEEE80211_SCTL_FRAG
;
2529 sec_index
= MS(rx_desc_info
, HTT_RX_DESC_HL_INFO_MCAST_BCAST
) ?
2530 HTT_TXRX_SEC_MCAST
: HTT_TXRX_SEC_UCAST
;
2531 sec_type
= peer
->rx_pn
[sec_index
].sec_type
;
2532 ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc
, &new_pn
, peer
->rx_pn
[sec_index
].pn_len
);
2535 case HTT_SECURITY_TKIP
:
2536 tkip_mic
= HTT_RX_TKIP_MIC
;
2537 ret
= ath10k_htt_rx_frag_tkip_decap_withmic(skb
,
2544 case HTT_SECURITY_TKIP_NOMIC
:
2545 ret
= ath10k_htt_rx_frag_tkip_decap_nomic(skb
,
2552 case HTT_SECURITY_AES_CCMP
:
2553 ret
= ath10k_htt_rx_frag_ccmp_decap(skb
,
2554 tot_hdr_len
+ rx_hl
->fw_desc
.len
,
2559 case HTT_SECURITY_WEP128
:
2560 case HTT_SECURITY_WEP104
:
2561 case HTT_SECURITY_WEP40
:
2562 ret
= ath10k_htt_rx_frag_wep_decap(skb
,
2563 tot_hdr_len
+ rx_hl
->fw_desc
.len
,
2572 resp
= (struct htt_resp
*)(skb
->data
);
2574 if (sec_type
!= HTT_SECURITY_AES_CCMP
&&
2575 sec_type
!= HTT_SECURITY_TKIP
&&
2576 sec_type
!= HTT_SECURITY_TKIP_NOMIC
) {
2577 spin_unlock_bh(&ar
->data_lock
);
2578 return ath10k_htt_rx_proc_rx_ind_hl(htt
, &resp
->rx_ind_hl
, skb
,
2579 HTT_RX_NON_PN_CHECK
,
2580 HTT_RX_NON_TKIP_MIC
);
2583 last_pn
= &peer
->frag_tids_last_pn
[tid
];
2586 if (ath10k_htt_rx_pn_check_replay_hl(ar
, peer
, &resp
->rx_ind_hl
))
2589 last_pn
->pn48
= new_pn
.pn48
;
2590 peer
->frag_tids_seq
[tid
] = seq
;
2591 } else if (sec_type
== HTT_SECURITY_AES_CCMP
) {
2592 if (seq
!= peer
->frag_tids_seq
[tid
])
2595 if (new_pn
.pn48
!= last_pn
->pn48
+ 1)
2598 last_pn
->pn48
= new_pn
.pn48
;
2599 last_pn
= &peer
->tids_last_pn
[tid
];
2600 last_pn
->pn48
= new_pn
.pn48
;
2603 spin_unlock_bh(&ar
->data_lock
);
2605 return ath10k_htt_rx_proc_rx_ind_hl(htt
, &resp
->rx_ind_hl
, skb
,
2606 HTT_RX_NON_PN_CHECK
, tkip_mic
);
2609 spin_unlock_bh(&ar
->data_lock
);
2611 /* Tell the caller that it must free the skb since we have not
static void ath10k_htt_rx_proc_rx_ind_ll(struct ath10k_htt *htt,
					 struct htt_rx_indication *rx)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	int num_mpdu_ranges;
	int i, mpdu_count = 0;
	u16 peer_id;
	u8 tid;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	peer_id = __le16_to_cpu(rx->hdr.peer_id);
	tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);

	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
			rx, struct_size(rx, mpdu_ranges, num_mpdu_ranges));

	for (i = 0; i < num_mpdu_ranges; i++)
		mpdu_count += mpdu_ranges[i].mpdu_count;

	atomic_add(mpdu_count, &htt->num_mpdus_ready);

	ath10k_sta_update_rx_tid_stats_ampdu(ar, peer_id, tid, mpdu_ranges,
					     num_mpdu_ranges);
}
2646 static void ath10k_htt_rx_tx_compl_ind(struct ath10k
*ar
,
2647 struct sk_buff
*skb
)
2649 struct ath10k_htt
*htt
= &ar
->htt
;
2650 struct htt_resp
*resp
= (struct htt_resp
*)skb
->data
;
2651 struct htt_tx_done tx_done
= {};
2652 int status
= MS(resp
->data_tx_completion
.flags
, HTT_DATA_TX_STATUS
);
2653 __le16 msdu_id
, *msdus
;
2654 bool rssi_enabled
= false;
2655 u8 msdu_count
= 0, num_airtime_records
, tid
;
2657 struct htt_data_tx_compl_ppdu_dur
*ppdu_info
;
2658 struct ath10k_peer
*peer
;
2659 u16 ppdu_info_offset
= 0, peer_id
;
2663 case HTT_DATA_TX_STATUS_NO_ACK
:
2664 tx_done
.status
= HTT_TX_COMPL_STATE_NOACK
;
2666 case HTT_DATA_TX_STATUS_OK
:
2667 tx_done
.status
= HTT_TX_COMPL_STATE_ACK
;
2669 case HTT_DATA_TX_STATUS_DISCARD
:
2670 case HTT_DATA_TX_STATUS_POSTPONE
:
2671 case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL
:
2672 tx_done
.status
= HTT_TX_COMPL_STATE_DISCARD
;
2675 ath10k_warn(ar
, "unhandled tx completion status %d\n", status
);
2676 tx_done
.status
= HTT_TX_COMPL_STATE_DISCARD
;
2680 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "htt tx completion num_msdus %d\n",
2681 resp
->data_tx_completion
.num_msdus
);
2683 msdu_count
= resp
->data_tx_completion
.num_msdus
;
2684 msdus
= resp
->data_tx_completion
.msdus
;
2685 rssi_enabled
= ath10k_is_rssi_enable(&ar
->hw_params
, resp
);
2688 htt_pad
= ath10k_tx_data_rssi_get_pad_bytes(&ar
->hw_params
,
2691 for (i
= 0; i
< msdu_count
; i
++) {
2693 tx_done
.msdu_id
= __le16_to_cpu(msdu_id
);
2696 /* Total no of MSDUs should be even,
2697 * if odd MSDUs are sent firmware fills
2698 * last msdu id with 0xffff
2700 if (msdu_count
& 0x01) {
2701 msdu_id
= msdus
[msdu_count
+ i
+ 1 + htt_pad
];
2702 tx_done
.ack_rssi
= __le16_to_cpu(msdu_id
);
2704 msdu_id
= msdus
[msdu_count
+ i
+ htt_pad
];
2705 tx_done
.ack_rssi
= __le16_to_cpu(msdu_id
);
2709 /* kfifo_put: In practice firmware shouldn't fire off per-CE
2710 * interrupt and main interrupt (MSI/-X range case) for the same
2711 * HTC service so it should be safe to use kfifo_put w/o lock.
2713 * From kfifo_put() documentation:
2714 * Note that with only one concurrent reader and one concurrent
2715 * writer, you don't need extra locking to use these macro.
2717 if (ar
->bus_param
.dev_type
== ATH10K_DEV_TYPE_HL
) {
2718 ath10k_txrx_tx_unref(htt
, &tx_done
);
2719 } else if (!kfifo_put(&htt
->txdone_fifo
, tx_done
)) {
2720 ath10k_warn(ar
, "txdone fifo overrun, msdu_id %d status %d\n",
2721 tx_done
.msdu_id
, tx_done
.status
);
2722 ath10k_txrx_tx_unref(htt
, &tx_done
);
2726 if (!(resp
->data_tx_completion
.flags2
& HTT_TX_CMPL_FLAG_PPDU_DURATION_PRESENT
))
2729 ppdu_info_offset
= (msdu_count
& 0x01) ? msdu_count
+ 1 : msdu_count
;
2732 ppdu_info_offset
+= ppdu_info_offset
;
2734 if (resp
->data_tx_completion
.flags2
&
2735 (HTT_TX_CMPL_FLAG_PPID_PRESENT
| HTT_TX_CMPL_FLAG_PA_PRESENT
))
2736 ppdu_info_offset
+= 2;
2738 ppdu_info
= (struct htt_data_tx_compl_ppdu_dur
*)&msdus
[ppdu_info_offset
];
2739 num_airtime_records
= FIELD_GET(HTT_TX_COMPL_PPDU_DUR_INFO0_NUM_ENTRIES_MASK
,
2740 __le32_to_cpu(ppdu_info
->info0
));
2742 for (i
= 0; i
< num_airtime_records
; i
++) {
2743 struct htt_data_tx_ppdu_dur
*ppdu_dur
;
2746 ppdu_dur
= &ppdu_info
->ppdu_dur
[i
];
2747 info0
= __le32_to_cpu(ppdu_dur
->info0
);
2749 peer_id
= FIELD_GET(HTT_TX_PPDU_DUR_INFO0_PEER_ID_MASK
,
2752 spin_lock_bh(&ar
->data_lock
);
2754 peer
= ath10k_peer_find_by_id(ar
, peer_id
);
2755 if (!peer
|| !peer
->sta
) {
2756 spin_unlock_bh(&ar
->data_lock
);
2761 tid
= FIELD_GET(HTT_TX_PPDU_DUR_INFO0_TID_MASK
, info0
) &
2762 IEEE80211_QOS_CTL_TID_MASK
;
2763 tx_duration
= __le32_to_cpu(ppdu_dur
->tx_duration
);
2765 ieee80211_sta_register_airtime(peer
->sta
, tid
, tx_duration
, 0);
2767 spin_unlock_bh(&ar
->data_lock
);
static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_addba *ev = &resp->rx_addba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx addba tid %hu peer_id %hu size %hhu\n",
		   tid, peer_id, ev->window_size);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
		   peer->addr, tid, ev->window_size);

	ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}

static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_delba *ev = &resp->rx_delba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx delba tid %hu peer_id %hu\n",
		   tid, peer_id);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx stop rx ba session sta %pM tid %hu\n",
		   peer->addr, tid);

	ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}
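/* Pull one A-MSDU off the front of @list: MSDUs are dequeued until an rx
 * descriptor with the LAST_MSDU bit is seen. If the list runs out before the
 * final MSDU arrives, the partial A-MSDU is spliced back onto the list and
 * an error is returned so the caller can retry once more data shows up.
 */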
static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
				       struct sk_buff_head *amsdu)
{
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;

	if (skb_queue_empty(list))
		return -ENOBUFS;

	if (WARN_ON(!skb_queue_empty(amsdu)))
		return -EINVAL;

	while ((msdu = __skb_dequeue(list))) {
		__skb_queue_tail(amsdu, msdu);

		rxd = (void *)msdu->data - sizeof(*rxd);
		if (rxd->msdu_end.common.info0 &
		    __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
			break;
	}

	msdu = skb_peek_tail(amsdu);
	rxd = (void *)msdu->data - sizeof(*rxd);
	if (!(rxd->msdu_end.common.info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
		skb_queue_splice_init(amsdu, list);
		return -EAGAIN;
	}

	return 0;
}
static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
					    struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (!ieee80211_has_protected(hdr->frame_control))
		return;

	/* Offloaded frames are already decrypted but firmware insists they are
	 * protected in the 802.11 header. Strip the flag. Otherwise mac80211
	 * will drop the frame.
	 */

	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	status->flag |= RX_FLAG_DECRYPTED |
			RX_FLAG_IV_STRIPPED |
			RX_FLAG_MMIC_STRIPPED;
}
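/* Offloaded MSDUs are delivered with a short htt_rx_offload_msdu header
 * instead of a full rx descriptor, so they bypass the usual A-MSDU handling
 * and are queued to mac80211 one by one.
 */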
static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
				       struct sk_buff_head *list)
{
	struct ath10k_htt *htt = &ar->htt;
	struct ieee80211_rx_status *status = &htt->rx_status;
	struct htt_rx_offload_msdu *rx;
	struct sk_buff *msdu;
	size_t offset;

	while ((msdu = __skb_dequeue(list))) {
		/* Offloaded frames don't have Rx descriptor. Instead they have
		 * a short meta information header.
		 */

		rx = (void *)msdu->data;

		skb_put(msdu, sizeof(*rx));
		skb_pull(msdu, sizeof(*rx));

		if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
			ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
			dev_kfree_skb_any(msdu);
			continue;
		}

		skb_put(msdu, __le16_to_cpu(rx->msdu_len));

		/* Offloaded rx header length isn't multiple of 2 nor 4 so the
		 * actual payload is unaligned. Align the frame. Otherwise
		 * mac80211 complains. This shouldn't reduce performance much
		 * because these offloaded frames are rare.
		 */
		offset = 4 - ((unsigned long)msdu->data & 3);
		skb_put(msdu, offset);
		memmove(msdu->data + offset, msdu->data, msdu->len);
		skb_pull(msdu, offset);

		/* FIXME: The frame is NWifi. Re-construct QoS Control
		 * if possible later.
		 */

		memset(status, 0, sizeof(*status));
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		ath10k_htt_rx_h_rx_offload_prot(status, msdu);
		ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
		ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
	}
}
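/* In-order rx indication (used with full rx reorder offload firmware): the
 * message carries the physical addresses of the buffers that make up one or
 * more A-MSDUs, so the matching skbs are popped from the rx ring hash table,
 * split into A-MSDUs and run through the regular rx processing chain.
 */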
2953 static int ath10k_htt_rx_in_ord_ind(struct ath10k
*ar
, struct sk_buff
*skb
)
2955 struct ath10k_htt
*htt
= &ar
->htt
;
2956 struct htt_resp
*resp
= (void *)skb
->data
;
2957 struct ieee80211_rx_status
*status
= &htt
->rx_status
;
2958 struct sk_buff_head list
;
2959 struct sk_buff_head amsdu
;
2968 lockdep_assert_held(&htt
->rx_ring
.lock
);
2970 if (htt
->rx_confused
)
2973 skb_pull(skb
, sizeof(resp
->hdr
));
2974 skb_pull(skb
, sizeof(resp
->rx_in_ord_ind
));
2976 peer_id
= __le16_to_cpu(resp
->rx_in_ord_ind
.peer_id
);
2977 msdu_count
= __le16_to_cpu(resp
->rx_in_ord_ind
.msdu_count
);
2978 vdev_id
= resp
->rx_in_ord_ind
.vdev_id
;
2979 tid
= SM(resp
->rx_in_ord_ind
.info
, HTT_RX_IN_ORD_IND_INFO_TID
);
2980 offload
= !!(resp
->rx_in_ord_ind
.info
&
2981 HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK
);
2982 frag
= !!(resp
->rx_in_ord_ind
.info
& HTT_RX_IN_ORD_IND_INFO_FRAG_MASK
);
2984 ath10k_dbg(ar
, ATH10K_DBG_HTT
,
2985 "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
2986 vdev_id
, peer_id
, tid
, offload
, frag
, msdu_count
);
2988 if (skb
->len
< msdu_count
* sizeof(*resp
->rx_in_ord_ind
.msdu_descs32
)) {
2989 ath10k_warn(ar
, "dropping invalid in order rx indication\n");
2993 /* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
2994 * extracted and processed.
2996 __skb_queue_head_init(&list
);
2997 if (ar
->hw_params
.target_64bit
)
2998 ret
= ath10k_htt_rx_pop_paddr64_list(htt
, &resp
->rx_in_ord_ind
,
3001 ret
= ath10k_htt_rx_pop_paddr32_list(htt
, &resp
->rx_in_ord_ind
,
3005 ath10k_warn(ar
, "failed to pop paddr list: %d\n", ret
);
3006 htt
->rx_confused
= true;
3010 /* Offloaded frames are very different and need to be handled
3014 ath10k_htt_rx_h_rx_offload(ar
, &list
);
3016 while (!skb_queue_empty(&list
)) {
3017 __skb_queue_head_init(&amsdu
);
3018 ret
= ath10k_htt_rx_extract_amsdu(&list
, &amsdu
);
3021 /* Note: The in-order indication may report interleaved
3022 * frames from different PPDUs meaning reported rx rate
3023 * to mac80211 isn't accurate/reliable. It's still
3024 * better to report something than nothing though. This
3025 * should still give an idea about rx rate to the user.
3027 ath10k_htt_rx_h_ppdu(ar
, &amsdu
, status
, vdev_id
);
3028 ath10k_htt_rx_h_filter(ar
, &amsdu
, status
, NULL
);
3029 ath10k_htt_rx_h_mpdu(ar
, &amsdu
, status
, false, NULL
,
3031 ath10k_htt_rx_h_enqueue(ar
, &amsdu
, status
);
3036 /* Should not happen. */
3037 ath10k_warn(ar
, "failed to extract amsdu: %d\n", ret
);
3038 htt
->rx_confused
= true;
3039 __skb_queue_purge(&list
);
static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
						   const __le32 *resp_ids,
						   int num_resp_ids)
{
	int i;
	u32 resp_id;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
		   num_resp_ids);

	for (i = 0; i < num_resp_ids; i++) {
		resp_id = le32_to_cpu(resp_ids[i]);

		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
			   resp_id);

		/* TODO: free resp_id */
	}
}
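/* tx fetch indication: in pull mode the firmware asks the host to push up to
 * a given number of MSDUs/bytes for specific peer/TID queues. The host walks
 * the records, pushes frames from the matching mac80211 txqs and reports
 * back how much was actually delivered in the fetch response.
 */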
3066 static void ath10k_htt_rx_tx_fetch_ind(struct ath10k
*ar
, struct sk_buff
*skb
)
3068 struct ieee80211_hw
*hw
= ar
->hw
;
3069 struct ieee80211_txq
*txq
;
3070 struct htt_resp
*resp
= (struct htt_resp
*)skb
->data
;
3071 struct htt_tx_fetch_record
*record
;
3073 size_t max_num_bytes
;
3074 size_t max_num_msdus
;
3077 const __le32
*resp_ids
;
3086 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "htt rx tx fetch ind\n");
3088 len
= sizeof(resp
->hdr
) + sizeof(resp
->tx_fetch_ind
);
3089 if (unlikely(skb
->len
< len
)) {
3090 ath10k_warn(ar
, "received corrupted tx_fetch_ind event: buffer too short\n");
3094 num_records
= le16_to_cpu(resp
->tx_fetch_ind
.num_records
);
3095 num_resp_ids
= le16_to_cpu(resp
->tx_fetch_ind
.num_resp_ids
);
3097 len
+= sizeof(resp
->tx_fetch_ind
.records
[0]) * num_records
;
3098 len
+= sizeof(resp
->tx_fetch_ind
.resp_ids
[0]) * num_resp_ids
;
3100 if (unlikely(skb
->len
< len
)) {
3101 ath10k_warn(ar
, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
3105 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n",
3106 num_records
, num_resp_ids
,
3107 le16_to_cpu(resp
->tx_fetch_ind
.fetch_seq_num
));
3109 if (!ar
->htt
.tx_q_state
.enabled
) {
3110 ath10k_warn(ar
, "received unexpected tx_fetch_ind event: not enabled\n");
3114 if (ar
->htt
.tx_q_state
.mode
== HTT_TX_MODE_SWITCH_PUSH
) {
3115 ath10k_warn(ar
, "received unexpected tx_fetch_ind event: in push mode\n");
3121 for (i
= 0; i
< num_records
; i
++) {
3122 record
= &resp
->tx_fetch_ind
.records
[i
];
3123 peer_id
= MS(le16_to_cpu(record
->info
),
3124 HTT_TX_FETCH_RECORD_INFO_PEER_ID
);
3125 tid
= MS(le16_to_cpu(record
->info
),
3126 HTT_TX_FETCH_RECORD_INFO_TID
);
3127 max_num_msdus
= le16_to_cpu(record
->num_msdus
);
3128 max_num_bytes
= le32_to_cpu(record
->num_bytes
);
3130 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n",
3131 i
, peer_id
, tid
, max_num_msdus
, max_num_bytes
);
3133 if (unlikely(peer_id
>= ar
->htt
.tx_q_state
.num_peers
) ||
3134 unlikely(tid
>= ar
->htt
.tx_q_state
.num_tids
)) {
3135 ath10k_warn(ar
, "received out of range peer_id %hu tid %hhu\n",
3140 spin_lock_bh(&ar
->data_lock
);
3141 txq
= ath10k_mac_txq_lookup(ar
, peer_id
, tid
);
3142 spin_unlock_bh(&ar
->data_lock
);
3144 /* It is okay to release the lock and use txq because RCU read
3148 if (unlikely(!txq
)) {
3149 ath10k_warn(ar
, "failed to lookup txq for peer_id %hu tid %hhu\n",
3157 ieee80211_txq_schedule_start(hw
, txq
->ac
);
3158 may_tx
= ieee80211_txq_may_transmit(hw
, txq
);
3159 while (num_msdus
< max_num_msdus
&&
3160 num_bytes
< max_num_bytes
) {
3164 ret
= ath10k_mac_tx_push_txq(hw
, txq
);
3171 ieee80211_return_txq(hw
, txq
, false);
3172 ieee80211_txq_schedule_end(hw
, txq
->ac
);
3174 record
->num_msdus
= cpu_to_le16(num_msdus
);
3175 record
->num_bytes
= cpu_to_le32(num_bytes
);
3177 ath10k_htt_tx_txq_recalc(hw
, txq
);
3182 resp_ids
= ath10k_htt_get_tx_fetch_ind_resp_ids(&resp
->tx_fetch_ind
);
3183 ath10k_htt_rx_tx_fetch_resp_id_confirm(ar
, resp_ids
, num_resp_ids
);
3185 ret
= ath10k_htt_tx_fetch_resp(ar
,
3186 resp
->tx_fetch_ind
.token
,
3187 resp
->tx_fetch_ind
.fetch_seq_num
,
3188 resp
->tx_fetch_ind
.records
,
3190 if (unlikely(ret
)) {
3191 ath10k_warn(ar
, "failed to submit tx fetch resp for token 0x%08x: %d\n",
3192 le32_to_cpu(resp
->tx_fetch_ind
.token
), ret
);
3193 /* FIXME: request fw restart */
3196 ath10k_htt_tx_txq_sync(ar
);
3199 static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k
*ar
,
3200 struct sk_buff
*skb
)
3202 const struct htt_resp
*resp
= (void *)skb
->data
;
3206 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "htt rx tx fetch confirm\n");
3208 len
= sizeof(resp
->hdr
) + sizeof(resp
->tx_fetch_confirm
);
3209 if (unlikely(skb
->len
< len
)) {
3210 ath10k_warn(ar
, "received corrupted tx_fetch_confirm event: buffer too short\n");
3214 num_resp_ids
= le16_to_cpu(resp
->tx_fetch_confirm
.num_resp_ids
);
3215 len
+= sizeof(resp
->tx_fetch_confirm
.resp_ids
[0]) * num_resp_ids
;
3217 if (unlikely(skb
->len
< len
)) {
3218 ath10k_warn(ar
, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
3222 ath10k_htt_rx_tx_fetch_resp_id_confirm(ar
,
3223 resp
->tx_fetch_confirm
.resp_ids
,
3227 static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k
*ar
,
3228 struct sk_buff
*skb
)
3230 const struct htt_resp
*resp
= (void *)skb
->data
;
3231 const struct htt_tx_mode_switch_record
*record
;
3232 struct ieee80211_txq
*txq
;
3233 struct ath10k_txq
*artxq
;
3236 enum htt_tx_mode_switch_mode mode
;
3245 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "htt rx tx mode switch ind\n");
3247 len
= sizeof(resp
->hdr
) + sizeof(resp
->tx_mode_switch_ind
);
3248 if (unlikely(skb
->len
< len
)) {
3249 ath10k_warn(ar
, "received corrupted tx_mode_switch_ind event: buffer too short\n");
3253 info0
= le16_to_cpu(resp
->tx_mode_switch_ind
.info0
);
3254 info1
= le16_to_cpu(resp
->tx_mode_switch_ind
.info1
);
3256 enable
= !!(info0
& HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE
);
3257 num_records
= MS(info0
, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD
);
3258 mode
= MS(info1
, HTT_TX_MODE_SWITCH_IND_INFO1_MODE
);
3259 threshold
= MS(info1
, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD
);
3261 ath10k_dbg(ar
, ATH10K_DBG_HTT
,
3262 "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n",
3263 info0
, info1
, enable
, num_records
, mode
, threshold
);
3265 len
+= sizeof(resp
->tx_mode_switch_ind
.records
[0]) * num_records
;
3267 if (unlikely(skb
->len
< len
)) {
3268 ath10k_warn(ar
, "received corrupted tx_mode_switch_mode_ind event: too many records\n");
3273 case HTT_TX_MODE_SWITCH_PUSH
:
3274 case HTT_TX_MODE_SWITCH_PUSH_PULL
:
3277 ath10k_warn(ar
, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n",
3285 ar
->htt
.tx_q_state
.enabled
= enable
;
3286 ar
->htt
.tx_q_state
.mode
= mode
;
3287 ar
->htt
.tx_q_state
.num_push_allowed
= threshold
;
3291 for (i
= 0; i
< num_records
; i
++) {
3292 record
= &resp
->tx_mode_switch_ind
.records
[i
];
3293 info0
= le16_to_cpu(record
->info0
);
3294 peer_id
= MS(info0
, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID
);
3295 tid
= MS(info0
, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID
);
3297 if (unlikely(peer_id
>= ar
->htt
.tx_q_state
.num_peers
) ||
3298 unlikely(tid
>= ar
->htt
.tx_q_state
.num_tids
)) {
3299 ath10k_warn(ar
, "received out of range peer_id %hu tid %hhu\n",
3304 spin_lock_bh(&ar
->data_lock
);
3305 txq
= ath10k_mac_txq_lookup(ar
, peer_id
, tid
);
3306 spin_unlock_bh(&ar
->data_lock
);
3308 /* It is okay to release the lock and use txq because RCU read
3312 if (unlikely(!txq
)) {
3313 ath10k_warn(ar
, "failed to lookup txq for peer_id %hu tid %hhu\n",
3318 spin_lock_bh(&ar
->htt
.tx_lock
);
3319 artxq
= (void *)txq
->drv_priv
;
3320 artxq
->num_push_allowed
= le16_to_cpu(record
->num_max_msdus
);
3321 spin_unlock_bh(&ar
->htt
.tx_lock
);
3326 ath10k_mac_tx_push_pending(ar
);
void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	bool release;

	release = ath10k_htt_t2h_msg_handler(ar, skb);

	/* Free the indication buffer */
	if (release)
		dev_kfree_skb_any(skb);
}
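/* Map a legacy rate reported by firmware (in Mbps, with 5.5 Mbps represented
 * as 5) to its index in the CCK + OFDM rate table used by the tx stats code.
 */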
static inline s8 ath10k_get_legacy_rate_idx(struct ath10k *ar, u8 rate)
{
	static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12,
					  18, 24, 36, 48, 54};
	int i;

	for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) {
		if (rate == legacy_rates[i])
			return i;
	}

	ath10k_warn(ar, "Invalid legacy rate %hhd peer stats", rate);
	return -EINVAL;
}
3356 ath10k_accumulate_per_peer_tx_stats(struct ath10k
*ar
,
3357 struct ath10k_sta
*arsta
,
3358 struct ath10k_per_peer_tx_stats
*pstats
,
3361 struct rate_info
*txrate
= &arsta
->txrate
;
3362 struct ath10k_htt_tx_stats
*tx_stats
;
3363 int idx
, ht_idx
, gi
, mcs
, bw
, nss
;
3364 unsigned long flags
;
3366 if (!arsta
->tx_stats
)
3369 tx_stats
= arsta
->tx_stats
;
3370 flags
= txrate
->flags
;
3371 gi
= test_bit(ATH10K_RATE_INFO_FLAGS_SGI_BIT
, &flags
);
3372 mcs
= ATH10K_HW_MCS_RATE(pstats
->ratecode
);
3375 ht_idx
= mcs
+ (nss
- 1) * 8;
3376 idx
= mcs
* 8 + 8 * 10 * (nss
- 1);
3379 #define STATS_OP_FMT(name) tx_stats->stats[ATH10K_STATS_TYPE_##name]
3381 if (txrate
->flags
& RATE_INFO_FLAGS_VHT_MCS
) {
3382 STATS_OP_FMT(SUCC
).vht
[0][mcs
] += pstats
->succ_bytes
;
3383 STATS_OP_FMT(SUCC
).vht
[1][mcs
] += pstats
->succ_pkts
;
3384 STATS_OP_FMT(FAIL
).vht
[0][mcs
] += pstats
->failed_bytes
;
3385 STATS_OP_FMT(FAIL
).vht
[1][mcs
] += pstats
->failed_pkts
;
3386 STATS_OP_FMT(RETRY
).vht
[0][mcs
] += pstats
->retry_bytes
;
3387 STATS_OP_FMT(RETRY
).vht
[1][mcs
] += pstats
->retry_pkts
;
3388 } else if (txrate
->flags
& RATE_INFO_FLAGS_MCS
) {
3389 STATS_OP_FMT(SUCC
).ht
[0][ht_idx
] += pstats
->succ_bytes
;
3390 STATS_OP_FMT(SUCC
).ht
[1][ht_idx
] += pstats
->succ_pkts
;
3391 STATS_OP_FMT(FAIL
).ht
[0][ht_idx
] += pstats
->failed_bytes
;
3392 STATS_OP_FMT(FAIL
).ht
[1][ht_idx
] += pstats
->failed_pkts
;
3393 STATS_OP_FMT(RETRY
).ht
[0][ht_idx
] += pstats
->retry_bytes
;
3394 STATS_OP_FMT(RETRY
).ht
[1][ht_idx
] += pstats
->retry_pkts
;
3396 mcs
= legacy_rate_idx
;
3398 STATS_OP_FMT(SUCC
).legacy
[0][mcs
] += pstats
->succ_bytes
;
3399 STATS_OP_FMT(SUCC
).legacy
[1][mcs
] += pstats
->succ_pkts
;
3400 STATS_OP_FMT(FAIL
).legacy
[0][mcs
] += pstats
->failed_bytes
;
3401 STATS_OP_FMT(FAIL
).legacy
[1][mcs
] += pstats
->failed_pkts
;
3402 STATS_OP_FMT(RETRY
).legacy
[0][mcs
] += pstats
->retry_bytes
;
3403 STATS_OP_FMT(RETRY
).legacy
[1][mcs
] += pstats
->retry_pkts
;
3406 if (ATH10K_HW_AMPDU(pstats
->flags
)) {
3407 tx_stats
->ba_fails
+= ATH10K_HW_BA_FAIL(pstats
->flags
);
3409 if (txrate
->flags
& RATE_INFO_FLAGS_MCS
) {
3410 STATS_OP_FMT(AMPDU
).ht
[0][ht_idx
] +=
3411 pstats
->succ_bytes
+ pstats
->retry_bytes
;
3412 STATS_OP_FMT(AMPDU
).ht
[1][ht_idx
] +=
3413 pstats
->succ_pkts
+ pstats
->retry_pkts
;
3415 STATS_OP_FMT(AMPDU
).vht
[0][mcs
] +=
3416 pstats
->succ_bytes
+ pstats
->retry_bytes
;
3417 STATS_OP_FMT(AMPDU
).vht
[1][mcs
] +=
3418 pstats
->succ_pkts
+ pstats
->retry_pkts
;
3420 STATS_OP_FMT(AMPDU
).bw
[0][bw
] +=
3421 pstats
->succ_bytes
+ pstats
->retry_bytes
;
3422 STATS_OP_FMT(AMPDU
).nss
[0][nss
- 1] +=
3423 pstats
->succ_bytes
+ pstats
->retry_bytes
;
3424 STATS_OP_FMT(AMPDU
).gi
[0][gi
] +=
3425 pstats
->succ_bytes
+ pstats
->retry_bytes
;
3426 STATS_OP_FMT(AMPDU
).rate_table
[0][idx
] +=
3427 pstats
->succ_bytes
+ pstats
->retry_bytes
;
3428 STATS_OP_FMT(AMPDU
).bw
[1][bw
] +=
3429 pstats
->succ_pkts
+ pstats
->retry_pkts
;
3430 STATS_OP_FMT(AMPDU
).nss
[1][nss
- 1] +=
3431 pstats
->succ_pkts
+ pstats
->retry_pkts
;
3432 STATS_OP_FMT(AMPDU
).gi
[1][gi
] +=
3433 pstats
->succ_pkts
+ pstats
->retry_pkts
;
3434 STATS_OP_FMT(AMPDU
).rate_table
[1][idx
] +=
3435 pstats
->succ_pkts
+ pstats
->retry_pkts
;
3437 tx_stats
->ack_fails
+=
3438 ATH10K_HW_BA_FAIL(pstats
->flags
);
3441 STATS_OP_FMT(SUCC
).bw
[0][bw
] += pstats
->succ_bytes
;
3442 STATS_OP_FMT(SUCC
).nss
[0][nss
- 1] += pstats
->succ_bytes
;
3443 STATS_OP_FMT(SUCC
).gi
[0][gi
] += pstats
->succ_bytes
;
3445 STATS_OP_FMT(SUCC
).bw
[1][bw
] += pstats
->succ_pkts
;
3446 STATS_OP_FMT(SUCC
).nss
[1][nss
- 1] += pstats
->succ_pkts
;
3447 STATS_OP_FMT(SUCC
).gi
[1][gi
] += pstats
->succ_pkts
;
3449 STATS_OP_FMT(FAIL
).bw
[0][bw
] += pstats
->failed_bytes
;
3450 STATS_OP_FMT(FAIL
).nss
[0][nss
- 1] += pstats
->failed_bytes
;
3451 STATS_OP_FMT(FAIL
).gi
[0][gi
] += pstats
->failed_bytes
;
3453 STATS_OP_FMT(FAIL
).bw
[1][bw
] += pstats
->failed_pkts
;
3454 STATS_OP_FMT(FAIL
).nss
[1][nss
- 1] += pstats
->failed_pkts
;
3455 STATS_OP_FMT(FAIL
).gi
[1][gi
] += pstats
->failed_pkts
;
3457 STATS_OP_FMT(RETRY
).bw
[0][bw
] += pstats
->retry_bytes
;
3458 STATS_OP_FMT(RETRY
).nss
[0][nss
- 1] += pstats
->retry_bytes
;
3459 STATS_OP_FMT(RETRY
).gi
[0][gi
] += pstats
->retry_bytes
;
3461 STATS_OP_FMT(RETRY
).bw
[1][bw
] += pstats
->retry_pkts
;
3462 STATS_OP_FMT(RETRY
).nss
[1][nss
- 1] += pstats
->retry_pkts
;
3463 STATS_OP_FMT(RETRY
).gi
[1][gi
] += pstats
->retry_pkts
;
3465 if (txrate
->flags
>= RATE_INFO_FLAGS_MCS
) {
3466 STATS_OP_FMT(SUCC
).rate_table
[0][idx
] += pstats
->succ_bytes
;
3467 STATS_OP_FMT(SUCC
).rate_table
[1][idx
] += pstats
->succ_pkts
;
3468 STATS_OP_FMT(FAIL
).rate_table
[0][idx
] += pstats
->failed_bytes
;
3469 STATS_OP_FMT(FAIL
).rate_table
[1][idx
] += pstats
->failed_pkts
;
3470 STATS_OP_FMT(RETRY
).rate_table
[0][idx
] += pstats
->retry_bytes
;
3471 STATS_OP_FMT(RETRY
).rate_table
[1][idx
] += pstats
->retry_pkts
;
3474 tx_stats
->tx_duration
+= pstats
->duration
;
3478 ath10k_update_per_peer_tx_stats(struct ath10k
*ar
,
3479 struct ieee80211_sta
*sta
,
3480 struct ath10k_per_peer_tx_stats
*peer_stats
)
3482 struct ath10k_sta
*arsta
= (struct ath10k_sta
*)sta
->drv_priv
;
3483 struct ieee80211_chanctx_conf
*conf
= NULL
;
3486 bool skip_auto_rate
;
3487 struct rate_info txrate
;
3489 lockdep_assert_held(&ar
->data_lock
);
3491 txrate
.flags
= ATH10K_HW_PREAMBLE(peer_stats
->ratecode
);
3492 txrate
.bw
= ATH10K_HW_BW(peer_stats
->flags
);
3493 txrate
.nss
= ATH10K_HW_NSS(peer_stats
->ratecode
);
3494 txrate
.mcs
= ATH10K_HW_MCS_RATE(peer_stats
->ratecode
);
3495 sgi
= ATH10K_HW_GI(peer_stats
->flags
);
3496 skip_auto_rate
= ATH10K_FW_SKIPPED_RATE_CTRL(peer_stats
->flags
);
3498 /* Firmware's rate control skips broadcast/management frames,
3499 * if host has configure fixed rates and in some other special cases.
3504 if (txrate
.flags
== WMI_RATE_PREAMBLE_VHT
&& txrate
.mcs
> 9) {
3505 ath10k_warn(ar
, "Invalid VHT mcs %hhd peer stats", txrate
.mcs
);
3509 if (txrate
.flags
== WMI_RATE_PREAMBLE_HT
&&
3510 (txrate
.mcs
> 7 || txrate
.nss
< 1)) {
3511 ath10k_warn(ar
, "Invalid HT mcs %hhd nss %hhd peer stats",
3512 txrate
.mcs
, txrate
.nss
);
3516 memset(&arsta
->txrate
, 0, sizeof(arsta
->txrate
));
3517 memset(&arsta
->tx_info
.status
, 0, sizeof(arsta
->tx_info
.status
));
3518 if (txrate
.flags
== WMI_RATE_PREAMBLE_CCK
||
3519 txrate
.flags
== WMI_RATE_PREAMBLE_OFDM
) {
3520 rate
= ATH10K_HW_LEGACY_RATE(peer_stats
->ratecode
);
3521 /* This is hacky, FW sends CCK rate 5.5Mbps as 6 */
3522 if (rate
== 6 && txrate
.flags
== WMI_RATE_PREAMBLE_CCK
)
3524 rate_idx
= ath10k_get_legacy_rate_idx(ar
, rate
);
3527 arsta
->txrate
.legacy
= rate
;
3528 } else if (txrate
.flags
== WMI_RATE_PREAMBLE_HT
) {
3529 arsta
->txrate
.flags
= RATE_INFO_FLAGS_MCS
;
3530 arsta
->txrate
.mcs
= txrate
.mcs
+ 8 * (txrate
.nss
- 1);
3532 arsta
->txrate
.flags
= RATE_INFO_FLAGS_VHT_MCS
;
3533 arsta
->txrate
.mcs
= txrate
.mcs
;
3536 switch (txrate
.flags
) {
3537 case WMI_RATE_PREAMBLE_OFDM
:
3538 if (arsta
->arvif
&& arsta
->arvif
->vif
)
3539 conf
= rcu_dereference(arsta
->arvif
->vif
->chanctx_conf
);
3540 if (conf
&& conf
->def
.chan
->band
== NL80211_BAND_5GHZ
)
3541 arsta
->tx_info
.status
.rates
[0].idx
= rate_idx
- 4;
3543 case WMI_RATE_PREAMBLE_CCK
:
3544 arsta
->tx_info
.status
.rates
[0].idx
= rate_idx
;
3546 arsta
->tx_info
.status
.rates
[0].flags
|=
3547 (IEEE80211_TX_RC_USE_SHORT_PREAMBLE
|
3548 IEEE80211_TX_RC_SHORT_GI
);
3550 case WMI_RATE_PREAMBLE_HT
:
3551 arsta
->tx_info
.status
.rates
[0].idx
=
3552 txrate
.mcs
+ ((txrate
.nss
- 1) * 8);
3554 arsta
->tx_info
.status
.rates
[0].flags
|=
3555 IEEE80211_TX_RC_SHORT_GI
;
3556 arsta
->tx_info
.status
.rates
[0].flags
|= IEEE80211_TX_RC_MCS
;
3558 case WMI_RATE_PREAMBLE_VHT
:
3559 ieee80211_rate_set_vht(&arsta
->tx_info
.status
.rates
[0],
3560 txrate
.mcs
, txrate
.nss
);
3562 arsta
->tx_info
.status
.rates
[0].flags
|=
3563 IEEE80211_TX_RC_SHORT_GI
;
3564 arsta
->tx_info
.status
.rates
[0].flags
|= IEEE80211_TX_RC_VHT_MCS
;
3568 arsta
->txrate
.nss
= txrate
.nss
;
3569 arsta
->txrate
.bw
= ath10k_bw_to_mac80211_bw(txrate
.bw
);
3570 arsta
->last_tx_bitrate
= cfg80211_calculate_bitrate(&arsta
->txrate
);
3572 arsta
->txrate
.flags
|= RATE_INFO_FLAGS_SHORT_GI
;
3574 switch (arsta
->txrate
.bw
) {
3575 case RATE_INFO_BW_40
:
3576 arsta
->tx_info
.status
.rates
[0].flags
|=
3577 IEEE80211_TX_RC_40_MHZ_WIDTH
;
3579 case RATE_INFO_BW_80
:
3580 arsta
->tx_info
.status
.rates
[0].flags
|=
3581 IEEE80211_TX_RC_80_MHZ_WIDTH
;
3585 if (peer_stats
->succ_pkts
) {
3586 arsta
->tx_info
.flags
= IEEE80211_TX_STAT_ACK
;
3587 arsta
->tx_info
.status
.rates
[0].count
= 1;
3588 ieee80211_tx_rate_update(ar
->hw
, sta
, &arsta
->tx_info
);
3591 if (ar
->htt
.disable_tx_comp
) {
3592 arsta
->tx_failed
+= peer_stats
->failed_pkts
;
3593 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "tx failed %d\n",
3597 arsta
->tx_retries
+= peer_stats
->retry_pkts
;
3598 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "htt tx retries %d", arsta
->tx_retries
);
3600 if (ath10k_debug_is_extd_tx_stats_enabled(ar
))
3601 ath10k_accumulate_per_peer_tx_stats(ar
, arsta
, peer_stats
,
3605 static void ath10k_htt_fetch_peer_stats(struct ath10k
*ar
,
3606 struct sk_buff
*skb
)
3608 struct htt_resp
*resp
= (struct htt_resp
*)skb
->data
;
3609 struct ath10k_per_peer_tx_stats
*p_tx_stats
= &ar
->peer_tx_stats
;
3610 struct htt_per_peer_tx_stats_ind
*tx_stats
;
3611 struct ieee80211_sta
*sta
;
3612 struct ath10k_peer
*peer
;
3614 u8 ppdu_len
, num_ppdu
;
3616 num_ppdu
= resp
->peer_tx_stats
.num_ppdu
;
3617 ppdu_len
= resp
->peer_tx_stats
.ppdu_len
* sizeof(__le32
);
3619 if (skb
->len
< sizeof(struct htt_resp_hdr
) + num_ppdu
* ppdu_len
) {
3620 ath10k_warn(ar
, "Invalid peer stats buf length %d\n", skb
->len
);
3624 tx_stats
= (struct htt_per_peer_tx_stats_ind
*)
3625 (resp
->peer_tx_stats
.payload
);
3626 peer_id
= __le16_to_cpu(tx_stats
->peer_id
);
3629 spin_lock_bh(&ar
->data_lock
);
3630 peer
= ath10k_peer_find_by_id(ar
, peer_id
);
3631 if (!peer
|| !peer
->sta
) {
3632 ath10k_warn(ar
, "Invalid peer id %d peer stats buffer\n",
3638 for (i
= 0; i
< num_ppdu
; i
++) {
3639 tx_stats
= (struct htt_per_peer_tx_stats_ind
*)
3640 (resp
->peer_tx_stats
.payload
+ i
* ppdu_len
);
3642 p_tx_stats
->succ_bytes
= __le32_to_cpu(tx_stats
->succ_bytes
);
3643 p_tx_stats
->retry_bytes
= __le32_to_cpu(tx_stats
->retry_bytes
);
3644 p_tx_stats
->failed_bytes
=
3645 __le32_to_cpu(tx_stats
->failed_bytes
);
3646 p_tx_stats
->ratecode
= tx_stats
->ratecode
;
3647 p_tx_stats
->flags
= tx_stats
->flags
;
3648 p_tx_stats
->succ_pkts
= __le16_to_cpu(tx_stats
->succ_pkts
);
3649 p_tx_stats
->retry_pkts
= __le16_to_cpu(tx_stats
->retry_pkts
);
3650 p_tx_stats
->failed_pkts
= __le16_to_cpu(tx_stats
->failed_pkts
);
3651 p_tx_stats
->duration
= __le16_to_cpu(tx_stats
->tx_duration
);
3653 ath10k_update_per_peer_tx_stats(ar
, sta
, p_tx_stats
);
3657 spin_unlock_bh(&ar
->data_lock
);
3661 static void ath10k_fetch_10_2_tx_stats(struct ath10k
*ar
, u8
*data
)
3663 struct ath10k_pktlog_hdr
*hdr
= (struct ath10k_pktlog_hdr
*)data
;
3664 struct ath10k_per_peer_tx_stats
*p_tx_stats
= &ar
->peer_tx_stats
;
3665 struct ath10k_10_2_peer_tx_stats
*tx_stats
;
3666 struct ieee80211_sta
*sta
;
3667 struct ath10k_peer
*peer
;
3668 u16 log_type
= __le16_to_cpu(hdr
->log_type
);
3671 if (log_type
!= ATH_PKTLOG_TYPE_TX_STAT
)
3674 tx_stats
= (struct ath10k_10_2_peer_tx_stats
*)((hdr
->payload
) +
3675 ATH10K_10_2_TX_STATS_OFFSET
);
3677 if (!tx_stats
->tx_ppdu_cnt
)
3680 peer_id
= tx_stats
->peer_id
;
3683 spin_lock_bh(&ar
->data_lock
);
3684 peer
= ath10k_peer_find_by_id(ar
, peer_id
);
3685 if (!peer
|| !peer
->sta
) {
3686 ath10k_warn(ar
, "Invalid peer id %d in peer stats buffer\n",
3692 for (i
= 0; i
< tx_stats
->tx_ppdu_cnt
; i
++) {
3693 p_tx_stats
->succ_bytes
=
3694 __le16_to_cpu(tx_stats
->success_bytes
[i
]);
3695 p_tx_stats
->retry_bytes
=
3696 __le16_to_cpu(tx_stats
->retry_bytes
[i
]);
3697 p_tx_stats
->failed_bytes
=
3698 __le16_to_cpu(tx_stats
->failed_bytes
[i
]);
3699 p_tx_stats
->ratecode
= tx_stats
->ratecode
[i
];
3700 p_tx_stats
->flags
= tx_stats
->flags
[i
];
3701 p_tx_stats
->succ_pkts
= tx_stats
->success_pkts
[i
];
3702 p_tx_stats
->retry_pkts
= tx_stats
->retry_pkts
[i
];
3703 p_tx_stats
->failed_pkts
= tx_stats
->failed_pkts
[i
];
3705 ath10k_update_per_peer_tx_stats(ar
, sta
, p_tx_stats
);
3707 spin_unlock_bh(&ar
->data_lock
);
3713 spin_unlock_bh(&ar
->data_lock
);
static int ath10k_htt_rx_pn_len(enum htt_security_types sec_type)
{
	switch (sec_type) {
	case HTT_SECURITY_TKIP:
	case HTT_SECURITY_TKIP_NOMIC:
	case HTT_SECURITY_AES_CCMP:
		return 48;
	default:
		return 0;
	}
}
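/* Security indication: remember the cipher installed for a peer (unicast or
 * multicast direction) and reset the cached "last PN" state so the replay
 * checks start fresh with the new key.
 */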
static void ath10k_htt_rx_sec_ind_handler(struct ath10k *ar,
					  struct htt_security_indication *ev)
{
	enum htt_txrx_sec_cast_type sec_index;
	enum htt_security_types sec_type;
	struct ath10k_peer *peer;

	spin_lock_bh(&ar->data_lock);

	peer = ath10k_peer_find_by_id(ar, __le16_to_cpu(ev->peer_id));
	if (!peer) {
		ath10k_warn(ar, "failed to find peer id %d for security indication",
			    __le16_to_cpu(ev->peer_id));
		goto out;
	}

	sec_type = MS(ev->flags, HTT_SECURITY_TYPE);

	if (ev->flags & HTT_SECURITY_IS_UNICAST)
		sec_index = HTT_TXRX_SEC_UCAST;
	else
		sec_index = HTT_TXRX_SEC_MCAST;

	peer->rx_pn[sec_index].sec_type = sec_type;
	peer->rx_pn[sec_index].pn_len = ath10k_htt_rx_pn_len(sec_type);

	memset(peer->tids_last_pn_valid, 0, sizeof(peer->tids_last_pn_valid));
	memset(peer->tids_last_pn, 0, sizeof(peer->tids_last_pn));

out:
	spin_unlock_bh(&ar->data_lock);
}
3762 bool ath10k_htt_t2h_msg_handler(struct ath10k
*ar
, struct sk_buff
*skb
)
3764 struct ath10k_htt
*htt
= &ar
->htt
;
3765 struct htt_resp
*resp
= (struct htt_resp
*)skb
->data
;
3766 enum htt_t2h_msg_type type
;
3768 /* confirm alignment */
3769 if (!IS_ALIGNED((unsigned long)skb
->data
, 4))
3770 ath10k_warn(ar
, "unaligned htt message, expect trouble\n");
3772 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "htt rx, msg_type: 0x%0X\n",
3773 resp
->hdr
.msg_type
);
3775 if (resp
->hdr
.msg_type
>= ar
->htt
.t2h_msg_types_max
) {
3776 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X",
3777 resp
->hdr
.msg_type
, ar
->htt
.t2h_msg_types_max
);
3780 type
= ar
->htt
.t2h_msg_types
[resp
->hdr
.msg_type
];
3783 case HTT_T2H_MSG_TYPE_VERSION_CONF
: {
3784 htt
->target_version_major
= resp
->ver_resp
.major
;
3785 htt
->target_version_minor
= resp
->ver_resp
.minor
;
3786 complete(&htt
->target_version_received
);
3789 case HTT_T2H_MSG_TYPE_RX_IND
:
3790 if (ar
->bus_param
.dev_type
!= ATH10K_DEV_TYPE_HL
) {
3791 ath10k_htt_rx_proc_rx_ind_ll(htt
, &resp
->rx_ind
);
3793 skb_queue_tail(&htt
->rx_indication_head
, skb
);
3797 case HTT_T2H_MSG_TYPE_PEER_MAP
: {
3798 struct htt_peer_map_event ev
= {
3799 .vdev_id
= resp
->peer_map
.vdev_id
,
3800 .peer_id
= __le16_to_cpu(resp
->peer_map
.peer_id
),
3802 memcpy(ev
.addr
, resp
->peer_map
.addr
, sizeof(ev
.addr
));
3803 ath10k_peer_map_event(htt
, &ev
);
3806 case HTT_T2H_MSG_TYPE_PEER_UNMAP
: {
3807 struct htt_peer_unmap_event ev
= {
3808 .peer_id
= __le16_to_cpu(resp
->peer_unmap
.peer_id
),
3810 ath10k_peer_unmap_event(htt
, &ev
);
3813 case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION
: {
3814 struct htt_tx_done tx_done
= {};
3815 struct ath10k_htt
*htt
= &ar
->htt
;
3816 struct ath10k_htc
*htc
= &ar
->htc
;
3817 struct ath10k_htc_ep
*ep
= &ar
->htc
.endpoint
[htt
->eid
];
3818 int status
= __le32_to_cpu(resp
->mgmt_tx_completion
.status
);
3819 int info
= __le32_to_cpu(resp
->mgmt_tx_completion
.info
);
3821 tx_done
.msdu_id
= __le32_to_cpu(resp
->mgmt_tx_completion
.desc_id
);
3824 case HTT_MGMT_TX_STATUS_OK
:
3825 tx_done
.status
= HTT_TX_COMPL_STATE_ACK
;
3826 if (test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS
,
3828 (resp
->mgmt_tx_completion
.flags
&
3829 HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI
)) {
3831 FIELD_GET(HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK
,
3835 case HTT_MGMT_TX_STATUS_RETRY
:
3836 tx_done
.status
= HTT_TX_COMPL_STATE_NOACK
;
3838 case HTT_MGMT_TX_STATUS_DROP
:
3839 tx_done
.status
= HTT_TX_COMPL_STATE_DISCARD
;
3843 if (htt
->disable_tx_comp
) {
3844 spin_lock_bh(&htc
->tx_lock
);
3846 spin_unlock_bh(&htc
->tx_lock
);
3849 status
= ath10k_txrx_tx_unref(htt
, &tx_done
);
3851 spin_lock_bh(&htt
->tx_lock
);
3852 ath10k_htt_tx_mgmt_dec_pending(htt
);
3853 spin_unlock_bh(&htt
->tx_lock
);
3857 case HTT_T2H_MSG_TYPE_TX_COMPL_IND
:
3858 ath10k_htt_rx_tx_compl_ind(htt
->ar
, skb
);
3860 case HTT_T2H_MSG_TYPE_SEC_IND
: {
3861 struct ath10k
*ar
= htt
->ar
;
3862 struct htt_security_indication
*ev
= &resp
->security_indication
;
3864 ath10k_htt_rx_sec_ind_handler(ar
, ev
);
3865 ath10k_dbg(ar
, ATH10K_DBG_HTT
,
3866 "sec ind peer_id %d unicast %d type %d\n",
3867 __le16_to_cpu(ev
->peer_id
),
3868 !!(ev
->flags
& HTT_SECURITY_IS_UNICAST
),
3869 MS(ev
->flags
, HTT_SECURITY_TYPE
));
3870 complete(&ar
->install_key_done
);
3873 case HTT_T2H_MSG_TYPE_RX_FRAG_IND
: {
3874 ath10k_dbg_dump(ar
, ATH10K_DBG_HTT_DUMP
, NULL
, "htt event: ",
3875 skb
->data
, skb
->len
);
3876 atomic_inc(&htt
->num_mpdus_ready
);
3878 return ath10k_htt_rx_proc_rx_frag_ind(htt
,
3882 case HTT_T2H_MSG_TYPE_TEST
:
3884 case HTT_T2H_MSG_TYPE_STATS_CONF
:
3885 trace_ath10k_htt_stats(ar
, skb
->data
, skb
->len
);
3887 case HTT_T2H_MSG_TYPE_TX_INSPECT_IND
:
3888 /* Firmware can return tx frames if it's unable to fully
3889 * process them and suspects host may be able to fix it. ath10k
3890 * sends all tx frames as already inspected so this shouldn't
3891 * happen unless fw has a bug.
3893 ath10k_warn(ar
, "received an unexpected htt tx inspect event\n");
3895 case HTT_T2H_MSG_TYPE_RX_ADDBA
:
3896 ath10k_htt_rx_addba(ar
, resp
);
3898 case HTT_T2H_MSG_TYPE_RX_DELBA
:
3899 ath10k_htt_rx_delba(ar
, resp
);
3901 case HTT_T2H_MSG_TYPE_PKTLOG
: {
3902 trace_ath10k_htt_pktlog(ar
, resp
->pktlog_msg
.payload
,
3904 offsetof(struct htt_resp
,
3905 pktlog_msg
.payload
));
3907 if (ath10k_peer_stats_enabled(ar
))
3908 ath10k_fetch_10_2_tx_stats(ar
,
3909 resp
->pktlog_msg
.payload
);
3912 case HTT_T2H_MSG_TYPE_RX_FLUSH
: {
3913 /* Ignore this event because mac80211 takes care of Rx
3914 * aggregation reordering.
3918 case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND
: {
3919 skb_queue_tail(&htt
->rx_in_ord_compl_q
, skb
);
3922 case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND
: {
3923 struct ath10k_htt
*htt
= &ar
->htt
;
3924 struct ath10k_htc
*htc
= &ar
->htc
;
3925 struct ath10k_htc_ep
*ep
= &ar
->htc
.endpoint
[htt
->eid
];
3926 u32 msg_word
= __le32_to_cpu(*(__le32
*)resp
);
3927 int htt_credit_delta
;
3929 htt_credit_delta
= HTT_TX_CREDIT_DELTA_ABS_GET(msg_word
);
3930 if (HTT_TX_CREDIT_SIGN_BIT_GET(msg_word
))
3931 htt_credit_delta
= -htt_credit_delta
;
3933 ath10k_dbg(ar
, ATH10K_DBG_HTT
,
3934 "htt credit update delta %d\n",
3937 if (htt
->disable_tx_comp
) {
3938 spin_lock_bh(&htc
->tx_lock
);
3939 ep
->tx_credits
+= htt_credit_delta
;
3940 spin_unlock_bh(&htc
->tx_lock
);
3941 ath10k_dbg(ar
, ATH10K_DBG_HTT
,
3942 "htt credit total %d\n",
3944 ep
->ep_ops
.ep_tx_credits(htc
->ar
);
3948 case HTT_T2H_MSG_TYPE_CHAN_CHANGE
: {
3949 u32 phymode
= __le32_to_cpu(resp
->chan_change
.phymode
);
3950 u32 freq
= __le32_to_cpu(resp
->chan_change
.freq
);
3952 ar
->tgt_oper_chan
= ieee80211_get_channel(ar
->hw
->wiphy
, freq
);
3953 ath10k_dbg(ar
, ATH10K_DBG_HTT
,
3954 "htt chan change freq %u phymode %s\n",
3955 freq
, ath10k_wmi_phymode_str(phymode
));
3958 case HTT_T2H_MSG_TYPE_AGGR_CONF
:
3960 case HTT_T2H_MSG_TYPE_TX_FETCH_IND
: {
3961 struct sk_buff
*tx_fetch_ind
= skb_copy(skb
, GFP_ATOMIC
);
3963 if (!tx_fetch_ind
) {
3964 ath10k_warn(ar
, "failed to copy htt tx fetch ind\n");
3967 skb_queue_tail(&htt
->tx_fetch_ind_q
, tx_fetch_ind
);
3970 case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM
:
3971 ath10k_htt_rx_tx_fetch_confirm(ar
, skb
);
3973 case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND
:
3974 ath10k_htt_rx_tx_mode_switch_ind(ar
, skb
);
3976 case HTT_T2H_MSG_TYPE_PEER_STATS
:
3977 ath10k_htt_fetch_peer_stats(ar
, skb
);
3979 case HTT_T2H_MSG_TYPE_EN_STATS
:
3981 ath10k_warn(ar
, "htt event (%d) not handled\n",
3982 resp
->hdr
.msg_type
);
3983 ath10k_dbg_dump(ar
, ATH10K_DBG_HTT_DUMP
, NULL
, "htt event: ",
3984 skb
->data
, skb
->len
);
3989 EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler
);
3991 void ath10k_htt_rx_pktlog_completion_handler(struct ath10k
*ar
,
3992 struct sk_buff
*skb
)
3994 trace_ath10k_htt_pktlog(ar
, skb
->data
, skb
->len
);
3995 dev_kfree_skb_any(skb
);
3997 EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler
);
3999 static int ath10k_htt_rx_deliver_msdu(struct ath10k
*ar
, int quota
, int budget
)
4001 struct sk_buff
*skb
;
4003 while (quota
< budget
) {
4004 if (skb_queue_empty(&ar
->htt
.rx_msdus_q
))
4007 skb
= skb_dequeue(&ar
->htt
.rx_msdus_q
);
4010 ath10k_process_rx(ar
, skb
);
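/* Drain up to @budget queued HL rx indications from NAPI context. Frames
 * that ath10k_htt_rx_proc_rx_ind_hl() did not hand to mac80211 are freed
 * here.
 */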
int ath10k_htt_rx_hl_indication(struct ath10k *ar, int budget)
{
	struct htt_resp *resp;
	struct ath10k_htt *htt = &ar->htt;
	struct sk_buff *skb;
	bool release;
	int quota;

	for (quota = 0; quota < budget; quota++) {
		skb = skb_dequeue(&htt->rx_indication_head);
		if (!skb)
			break;

		resp = (struct htt_resp *)skb->data;

		release = ath10k_htt_rx_proc_rx_ind_hl(htt,
						       &resp->rx_ind_hl,
						       skb,
						       HTT_RX_PN_CHECK,
						       HTT_RX_NON_TKIP_MIC);

		if (release)
			dev_kfree_skb_any(skb);

		ath10k_dbg(ar, ATH10K_DBG_HTT, "rx indication poll pending count:%d\n",
			   skb_queue_len(&htt->rx_indication_head));
	}
	return quota;
}
EXPORT_SYMBOL(ath10k_htt_rx_hl_indication);
4048 int ath10k_htt_txrx_compl_task(struct ath10k
*ar
, int budget
)
4050 struct ath10k_htt
*htt
= &ar
->htt
;
4051 struct htt_tx_done tx_done
= {};
4052 struct sk_buff_head tx_ind_q
;
4053 struct sk_buff
*skb
;
4054 unsigned long flags
;
4055 int quota
= 0, done
, ret
;
4056 bool resched_napi
= false;
4058 __skb_queue_head_init(&tx_ind_q
);
4060 /* Process pending frames before dequeuing more data
4063 quota
= ath10k_htt_rx_deliver_msdu(ar
, quota
, budget
);
4064 if (quota
== budget
) {
4065 resched_napi
= true;
4069 while ((skb
= skb_dequeue(&htt
->rx_in_ord_compl_q
))) {
4070 spin_lock_bh(&htt
->rx_ring
.lock
);
4071 ret
= ath10k_htt_rx_in_ord_ind(ar
, skb
);
4072 spin_unlock_bh(&htt
->rx_ring
.lock
);
4074 dev_kfree_skb_any(skb
);
4076 resched_napi
= true;
4081 while (atomic_read(&htt
->num_mpdus_ready
)) {
4082 ret
= ath10k_htt_rx_handle_amsdu(htt
);
4084 resched_napi
= true;
4087 atomic_dec(&htt
->num_mpdus_ready
);
4090 /* Deliver received data after processing data from hardware */
4091 quota
= ath10k_htt_rx_deliver_msdu(ar
, quota
, budget
);
4093 /* From NAPI documentation:
4094 * The napi poll() function may also process TX completions, in which
4095 * case if it processes the entire TX ring then it should count that
4096 * work as the rest of the budget.
4098 if ((quota
< budget
) && !kfifo_is_empty(&htt
->txdone_fifo
))
4101 /* kfifo_get: called only within txrx_tasklet so it's neatly serialized.
4102 * From kfifo_get() documentation:
4103 * Note that with only one concurrent reader and one concurrent writer,
4104 * you don't need extra locking to use these macro.
4106 while (kfifo_get(&htt
->txdone_fifo
, &tx_done
))
4107 ath10k_txrx_tx_unref(htt
, &tx_done
);
4109 ath10k_mac_tx_push_pending(ar
);
4111 spin_lock_irqsave(&htt
->tx_fetch_ind_q
.lock
, flags
);
4112 skb_queue_splice_init(&htt
->tx_fetch_ind_q
, &tx_ind_q
);
4113 spin_unlock_irqrestore(&htt
->tx_fetch_ind_q
.lock
, flags
);
4115 while ((skb
= __skb_dequeue(&tx_ind_q
))) {
4116 ath10k_htt_rx_tx_fetch_ind(ar
, skb
);
4117 dev_kfree_skb_any(skb
);
4121 ath10k_htt_rx_msdu_buff_replenish(htt
);
4122 /* In case of rx failure or more data to read, report budget
4123 * to reschedule NAPI poll
4125 done
= resched_napi
? budget
: quota
;
4129 EXPORT_SYMBOL(ath10k_htt_txrx_compl_task
);
static const struct ath10k_htt_rx_ops htt_rx_ops_32 = {
	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32,
	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32,
	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32,
	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32,
	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32,
};

static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64,
	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64,
	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64,
	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64,
	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
};

static const struct ath10k_htt_rx_ops htt_rx_ops_hl = {
	.htt_rx_proc_rx_frag_ind = ath10k_htt_rx_proc_rx_frag_ind_hl,
};

void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;

	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		htt->rx_ops = &htt_rx_ops_hl;
	else if (ar->hw_params.target_64bit)
		htt->rx_ops = &htt_rx_ops_64;
	else
		htt->rx_ops = &htt_rx_ops_32;
}