/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/log2.h>

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

#define HTT_RX_RING_REFILL_RESCHED_MS 5
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr)
{
	struct ath10k_skb_rxcb *rxcb;

	hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
		if (rxcb->paddr == paddr)
			return ATH10K_RXCB_SKB(rxcb);

	return NULL;
}
static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct ath10k_skb_rxcb *rxcb;
	struct hlist_node *n;
	int i;

	if (htt->rx_ring.in_ord_rx) {
		hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
			skb = ATH10K_RXCB_SKB(rxcb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			hash_del(&rxcb->hlist);
			dev_kfree_skb_any(skb);
		}
	} else {
		for (i = 0; i < htt->rx_ring.size; i++) {
			skb = htt->rx_ring.netbufs_ring[i];
			if (!skb)
				continue;

			rxcb = ATH10K_SKB_RXCB(skb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
		}
	}

	htt->rx_ring.fill_cnt = 0;
	hash_init(htt->rx_ring.skb_table);
	memset(htt->rx_ring.netbufs_ring, 0,
	       htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
}
static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt)
{
	return htt->rx_ring.size * sizeof(*htt->rx_ring.paddrs_ring_32);
}

static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt)
{
	return htt->rx_ring.size * sizeof(*htt->rx_ring.paddrs_ring_64);
}

static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt,
					     void *vaddr)
{
	htt->rx_ring.paddrs_ring_32 = vaddr;
}

static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt,
					     void *vaddr)
{
	htt->rx_ring.paddrs_ring_64 = vaddr;
}

static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt,
					  dma_addr_t paddr, int idx)
{
	htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr);
}

static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt,
					  dma_addr_t paddr, int idx)
{
	htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr);
}

static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx)
{
	htt->rx_ring.paddrs_ring_32[idx] = 0;
}

static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx)
{
	htt->rx_ring.paddrs_ring_64[idx] = 0;
}

static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt *htt)
{
	return (void *)htt->rx_ring.paddrs_ring_32;
}

static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt)
{
	return (void *)htt->rx_ring.paddrs_ring_64;
}
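/* The 32-bit and 64-bit ring helpers above are selected through htt->rx_ops
 * so the rest of this file can stay agnostic of the target's DMA address
 * width (see the ar->hw_params.target_64bit check in
 * ath10k_htt_rx_in_ord_ind()).
 */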
static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct htt_rx_desc *rx_desc;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	/* The Full Rx Reorder firmware has no way of telling the host
	 * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
	 * To keep things simple make sure ring is always half empty. This
	 * guarantees there'll be no replenishment overruns possible.
	 */
	BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);

	idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = (struct htt_rx_desc *)skb->data;
		rx_desc->attention.flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		rxcb = ATH10K_SKB_RXCB(skb);
		rxcb->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		htt->rx_ops->htt_set_paddrs_ring(htt, paddr, idx);
		htt->rx_ring.fill_cnt++;

		if (htt->rx_ring.in_ord_rx) {
			hash_add(htt->rx_ring.skb_table,
				 &ATH10K_SKB_RXCB(skb)->hlist,
				 paddr);
		}

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	/*
	 * Make sure the rx buffer is updated before available buffer
	 * index to avoid any potential rx ring corruption.
	 */
	mb();
	*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
	return ret;
}
static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}
static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_deficit, num_to_fill;

	/* Refilling the whole RX ring buffer proves to be a bad idea. The
	 * reason is RX may take up significant amount of CPU cycles and starve
	 * other tasks, e.g. TX on an ethernet device while acting as a bridge
	 * with ath10k wlan interface. This ended up with very poor performance
	 * once the host system's CPU was overwhelmed with RX on ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turn makes use of the fact tasklets are
	 * processed in FIFO order. This means actual RX processing can starve
	 * out refilling. If there's not enough buffers on RX ring FW will not
	 * report RX until it is refilled with enough buffers. This
	 * automatically balances load wrt to CPU power.
	 *
	 * This probably comes at a cost of lower maximum throughput but
	 * improves the average and stability.
	 */
	spin_lock_bh(&htt->rx_ring.lock);
	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
	num_deficit -= num_to_fill;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	} else if (num_deficit > 0) {
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}
static void ath10k_htt_rx_ring_refill_retry(struct timer_list *t)
{
	struct ath10k_htt *htt = from_timer(htt, t, rx_ring.refill_retry_timer);

	ath10k_htt_rx_msdu_buff_replenish(htt);
}
int ath10k_htt_rx_ring_refill(struct ath10k *ar)
{
	struct ath10k_htt *htt = &ar->htt;
	int ret;

	spin_lock_bh(&htt->rx_ring.lock);
	ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
					      htt->rx_ring.fill_cnt));
	spin_unlock_bh(&htt->rx_ring.lock);

	if (ret)
		ath10k_htt_rx_ring_free(htt);

	return ret;
}
void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
	del_timer_sync(&htt->rx_ring.refill_retry_timer);

	skb_queue_purge(&htt->rx_msdus_q);
	skb_queue_purge(&htt->rx_in_ord_compl_q);
	skb_queue_purge(&htt->tx_fetch_ind_q);

	ath10k_htt_rx_ring_free(htt);

	dma_free_coherent(htt->ar->dev,
			  htt->rx_ops->htt_get_rx_ring_size(htt),
			  htt->rx_ops->htt_get_vaddr_ring(htt),
			  htt->rx_ring.base_paddr);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);

	kfree(htt->rx_ring.netbufs_ring);
}
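/* Pop one filled buffer from the rx ring at the software read index. The
 * caller must hold rx_ring.lock; the returned skb still begins with the
 * struct htt_rx_desc written by the hardware.
 */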
static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int idx;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_ring.fill_cnt == 0) {
		ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
		return NULL;
	}

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];
	htt->rx_ring.netbufs_ring[idx] = NULL;
	htt->rx_ops->htt_reset_paddrs_ring(htt, idx);

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev,
			 ATH10K_SKB_RXCB(msdu)->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}
/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   struct sk_buff_head *amsdu)
{
	struct ath10k *ar = htt->ar;
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;

	lockdep_assert_held(&htt->rx_ring.lock);

	for (;;) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		msdu = ath10k_htt_rx_netbuf_pop(htt);
		if (!msdu) {
			__skb_queue_purge(amsdu);
			return -ENOBUFS;
		}

		__skb_queue_tail(amsdu, msdu);

		rx_desc = (struct htt_rx_desc *)msdu->data;

		/* FIXME: we must report msdu payload since this is what caller
		 * expects now
		 */
		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc->attention.flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			__skb_queue_purge(amsdu);
			return -EIO;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc->frag_info.ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
		msdu_len -= msdu->len;

		/* Note: Chained buffers do not contain rx descriptor */
		while (msdu_chained--) {
			msdu = ath10k_htt_rx_netbuf_pop(htt);
			if (!msdu) {
				__skb_queue_purge(amsdu);
				return -ENOBUFS;
			}

			__skb_queue_tail(amsdu, msdu);
			skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= msdu->len;
			msdu_chaining = 1;
		}

		last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
					 sizeof(*rx_desc) - sizeof(u32));

		if (last_msdu)
			break;
	}

	if (skb_queue_empty(amsdu))
		msdu_chaining = -1;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */
	return msdu_chaining;
}
static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
					       u64 paddr)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
	if (!msdu)
		return NULL;

	rxcb = ATH10K_SKB_RXCB(msdu);
	hash_del(&rxcb->hlist);
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev, rxcb->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}
static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
					  struct htt_rx_in_ord_ind *ev,
					  struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32;
	struct htt_rx_desc *rxd;
	struct sk_buff *msdu;
	int msdu_count;
	bool is_offload;
	u32 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le32_to_cpu(msdu_desc->msdu_paddr);

		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = (void *)msdu->data;

			trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

			skb_put(msdu, sizeof(*rxd));
			skb_pull(msdu, sizeof(*rxd));
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			if (!(__le32_to_cpu(rxd->attention.flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}
static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
					  struct htt_rx_in_ord_ind *ev,
					  struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64;
	struct htt_rx_desc *rxd;
	struct sk_buff *msdu;
	int msdu_count;
	bool is_offload;
	u64 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le64_to_cpu(msdu_desc->msdu_paddr);
		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = (void *)msdu->data;

			trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

			skb_put(msdu, sizeof(*rxd));
			skb_pull(msdu, sizeof(*rxd));
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			if (!(__le32_to_cpu(rxd->attention.flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}
int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	dma_addr_t paddr;
	void *vaddr, *vaddr_ring;
	size_t size;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	htt->rx_confused = false;

	/* XXX: The fill level could be changed during runtime in response to
	 * the host processing latency. Is this really worth it?
	 */
	htt->rx_ring.size = HTT_RX_RING_SIZE;
	htt->rx_ring.size_mask = htt->rx_ring.size - 1;
	htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level;

	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn(ar, "htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.netbufs_ring =
		kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	size = htt->rx_ops->htt_get_rx_ring_size(htt);

	vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
	if (!vaddr_ring)
		goto err_dma_ring;

	htt->rx_ops->htt_config_paddrs_ring(htt, vaddr_ring);
	htt->rx_ring.base_paddr = paddr;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_KERNEL);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	timer_setup(timer, ath10k_htt_rx_ring_refill_retry, 0);

	spin_lock_init(&htt->rx_ring.lock);

	htt->rx_ring.fill_cnt = 0;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	hash_init(htt->rx_ring.skb_table);

	skb_queue_head_init(&htt->rx_msdus_q);
	skb_queue_head_init(&htt->rx_in_ord_compl_q);
	skb_queue_head_init(&htt->tx_fetch_ind_q);
	atomic_set(&htt->num_mpdus_ready, 0);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  htt->rx_ops->htt_get_rx_ring_size(htt),
			  vaddr_ring,
			  htt->rx_ring.base_paddr);
err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
	return -ENOMEM;
}
static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
					  enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
		return IEEE80211_CCMP_256_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return IEEE80211_GCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

#define MICHAEL_MIC_LEN 8

static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar,
					enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
		return IEEE80211_CCMP_256_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return IEEE80211_GCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar,
					enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

struct amsdu_subframe_hdr {
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	__be16 len;
} __packed;

#define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)
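/* Decode the PPDU rate info (legacy/HT/VHT, MCS, NSS, bandwidth, short GI)
 * for mac80211 from the ppdu_start words of the rx descriptor. For MU-MIMO
 * group IDs the MCS/NSS cannot be recovered (see the comment in the VHT
 * branch), so only bandwidth and GI are reported in that case.
 */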
static void ath10k_htt_rx_h_rates(struct ath10k *ar,
				  struct ieee80211_rx_status *status,
				  struct htt_rx_desc *rxd)
{
	struct ieee80211_supported_band *sband;
	u8 cck, rate, bw, sgi, mcs, nss;
	u8 preamble = 0;
	u8 group_id;
	u32 info1, info2, info3;

	info1 = __le32_to_cpu(rxd->ppdu_start.info1);
	info2 = __le32_to_cpu(rxd->ppdu_start.info2);
	info3 = __le32_to_cpu(rxd->ppdu_start.info3);

	preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);

	switch (preamble) {
	case HTT_RX_LEGACY:
		/* To get legacy rate index band is required. Since band can't
		 * be undefined check if freq is non-zero.
		 */
		if (!status->freq)
			return;

		cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
		rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
		rate &= ~RX_PPDU_START_RATE_FLAG;

		sband = &ar->mac.sbands[status->band];
		status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
		break;
	case HTT_RX_HT:
	case HTT_RX_HT_WITH_TXBF:
		/* HT-SIG - Table 20-11 in info2 and info3 */
		mcs = info2 & 0x1F;
		nss = mcs >> 3;
		bw = (info2 >> 7) & 1;
		sgi = (info3 >> 7) & 1;

		status->rate_idx = mcs;
		status->encoding = RX_ENC_HT;
		if (sgi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (bw)
			status->bw = RATE_INFO_BW_40;
		break;
	case HTT_RX_VHT:
	case HTT_RX_VHT_WITH_TXBF:
		/* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3 */
		bw = info2 & 3;
		sgi = info3 & 1;
		group_id = (info2 >> 4) & 0x3F;

		if (GROUP_ID_IS_SU_MIMO(group_id)) {
			mcs = (info3 >> 4) & 0x0F;
			nss = ((info2 >> 10) & 0x07) + 1;
		} else {
			/* Hardware doesn't decode VHT-SIG-B into Rx descriptor
			 * so it's impossible to decode MCS. Also since
			 * firmware consumes Group Id Management frames host
			 * has no knowledge regarding group/user position
			 * mapping so it's impossible to pick the correct Nsts
			 * from VHT-SIG-A1.
			 *
			 * Bandwidth and SGI are valid so report the rateinfo
			 * on best-effort basis.
			 */
			mcs = 0;
			nss = 1;
		}

		if (mcs > 0x09) {
			ath10k_warn(ar, "invalid MCS received %u\n", mcs);
			ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
				    __le32_to_cpu(rxd->attention.flags),
				    __le32_to_cpu(rxd->mpdu_start.info0),
				    __le32_to_cpu(rxd->mpdu_start.info1),
				    __le32_to_cpu(rxd->msdu_start.common.info0),
				    __le32_to_cpu(rxd->msdu_start.common.info1),
				    rxd->ppdu_start.info0,
				    __le32_to_cpu(rxd->ppdu_start.info1),
				    __le32_to_cpu(rxd->ppdu_start.info2),
				    __le32_to_cpu(rxd->ppdu_start.info3),
				    __le32_to_cpu(rxd->ppdu_start.info4));

			ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
				    __le32_to_cpu(rxd->msdu_end.common.info0),
				    __le32_to_cpu(rxd->mpdu_end.info0));

			ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
					"rx desc msdu payload: ",
					rxd->msdu_payload, 50);
		}

		status->rate_idx = mcs;
		status->nss = nss;

		if (sgi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

		switch (bw) {
		/* 20MHZ */
		case 0:
			break;
		/* 40MHZ */
		case 1:
			status->bw = RATE_INFO_BW_40;
			break;
		/* 80MHZ */
		case 2:
			status->bw = RATE_INFO_BW_80;
			break;
		/* 160MHZ */
		case 3:
			status->bw = RATE_INFO_BW_160;
			break;
		}

		status->encoding = RX_ENC_VHT;
		break;
	default:
		break;
	}
}
static struct ieee80211_channel *
ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
{
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;
	u16 peer_id;

	lockdep_assert_held(&ar->data_lock);

	if (!rxd)
		return NULL;

	if (rxd->attention.flags &
	    __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
		return NULL;

	if (!(rxd->msdu_end.common.info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
		return NULL;

	peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_PEER_IDX);

	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer)
		return NULL;

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (WARN_ON_ONCE(!arvif))
		return NULL;

	if (ath10k_mac_vif_chan(arvif->vif, &def))
		return NULL;

	return def.chan;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
{
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;

	lockdep_assert_held(&ar->data_lock);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->vdev_id == vdev_id &&
		    ath10k_mac_vif_chan(arvif->vif, &def) == 0)
			return def.chan;
	}

	return NULL;
}

static void
ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
			      struct ieee80211_chanctx_conf *conf,
			      void *data)
{
	struct cfg80211_chan_def *def = data;

	*def = conf->def;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_any_channel(struct ath10k *ar)
{
	struct cfg80211_chan_def def = {};

	ieee80211_iter_chan_contexts_atomic(ar->hw,
					    ath10k_htt_rx_h_any_chan_iter,
					    &def);

	return def.chan;
}

static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd,
				    u32 vdev_id)
{
	struct ieee80211_channel *ch;

	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;
	if (!ch)
		ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
	if (!ch)
		ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
	if (!ch)
		ch = ath10k_htt_rx_h_any_channel(ar);
	if (!ch)
		ch = ar->tgt_oper_chan;
	spin_unlock_bh(&ar->data_lock);

	if (!ch)
		return false;

	status->band = ch->band;
	status->freq = ch->center_freq;

	return true;
}
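/* RSSI is reported per chain relative to an assumed noise floor
 * (ATH10K_DEFAULT_NOISE_FLOOR); a pri20_mhz value of 0x80 marks the chain
 * as invalid and it is left out of status->chains.
 */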
static void ath10k_htt_rx_h_signal(struct ath10k *ar,
				   struct ieee80211_rx_status *status,
				   struct htt_rx_desc *rxd)
{
	int i;

	for (i = 0; i < IEEE80211_MAX_CHAINS; i++) {
		status->chains &= ~BIT(i);

		if (rxd->ppdu_start.rssi_chains[i].pri20_mhz != 0x80) {
			status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR +
				rxd->ppdu_start.rssi_chains[i].pri20_mhz;

			status->chains |= BIT(i);
		}
	}

	/* FIXME: Get real NF */
	status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
			 rxd->ppdu_start.rssi_comb;
	status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}

static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd)
{
	/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
	 * means all prior MSDUs in a PPDU are reported to mac80211 without the
	 * TSF. Is it worth holding frames until end of PPDU is known?
	 *
	 * FIXME: Can we get/compute 64bit TSF?
	 */
	status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
	status->flag |= RX_FLAG_MACTIME_END;
}
static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 u32 vdev_id)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	bool is_first_ppdu;
	bool is_last_ppdu;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_first_ppdu = !!(rxd->attention.flags &
			   __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
	is_last_ppdu = !!(rxd->attention.flags &
			  __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));

	if (is_first_ppdu) {
		/* New PPDU starts so clear out the old per-PPDU status. */
		status->freq = 0;
		status->rate_idx = 0;
		status->nss = 0;
		status->encoding = RX_ENC_LEGACY;
		status->bw = RATE_INFO_BW_20;

		status->flag &= ~RX_FLAG_MACTIME_END;
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		status->flag &= ~(RX_FLAG_AMPDU_IS_LAST);
		status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
		status->ampdu_reference = ar->ampdu_reference;

		ath10k_htt_rx_h_signal(ar, status, rxd);
		ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
		ath10k_htt_rx_h_rates(ar, status, rxd);
	}

	if (is_last_ppdu) {
		ath10k_htt_rx_h_mactime(ar, status, rxd);

		/* set ampdu last segment flag */
		status->flag |= RX_FLAG_AMPDU_IS_LAST;
		ar->ampdu_reference++;
	}
}
static const char * const tid_to_ac[] = {
	"BE",
	"BK",
	"BK",
	"BE",
	"VI",
	"VI",
	"VO",
	"VO",
};

static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
{
	u8 *qc;
	int tid;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return "";

	qc = ieee80211_get_qos_ctl(hdr);
	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
	if (tid < 8)
		snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
	else
		snprintf(out, size, "tid %d", tid);

	return out;
}
static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar,
				       struct ieee80211_rx_status *rx_status,
				       struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;

	status = IEEE80211_SKB_RXCB(skb);
	*status = *rx_status;

	__skb_queue_tail(&ar->htt.rx_msdus_q, skb);
}
static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	char tid[32];

	status = IEEE80211_SKB_RXCB(skb);

	ath10k_dbg(ar, ATH10K_DBG_DATA,
		   "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
		   skb,
		   skb->len,
		   ieee80211_get_SA(hdr),
		   ath10k_get_tid(hdr, tid, sizeof(tid)),
		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
							"mcast" : "ucast",
		   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
		   (status->encoding == RX_ENC_HT) ? "ht" : "",
		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
			skb->data, skb->len);
	trace_ath10k_rx_hdr(ar, skb->data, skb->len);
	trace_ath10k_rx_payload(ar, skb->data, skb->len);

	ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
}
static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
				      struct ieee80211_hdr *hdr)
{
	int len = ieee80211_hdrlen(hdr->frame_control);

	if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
		      ar->running_fw->fw_file.fw_features))
		len = round_up(len, 4);

	return len;
}
static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					enum htt_rx_mpdu_encrypt_type enctype,
					bool is_decrypted)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len;
	size_t crypto_len;
	bool is_first;
	bool is_last;

	rxd = (void *)msdu->data - sizeof(*rxd);
	is_first = !!(rxd->msdu_end.common.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.common.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));

	/* Delivered decapped frame:
	 * [802.11 header]
	 * [crypto param] <-- can be trimmed if !fcs_err &&
	 *                    !decrypt_err && !peer_idx_invalid
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 * [payload]
	 * [FCS] <-- at end, needs to be trimmed
	 */

	/* This probably shouldn't happen but warn just in case */
	if (unlikely(WARN_ON_ONCE(!is_first)))
		return;

	/* This probably shouldn't happen but warn just in case */
	if (unlikely(WARN_ON_ONCE(!(is_first && is_last))))
		return;

	skb_trim(msdu, msdu->len - FCS_LEN);

	/* In most cases this will be true for sniffed frames. It makes sense
	 * to deliver them as-is without stripping the crypto param. This is
	 * necessary for software based decryption.
	 *
	 * If there's no error then the frame is decrypted. At least that is
	 * the case for frames that come in via fragmented rx indication.
	 */
	if (!is_decrypted)
		return;

	/* The payload is decrypted so strip crypto params. Start from tail
	 * since hdr is used to compute some stuff.
	 */

	hdr = (void *)msdu->data;

	/* Tail */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_mic_len(ar, enctype));

		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_icv_len(ar, enctype));
	} else {
		/* MIC */
		if (status->flag & RX_FLAG_MIC_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath10k_htt_rx_crypto_mic_len(ar, enctype));

		/* ICV */
		if (status->flag & RX_FLAG_ICV_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath10k_htt_rx_crypto_icv_len(ar, enctype));
	}

	/* MMIC */
	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN);

	/* Head */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		memmove((void *)msdu->data + crypto_len,
			(void *)msdu->data, hdr_len);
		skb_pull(msdu, crypto_len);
	}
}
static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
					  struct sk_buff *msdu,
					  struct ieee80211_rx_status *status,
					  const u8 first_hdr[64],
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int l3_pad_bytes;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 *
	 * Note: The nwifi header doesn't have QoS Control and is
	 * (always?) a 3addr frame.
	 *
	 * Note2: There's no A-MSDU subframe header. Even if it's part
	 * of an A-MSDU.
	 */

	/* pull decapped header and copy SA & DA */
	rxd = (void *)msdu->data - sizeof(*rxd);

	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);

	hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);

	hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
	skb_pull(msdu, hdr_len);

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}
static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
					   struct sk_buff *msdu,
					   enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len, crypto_len;
	void *rfc1042;
	bool is_first, is_last, is_amsdu;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	rxd = (void *)msdu->data - sizeof(*rxd);
	hdr = (void *)rxd->rx_hdr_status;

	is_first = !!(rxd->msdu_end.common.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.common.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
	is_amsdu = !(is_first && is_last);

	rfc1042 = hdr;

	if (is_first) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		rfc1042 += round_up(hdr_len, bytes_aligned) +
			   round_up(crypto_len, bytes_aligned);
	}

	if (is_amsdu)
		rfc1042 += sizeof(struct amsdu_subframe_hdr);

	return rfc1042;
}
static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					const u8 first_hdr[64],
					enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct ethhdr *eth;
	size_t hdr_len;
	void *rfc1042;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int l3_pad_bytes;
	struct htt_rx_desc *rxd;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
	 * [payload]
	 */

	rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
	if (WARN_ON_ONCE(!rfc1042))
		return;

	rxd = (void *)msdu->data - sizeof(*rxd);
	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);
	skb_pull(msdu, l3_pad_bytes);

	/* pull decapped header and copy SA & DA */
	eth = (struct ethhdr *)msdu->data;
	ether_addr_copy(da, eth->h_dest);
	ether_addr_copy(sa, eth->h_source);
	skb_pull(msdu, sizeof(struct ethhdr));

	/* push rfc1042/llc/snap */
	memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
	       sizeof(struct rfc1042_hdr));

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}
static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
					 struct sk_buff *msdu,
					 struct ieee80211_rx_status *status,
					 const u8 first_hdr[64],
					 enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	int l3_pad_bytes;
	struct htt_rx_desc *rxd;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [amsdu header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 * [payload]
	 */

	rxd = (void *)msdu->data - sizeof(*rxd);
	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);

	skb_put(msdu, l3_pad_bytes);
	skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);

	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
}
static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
				    struct sk_buff *msdu,
				    struct ieee80211_rx_status *status,
				    u8 first_hdr[64],
				    enum htt_rx_mpdu_encrypt_type enctype,
				    bool is_decrypted)
{
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	/* First msdu's decapped header:
	 * [802.11 header] <-- padded to 4 bytes long
	 * [crypto param] <-- padded to 4 bytes long
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 *
	 * Other (2nd, 3rd, ..) msdu's decapped header:
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 */

	rxd = (void *)msdu->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	switch (decap) {
	case RX_MSDU_DECAP_RAW:
		ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
					    is_decrypted);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr,
					      enctype);
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr,
					     enctype);
		break;
	}
}
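/* Map the hardware TCP/UDP/IP checksum attention bits onto the skb
 * ip_summed state: anything that is not a verified TCP or UDP frame over
 * IPv4/IPv6 is left as CHECKSUM_NONE so the stack re-checks it.
 */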
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags, info;
	bool is_ip4, is_ip6;
	bool is_tcp, is_udp;
	bool ip_csum_ok, tcpudp_csum_ok;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);
	info = __le32_to_cpu(rxd->msdu_start.common.info1);

	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

	if (!is_ip4 && !is_ip6)
		return CHECKSUM_NONE;
	if (!is_tcp && !is_udp)
		return CHECKSUM_NONE;
	if (!ip_csum_ok)
		return CHECKSUM_NONE;
	if (!tcpudp_csum_ok)
		return CHECKSUM_NONE;

	return CHECKSUM_UNNECESSARY;
}
static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
{
	msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
}
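/* Per-MPDU processing: derive crypto and decap state from the first and
 * last rx descriptors of the A-MSDU, update the rx_status flags
 * accordingly and undecap every MSDU in the queue.
 */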
static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 bool fill_crypt_header)
{
	struct sk_buff *first;
	struct sk_buff *last;
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;
	struct ieee80211_hdr *hdr;
	enum htt_rx_mpdu_encrypt_type enctype;
	u8 first_hdr[64];
	u8 *qos;
	bool has_fcs_err;
	bool has_crypto_err;
	bool has_tkip_err;
	bool has_peer_idx_invalid;
	bool is_decrypted;
	bool is_mgmt;
	u32 attention;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_mgmt = !!(rxd->attention.flags &
		     __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));

	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	/* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
	 * decapped header. It'll be used for undecapping of each MSDU.
	 */
	hdr = (void *)rxd->rx_hdr_status;
	memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);

	/* Each A-MSDU subframe will use the original header as the base and be
	 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
	 */
	hdr = (void *)first_hdr;

	if (ieee80211_is_data_qos(hdr->frame_control)) {
		qos = ieee80211_get_qos_ctl(hdr);
		qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
	}

	/* Some attention flags are valid only in the last MSDU. */
	last = skb_peek_tail(amsdu);
	rxd = (void *)last->data - sizeof(*rxd);
	attention = __le32_to_cpu(rxd->attention.flags);

	has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
	has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
	has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);

	/* Note: If hardware captures an encrypted frame that it can't decrypt,
	 * e.g. due to fcs error, missing peer or invalid key data it will
	 * report the frame as raw.
	 */
	is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
			!has_fcs_err &&
			!has_crypto_err &&
			!has_peer_idx_invalid);

	/* Clear per-MPDU flags while leaving per-PPDU flags intact. */
	status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
			  RX_FLAG_MMIC_ERROR |
			  RX_FLAG_DECRYPTED |
			  RX_FLAG_IV_STRIPPED |
			  RX_FLAG_ONLY_MONITOR |
			  RX_FLAG_MMIC_STRIPPED);

	if (has_fcs_err)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (has_tkip_err)
		status->flag |= RX_FLAG_MMIC_ERROR;

	/* Firmware reports all necessary management frames via WMI already.
	 * They are not reported to monitor interfaces at all so pass the ones
	 * coming via HTT to monitor interfaces instead. This simplifies
	 * matters.
	 */
	if (is_mgmt)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	if (is_decrypted) {
		status->flag |= RX_FLAG_DECRYPTED;

		if (likely(!is_mgmt))
			status->flag |= RX_FLAG_MMIC_STRIPPED;

		if (fill_crypt_header)
			status->flag |= RX_FLAG_MIC_STRIPPED |
					RX_FLAG_ICV_STRIPPED;
		else
			status->flag |= RX_FLAG_IV_STRIPPED;
	}

	skb_queue_walk(amsdu, msdu) {
		ath10k_htt_rx_h_csum_offload(msdu);
		ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
					is_decrypted);

		/* Undecapping involves copying the original 802.11 header back
		 * to sk_buff. If frame is protected and hardware has decrypted
		 * it then remove the protected bit.
		 */
		if (!is_decrypted)
			continue;
		if (is_mgmt)
			continue;

		if (fill_crypt_header)
			continue;

		hdr = (void *)msdu->data;
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	}
}
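/* Deliver each MSDU of the A-MSDU separately. RX_FLAG_ALLOW_SAME_PN is set
 * for all but the first subframe since the subframes of one A-MSDU share a
 * single PN and would otherwise trip mac80211's replay detection.
 */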
static void ath10k_htt_rx_h_enqueue(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    struct ieee80211_rx_status *status)
{
	struct sk_buff *msdu;
	struct sk_buff *first_subframe;

	first_subframe = skb_peek(amsdu);

	while ((msdu = __skb_dequeue(amsdu))) {
		/* Setup per-MSDU flags */
		if (skb_queue_empty(amsdu))
			status->flag &= ~RX_FLAG_AMSDU_MORE;
		else
			status->flag |= RX_FLAG_AMSDU_MORE;

		if (msdu == first_subframe) {
			first_subframe = NULL;
			status->flag &= ~RX_FLAG_ALLOW_SAME_PN;
		} else {
			status->flag |= RX_FLAG_ALLOW_SAME_PN;
		}

		ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
	}
}
static int ath10k_unchain_msdu(struct sk_buff_head *amsdu)
{
	struct sk_buff *skb, *first;
	int space;
	int total_len = 0;

	/* TODO:  Might could optimize this by using
	 * skb_try_coalesce or similar method to
	 * decrease copying, or maybe get mac80211 to
	 * provide a way to just receive a list of
	 * skb?
	 */

	first = __skb_dequeue(amsdu);

	/* Allocate total length all at once. */
	skb_queue_walk(amsdu, skb)
		total_len += skb->len;

	space = total_len - skb_tailroom(first);
	if ((space > 0) &&
	    (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
		/* TODO:  bump some rx-oom error stat */
		/* put it back together so we can free the
		 * whole list at once.
		 */
		__skb_queue_head(amsdu, first);
		return -1;
	}

	/* Walk list again, copying contents into
	 * msdu_head
	 */
	while ((skb = __skb_dequeue(amsdu))) {
		skb_copy_from_linear_data(skb, skb_put(first, skb->len),
					  skb->len);
		dev_kfree_skb_any(skb);
	}

	__skb_queue_head(amsdu, first);
	return 0;
}
static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
				    struct sk_buff_head *amsdu)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	/* FIXME: Current unchaining logic can only handle simple case of raw
	 * msdu chaining. If decapping is other than raw the chaining may be
	 * more complex and this isn't handled by the current code. Don't even
	 * try re-constructing such frames - it'll be pretty much garbage.
	 */
	if (decap != RX_MSDU_DECAP_RAW ||
	    skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
		__skb_queue_purge(amsdu);
		return;
	}

	ath10k_unchain_msdu(amsdu);
}
static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
					struct sk_buff_head *amsdu,
					struct ieee80211_rx_status *rx_status)
{
	/* FIXME: It might be a good idea to do some fuzzy-testing to drop
	 * invalid/dangerous frames.
	 */

	if (!rx_status->freq) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n");
		return false;
	}

	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
		return false;
	}

	return true;
}
static void ath10k_htt_rx_h_filter(struct ath10k *ar,
				   struct sk_buff_head *amsdu,
				   struct ieee80211_rx_status *rx_status)
{
	if (skb_queue_empty(amsdu))
		return;

	if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
		return;

	__skb_queue_purge(amsdu);
}
static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct ieee80211_rx_status *rx_status = &htt->rx_status;
	struct sk_buff_head amsdu;
	int ret;

	__skb_queue_head_init(&amsdu);

	spin_lock_bh(&htt->rx_ring.lock);
	if (htt->rx_confused) {
		spin_unlock_bh(&htt->rx_ring.lock);
		return -EIO;
	}
	ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
	spin_unlock_bh(&htt->rx_ring.lock);

	if (ret < 0) {
		ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
		__skb_queue_purge(&amsdu);
		/* FIXME: It's probably a good idea to reboot the
		 * device instead of leaving it inoperable.
		 */
		htt->rx_confused = true;
		return ret;
	}

	ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);

	/* only for ret = 1 indicates chained msdus */
	if (ret > 0)
		ath10k_htt_rx_h_unchain(ar, &amsdu);

	ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true);
	ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status);

	return 0;
}
static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
				      struct htt_rx_indication *rx)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	int num_mpdu_ranges;
	int i, mpdu_count = 0;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
			rx, sizeof(*rx) +
			(sizeof(struct htt_rx_indication_mpdu_range) *
				num_mpdu_ranges));

	for (i = 0; i < num_mpdu_ranges; i++)
		mpdu_count += mpdu_ranges[i].mpdu_count;

	atomic_add(mpdu_count, &htt->num_mpdus_ready);
}
static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
				       struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_done tx_done = {};
	int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
	__le16 msdu_id;
	int i;

	switch (status) {
	case HTT_DATA_TX_STATUS_NO_ACK:
		tx_done.status = HTT_TX_COMPL_STATE_NOACK;
		break;
	case HTT_DATA_TX_STATUS_OK:
		tx_done.status = HTT_TX_COMPL_STATE_ACK;
		break;
	case HTT_DATA_TX_STATUS_DISCARD:
	case HTT_DATA_TX_STATUS_POSTPONE:
	case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
		break;
	default:
		ath10k_warn(ar, "unhandled tx completion status %d\n", status);
		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
		break;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
		   resp->data_tx_completion.num_msdus);

	for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
		msdu_id = resp->data_tx_completion.msdus[i];
		tx_done.msdu_id = __le16_to_cpu(msdu_id);

		/* kfifo_put: In practice firmware shouldn't fire off per-CE
		 * interrupt and main interrupt (MSI/-X range case) for the same
		 * HTC service so it should be safe to use kfifo_put w/o lock.
		 *
		 * From kfifo_put() documentation:
		 *  Note that with only one concurrent reader and one concurrent
		 *  writer, you don't need extra locking to use these macro.
		 */
		if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
			ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
				    tx_done.msdu_id, tx_done.status);
			ath10k_txrx_tx_unref(htt, &tx_done);
		}
	}
}
static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_addba *ev = &resp->rx_addba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx addba tid %hu peer_id %hu size %hhu\n",
		   tid, peer_id, ev->window_size);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
		   peer->addr, tid, ev->window_size);

	ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}
static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_delba *ev = &resp->rx_delba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx delba tid %hu peer_id %hu\n",
		   tid, peer_id);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx stop rx ba session sta %pM tid %hu\n",
		   peer->addr, tid);

	ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}
static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
				       struct sk_buff_head *amsdu)
{
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;

	if (skb_queue_empty(list))
		return -ENOBUFS;

	if (WARN_ON(!skb_queue_empty(amsdu)))
		return -EINVAL;

	while ((msdu = __skb_dequeue(list))) {
		__skb_queue_tail(amsdu, msdu);

		rxd = (void *)msdu->data - sizeof(*rxd);
		if (rxd->msdu_end.common.info0 &
		    __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
			break;
	}

	msdu = skb_peek_tail(amsdu);
	rxd = (void *)msdu->data - sizeof(*rxd);
	if (!(rxd->msdu_end.common.info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
		skb_queue_splice_init(amsdu, list);
		return -EAGAIN;
	}

	return 0;
}
static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
					    struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (!ieee80211_has_protected(hdr->frame_control))
		return;

	/* Offloaded frames are already decrypted but firmware insists they are
	 * protected in the 802.11 header. Strip the flag. Otherwise mac80211
	 * will drop the frame.
	 */

	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	status->flag |= RX_FLAG_DECRYPTED |
			RX_FLAG_IV_STRIPPED |
			RX_FLAG_MMIC_STRIPPED;
}
static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
				       struct sk_buff_head *list)
{
	struct ath10k_htt *htt = &ar->htt;
	struct ieee80211_rx_status *status = &htt->rx_status;
	struct htt_rx_offload_msdu *rx;
	struct sk_buff *msdu;
	size_t offset;

	while ((msdu = __skb_dequeue(list))) {
		/* Offloaded frames don't have Rx descriptor. Instead they have
		 * a short meta information header.
		 */

		rx = (void *)msdu->data;

		skb_put(msdu, sizeof(*rx));
		skb_pull(msdu, sizeof(*rx));

		if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
			ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
			dev_kfree_skb_any(msdu);
			continue;
		}

		skb_put(msdu, __le16_to_cpu(rx->msdu_len));

		/* Offloaded rx header length isn't multiple of 2 nor 4 so the
		 * actual payload is unaligned. Align the frame. Otherwise
		 * mac80211 complains. This shouldn't reduce performance much
		 * because these offloaded frames are rare.
		 */
		offset = 4 - ((unsigned long)msdu->data & 3);
		skb_put(msdu, offset);
		memmove(msdu->data + offset, msdu->data, msdu->len);
		skb_pull(msdu, offset);

		/* FIXME: The frame is NWifi. Re-construct QoS Control
		 * if possible later.
		 */

		memset(status, 0, sizeof(*status));
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		ath10k_htt_rx_h_rx_offload_prot(status, msdu);
		ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
		ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
	}
}
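/* Handle an in-order rx indication: the firmware hands back a list of
 * buffer physical addresses in reception order; the buffers are looked up
 * in the rx_ring skb_table hash, split into A-MSDUs and then processed
 * like regular rx.
 */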
static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (void *)skb->data;
	struct ieee80211_rx_status *status = &htt->rx_status;
	struct sk_buff_head list;
	struct sk_buff_head amsdu;
	u16 peer_id;
	u16 msdu_count;
	u8 vdev_id;
	u8 tid;
	bool offload;
	bool frag;
	int ret;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_confused)
		return -EIO;

	skb_pull(skb, sizeof(resp->hdr));
	skb_pull(skb, sizeof(resp->rx_in_ord_ind));

	peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
	msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
	vdev_id = resp->rx_in_ord_ind.vdev_id;
	tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
	offload = !!(resp->rx_in_ord_ind.info &
			HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
	frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
		   vdev_id, peer_id, tid, offload, frag, msdu_count);

	if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs32)) {
		ath10k_warn(ar, "dropping invalid in order rx indication\n");
		return -EINVAL;
	}

	/* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
	 * extracted and processed.
	 */
	__skb_queue_head_init(&list);
	if (ar->hw_params.target_64bit)
		ret = ath10k_htt_rx_pop_paddr64_list(htt, &resp->rx_in_ord_ind,
						     &list);
	else
		ret = ath10k_htt_rx_pop_paddr32_list(htt, &resp->rx_in_ord_ind,
						     &list);

	if (ret < 0) {
		ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
		htt->rx_confused = true;
		return -EIO;
	}

	/* Offloaded frames are very different and need to be handled
	 * separately.
	 */
	if (offload)
		ath10k_htt_rx_h_rx_offload(ar, &list);

	while (!skb_queue_empty(&list)) {
		__skb_queue_head_init(&amsdu);
		ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
		switch (ret) {
		case 0:
			/* Note: The in-order indication may report interleaved
			 * frames from different PPDUs meaning reported rx rate
			 * to mac80211 isn't accurate/reliable. It's still
			 * better to report something than nothing though. This
			 * should still give an idea about rx rate to the user.
			 */
			ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
			ath10k_htt_rx_h_filter(ar, &amsdu, status);
			ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false);
			ath10k_htt_rx_h_enqueue(ar, &amsdu, status);
			break;
		default:
			/* Should not happen. */
			ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
			htt->rx_confused = true;
			__skb_queue_purge(&list);
			return -EIO;
		}
	}

	return ret;
}
static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
						   const __le32 *resp_ids,
						   int num_resp_ids)
{
	int i;
	u32 resp_id;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
		   num_resp_ids);

	for (i = 0; i < num_resp_ids; i++) {
		resp_id = le32_to_cpu(resp_ids[i]);

		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
			   resp_id);

		/* TODO: free resp_id */
	}
}
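/* In pull-push tx mode the firmware asks the host to dequeue frames for
 * specific peer/tid pairs via tx fetch indications; the host pushes up to
 * the requested number of MSDUs/bytes and reports back what it actually
 * delivered in the fetch response.
 */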
static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_hw *hw = ar->hw;
	struct ieee80211_txq *txq;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_fetch_record *record;
	size_t len;
	size_t max_num_bytes;
	size_t max_num_msdus;
	size_t num_bytes;
	int num_msdus;
	const __le32 *resp_ids;
	u16 num_records;
	u16 num_resp_ids;
	u16 peer_id;
	u8 tid;
	int ret;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
		return;
	}

	num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
	num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);

	len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
	len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n",
		   num_records, num_resp_ids,
		   le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));

	if (!ar->htt.tx_q_state.enabled) {
		ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
		return;
	}

	if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
		ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
		return;
	}

	rcu_read_lock();

	for (i = 0; i < num_records; i++) {
		record = &resp->tx_fetch_ind.records[i];
		peer_id = MS(le16_to_cpu(record->info),
			     HTT_TX_FETCH_RECORD_INFO_PEER_ID);
		tid = MS(le16_to_cpu(record->info),
			 HTT_TX_FETCH_RECORD_INFO_TID);
		max_num_msdus = le16_to_cpu(record->num_msdus);
		max_num_bytes = le32_to_cpu(record->num_bytes);

		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n",
			   i, peer_id, tid, max_num_msdus, max_num_bytes);

		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
			ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->data_lock);
		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
		spin_unlock_bh(&ar->data_lock);

		/* It is okay to release the lock and use txq because RCU read
		 * lock is held.
		 */

		if (unlikely(!txq)) {
			ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		num_msdus = 0;
		num_bytes = 0;

		while (num_msdus < max_num_msdus &&
		       num_bytes < max_num_bytes) {
			ret = ath10k_mac_tx_push_txq(hw, txq);
			if (ret < 0)
				break;

			num_msdus++;
			num_bytes += ret;
		}

		record->num_msdus = cpu_to_le16(num_msdus);
		record->num_bytes = cpu_to_le32(num_bytes);

		ath10k_htt_tx_txq_recalc(hw, txq);
	}

	rcu_read_unlock();

	resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);

	ret = ath10k_htt_tx_fetch_resp(ar,
				       resp->tx_fetch_ind.token,
				       resp->tx_fetch_ind.fetch_seq_num,
				       resp->tx_fetch_ind.records,
				       num_records);
	if (unlikely(ret)) {
		ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
			    le32_to_cpu(resp->tx_fetch_ind.token), ret);
		/* FIXME: request fw restart */
	}

	ath10k_htt_tx_txq_sync(ar);
}
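
/* The fetch indication drives the pull side of the push/pull tx model: for
 * each record the host pushes up to the firmware-requested number of
 * MSDUs/bytes from the matching txq, writes back how much was actually
 * pushed into the record, and then reports the records (and any pending
 * resp ids) to firmware via ath10k_htt_tx_fetch_resp().
 */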
static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
					   struct sk_buff *skb)
{
	const struct htt_resp *resp = (void *)skb->data;
	size_t len;
	int num_resp_ids;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
		return;
	}

	num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
	len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
		return;
	}

	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
					       resp->tx_fetch_confirm.resp_ids,
					       num_resp_ids);
}
static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
					     struct sk_buff *skb)
{
	const struct htt_resp *resp = (void *)skb->data;
	const struct htt_tx_mode_switch_record *record;
	struct ieee80211_txq *txq;
	struct ath10k_txq *artxq;
	size_t len;
	size_t num_records;
	enum htt_tx_mode_switch_mode mode;
	bool enable;
	u16 info0;
	u16 info1;
	u16 threshold;
	u16 peer_id;
	u8 tid;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
		return;
	}

	info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
	info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);

	enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
	num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);
	mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
	threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n",
		   info0, info1, enable, num_records, mode, threshold);

	len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n");
		return;
	}

	switch (mode) {
	case HTT_TX_MODE_SWITCH_PUSH:
	case HTT_TX_MODE_SWITCH_PUSH_PULL:
		break;
	default:
		ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n",
			    mode);
		return;
	}

	if (!enable)
		return;

	ar->htt.tx_q_state.enabled = enable;
	ar->htt.tx_q_state.mode = mode;
	ar->htt.tx_q_state.num_push_allowed = threshold;

	rcu_read_lock();

	for (i = 0; i < num_records; i++) {
		record = &resp->tx_mode_switch_ind.records[i];
		info0 = le16_to_cpu(record->info0);
		peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
		tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);

		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
			ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->data_lock);
		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
		spin_unlock_bh(&ar->data_lock);

		/* It is okay to release the lock and use txq because RCU read
		 * lock is held.
		 */

		if (unlikely(!txq)) {
			ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->htt.tx_lock);
		artxq = (void *)txq->drv_priv;
		artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
		spin_unlock_bh(&ar->htt.tx_lock);
	}

	rcu_read_unlock();

	ath10k_mac_tx_push_pending(ar);
}
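
/* A mode switch indication (re)configures the host tx queue state: it
 * selects push vs push/pull mode and, per peer/tid record, caps how many
 * MSDUs may be pushed without a fetch indication (num_push_allowed).
 */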
void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	bool release;

	release = ath10k_htt_t2h_msg_handler(ar, skb);

	/* Free the indication buffer */
	if (release)
		dev_kfree_skb_any(skb);
}
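
/* Note: ath10k_htt_t2h_msg_handler() returns false when it has taken
 * ownership of the skb (e.g. queued it on rx_in_ord_compl_q for deferred
 * processing), which is why the indication buffer above is only freed on a
 * true return.
 */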
static inline bool is_valid_legacy_rate(u8 rate)
{
	static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12,
					  18, 24, 36, 48, 54};
	int i;

	for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) {
		if (rate == legacy_rates[i])
			return true;
	}

	return false;
}
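
/* The table above appears to list the nominal legacy rates in Mbps as
 * reported in the firmware ratecode: CCK 1, 2, 5(.5), 11 and OFDM 6, 9,
 * 12, 18, 24, 36, 48, 54. Any other value in the peer stats ratecode is
 * rejected.
 */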
static void
ath10k_update_per_peer_tx_stats(struct ath10k *ar,
				struct ieee80211_sta *sta,
				struct ath10k_per_peer_tx_stats *peer_stats)
{
	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
	u8 rate = 0, sgi;
	struct rate_info txrate;

	lockdep_assert_held(&ar->data_lock);

	txrate.flags = ATH10K_HW_PREAMBLE(peer_stats->ratecode);
	txrate.bw = ATH10K_HW_BW(peer_stats->flags);
	txrate.nss = ATH10K_HW_NSS(peer_stats->ratecode);
	txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode);
	sgi = ATH10K_HW_GI(peer_stats->flags);

	if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) {
		ath10k_warn(ar, "Invalid VHT mcs %hhd peer stats", txrate.mcs);
		return;
	}

	if (txrate.flags == WMI_RATE_PREAMBLE_HT &&
	    (txrate.mcs > 7 || txrate.nss < 1)) {
		ath10k_warn(ar, "Invalid HT mcs %hhd nss %hhd peer stats",
			    txrate.mcs, txrate.nss);
		return;
	}

	memset(&arsta->txrate, 0, sizeof(arsta->txrate));

	if (txrate.flags == WMI_RATE_PREAMBLE_CCK ||
	    txrate.flags == WMI_RATE_PREAMBLE_OFDM) {
		rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode);

		if (!is_valid_legacy_rate(rate)) {
			ath10k_warn(ar, "Invalid legacy rate %hhd peer stats",
				    rate);
			return;
		}

		/* This is hacky, FW sends CCK rate 5.5Mbps as 6 */
		rate *= 10;
		if (rate == 60 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
			rate = rate - 5;
		arsta->txrate.legacy = rate;
	} else if (txrate.flags == WMI_RATE_PREAMBLE_HT) {
		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
		arsta->txrate.mcs = txrate.mcs + 8 * (txrate.nss - 1);
	} else {
		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
		arsta->txrate.mcs = txrate.mcs;
	}

	if (sgi)
		arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;

	arsta->txrate.nss = txrate.nss;
	arsta->txrate.bw = txrate.bw + RATE_INFO_BW_20;
}
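
/* Conversion notes (per the cfg80211 struct rate_info conventions):
 * - rate_info.legacy is in units of 100 kbit/s, hence the rate * 10
 *   scaling and the 60 -> 55 fixup for the 5.5 Mbps CCK rate.
 * - HT MCS indices are flattened across spatial streams
 *   (mcs + 8 * (nss - 1)), while VHT keeps mcs and nss separate.
 * - txrate.bw is a hardware bandwidth code offset so that 0 maps to
 *   RATE_INFO_BW_20.
 */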
static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
					struct sk_buff *skb)
{
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
	struct htt_per_peer_tx_stats_ind *tx_stats;
	struct ieee80211_sta *sta;
	struct ath10k_peer *peer;
	int peer_id, i;
	u8 ppdu_len, num_ppdu;

	num_ppdu = resp->peer_tx_stats.num_ppdu;
	ppdu_len = resp->peer_tx_stats.ppdu_len * sizeof(__le32);

	if (skb->len < sizeof(struct htt_resp_hdr) + num_ppdu * ppdu_len) {
		ath10k_warn(ar, "Invalid peer stats buf length %d\n", skb->len);
		return;
	}

	tx_stats = (struct htt_per_peer_tx_stats_ind *)
			(resp->peer_tx_stats.payload);
	peer_id = __le16_to_cpu(tx_stats->peer_id);

	rcu_read_lock();
	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n",
			    peer_id);
		goto out;
	}

	sta = peer->sta;
	for (i = 0; i < num_ppdu; i++) {
		tx_stats = (struct htt_per_peer_tx_stats_ind *)
			   (resp->peer_tx_stats.payload + i * ppdu_len);

		p_tx_stats->succ_bytes = __le32_to_cpu(tx_stats->succ_bytes);
		p_tx_stats->retry_bytes = __le32_to_cpu(tx_stats->retry_bytes);
		p_tx_stats->failed_bytes =
				__le32_to_cpu(tx_stats->failed_bytes);
		p_tx_stats->ratecode = tx_stats->ratecode;
		p_tx_stats->flags = tx_stats->flags;
		p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts);
		p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts);
		p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts);

		ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
	}

out:
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();
}
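
/* Each PPDU record in the indication payload is ppdu_len bytes long
 * (resp->peer_tx_stats.ppdu_len is given in 32-bit words); the records are
 * unpacked one at a time into ar->peer_tx_stats and fed to
 * ath10k_update_per_peer_tx_stats() under ar->data_lock.
 */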
static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data)
{
	struct ath10k_pktlog_hdr *hdr = (struct ath10k_pktlog_hdr *)data;
	struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
	struct ath10k_10_2_peer_tx_stats *tx_stats;
	struct ieee80211_sta *sta;
	struct ath10k_peer *peer;
	u16 log_type = __le16_to_cpu(hdr->log_type);
	u32 peer_id = 0, i;

	if (log_type != ATH_PKTLOG_TYPE_TX_STAT)
		return;

	tx_stats = (struct ath10k_10_2_peer_tx_stats *)((hdr->payload) +
		    ATH10K_10_2_TX_STATS_OFFSET);

	if (!tx_stats->tx_ppdu_cnt)
		return;

	peer_id = tx_stats->peer_id;

	rcu_read_lock();
	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
			    peer_id);
		goto out;
	}

	sta = peer->sta;
	for (i = 0; i < tx_stats->tx_ppdu_cnt; i++) {
		p_tx_stats->succ_bytes =
			__le16_to_cpu(tx_stats->success_bytes[i]);
		p_tx_stats->retry_bytes =
			__le16_to_cpu(tx_stats->retry_bytes[i]);
		p_tx_stats->failed_bytes =
			__le16_to_cpu(tx_stats->failed_bytes[i]);
		p_tx_stats->ratecode = tx_stats->ratecode[i];
		p_tx_stats->flags = tx_stats->flags[i];
		p_tx_stats->succ_pkts = tx_stats->success_pkts[i];
		p_tx_stats->retry_pkts = tx_stats->retry_pkts[i];
		p_tx_stats->failed_pkts = tx_stats->failed_pkts[i];

		ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
	}
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();

	return;

out:
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();
}
bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	enum htt_t2h_msg_type type;

	/* confirm alignment */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn(ar, "unaligned htt message, expect trouble\n");

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
		   resp->hdr.msg_type);

	if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X",
			   resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
		return true;
	}
	type = ar->htt.t2h_msg_types[resp->hdr.msg_type];

	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
		htt->target_version_major = resp->ver_resp.major;
		htt->target_version_minor = resp->ver_resp.minor;
		complete(&htt->target_version_received);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IND:
		ath10k_htt_rx_proc_rx_ind(htt, &resp->rx_ind);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP: {
		struct htt_peer_map_event ev = {
			.vdev_id = resp->peer_map.vdev_id,
			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
		};
		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
		ath10k_peer_map_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
		struct htt_peer_unmap_event ev = {
			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
		};
		ath10k_peer_unmap_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
		struct htt_tx_done tx_done = {};
		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);

		tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);

		switch (status) {
		case HTT_MGMT_TX_STATUS_OK:
			tx_done.status = HTT_TX_COMPL_STATE_ACK;
			break;
		case HTT_MGMT_TX_STATUS_RETRY:
			tx_done.status = HTT_TX_COMPL_STATE_NOACK;
			break;
		case HTT_MGMT_TX_STATUS_DROP:
			tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
			break;
		}

		status = ath10k_txrx_tx_unref(htt, &tx_done);
		if (!status) {
			spin_lock_bh(&htt->tx_lock);
			ath10k_htt_tx_mgmt_dec_pending(htt);
			spin_unlock_bh(&htt->tx_lock);
		}
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
		ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_SEC_IND: {
		struct ath10k *ar = htt->ar;
		struct htt_security_indication *ev = &resp->security_indication;

		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "sec ind peer_id %d unicast %d type %d\n",
			   __le16_to_cpu(ev->peer_id),
			   !!(ev->flags & HTT_SECURITY_IS_UNICAST),
			   MS(ev->flags, HTT_SECURITY_TYPE));
		complete(&ar->install_key_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		atomic_inc(&htt->num_mpdus_ready);
		break;
	}
	case HTT_T2H_MSG_TYPE_TEST:
		break;
	case HTT_T2H_MSG_TYPE_STATS_CONF:
		trace_ath10k_htt_stats(ar, skb->data, skb->len);
		break;
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
		/* Firmware can return tx frames if it's unable to fully
		 * process them and suspects host may be able to fix it. ath10k
		 * sends all tx frames as already inspected so this shouldn't
		 * happen unless fw has a bug.
		 */
		ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
		break;
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
		ath10k_htt_rx_addba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_RX_DELBA:
		ath10k_htt_rx_delba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG: {
		trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
					skb->len -
					offsetof(struct htt_resp,
						 pktlog_msg.payload));

		if (ath10k_peer_stats_enabled(ar))
			ath10k_fetch_10_2_tx_stats(ar,
						   resp->pktlog_msg.payload);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
		/* Ignore this event because mac80211 takes care of Rx
		 * aggregation reordering.
		 */
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
		__skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
		return false;
	}
	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
		break;
	case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
		u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
		u32 freq = __le32_to_cpu(resp->chan_change.freq);

		ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq);
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt chan change freq %u phymode %s\n",
			   freq, ath10k_wmi_phymode_str(phymode));
		break;
	}
	case HTT_T2H_MSG_TYPE_AGGR_CONF:
		break;
	case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
		struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);

		if (!tx_fetch_ind) {
			ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
			break;
		}
		skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
		ath10k_htt_rx_tx_fetch_confirm(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
		ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_PEER_STATS:
		ath10k_htt_fetch_peer_stats(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_EN_STATS:
	default:
		ath10k_warn(ar, "htt event (%d) not handled\n",
			    resp->hdr.msg_type);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		break;
	}

	return true;
}
EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
					     struct sk_buff *skb)
{
	trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
{
	struct sk_buff *skb;

	while (quota < budget) {
		if (skb_queue_empty(&ar->htt.rx_msdus_q))
			break;

		skb = __skb_dequeue(&ar->htt.rx_msdus_q);
		if (!skb)
			break;
		ath10k_process_rx(ar, skb);
		quota++;
	}

	return quota;
}
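
/* Delivers at most (budget - quota) MSDUs from rx_msdus_q to mac80211 and
 * returns the updated quota so the NAPI poll loop can tell whether the
 * budget was exhausted.
 */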
int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {};
	struct sk_buff_head tx_ind_q;
	struct sk_buff *skb;
	unsigned long flags;
	int quota = 0, done, ret;
	bool resched_napi = false;

	__skb_queue_head_init(&tx_ind_q);

	/* Process pending frames before dequeuing more data
	 * from hardware.
	 */
	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
	if (quota == budget) {
		resched_napi = true;
		goto exit;
	}

	while ((skb = __skb_dequeue(&htt->rx_in_ord_compl_q))) {
		spin_lock_bh(&htt->rx_ring.lock);
		ret = ath10k_htt_rx_in_ord_ind(ar, skb);
		spin_unlock_bh(&htt->rx_ring.lock);

		dev_kfree_skb_any(skb);
		if (ret == -EIO) {
			resched_napi = true;
			goto exit;
		}
	}

	while (atomic_read(&htt->num_mpdus_ready)) {
		ret = ath10k_htt_rx_handle_amsdu(htt);
		if (ret == -EIO) {
			resched_napi = true;
			goto exit;
		}
		atomic_dec(&htt->num_mpdus_ready);
	}

	/* Deliver received data after processing data from hardware */
	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);

	/* From NAPI documentation:
	 * The napi poll() function may also process TX completions, in which
	 * case if it processes the entire TX ring then it should count that
	 * work as the rest of the budget.
	 */
	if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
		quota = budget;

	/* kfifo_get: called only within txrx_tasklet so it's neatly serialized.
	 * From kfifo_get() documentation:
	 *  Note that with only one concurrent reader and one concurrent writer,
	 *  you don't need extra locking to use these macro.
	 */
	while (kfifo_get(&htt->txdone_fifo, &tx_done))
		ath10k_txrx_tx_unref(htt, &tx_done);

	ath10k_mac_tx_push_pending(ar);

	spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
	skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
	spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);

	while ((skb = __skb_dequeue(&tx_ind_q))) {
		ath10k_htt_rx_tx_fetch_ind(ar, skb);
		dev_kfree_skb_any(skb);
	}

exit:
	ath10k_htt_rx_msdu_buff_replenish(htt);
	/* In case of rx failure or more data to read, report budget
	 * to reschedule NAPI poll
	 */
	done = resched_napi ? budget : quota;

	return done;
}
EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);
static const struct ath10k_htt_rx_ops htt_rx_ops_32 = {
	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32,
	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32,
	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32,
	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32,
	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32,
};

static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64,
	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64,
	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64,
	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64,
	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
};

void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;

	if (ar->hw_params.target_64bit)
		htt->rx_ops = &htt_rx_ops_64;
	else
		htt->rx_ops = &htt_rx_ops_32;
}
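
/* Note: the rx ops selected here pick between the 32-bit and 64-bit rx ring
 * helpers based on whether the target uses 64-bit addressing
 * (hw_params.target_64bit).
 */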