/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/log2.h>
#include <linux/bitfield.h>

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

#define HTT_RX_RING_REFILL_RESCHED_MS 5
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
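
/* Look up the sk_buff that was posted to the rx ring for a given DMA
 * address. Used in the in-order (full rx reorder) path where firmware
 * reports buffers by physical address rather than by ring index.
 */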
static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr)
{
	struct ath10k_skb_rxcb *rxcb;

	hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
		if (rxcb->paddr == paddr)
			return ATH10K_RXCB_SKB(rxcb);

	WARN_ON_ONCE(1);
	return NULL;
}
static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *skb;
	struct hlist_node *n;
	int i;

	if (htt->rx_ring.in_ord_rx) {
		hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
			skb = ATH10K_RXCB_SKB(rxcb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			hash_del(&rxcb->hlist);
			dev_kfree_skb_any(skb);
		}
	} else {
		for (i = 0; i < htt->rx_ring.size; i++) {
			skb = htt->rx_ring.netbufs_ring[i];
			if (!skb)
				continue;

			rxcb = ATH10K_SKB_RXCB(skb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
		}
	}

	htt->rx_ring.fill_cnt = 0;
	hash_init(htt->rx_ring.skb_table);
	memset(htt->rx_ring.netbufs_ring, 0,
	       htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
}
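
/* The rx ring stores buffer addresses as either 32-bit or 64-bit values
 * depending on the target's addressing width. These helpers keep the rest
 * of the rx path agnostic of the ring word size.
 */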
static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt)
{
	return htt->rx_ring.size * sizeof(*htt->rx_ring.paddrs_ring_32);
}

static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt)
{
	return htt->rx_ring.size * sizeof(*htt->rx_ring.paddrs_ring_64);
}

static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt,
					     void *vaddr)
{
	htt->rx_ring.paddrs_ring_32 = vaddr;
}

static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt,
					     void *vaddr)
{
	htt->rx_ring.paddrs_ring_64 = vaddr;
}

static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt,
					  dma_addr_t paddr, int idx)
{
	htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr);
}

static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt,
					  dma_addr_t paddr, int idx)
{
	htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr);
}

static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx)
{
	htt->rx_ring.paddrs_ring_32[idx] = 0;
}

static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx)
{
	htt->rx_ring.paddrs_ring_64[idx] = 0;
}

static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt *htt)
{
	return (void *)htt->rx_ring.paddrs_ring_32;
}

static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt)
{
	return (void *)htt->rx_ring.paddrs_ring_64;
}
static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_desc *rx_desc;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	/* The Full Rx Reorder firmware has no way of telling the host
	 * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
	 * To keep things simple make sure ring is always half empty. This
	 * guarantees there'll be no replenishment overruns possible.
	 */
	BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);

	idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = (struct htt_rx_desc *)skb->data;
		rx_desc->attention.flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		rxcb = ATH10K_SKB_RXCB(skb);
		rxcb->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		ath10k_htt_set_paddrs_ring(htt, paddr, idx);
		htt->rx_ring.fill_cnt++;

		if (htt->rx_ring.in_ord_rx) {
			hash_add(htt->rx_ring.skb_table,
				 &ATH10K_SKB_RXCB(skb)->hlist,
				 paddr);
		}

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	/*
	 * Make sure the rx buffer is updated before available buffer
	 * index to avoid any potential rx ring corruption.
	 */
	mb();
	*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
	return ret;
}
static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}
static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_deficit, num_to_fill;

	/* Refilling the whole RX ring buffer proves to be a bad idea. The
	 * reason is RX may take up significant amount of CPU cycles and starve
	 * other tasks, e.g. TX on an ethernet device while acting as a bridge
	 * with ath10k wlan interface. This ended up with very poor performance
	 * once the host CPU was overwhelmed with RX on ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turn makes use of the fact tasklets are
	 * processed in FIFO order. This means actual RX processing can starve
	 * out refilling. If there's not enough buffers on RX ring FW will not
	 * report RX until it is refilled with enough buffers. This
	 * automatically balances load with respect to CPU power.
	 *
	 * This probably comes at a cost of lower maximum throughput but
	 * improves the average and stability.
	 */
	spin_lock_bh(&htt->rx_ring.lock);
	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
	num_deficit -= num_to_fill;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	} else if (num_deficit > 0) {
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}
static void ath10k_htt_rx_ring_refill_retry(struct timer_list *t)
{
	struct ath10k_htt *htt = from_timer(htt, t, rx_ring.refill_retry_timer);

	ath10k_htt_rx_msdu_buff_replenish(htt);
}
int ath10k_htt_rx_ring_refill(struct ath10k *ar)
{
	struct ath10k_htt *htt = &ar->htt;
	int ret;

	if (ar->dev_type == ATH10K_DEV_TYPE_HL)
		return 0;

	spin_lock_bh(&htt->rx_ring.lock);
	ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
					      htt->rx_ring.fill_cnt));

	if (ret)
		ath10k_htt_rx_ring_free(htt);

	spin_unlock_bh(&htt->rx_ring.lock);

	return ret;
}
void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
	if (htt->ar->dev_type == ATH10K_DEV_TYPE_HL)
		return;

	del_timer_sync(&htt->rx_ring.refill_retry_timer);

	skb_queue_purge(&htt->rx_msdus_q);
	skb_queue_purge(&htt->rx_in_ord_compl_q);
	skb_queue_purge(&htt->tx_fetch_ind_q);

	spin_lock_bh(&htt->rx_ring.lock);
	ath10k_htt_rx_ring_free(htt);
	spin_unlock_bh(&htt->rx_ring.lock);

	dma_free_coherent(htt->ar->dev,
			  ath10k_htt_get_rx_ring_size(htt),
			  ath10k_htt_get_vaddr_ring(htt),
			  htt->rx_ring.base_paddr);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);

	kfree(htt->rx_ring.netbufs_ring);
}
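
/* Pop one sk_buff from the rx ring at the current software read index.
 * Caller must hold rx_ring.lock.
 */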
static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int idx;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_ring.fill_cnt == 0) {
		ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
		return NULL;
	}

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];
	htt->rx_ring.netbufs_ring[idx] = NULL;
	ath10k_htt_reset_paddrs_ring(htt, idx);

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev,
			 ATH10K_SKB_RXCB(msdu)->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}
/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   struct sk_buff_head *amsdu)
{
	struct ath10k *ar = htt->ar;
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;

	lockdep_assert_held(&htt->rx_ring.lock);

	for (;;) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		msdu = ath10k_htt_rx_netbuf_pop(htt);
		if (!msdu) {
			__skb_queue_purge(amsdu);
			return -ENOENT;
		}

		__skb_queue_tail(amsdu, msdu);

		rx_desc = (struct htt_rx_desc *)msdu->data;

		/* FIXME: we must report msdu payload since this is what caller
		 * expects now
		 */
		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc->attention.flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			__skb_queue_purge(amsdu);
			return -EIO;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc->frag_info.ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_trim(msdu, 0);
		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
		msdu_len -= msdu->len;

		/* Note: Chained buffers do not contain rx descriptor */
		while (msdu_chained--) {
			msdu = ath10k_htt_rx_netbuf_pop(htt);
			if (!msdu) {
				__skb_queue_purge(amsdu);
				return -ENOENT;
			}

			__skb_queue_tail(amsdu, msdu);
			skb_trim(msdu, 0);
			skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= msdu->len;
			msdu_chaining = 1;
		}

		last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
					 sizeof(*rx_desc) - sizeof(u32));

		if (last_msdu)
			break;
	}

	if (skb_queue_empty(amsdu))
		msdu_chaining = -1;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */
	return msdu_chaining;
}
static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
					       u64 paddr)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
	if (!msdu)
		return NULL;

	rxcb = ATH10K_SKB_RXCB(msdu);
	hash_del(&rxcb->hlist);
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev, rxcb->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}
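
/* Pop all MSDUs named in an in-order indication event (32-bit descriptor
 * variant) off the rx ring and queue them on @list. Caller must hold
 * rx_ring.lock.
 */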
static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
					  struct htt_rx_in_ord_ind *ev,
					  struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32;
	struct htt_rx_desc *rxd;
	struct sk_buff *msdu;
	int msdu_count;
	bool is_offload;
	u32 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le32_to_cpu(msdu_desc->msdu_paddr);

		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = (void *)msdu->data;

			trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

			skb_put(msdu, sizeof(*rxd));
			skb_pull(msdu, sizeof(*rxd));
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			if (!(__le32_to_cpu(rxd->attention.flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}
static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
					  struct htt_rx_in_ord_ind *ev,
					  struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64;
	struct htt_rx_desc *rxd;
	struct sk_buff *msdu;
	int msdu_count;
	bool is_offload;
	u64 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le64_to_cpu(msdu_desc->msdu_paddr);
		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = (void *)msdu->data;

			trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

			skb_put(msdu, sizeof(*rxd));
			skb_pull(msdu, sizeof(*rxd));
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			if (!(__le32_to_cpu(rxd->attention.flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}
int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	dma_addr_t paddr;
	void *vaddr, *vaddr_ring;
	size_t size;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	if (ar->dev_type == ATH10K_DEV_TYPE_HL)
		return 0;

	htt->rx_confused = false;

	/* XXX: The fill level could be changed during runtime in response to
	 * the host processing latency. Is this really worth it?
	 */
	htt->rx_ring.size = HTT_RX_RING_SIZE;
	htt->rx_ring.size_mask = htt->rx_ring.size - 1;
	htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level;

	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn(ar, "htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.netbufs_ring =
		kcalloc(htt->rx_ring.size, sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	size = ath10k_htt_get_rx_ring_size(htt);

	vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
	if (!vaddr_ring)
		goto err_dma_ring;

	ath10k_htt_config_paddrs_ring(htt, vaddr_ring);
	htt->rx_ring.base_paddr = paddr;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_KERNEL);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	timer_setup(timer, ath10k_htt_rx_ring_refill_retry, 0);

	spin_lock_init(&htt->rx_ring.lock);

	htt->rx_ring.fill_cnt = 0;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	hash_init(htt->rx_ring.skb_table);

	skb_queue_head_init(&htt->rx_msdus_q);
	skb_queue_head_init(&htt->rx_in_ord_compl_q);
	skb_queue_head_init(&htt->tx_fetch_ind_q);
	atomic_set(&htt->num_mpdus_ready, 0);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  ath10k_htt_get_rx_ring_size(htt),
			  vaddr_ring,
			  htt->rx_ring.base_paddr);
err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
	return -ENOMEM;
}
static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
					  enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
		return IEEE80211_CCMP_256_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return IEEE80211_GCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}
#define MICHAEL_MIC_LEN 8

static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar,
					enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
		return IEEE80211_CCMP_256_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return IEEE80211_GCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}
static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar,
					enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}
struct amsdu_subframe_hdr {
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	__be16 len;
} __packed;

#define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)

static inline u8 ath10k_bw_to_mac80211_bw(u8 bw)
{
	u8 ret = 0;

	switch (bw) {
	case 0:
		ret = RATE_INFO_BW_20;
		break;
	case 1:
		ret = RATE_INFO_BW_40;
		break;
	case 2:
		ret = RATE_INFO_BW_80;
		break;
	case 3:
		ret = RATE_INFO_BW_160;
		break;
	}

	return ret;
}
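
/* Derive mac80211 rate info (legacy/HT/VHT rate index, NSS, bandwidth and
 * guard interval) from the PPDU start fields of the rx descriptor.
 */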
static void ath10k_htt_rx_h_rates(struct ath10k *ar,
				  struct ieee80211_rx_status *status,
				  struct htt_rx_desc *rxd)
{
	struct ieee80211_supported_band *sband;
	u8 cck, rate, bw, sgi, mcs, nss;
	u8 preamble = 0;
	u8 group_id;
	u32 info1, info2, info3;

	info1 = __le32_to_cpu(rxd->ppdu_start.info1);
	info2 = __le32_to_cpu(rxd->ppdu_start.info2);
	info3 = __le32_to_cpu(rxd->ppdu_start.info3);

	preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);

	switch (preamble) {
	case HTT_RX_LEGACY:
		/* To get legacy rate index band is required. Since band can't
		 * be undefined check if freq is non-zero.
		 */
		if (!status->freq)
			return;

		cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
		rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
		rate &= ~RX_PPDU_START_RATE_FLAG;

		sband = &ar->mac.sbands[status->band];
		status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
		break;
	case HTT_RX_HT:
	case HTT_RX_HT_WITH_TXBF:
		/* HT-SIG - Table 20-11 in info2 and info3 */
		mcs = info2 & 0x1F;
		nss = mcs >> 3;
		bw = (info2 >> 7) & 1;
		sgi = (info3 >> 7) & 1;

		status->rate_idx = mcs;
		status->encoding = RX_ENC_HT;
		if (sgi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (bw)
			status->bw = RATE_INFO_BW_40;
		break;
	case HTT_RX_VHT:
	case HTT_RX_VHT_WITH_TXBF:
		/* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3 */
		bw = info2 & 3;
		sgi = info3 & 1;
		group_id = (info2 >> 4) & 0x3F;

		if (GROUP_ID_IS_SU_MIMO(group_id)) {
			mcs = (info3 >> 4) & 0x0F;
			nss = ((info2 >> 10) & 0x07) + 1;
		} else {
			/* Hardware doesn't decode VHT-SIG-B into Rx descriptor
			 * so it's impossible to decode MCS. Also since
			 * firmware consumes Group Id Management frames host
			 * has no knowledge regarding group/user position
			 * mapping so it's impossible to pick the correct Nsts
			 * from VHT-SIG-A1.
			 *
			 * Bandwidth and SGI are valid so report the rateinfo
			 * on best-effort basis.
			 */
			mcs = 0;
			nss = 1;
		}

		if (mcs > 0x09) {
			ath10k_warn(ar, "invalid MCS received %u\n", mcs);
			ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
				    __le32_to_cpu(rxd->attention.flags),
				    __le32_to_cpu(rxd->mpdu_start.info0),
				    __le32_to_cpu(rxd->mpdu_start.info1),
				    __le32_to_cpu(rxd->msdu_start.common.info0),
				    __le32_to_cpu(rxd->msdu_start.common.info1),
				    __le32_to_cpu(rxd->ppdu_start.info0),
				    __le32_to_cpu(rxd->ppdu_start.info1),
				    __le32_to_cpu(rxd->ppdu_start.info2),
				    __le32_to_cpu(rxd->ppdu_start.info3),
				    __le32_to_cpu(rxd->ppdu_start.info4));
			ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
				    __le32_to_cpu(rxd->msdu_end.common.info0),
				    __le32_to_cpu(rxd->mpdu_end.info0));

			ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
					"rx desc msdu payload: ",
					rxd->msdu_payload, 50);
			return;
		}

		status->rate_idx = mcs;
		status->nss = nss;

		if (sgi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

		status->bw = ath10k_bw_to_mac80211_bw(bw);
		status->encoding = RX_ENC_VHT;
		break;
	default:
		break;
	}
}
static struct ieee80211_channel *
ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
{
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;
	u16 peer_id;

	lockdep_assert_held(&ar->data_lock);

	if (!rxd)
		return NULL;

	if (rxd->attention.flags &
	    __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
		return NULL;

	if (!(rxd->msdu_end.common.info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
		return NULL;

	peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_PEER_IDX);

	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer)
		return NULL;

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (WARN_ON_ONCE(!arvif))
		return NULL;

	if (ath10k_mac_vif_chan(arvif->vif, &def))
		return NULL;

	return def.chan;
}
static struct ieee80211_channel *
ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
{
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;

	lockdep_assert_held(&ar->data_lock);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->vdev_id == vdev_id &&
		    ath10k_mac_vif_chan(arvif->vif, &def) == 0)
			return def.chan;
	}

	return NULL;
}
static void
ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
			      struct ieee80211_chanctx_conf *conf,
			      void *data)
{
	struct cfg80211_chan_def *def = data;

	*def = conf->def;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_any_channel(struct ath10k *ar)
{
	struct cfg80211_chan_def def = {};

	ieee80211_iter_chan_contexts_atomic(ar->hw,
					    ath10k_htt_rx_h_any_chan_iter,
					    &def);

	return def.chan;
}
static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd,
				    u32 vdev_id)
{
	struct ieee80211_channel *ch;

	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;
	if (!ch)
		ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
	if (!ch)
		ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
	if (!ch)
		ch = ath10k_htt_rx_h_any_channel(ar);
	if (!ch)
		ch = ar->tgt_oper_chan;
	spin_unlock_bh(&ar->data_lock);

	if (!ch)
		return false;

	status->band = ch->band;
	status->freq = ch->center_freq;

	return true;
}
static void ath10k_htt_rx_h_signal(struct ath10k *ar,
				   struct ieee80211_rx_status *status,
				   struct htt_rx_desc *rxd)
{
	int i;

	for (i = 0; i < IEEE80211_MAX_CHAINS; i++) {
		status->chains &= ~BIT(i);

		if (rxd->ppdu_start.rssi_chains[i].pri20_mhz != 0x80) {
			status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR +
				rxd->ppdu_start.rssi_chains[i].pri20_mhz;

			status->chains |= BIT(i);
		}
	}

	/* FIXME: Get real NF */
	status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
			 rxd->ppdu_start.rssi_comb;
	status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}
static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd)
{
	/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
	 * means all prior MSDUs in a PPDU are reported to mac80211 without the
	 * TSF. Is it worth holding frames until end of PPDU is known?
	 *
	 * FIXME: Can we get/compute 64bit TSF?
	 */
	status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
	status->flag |= RX_FLAG_MACTIME_END;
}
static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 u32 vdev_id)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	bool is_first_ppdu;
	bool is_last_ppdu;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_first_ppdu = !!(rxd->attention.flags &
			   __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
	is_last_ppdu = !!(rxd->attention.flags &
			  __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));

	if (is_first_ppdu) {
		/* New PPDU starts so clear out the old per-PPDU status. */
		status->freq = 0;
		status->rate_idx = 0;
		status->nss = 0;
		status->encoding = RX_ENC_LEGACY;
		status->bw = RATE_INFO_BW_20;

		status->flag &= ~RX_FLAG_MACTIME_END;
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		status->flag &= ~(RX_FLAG_AMPDU_IS_LAST);
		status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
		status->ampdu_reference = ar->ampdu_reference;

		ath10k_htt_rx_h_signal(ar, status, rxd);
		ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
		ath10k_htt_rx_h_rates(ar, status, rxd);
	}

	if (is_last_ppdu) {
		ath10k_htt_rx_h_mactime(ar, status, rxd);

		/* set ampdu last segment flag */
		status->flag |= RX_FLAG_AMPDU_IS_LAST;
		ar->ampdu_reference++;
	}
}
static const char * const tid_to_ac[] = {
	"BE",
	"BK",
	"BK",
	"BE",
	"VI",
	"VI",
	"VO",
	"VO",
};

static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
{
	u8 *qc;
	int tid;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return "";

	qc = ieee80211_get_qos_ctl(hdr);
	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
	if (tid < 8)
		snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
	else
		snprintf(out, size, "tid %d", tid);

	return out;
}
static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar,
				       struct ieee80211_rx_status *rx_status,
				       struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;

	status = IEEE80211_SKB_RXCB(skb);
	*status = *rx_status;

	skb_queue_tail(&ar->htt.rx_msdus_q, skb);
}
static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	char tid[32];

	status = IEEE80211_SKB_RXCB(skb);

	ath10k_dbg(ar, ATH10K_DBG_DATA,
		   "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
		   skb,
		   skb->len,
		   ieee80211_get_SA(hdr),
		   ath10k_get_tid(hdr, tid, sizeof(tid)),
		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
							"mcast" : "ucast",
		   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
		   (status->encoding == RX_ENC_HT) ? "ht" : "",
		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
			skb->data, skb->len);
	trace_ath10k_rx_hdr(ar, skb->data, skb->len);
	trace_ath10k_rx_payload(ar, skb->data, skb->len);

	ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
}
static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
				      struct ieee80211_hdr *hdr)
{
	int len = ieee80211_hdrlen(hdr->frame_control);

	if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
		      ar->running_fw->fw_file.fw_features))
		len = round_up(len, 4);

	return len;
}
static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					enum htt_rx_mpdu_encrypt_type enctype,
					bool is_decrypted)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len;
	size_t crypto_len;
	bool is_first;
	bool is_last;

	rxd = (void *)msdu->data - sizeof(*rxd);
	is_first = !!(rxd->msdu_end.common.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.common.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));

	/* Delivered decapped frame:
	 * [802.11 header]
	 * [crypto param] <-- can be trimmed if !fcs_err &&
	 *                    !decrypt_err && !peer_idx_invalid
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 * [payload]
	 * [FCS] <-- at end, needs to be trimmed
	 */

	/* This probably shouldn't happen but warn just in case */
	if (WARN_ON_ONCE(!is_first))
		return;

	/* This probably shouldn't happen but warn just in case */
	if (WARN_ON_ONCE(!(is_first && is_last)))
		return;

	skb_trim(msdu, msdu->len - FCS_LEN);

	/* In most cases this will be true for sniffed frames. It makes sense
	 * to deliver them as-is without stripping the crypto param. This is
	 * necessary for software based decryption.
	 *
	 * If there's no error then the frame is decrypted. At least that is
	 * the case for frames that come in via fragmented rx indication.
	 */
	if (!is_decrypted)
		return;

	/* The payload is decrypted so strip crypto params. Start from tail
	 * since hdr is used to compute some stuff.
	 */

	hdr = (void *)msdu->data;

	/* Tail */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_mic_len(ar, enctype));

		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_icv_len(ar, enctype));
	} else {
		/* MIC */
		if (status->flag & RX_FLAG_MIC_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath10k_htt_rx_crypto_mic_len(ar, enctype));

		/* ICV */
		if (status->flag & RX_FLAG_ICV_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath10k_htt_rx_crypto_icv_len(ar, enctype));
	}

	/* MMIC */
	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN);

	/* Head */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		memmove((void *)msdu->data + crypto_len,
			(void *)msdu->data, hdr_len);
		skb_pull(msdu, crypto_len);
	}
}
static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
					  struct sk_buff *msdu,
					  struct ieee80211_rx_status *status,
					  const u8 first_hdr[64],
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int l3_pad_bytes;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 *
	 * Note: The nwifi header doesn't have QoS Control and is
	 * (always?) a 3addr frame.
	 *
	 * Note2: There's no A-MSDU subframe header. Even if it's part
	 * of an A-MSDU.
	 */

	/* pull decapped header and copy SA & DA */
	rxd = (void *)msdu->data - sizeof(*rxd);

	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);

	hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);

	hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
	skb_pull(msdu, hdr_len);

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}
static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
					   struct sk_buff *msdu,
					   enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len, crypto_len;
	void *rfc1042;
	bool is_first, is_last, is_amsdu;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	rxd = (void *)msdu->data - sizeof(*rxd);
	hdr = (void *)rxd->rx_hdr_status;

	is_first = !!(rxd->msdu_end.common.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.common.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
	is_amsdu = !(is_first && is_last);

	rfc1042 = hdr;

	if (is_first) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		rfc1042 += round_up(hdr_len, bytes_aligned) +
			   round_up(crypto_len, bytes_aligned);
	}

	if (is_amsdu)
		rfc1042 += sizeof(struct amsdu_subframe_hdr);

	return rfc1042;
}
static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					const u8 first_hdr[64],
					enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct ethhdr *eth;
	size_t hdr_len;
	void *rfc1042;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int l3_pad_bytes;
	struct htt_rx_desc *rxd;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
	 * [payload]
	 */

	rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
	if (WARN_ON_ONCE(!rfc1042))
		return;

	rxd = (void *)msdu->data - sizeof(*rxd);
	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);
	skb_pull(msdu, l3_pad_bytes);

	/* pull decapped header and copy SA & DA */
	eth = (struct ethhdr *)msdu->data;
	ether_addr_copy(da, eth->h_dest);
	ether_addr_copy(sa, eth->h_source);
	skb_pull(msdu, sizeof(struct ethhdr));

	/* push rfc1042/llc/snap */
	memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
	       sizeof(struct rfc1042_hdr));

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}
static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
					 struct sk_buff *msdu,
					 struct ieee80211_rx_status *status,
					 const u8 first_hdr[64],
					 enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	int l3_pad_bytes;
	struct htt_rx_desc *rxd;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [amsdu header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 * [payload]
	 */

	rxd = (void *)msdu->data - sizeof(*rxd);
	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);

	skb_put(msdu, l3_pad_bytes);
	skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);

	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
}
static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
				    struct sk_buff *msdu,
				    struct ieee80211_rx_status *status,
				    u8 first_hdr[64],
				    enum htt_rx_mpdu_encrypt_type enctype,
				    bool is_decrypted)
{
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	/* First msdu's decapped header:
	 * [802.11 header] <-- padded to 4 bytes long
	 * [crypto param] <-- padded to 4 bytes long
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 *
	 * Other (2nd, 3rd, ..) msdu's decapped header:
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 */

	rxd = (void *)msdu->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	switch (decap) {
	case RX_MSDU_DECAP_RAW:
		ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
					    is_decrypted);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr,
					      enctype);
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr,
					     enctype);
		break;
	}
}
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags, info;
	bool is_ip4, is_ip6;
	bool is_tcp, is_udp;
	bool ip_csum_ok, tcpudp_csum_ok;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);
	info = __le32_to_cpu(rxd->msdu_start.common.info1);

	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

	if (!is_ip4 && !is_ip6)
		return CHECKSUM_NONE;
	if (!is_tcp && !is_udp)
		return CHECKSUM_NONE;
	if (!ip_csum_ok)
		return CHECKSUM_NONE;
	if (!tcpudp_csum_ok)
		return CHECKSUM_NONE;

	return CHECKSUM_UNNECESSARY;
}
static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
{
	msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
}
static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 bool fill_crypt_header,
				 u8 *rx_hdr,
				 enum ath10k_pkt_rx_err *err)
{
	struct sk_buff *first;
	struct sk_buff *last;
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;
	struct ieee80211_hdr *hdr;
	enum htt_rx_mpdu_encrypt_type enctype;
	u8 first_hdr[64];
	u8 *qos;
	bool has_fcs_err;
	bool has_crypto_err;
	bool has_tkip_err;
	bool has_peer_idx_invalid;
	bool is_decrypted;
	bool is_mgmt;
	u32 attention;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_mgmt = !!(rxd->attention.flags &
		     __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));

	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	/* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
	 * decapped header. It'll be used for undecapping of each MSDU.
	 */
	hdr = (void *)rxd->rx_hdr_status;
	memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);

	if (rx_hdr)
		memcpy(rx_hdr, hdr, RX_HTT_HDR_STATUS_LEN);

	/* Each A-MSDU subframe will use the original header as the base and be
	 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
	 */
	hdr = (void *)first_hdr;

	if (ieee80211_is_data_qos(hdr->frame_control)) {
		qos = ieee80211_get_qos_ctl(hdr);
		qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
	}

	/* Some attention flags are valid only in the last MSDU. */
	last = skb_peek_tail(amsdu);
	rxd = (void *)last->data - sizeof(*rxd);
	attention = __le32_to_cpu(rxd->attention.flags);

	has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
	has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
	has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);

	/* Note: If hardware captures an encrypted frame that it can't decrypt,
	 * e.g. due to fcs error, missing peer or invalid key data it will
	 * report the frame as raw.
	 */
	is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
			!has_fcs_err &&
			!has_crypto_err &&
			!has_peer_idx_invalid);

	/* Clear per-MPDU flags while leaving per-PPDU flags intact. */
	status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
			  RX_FLAG_MMIC_ERROR |
			  RX_FLAG_DECRYPTED |
			  RX_FLAG_IV_STRIPPED |
			  RX_FLAG_ONLY_MONITOR |
			  RX_FLAG_MMIC_STRIPPED);

	if (has_fcs_err)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (has_tkip_err)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (err) {
		if (has_fcs_err)
			*err = ATH10K_PKT_RX_ERR_FCS;
		else if (has_tkip_err)
			*err = ATH10K_PKT_RX_ERR_TKIP;
		else if (has_crypto_err)
			*err = ATH10K_PKT_RX_ERR_CRYPT;
		else if (has_peer_idx_invalid)
			*err = ATH10K_PKT_RX_ERR_PEER_IDX_INVAL;
	}

	/* Firmware reports all necessary management frames via WMI already.
	 * They are not reported to monitor interfaces at all so pass the ones
	 * coming via HTT to monitor interfaces instead. This simplifies
	 * matters a lot.
	 */
	if (is_mgmt)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	if (is_decrypted) {
		status->flag |= RX_FLAG_DECRYPTED;

		if (likely(!is_mgmt))
			status->flag |= RX_FLAG_MMIC_STRIPPED;

		if (fill_crypt_header)
			status->flag |= RX_FLAG_MIC_STRIPPED |
					RX_FLAG_ICV_STRIPPED;
		else
			status->flag |= RX_FLAG_IV_STRIPPED;
	}

	skb_queue_walk(amsdu, msdu) {
		ath10k_htt_rx_h_csum_offload(msdu);
		ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
					is_decrypted);

		/* Undecapping involves copying the original 802.11 header back
		 * to sk_buff. If frame is protected and hardware has decrypted
		 * it then remove the protected bit.
		 */
		if (!is_decrypted)
			continue;
		if (is_mgmt)
			continue;

		if (fill_crypt_header)
			continue;

		hdr = (void *)msdu->data;
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	}
}
static void ath10k_htt_rx_h_enqueue(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    struct ieee80211_rx_status *status)
{
	struct sk_buff *msdu;
	struct sk_buff *first_subframe;

	first_subframe = skb_peek(amsdu);

	while ((msdu = __skb_dequeue(amsdu))) {
		/* Setup per-MSDU flags */
		if (skb_queue_empty(amsdu))
			status->flag &= ~RX_FLAG_AMSDU_MORE;
		else
			status->flag |= RX_FLAG_AMSDU_MORE;

		if (msdu == first_subframe) {
			first_subframe = NULL;
			status->flag &= ~RX_FLAG_ALLOW_SAME_PN;
		} else {
			status->flag |= RX_FLAG_ALLOW_SAME_PN;
		}

		ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
	}
}
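
/* Coalesce a chained (multi-buffer) MSDU into the first sk_buff so the rest
 * of the rx path can treat it as a single linear frame.
 */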
static int ath10k_unchain_msdu(struct sk_buff_head *amsdu,
			       unsigned long int *unchain_cnt)
{
	struct sk_buff *skb, *first;
	int space;
	int total_len = 0;
	int amsdu_len = skb_queue_len(amsdu);

	/* TODO: We might be able to optimize this by using
	 * skb_try_coalesce or similar method to
	 * decrease copying, or maybe get mac80211 to
	 * provide a way to just receive a list of
	 * skb?
	 */

	first = __skb_dequeue(amsdu);

	/* Allocate total length all at once. */
	skb_queue_walk(amsdu, skb)
		total_len += skb->len;

	space = total_len - skb_tailroom(first);
	if ((space > 0) &&
	    (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
		/* TODO: bump some rx-oom error stat */
		/* put it back together so we can free the
		 * whole list at once.
		 */
		__skb_queue_head(amsdu, first);
		return -1;
	}

	/* Walk list again, copying contents into
	 * msdu.
	 */
	while ((skb = __skb_dequeue(amsdu))) {
		skb_copy_from_linear_data(skb, skb_put(first, skb->len),
					  skb->len);
		dev_kfree_skb_any(skb);
	}

	__skb_queue_head(amsdu, first);

	*unchain_cnt += amsdu_len - 1;

	return 0;
}
static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    unsigned long int *drop_cnt,
				    unsigned long int *unchain_cnt)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	/* FIXME: Current unchaining logic can only handle simple case of raw
	 * msdu chaining. If decapping is other than raw the chaining may be
	 * more complex and this isn't handled by the current code. Don't even
	 * try re-constructing such frames - it'll be pretty much garbage.
	 */
	if (decap != RX_MSDU_DECAP_RAW ||
	    skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
		*drop_cnt += skb_queue_len(amsdu);
		__skb_queue_purge(amsdu);
		return;
	}

	ath10k_unchain_msdu(amsdu, unchain_cnt);
}
static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
					struct sk_buff_head *amsdu,
					struct ieee80211_rx_status *rx_status)
{
	/* FIXME: It might be a good idea to do some fuzzy-testing to drop
	 * invalid/dangerous frames.
	 */

	if (!rx_status->freq) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n");
		return false;
	}

	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
		return false;
	}

	return true;
}
static void ath10k_htt_rx_h_filter(struct ath10k *ar,
				   struct sk_buff_head *amsdu,
				   struct ieee80211_rx_status *rx_status,
				   unsigned long int *drop_cnt)
{
	if (skb_queue_empty(amsdu))
		return;

	if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
		return;

	if (drop_cnt)
		*drop_cnt += skb_queue_len(amsdu);

	__skb_queue_purge(amsdu);
}
static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct ieee80211_rx_status *rx_status = &htt->rx_status;
	struct sk_buff_head amsdu;
	int ret;
	unsigned long int drop_cnt = 0;
	unsigned long int unchain_cnt = 0;
	unsigned long int drop_cnt_filter = 0;
	unsigned long int msdus_to_queue, num_msdus;
	enum ath10k_pkt_rx_err err = ATH10K_PKT_RX_ERR_MAX;
	u8 first_hdr[RX_HTT_HDR_STATUS_LEN];

	__skb_queue_head_init(&amsdu);

	spin_lock_bh(&htt->rx_ring.lock);
	if (htt->rx_confused) {
		spin_unlock_bh(&htt->rx_ring.lock);
		return -EIO;
	}
	ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
	spin_unlock_bh(&htt->rx_ring.lock);

	if (ret < 0) {
		ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
		__skb_queue_purge(&amsdu);
		/* FIXME: It's probably a good idea to reboot the
		 * device instead of leaving it inoperable.
		 */
		htt->rx_confused = true;
		return ret;
	}

	num_msdus = skb_queue_len(&amsdu);

	ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);

	/* only for ret = 1 indicates chained msdus */
	if (ret > 0)
		ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt);

	ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter);
	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err);
	msdus_to_queue = skb_queue_len(&amsdu);
	ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status);

	ath10k_sta_update_rx_tid_stats(ar, first_hdr, num_msdus, err,
				       unchain_cnt, drop_cnt, drop_cnt_filter,
				       msdus_to_queue);

	return 0;
}
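
/* Handle an rx indication for high latency (HL) devices, where the frame
 * payload is carried in the indication message itself rather than fetched
 * through the DMA rx ring. Returns true if the caller still owns (and must
 * free) the skb.
 */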
static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
					 struct htt_rx_indication_hl *rx,
					 struct sk_buff *skb)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_peer *peer;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	struct fw_rx_desc_hl *fw_desc;
	struct ieee80211_hdr *hdr;
	struct ieee80211_rx_status *rx_status;
	u16 peer_id;
	u8 rx_desc_len;
	int num_mpdu_ranges;
	size_t tot_hdr_len;
	struct ieee80211_channel *ch;

	peer_id = __le16_to_cpu(rx->hdr.peer_id);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	spin_unlock_bh(&ar->data_lock);
	if (!peer)
		ath10k_warn(ar, "Got RX ind from invalid peer: %u\n", peer_id);

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	mpdu_ranges = htt_rx_ind_get_mpdu_ranges_hl(rx);
	fw_desc = &rx->fw_desc;
	rx_desc_len = fw_desc->len;

	/* I have not yet seen any case where num_mpdu_ranges > 1.
	 * qcacld does not seem to handle that case either, so we introduce the
	 * same limitation here as well.
	 */
	if (num_mpdu_ranges > 1)
		ath10k_warn(ar,
			    "Unsupported number of MPDU ranges: %d, ignoring all but the first\n",
			    num_mpdu_ranges);

	if (mpdu_ranges->mpdu_range_status !=
	    HTT_RX_IND_MPDU_STATUS_OK) {
		ath10k_warn(ar, "MPDU range status: %d\n",
			    mpdu_ranges->mpdu_range_status);
		goto err;
	}

	/* Strip off all headers before the MAC header before delivery to
	 * mac80211
	 */
	tot_hdr_len = sizeof(struct htt_resp_hdr) + sizeof(rx->hdr) +
		      sizeof(rx->ppdu) + sizeof(rx->prefix) +
		      sizeof(rx->fw_desc) +
		      sizeof(*mpdu_ranges) * num_mpdu_ranges + rx_desc_len;
	skb_pull(skb, tot_hdr_len);

	hdr = (struct ieee80211_hdr *)skb->data;
	rx_status = IEEE80211_SKB_RXCB(skb);
	rx_status->chains |= BIT(0);
	rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
			    rx->ppdu.combined_rssi;
	rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;

	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;
	if (!ch)
		ch = ath10k_htt_rx_h_any_channel(ar);
	if (!ch)
		ch = ar->tgt_oper_chan;
	spin_unlock_bh(&ar->data_lock);

	if (ch) {
		rx_status->band = ch->band;
		rx_status->freq = ch->center_freq;
	}
	if (rx->fw_desc.flags & FW_RX_DESC_FLAGS_LAST_MSDU)
		rx_status->flag &= ~RX_FLAG_AMSDU_MORE;
	else
		rx_status->flag |= RX_FLAG_AMSDU_MORE;

	/* Not entirely sure about this, but all frames from the chipset have
	 * the protected flag set even though they have already been decrypted.
	 * Unmasking this flag is necessary in order for mac80211 not to drop
	 * the frame.
	 * TODO: Verify this is always the case or find out a way to check
	 * if there has been hw decryption.
	 */
	if (ieee80211_has_protected(hdr->frame_control)) {
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
		rx_status->flag |= RX_FLAG_DECRYPTED |
				   RX_FLAG_IV_STRIPPED |
				   RX_FLAG_MMIC_STRIPPED;
	}

	ieee80211_rx_ni(ar->hw, skb);

	/* We have delivered the skb to the upper layers (mac80211) so we
	 * must not free it.
	 */
	return false;
err:
	/* Tell the caller that it must free the skb since we have not
	 * consumed it
	 */
	return true;
}
static void ath10k_htt_rx_proc_rx_ind_ll(struct ath10k_htt *htt,
					 struct htt_rx_indication *rx)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	int num_mpdu_ranges;
	int i, mpdu_count = 0;
	u16 peer_id;
	u8 tid;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	peer_id = __le16_to_cpu(rx->hdr.peer_id);
	tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);

	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
			rx, sizeof(*rx) +
			(sizeof(struct htt_rx_indication_mpdu_range) *
				num_mpdu_ranges));

	for (i = 0; i < num_mpdu_ranges; i++)
		mpdu_count += mpdu_ranges[i].mpdu_count;

	atomic_add(mpdu_count, &htt->num_mpdus_ready);

	ath10k_sta_update_rx_tid_stats_ampdu(ar, peer_id, tid, mpdu_ranges,
					     num_mpdu_ranges);
}
static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
				       struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_done tx_done = {};
	int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
	__le16 msdu_id, *msdus;
	bool rssi_enabled = false;
	u8 msdu_count = 0;
	int i;

	switch (status) {
	case HTT_DATA_TX_STATUS_NO_ACK:
		tx_done.status = HTT_TX_COMPL_STATE_NOACK;
		break;
	case HTT_DATA_TX_STATUS_OK:
		tx_done.status = HTT_TX_COMPL_STATE_ACK;
		break;
	case HTT_DATA_TX_STATUS_DISCARD:
	case HTT_DATA_TX_STATUS_POSTPONE:
	case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
		break;
	default:
		ath10k_warn(ar, "unhandled tx completion status %d\n", status);
		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
		break;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
		   resp->data_tx_completion.num_msdus);

	msdu_count = resp->data_tx_completion.num_msdus;

	if (resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_DATA_RSSI)
		rssi_enabled = true;

	for (i = 0; i < msdu_count; i++) {
		msdus = resp->data_tx_completion.msdus;
		msdu_id = msdus[i];
		tx_done.msdu_id = __le16_to_cpu(msdu_id);

		if (rssi_enabled) {
			/* Total no of MSDUs should be even,
			 * if odd MSDUs are sent firmware fills
			 * last msdu id with 0xffff
			 */
			if (msdu_count & 0x01) {
				msdu_id = msdus[msdu_count + i + 1];
				tx_done.ack_rssi = __le16_to_cpu(msdu_id);
			} else {
				msdu_id = msdus[msdu_count + i];
				tx_done.ack_rssi = __le16_to_cpu(msdu_id);
			}
		}

		/* kfifo_put: In practice firmware shouldn't fire off per-CE
		 * interrupt and main interrupt (MSI/-X range case) for the same
		 * HTC service so it should be safe to use kfifo_put w/o lock.
		 *
		 * From kfifo_put() documentation:
		 *  Note that with only one concurrent reader and one concurrent
		 *  writer, you don't need extra locking to use these macros.
		 */
		if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
			ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
				    tx_done.msdu_id, tx_done.status);
			ath10k_txrx_tx_unref(htt, &tx_done);
		}
	}
}
static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_addba *ev = &resp->rx_addba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx addba tid %hu peer_id %hu size %hhu\n",
		   tid, peer_id, ev->window_size);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
		   peer->addr, tid, ev->window_size);

	ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}
static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_delba *ev = &resp->rx_delba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx delba tid %hu peer_id %hu\n",
		   tid, peer_id);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx stop rx ba session sta %pM tid %hu\n",
		   peer->addr, tid);

	ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}
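
/* Split the first complete A-MSDU (up to and including the MSDU marked
 * LAST_MSDU) off @list into @amsdu. Restores @list and returns -EAGAIN if
 * the tail of the A-MSDU has not been received yet.
 */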
static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
				       struct sk_buff_head *amsdu)
{
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;

	if (skb_queue_empty(list))
		return -ENOBUFS;

	if (WARN_ON(!skb_queue_empty(amsdu)))
		return -EINVAL;

	while ((msdu = __skb_dequeue(list))) {
		__skb_queue_tail(amsdu, msdu);

		rxd = (void *)msdu->data - sizeof(*rxd);
		if (rxd->msdu_end.common.info0 &
		    __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
			break;
	}

	msdu = skb_peek_tail(amsdu);
	rxd = (void *)msdu->data - sizeof(*rxd);
	if (!(rxd->msdu_end.common.info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
		skb_queue_splice_init(amsdu, list);
		return -EAGAIN;
	}

	return 0;
}
static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
					    struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (!ieee80211_has_protected(hdr->frame_control))
		return;

	/* Offloaded frames are already decrypted but firmware insists they are
	 * protected in the 802.11 header. Strip the flag. Otherwise mac80211
	 * will drop the frame.
	 */

	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	status->flag |= RX_FLAG_DECRYPTED |
			RX_FLAG_IV_STRIPPED |
			RX_FLAG_MMIC_STRIPPED;
}

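/* Offloaded MSDUs carry a short htt_rx_offload_msdu header instead of a full
 * rx descriptor. Strip that header, re-align the payload to a 4-byte boundary
 * (mac80211 expects aligned frames) and queue the frame with a zeroed rx
 * status.
 */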
static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
				       struct sk_buff_head *list)
{
	struct ath10k_htt *htt = &ar->htt;
	struct ieee80211_rx_status *status = &htt->rx_status;
	struct htt_rx_offload_msdu *rx;
	struct sk_buff *msdu;
	size_t offset;

	while ((msdu = __skb_dequeue(list))) {
		/* Offloaded frames don't have Rx descriptor. Instead they have
		 * a short meta information header.
		 */

		rx = (void *)msdu->data;

		skb_put(msdu, sizeof(*rx));
		skb_pull(msdu, sizeof(*rx));

		if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
			ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
			dev_kfree_skb_any(msdu);
			continue;
		}

		skb_put(msdu, __le16_to_cpu(rx->msdu_len));

		/* Offloaded rx header length isn't multiple of 2 nor 4 so the
		 * actual payload is unaligned. Align the frame. Otherwise
		 * mac80211 complains. This shouldn't reduce performance much
		 * because these offloaded frames are rare.
		 */
		offset = 4 - ((unsigned long)msdu->data & 3);
		skb_put(msdu, offset);
		memmove(msdu->data + offset, msdu->data, msdu->len);
		skb_pull(msdu, offset);

		/* FIXME: The frame is NWifi. Re-construct QoS Control
		 * if possible later.
		 */

		memset(status, 0, sizeof(*status));
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		ath10k_htt_rx_h_rx_offload_prot(status, msdu);
		ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
		ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
	}
}

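/* HTT_T2H RX_IN_ORD_PADDR_IND handling: the target hands back a list of
 * buffer physical addresses in reception order. Pop the matching skbs off the
 * rx ring, split them into A-MSDUs and feed them through the regular rx path;
 * offloaded frames take the dedicated path above. Called with
 * htt->rx_ring.lock held.
 */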
static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (void *)skb->data;
	struct ieee80211_rx_status *status = &htt->rx_status;
	struct sk_buff_head list;
	struct sk_buff_head amsdu;
	u16 peer_id;
	u16 msdu_count;
	u8 vdev_id;
	u8 tid;
	bool offload;
	bool frag;
	int ret;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_confused)
		return -EIO;

	skb_pull(skb, sizeof(resp->hdr));
	skb_pull(skb, sizeof(resp->rx_in_ord_ind));

	peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
	msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
	vdev_id = resp->rx_in_ord_ind.vdev_id;
	tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
	offload = !!(resp->rx_in_ord_ind.info &
		     HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
	frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
		   vdev_id, peer_id, tid, offload, frag, msdu_count);

	if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs32)) {
		ath10k_warn(ar, "dropping invalid in order rx indication\n");
		return -EINVAL;
	}

	/* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
	 * extracted and processed.
	 */
	__skb_queue_head_init(&list);
	if (ar->hw_params.target_64bit)
		ret = ath10k_htt_rx_pop_paddr64_list(htt, &resp->rx_in_ord_ind,
						     &list);
	else
		ret = ath10k_htt_rx_pop_paddr32_list(htt, &resp->rx_in_ord_ind,
						     &list);

	if (ret < 0) {
		ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
		htt->rx_confused = true;
		return -EIO;
	}

	/* Offloaded frames are very different and need to be handled
	 * separately.
	 */
	if (offload)
		ath10k_htt_rx_h_rx_offload(ar, &list);

	while (!skb_queue_empty(&list)) {
		__skb_queue_head_init(&amsdu);
		ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
		switch (ret) {
		case 0:
			/* Note: The in-order indication may report interleaved
			 * frames from different PPDUs meaning reported rx rate
			 * to mac80211 isn't accurate/reliable. It's still
			 * better to report something than nothing though. This
			 * should still give an idea about rx rate to the user.
			 */
			ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
			ath10k_htt_rx_h_filter(ar, &amsdu, status, NULL);
			ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false, NULL,
					     NULL);
			ath10k_htt_rx_h_enqueue(ar, &amsdu, status);
			break;
		case -EAGAIN:
			/* fall through */
		default:
			/* Should not happen. */
			ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
			htt->rx_confused = true;
			__skb_queue_purge(&list);
			return -EIO;
		}
	}

	return ret;
}

static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
						   const __le32 *resp_ids,
						   int num_resp_ids)
{
	int i;
	u32 resp_id;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
		   num_resp_ids);

	for (i = 0; i < num_resp_ids; i++) {
		resp_id = le32_to_cpu(resp_ids[i]);

		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
			   resp_id);

		/* TODO: free resp_id */
	}
}

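/* HTT TX_FETCH_IND (pull mode): the target asks the host to push up to
 * num_msdus/num_bytes worth of frames per (peer_id, tid) record. The handler
 * pushes what it can from the matching mac80211 txq, writes the actual counts
 * back into each record and answers with a tx fetch response.
 */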
static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_hw *hw = ar->hw;
	struct ieee80211_txq *txq;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_fetch_record *record;
	size_t len;
	size_t max_num_bytes;
	size_t max_num_msdus;
	size_t num_bytes;
	size_t num_msdus;
	const __le32 *resp_ids;
	u16 num_records;
	u16 num_resp_ids;
	u16 peer_id;
	u8 tid;
	int ret;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
		return;
	}

	num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
	num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);

	len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
	len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n",
		   num_records, num_resp_ids,
		   le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));

	if (!ar->htt.tx_q_state.enabled) {
		ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
		return;
	}

	if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
		ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
		return;
	}

	rcu_read_lock();

	for (i = 0; i < num_records; i++) {
		record = &resp->tx_fetch_ind.records[i];
		peer_id = MS(le16_to_cpu(record->info),
			     HTT_TX_FETCH_RECORD_INFO_PEER_ID);
		tid = MS(le16_to_cpu(record->info),
			 HTT_TX_FETCH_RECORD_INFO_TID);
		max_num_msdus = le16_to_cpu(record->num_msdus);
		max_num_bytes = le32_to_cpu(record->num_bytes);

		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n",
			   i, peer_id, tid, max_num_msdus, max_num_bytes);

		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
			ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->data_lock);
		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
		spin_unlock_bh(&ar->data_lock);

		/* It is okay to release the lock and use txq because RCU read
		 * lock is held.
		 */

		if (unlikely(!txq)) {
			ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		num_msdus = 0;
		num_bytes = 0;

		while (num_msdus < max_num_msdus &&
		       num_bytes < max_num_bytes) {
			ret = ath10k_mac_tx_push_txq(hw, txq);
			if (ret < 0)
				break;

			num_msdus++;
			num_bytes += ret;
		}

		record->num_msdus = cpu_to_le16(num_msdus);
		record->num_bytes = cpu_to_le32(num_bytes);

		ath10k_htt_tx_txq_recalc(hw, txq);
	}

	rcu_read_unlock();

	resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);

	ret = ath10k_htt_tx_fetch_resp(ar,
				       resp->tx_fetch_ind.token,
				       resp->tx_fetch_ind.fetch_seq_num,
				       resp->tx_fetch_ind.records,
				       num_records);
	if (unlikely(ret)) {
		ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
			    le32_to_cpu(resp->tx_fetch_ind.token), ret);
		/* FIXME: request fw restart */
	}

	ath10k_htt_tx_txq_sync(ar);
}

static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
					   struct sk_buff *skb)
{
	const struct htt_resp *resp = (void *)skb->data;
	size_t len;
	int num_resp_ids;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
		return;
	}

	num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
	len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
		return;
	}

	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
					       resp->tx_fetch_confirm.resp_ids,
					       num_resp_ids);
}

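/* HTT TX_MODE_SWITCH_IND: the target switches the host between push and
 * push/pull tx queuing and advertises per-txq push limits. Update the global
 * tx_q_state and each affected ath10k_txq before kicking pending tx.
 */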
static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
					     struct sk_buff *skb)
{
	const struct htt_resp *resp = (void *)skb->data;
	const struct htt_tx_mode_switch_record *record;
	struct ieee80211_txq *txq;
	struct ath10k_txq *artxq;
	size_t len;
	size_t num_records;
	enum htt_tx_mode_switch_mode mode;
	bool enable;
	u16 info0;
	u16 info1;
	u16 threshold;
	u16 peer_id;
	u8 tid;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
		return;
	}

	info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
	info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);

	enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
	num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS);
	mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
	threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n",
		   info0, info1, enable, num_records, mode, threshold);

	len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n");
		return;
	}

	switch (mode) {
	case HTT_TX_MODE_SWITCH_PUSH:
	case HTT_TX_MODE_SWITCH_PUSH_PULL:
		break;
	default:
		ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n",
			    mode);
		return;
	}

	if (!enable)
		return;

	ar->htt.tx_q_state.enabled = enable;
	ar->htt.tx_q_state.mode = mode;
	ar->htt.tx_q_state.num_push_allowed = threshold;

	rcu_read_lock();

	for (i = 0; i < num_records; i++) {
		record = &resp->tx_mode_switch_ind.records[i];
		info0 = le16_to_cpu(record->info0);
		peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
		tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);

		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
			ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->data_lock);
		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
		spin_unlock_bh(&ar->data_lock);

		/* It is okay to release the lock and use txq because RCU read
		 * lock is held.
		 */

		if (unlikely(!txq)) {
			ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->htt.tx_lock);
		artxq = (void *)txq->drv_priv;
		artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
		spin_unlock_bh(&ar->htt.tx_lock);
	}

	rcu_read_unlock();

	ath10k_mac_tx_push_pending(ar);
}

void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	bool release;

	release = ath10k_htt_t2h_msg_handler(ar, skb);

	/* Free the indication buffer */
	if (release)
		dev_kfree_skb_any(skb);
}

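/* Map a legacy (CCK/OFDM) rate reported by firmware (in Mbps, with 5.5 Mbps
 * represented as 5) to its index in the legacy_rates table used by the
 * per-peer tx stats arrays.
 */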
static inline int ath10k_get_legacy_rate_idx(struct ath10k *ar, u8 rate)
{
	static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12,
					  18, 24, 36, 48, 54};
	int i;

	for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) {
		if (rate == legacy_rates[i])
			return i;
	}

	ath10k_warn(ar, "Invalid legacy rate %hhd peer stats", rate);
	return -EINVAL;
}

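/* Fold one PPDU worth of per-peer tx stats into the per-station histograms
 * used by the extended tx stats debugfs interface, bucketed by rate type
 * (legacy/HT/VHT), MCS, bandwidth, NSS, guard interval and A-MPDU vs
 * non-aggregated delivery.
 */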
static void
ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
				    struct ath10k_sta *arsta,
				    struct ath10k_per_peer_tx_stats *pstats,
				    u8 legacy_rate_idx)
{
	struct rate_info *txrate = &arsta->txrate;
	struct ath10k_htt_tx_stats *tx_stats;
	int ht_idx, gi, mcs, bw, nss;

	if (!arsta->tx_stats)
		return;

	tx_stats = arsta->tx_stats;
	gi = (arsta->txrate.flags & RATE_INFO_FLAGS_SHORT_GI);
	ht_idx = txrate->mcs + txrate->nss * 8;
	mcs = txrate->mcs;
	bw = txrate->bw;
	nss = txrate->nss;

#define STATS_OP_FMT(name) tx_stats->stats[ATH10K_STATS_TYPE_##name]

	if (txrate->flags == RATE_INFO_FLAGS_VHT_MCS) {
		STATS_OP_FMT(SUCC).vht[0][mcs] += pstats->succ_bytes;
		STATS_OP_FMT(SUCC).vht[1][mcs] += pstats->succ_pkts;
		STATS_OP_FMT(FAIL).vht[0][mcs] += pstats->failed_bytes;
		STATS_OP_FMT(FAIL).vht[1][mcs] += pstats->failed_pkts;
		STATS_OP_FMT(RETRY).vht[0][mcs] += pstats->retry_bytes;
		STATS_OP_FMT(RETRY).vht[1][mcs] += pstats->retry_pkts;
	} else if (txrate->flags == RATE_INFO_FLAGS_MCS) {
		STATS_OP_FMT(SUCC).ht[0][ht_idx] += pstats->succ_bytes;
		STATS_OP_FMT(SUCC).ht[1][ht_idx] += pstats->succ_pkts;
		STATS_OP_FMT(FAIL).ht[0][ht_idx] += pstats->failed_bytes;
		STATS_OP_FMT(FAIL).ht[1][ht_idx] += pstats->failed_pkts;
		STATS_OP_FMT(RETRY).ht[0][ht_idx] += pstats->retry_bytes;
		STATS_OP_FMT(RETRY).ht[1][ht_idx] += pstats->retry_pkts;
	} else {
		mcs = legacy_rate_idx;

		STATS_OP_FMT(SUCC).legacy[0][mcs] += pstats->succ_bytes;
		STATS_OP_FMT(SUCC).legacy[1][mcs] += pstats->succ_pkts;
		STATS_OP_FMT(FAIL).legacy[0][mcs] += pstats->failed_bytes;
		STATS_OP_FMT(FAIL).legacy[1][mcs] += pstats->failed_pkts;
		STATS_OP_FMT(RETRY).legacy[0][mcs] += pstats->retry_bytes;
		STATS_OP_FMT(RETRY).legacy[1][mcs] += pstats->retry_pkts;
	}

	if (ATH10K_HW_AMPDU(pstats->flags)) {
		tx_stats->ba_fails += ATH10K_HW_BA_FAIL(pstats->flags);

		if (txrate->flags == RATE_INFO_FLAGS_MCS) {
			STATS_OP_FMT(AMPDU).ht[0][ht_idx] +=
				pstats->succ_bytes + pstats->retry_bytes;
			STATS_OP_FMT(AMPDU).ht[1][ht_idx] +=
				pstats->succ_pkts + pstats->retry_pkts;
		} else {
			STATS_OP_FMT(AMPDU).vht[0][mcs] +=
				pstats->succ_bytes + pstats->retry_bytes;
			STATS_OP_FMT(AMPDU).vht[1][mcs] +=
				pstats->succ_pkts + pstats->retry_pkts;
		}
		STATS_OP_FMT(AMPDU).bw[0][bw] +=
			pstats->succ_bytes + pstats->retry_bytes;
		STATS_OP_FMT(AMPDU).nss[0][nss] +=
			pstats->succ_bytes + pstats->retry_bytes;
		STATS_OP_FMT(AMPDU).gi[0][gi] +=
			pstats->succ_bytes + pstats->retry_bytes;
		STATS_OP_FMT(AMPDU).bw[1][bw] +=
			pstats->succ_pkts + pstats->retry_pkts;
		STATS_OP_FMT(AMPDU).nss[1][nss] +=
			pstats->succ_pkts + pstats->retry_pkts;
		STATS_OP_FMT(AMPDU).gi[1][gi] +=
			pstats->succ_pkts + pstats->retry_pkts;
	} else {
		tx_stats->ack_fails +=
			ATH10K_HW_BA_FAIL(pstats->flags);
	}

	STATS_OP_FMT(SUCC).bw[0][bw] += pstats->succ_bytes;
	STATS_OP_FMT(SUCC).nss[0][nss] += pstats->succ_bytes;
	STATS_OP_FMT(SUCC).gi[0][gi] += pstats->succ_bytes;

	STATS_OP_FMT(SUCC).bw[1][bw] += pstats->succ_pkts;
	STATS_OP_FMT(SUCC).nss[1][nss] += pstats->succ_pkts;
	STATS_OP_FMT(SUCC).gi[1][gi] += pstats->succ_pkts;

	STATS_OP_FMT(FAIL).bw[0][bw] += pstats->failed_bytes;
	STATS_OP_FMT(FAIL).nss[0][nss] += pstats->failed_bytes;
	STATS_OP_FMT(FAIL).gi[0][gi] += pstats->failed_bytes;

	STATS_OP_FMT(FAIL).bw[1][bw] += pstats->failed_pkts;
	STATS_OP_FMT(FAIL).nss[1][nss] += pstats->failed_pkts;
	STATS_OP_FMT(FAIL).gi[1][gi] += pstats->failed_pkts;

	STATS_OP_FMT(RETRY).bw[0][bw] += pstats->retry_bytes;
	STATS_OP_FMT(RETRY).nss[0][nss] += pstats->retry_bytes;
	STATS_OP_FMT(RETRY).gi[0][gi] += pstats->retry_bytes;

	STATS_OP_FMT(RETRY).bw[1][bw] += pstats->retry_pkts;
	STATS_OP_FMT(RETRY).nss[1][nss] += pstats->retry_pkts;
	STATS_OP_FMT(RETRY).gi[1][gi] += pstats->retry_pkts;
}

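/* Validate the firmware-reported rate code, translate it into a mac80211
 * struct rate_info for the station's last tx rate and, when extended tx stats
 * are enabled, accumulate the PPDU into the per-station histograms above.
 * Called with ar->data_lock held.
 */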
static void
ath10k_update_per_peer_tx_stats(struct ath10k *ar,
				struct ieee80211_sta *sta,
				struct ath10k_per_peer_tx_stats *peer_stats)
{
	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
	u8 rate = 0, sgi;
	s8 rate_idx = 0;
	struct rate_info txrate;

	lockdep_assert_held(&ar->data_lock);

	txrate.flags = ATH10K_HW_PREAMBLE(peer_stats->ratecode);
	txrate.bw = ATH10K_HW_BW(peer_stats->flags);
	txrate.nss = ATH10K_HW_NSS(peer_stats->ratecode);
	txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode);
	sgi = ATH10K_HW_GI(peer_stats->flags);

	if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) {
		ath10k_warn(ar, "Invalid VHT mcs %hhd peer stats", txrate.mcs);
		return;
	}

	if (txrate.flags == WMI_RATE_PREAMBLE_HT &&
	    (txrate.mcs > 7 || txrate.nss < 1)) {
		ath10k_warn(ar, "Invalid HT mcs %hhd nss %hhd peer stats",
			    txrate.mcs, txrate.nss);
		return;
	}

	memset(&arsta->txrate, 0, sizeof(arsta->txrate));

	if (txrate.flags == WMI_RATE_PREAMBLE_CCK ||
	    txrate.flags == WMI_RATE_PREAMBLE_OFDM) {
		rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode);
		/* This is hacky, FW sends CCK rate 5.5Mbps as 6 */
		if (rate == 6 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
			rate = 5;
		rate_idx = ath10k_get_legacy_rate_idx(ar, rate);
		if (rate_idx < 0)
			return;
		arsta->txrate.legacy = rate;
	} else if (txrate.flags == WMI_RATE_PREAMBLE_HT) {
		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
		arsta->txrate.mcs = txrate.mcs + 8 * (txrate.nss - 1);
	} else {
		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
		arsta->txrate.mcs = txrate.mcs;
	}

	if (sgi)
		arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;

	arsta->txrate.nss = txrate.nss;
	arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw);

	if (ath10k_debug_is_extd_tx_stats_enabled(ar))
		ath10k_accumulate_per_peer_tx_stats(ar, arsta, peer_stats,
						    rate_idx);
}

static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
					struct sk_buff *skb)
{
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
	struct htt_per_peer_tx_stats_ind *tx_stats;
	struct ieee80211_sta *sta;
	struct ath10k_peer *peer;
	int peer_id, i;
	u8 ppdu_len, num_ppdu;

	num_ppdu = resp->peer_tx_stats.num_ppdu;
	ppdu_len = resp->peer_tx_stats.ppdu_len * sizeof(__le32);

	if (skb->len < sizeof(struct htt_resp_hdr) + num_ppdu * ppdu_len) {
		ath10k_warn(ar, "Invalid peer stats buf length %d\n", skb->len);
		return;
	}

	tx_stats = (struct htt_per_peer_tx_stats_ind *)
			(resp->peer_tx_stats.payload);
	peer_id = __le16_to_cpu(tx_stats->peer_id);

	rcu_read_lock();
	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer || !peer->sta) {
		ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n",
			    peer_id);
		goto out;
	}

	sta = peer->sta;
	for (i = 0; i < num_ppdu; i++) {
		tx_stats = (struct htt_per_peer_tx_stats_ind *)
			   (resp->peer_tx_stats.payload + i * ppdu_len);

		p_tx_stats->succ_bytes = __le32_to_cpu(tx_stats->succ_bytes);
		p_tx_stats->retry_bytes = __le32_to_cpu(tx_stats->retry_bytes);
		p_tx_stats->failed_bytes =
				__le32_to_cpu(tx_stats->failed_bytes);
		p_tx_stats->ratecode = tx_stats->ratecode;
		p_tx_stats->flags = tx_stats->flags;
		p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts);
		p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts);
		p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts);

		ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
	}

out:
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();
}

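/* 10.2 firmware delivers per-peer tx stats as ATH_PKTLOG_TYPE_TX_STAT pktlog
 * records rather than a dedicated HTT message. Parse the pktlog payload and
 * feed each PPDU through the same per-peer stats update path.
 */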
static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data)
{
	struct ath10k_pktlog_hdr *hdr = (struct ath10k_pktlog_hdr *)data;
	struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
	struct ath10k_10_2_peer_tx_stats *tx_stats;
	struct ieee80211_sta *sta;
	struct ath10k_peer *peer;
	u16 log_type = __le16_to_cpu(hdr->log_type);
	u32 peer_id = 0, i;

	if (log_type != ATH_PKTLOG_TYPE_TX_STAT)
		return;

	tx_stats = (struct ath10k_10_2_peer_tx_stats *)((hdr->payload) +
		    ATH10K_10_2_TX_STATS_OFFSET);

	if (!tx_stats->tx_ppdu_cnt)
		return;

	peer_id = tx_stats->peer_id;

	rcu_read_lock();
	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer || !peer->sta) {
		ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
			    peer_id);
		goto out;
	}

	sta = peer->sta;
	for (i = 0; i < tx_stats->tx_ppdu_cnt; i++) {
		p_tx_stats->succ_bytes =
			__le16_to_cpu(tx_stats->success_bytes[i]);
		p_tx_stats->retry_bytes =
			__le16_to_cpu(tx_stats->retry_bytes[i]);
		p_tx_stats->failed_bytes =
			__le16_to_cpu(tx_stats->failed_bytes[i]);
		p_tx_stats->ratecode = tx_stats->ratecode[i];
		p_tx_stats->flags = tx_stats->flags[i];
		p_tx_stats->succ_pkts = tx_stats->success_pkts[i];
		p_tx_stats->retry_pkts = tx_stats->retry_pkts[i];
		p_tx_stats->failed_pkts = tx_stats->failed_pkts[i];

		ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
	}
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();

	return;

out:
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();
}

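/* Main HTT target-to-host message dispatcher. Returns true if the caller may
 * free the message buffer, false when a handler has taken ownership of it
 * (e.g. queued it for later NAPI processing).
 */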
bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	enum htt_t2h_msg_type type;

	/* confirm alignment */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn(ar, "unaligned htt message, expect trouble\n");

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
		   resp->hdr.msg_type);

	if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X",
			   resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
		return true;
	}
	type = ar->htt.t2h_msg_types[resp->hdr.msg_type];

	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
		htt->target_version_major = resp->ver_resp.major;
		htt->target_version_minor = resp->ver_resp.minor;
		complete(&htt->target_version_received);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IND:
		if (ar->dev_type == ATH10K_DEV_TYPE_HL)
			return ath10k_htt_rx_proc_rx_ind_hl(htt,
							    &resp->rx_ind_hl,
							    skb);
		else
			ath10k_htt_rx_proc_rx_ind_ll(htt, &resp->rx_ind);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP: {
		struct htt_peer_map_event ev = {
			.vdev_id = resp->peer_map.vdev_id,
			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
		};
		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
		ath10k_peer_map_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
		struct htt_peer_unmap_event ev = {
			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
		};
		ath10k_peer_unmap_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
		struct htt_tx_done tx_done = {};
		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
		int info = __le32_to_cpu(resp->mgmt_tx_completion.info);

		tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);

		switch (status) {
		case HTT_MGMT_TX_STATUS_OK:
			tx_done.status = HTT_TX_COMPL_STATE_ACK;
			if (test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
				     ar->wmi.svc_map) &&
			    (resp->mgmt_tx_completion.flags &
			     HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI)) {
				tx_done.ack_rssi =
				FIELD_GET(HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK,
					  info);
			}
			break;
		case HTT_MGMT_TX_STATUS_RETRY:
			tx_done.status = HTT_TX_COMPL_STATE_NOACK;
			break;
		case HTT_MGMT_TX_STATUS_DROP:
			tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
			break;
		}

		status = ath10k_txrx_tx_unref(htt, &tx_done);
		if (!status) {
			spin_lock_bh(&htt->tx_lock);
			ath10k_htt_tx_mgmt_dec_pending(htt);
			spin_unlock_bh(&htt->tx_lock);
		}
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
		ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_SEC_IND: {
		struct ath10k *ar = htt->ar;
		struct htt_security_indication *ev = &resp->security_indication;

		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "sec ind peer_id %d unicast %d type %d\n",
			   __le16_to_cpu(ev->peer_id),
			   !!(ev->flags & HTT_SECURITY_IS_UNICAST),
			   MS(ev->flags, HTT_SECURITY_TYPE));
		complete(&ar->install_key_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		atomic_inc(&htt->num_mpdus_ready);
		break;
	}
	case HTT_T2H_MSG_TYPE_TEST:
		break;
	case HTT_T2H_MSG_TYPE_STATS_CONF:
		trace_ath10k_htt_stats(ar, skb->data, skb->len);
		break;
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
		/* Firmware can return tx frames if it's unable to fully
		 * process them and suspects host may be able to fix it. ath10k
		 * sends all tx frames as already inspected so this shouldn't
		 * happen unless fw has a bug.
		 */
		ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
		break;
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
		ath10k_htt_rx_addba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_RX_DELBA:
		ath10k_htt_rx_delba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG: {
		trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
					skb->len -
					offsetof(struct htt_resp,
						 pktlog_msg.payload));

		if (ath10k_peer_stats_enabled(ar))
			ath10k_fetch_10_2_tx_stats(ar,
						   resp->pktlog_msg.payload);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
		/* Ignore this event because mac80211 takes care of Rx
		 * aggregation reordering.
		 */
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
		skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
		return false;
	}
	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
		break;
	case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
		u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
		u32 freq = __le32_to_cpu(resp->chan_change.freq);

		ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq);
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt chan change freq %u phymode %s\n",
			   freq, ath10k_wmi_phymode_str(phymode));
		break;
	}
	case HTT_T2H_MSG_TYPE_AGGR_CONF:
		break;
	case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
		struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);

		if (!tx_fetch_ind) {
			ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
			break;
		}
		skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
		ath10k_htt_rx_tx_fetch_confirm(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
		ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_PEER_STATS:
		ath10k_htt_fetch_peer_stats(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_EN_STATS:
	default:
		ath10k_warn(ar, "htt event (%d) not handled\n",
			    resp->hdr.msg_type);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		break;
	}

	return true;
}
EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);

void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
					     struct sk_buff *skb)
{
	trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);

static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
{
	struct sk_buff *skb;

	while (quota < budget) {
		if (skb_queue_empty(&ar->htt.rx_msdus_q))
			break;

		skb = skb_dequeue(&ar->htt.rx_msdus_q);
		if (!skb)
			break;
		ath10k_process_rx(ar, skb);
		quota++;
	}

	return quota;
}

int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {};
	struct sk_buff_head tx_ind_q;
	struct sk_buff *skb;
	unsigned long flags;
	int quota = 0, done, ret;
	bool resched_napi = false;

	__skb_queue_head_init(&tx_ind_q);

	/* Process pending frames before dequeuing more data
	 * from hardware.
	 */
	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
	if (quota == budget) {
		resched_napi = true;
		goto exit;
	}

	while ((skb = skb_dequeue(&htt->rx_in_ord_compl_q))) {
		spin_lock_bh(&htt->rx_ring.lock);
		ret = ath10k_htt_rx_in_ord_ind(ar, skb);
		spin_unlock_bh(&htt->rx_ring.lock);

		dev_kfree_skb_any(skb);
		if (ret == -EIO) {
			resched_napi = true;
			goto exit;
		}
	}

	while (atomic_read(&htt->num_mpdus_ready)) {
		ret = ath10k_htt_rx_handle_amsdu(htt);
		if (ret == -EIO) {
			resched_napi = true;
			goto exit;
		}
		atomic_dec(&htt->num_mpdus_ready);
	}

	/* Deliver received data after processing data from hardware */
	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);

	/* From NAPI documentation:
	 *  The napi poll() function may also process TX completions, in which
	 *  case if it processes the entire TX ring then it should count that
	 *  work as the rest of the budget.
	 */
	if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
		quota = budget;

	/* kfifo_get: called only within txrx_tasklet so it's neatly serialized.
	 * From kfifo_get() documentation:
	 *  Note that with only one concurrent reader and one concurrent writer,
	 *  you don't need extra locking to use these macro.
	 */
	while (kfifo_get(&htt->txdone_fifo, &tx_done))
		ath10k_txrx_tx_unref(htt, &tx_done);

	ath10k_mac_tx_push_pending(ar);

	spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
	skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
	spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);

	while ((skb = __skb_dequeue(&tx_ind_q))) {
		ath10k_htt_rx_tx_fetch_ind(ar, skb);
		dev_kfree_skb_any(skb);
	}

exit:
	ath10k_htt_rx_msdu_buff_replenish(htt);
	/* In case of rx failure or more data to read, report budget
	 * to reschedule NAPI poll
	 */
	done = resched_napi ? budget : quota;

	return done;
}
EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);

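/* Rx ring ops are selected per target: 32-bit and 64-bit rx ring descriptor
 * variants for low-latency targets, and a high-latency variant that leaves
 * the ring ops unimplemented since HL targets do not use a host rx ring here.
 */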
static const struct ath10k_htt_rx_ops htt_rx_ops_32 = {
	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32,
	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32,
	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32,
	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32,
	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32,
};

static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64,
	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64,
	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64,
	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64,
	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
};

static const struct ath10k_htt_rx_ops htt_rx_ops_hl = {
};

void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;

	if (ar->dev_type == ATH10K_DEV_TYPE_HL)
		htt->rx_ops = &htt_rx_ops_hl;
	else if (ar->hw_params.target_64bit)
		htt->rx_ops = &htt_rx_ops_64;
	else
		htt->rx_ops = &htt_rx_ops_32;
}