/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/log2.h>

#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

#define HTT_RX_RING_REFILL_RESCHED_MS 5

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
static void ath10k_htt_txrx_compl_task(unsigned long ptr);

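/* Look up the rx ring sk_buff that was DMA-mapped at @paddr. Used by the
 * in-order (full rx reorder) path where firmware refers to rx buffers by
 * their physical address instead of a ring index.
 */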
static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
{
	struct ath10k_skb_rxcb *rxcb;

	hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
		if (rxcb->paddr == paddr)
			return ATH10K_RXCB_SKB(rxcb);

	return NULL;
}

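/* Unmap and free every rx buffer currently owned by the ring. In in-order
 * rx mode the buffers are tracked in the skb_table hash (keyed by DMA
 * address), otherwise they live in netbufs_ring[] indexed by ring slot.
 */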
static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *skb;
	struct hlist_node *n;
	int i;

	if (htt->rx_ring.in_ord_rx) {
		hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
			skb = ATH10K_RXCB_SKB(rxcb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			hash_del(&rxcb->hlist);
			dev_kfree_skb_any(skb);
		}
	} else {
		for (i = 0; i < htt->rx_ring.size; i++) {
			skb = htt->rx_ring.netbufs_ring[i];
			if (!skb)
				continue;

			rxcb = ATH10K_SKB_RXCB(skb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
		}
	}

	htt->rx_ring.fill_cnt = 0;
	hash_init(htt->rx_ring.skb_table);
	memset(htt->rx_ring.netbufs_ring, 0,
	       htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
}

static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct htt_rx_desc *rx_desc;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	/* The Full Rx Reorder firmware has no way of telling the host
	 * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
	 * To keep things simple make sure ring is always half empty. This
	 * guarantees there'll be no replenishment overruns possible.
	 */
	BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);

	idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = (struct htt_rx_desc *)skb->data;
		rx_desc->attention.flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		rxcb = ATH10K_SKB_RXCB(skb);
		rxcb->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
		htt->rx_ring.fill_cnt++;

		if (htt->rx_ring.in_ord_rx) {
			hash_add(htt->rx_ring.skb_table,
				 &ATH10K_SKB_RXCB(skb)->hlist,
				 paddr);
		}

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	/*
	 * Make sure the rx buffer is updated before available buffer
	 * index to avoid any potential rx ring corruption.
	 */
	mb();
	*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
	return ret;
}

static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);

	return __ath10k_htt_rx_ring_fill_n(htt, num);
}

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_deficit, num_to_fill;

	/* Refilling the whole RX ring buffer proves to be a bad idea. The
	 * reason is RX may take up significant amount of CPU cycles and starve
	 * other tasks, e.g. TX on an ethernet device while acting as a bridge
	 * with ath10k wlan interface. This ended up with very poor performance
	 * once the host system's CPU was overwhelmed with RX on ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turn makes use of the fact tasklets are
	 * processed in FIFO order. This means actual RX processing can starve
	 * out refilling. If there's not enough buffers on RX ring FW will not
	 * report RX until it is refilled with enough buffers. This
	 * automatically balances load wrt to CPU power.
	 *
	 * This probably comes at a cost of lower maximum throughput but
	 * improves the average and stability.
	 */
	spin_lock_bh(&htt->rx_ring.lock);
	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
	num_deficit -= num_to_fill;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	} else if (num_deficit > 0) {
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}

static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)arg;

	ath10k_htt_rx_msdu_buff_replenish(htt);
}

int ath10k_htt_rx_ring_refill(struct ath10k *ar)
{
	struct ath10k_htt *htt = &ar->htt;
	int ret;

	spin_lock_bh(&htt->rx_ring.lock);
	ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
					      htt->rx_ring.fill_cnt));
	spin_unlock_bh(&htt->rx_ring.lock);

	if (ret)
		ath10k_htt_rx_ring_free(htt);

	return ret;
}

void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
	del_timer_sync(&htt->rx_ring.refill_retry_timer);
	tasklet_kill(&htt->txrx_compl_task);

	skb_queue_purge(&htt->rx_compl_q);
	skb_queue_purge(&htt->rx_in_ord_compl_q);
	skb_queue_purge(&htt->tx_fetch_ind_q);

	ath10k_htt_rx_ring_free(htt);

	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);

	kfree(htt->rx_ring.netbufs_ring);
}

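/* Pop one filled buffer from the rx ring at the software read index and
 * unmap it. Caller must hold rx_ring.lock.
 */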
static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int idx;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_ring.fill_cnt == 0) {
		ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
		return NULL;
	}

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];
	htt->rx_ring.netbufs_ring[idx] = NULL;
	htt->rx_ring.paddrs_ring[idx] = 0;

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev,
			 ATH10K_SKB_RXCB(msdu)->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   struct sk_buff_head *amsdu)
{
	struct ath10k *ar = htt->ar;
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;

	lockdep_assert_held(&htt->rx_ring.lock);

	for (;;) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		msdu = ath10k_htt_rx_netbuf_pop(htt);
		if (!msdu) {
			__skb_queue_purge(amsdu);
			return -ENOENT;
		}

		__skb_queue_tail(amsdu, msdu);

		rx_desc = (struct htt_rx_desc *)msdu->data;

		/* FIXME: we must report msdu payload since this is what caller
		 * expects now
		 */
		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc->attention.flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			__skb_queue_purge(amsdu);
			return -EIO;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc->frag_info.ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
		msdu_len -= msdu->len;

		/* Note: Chained buffers do not contain rx descriptor */
		while (msdu_chained--) {
			msdu = ath10k_htt_rx_netbuf_pop(htt);
			if (!msdu) {
				__skb_queue_purge(amsdu);
				return -ENOENT;
			}

			__skb_queue_tail(amsdu, msdu);
			skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= msdu->len;
			msdu_chaining = 1;
		}

		last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
					 sizeof(*rx_desc) - sizeof(u32));

		if (last_msdu)
			break;
	}

	if (skb_queue_empty(amsdu))
		msdu_chaining = -1;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */

	return msdu_chaining;
}

static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
					       u32 paddr)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
	if (!msdu)
		return NULL;

	rxcb = ATH10K_SKB_RXCB(msdu);
	hash_del(&rxcb->hlist);
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev, rxcb->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

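/* Pop the buffers referenced (by physical address) in an in-order rx
 * indication and chain them onto @list. Non-offloaded buffers are trimmed
 * to their reported MSDU length and sanity-checked against the MSDU_DONE
 * attention bit.
 */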
static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt,
					struct htt_rx_in_ord_ind *ev,
					struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs;
	struct htt_rx_desc *rxd;
	struct sk_buff *msdu;
	int msdu_count;
	bool is_offload;
	u32 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le32_to_cpu(msdu_desc->msdu_paddr);

		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = (void *)msdu->data;

			trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

			skb_put(msdu, sizeof(*rxd));
			skb_pull(msdu, sizeof(*rxd));
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			if (!(__le32_to_cpu(rxd->attention.flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}

int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	dma_addr_t paddr;
	void *vaddr;
	size_t size;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	htt->rx_confused = false;

	/* XXX: The fill level could be changed during runtime in response to
	 * the host processing latency. Is this really worth it?
	 */
	htt->rx_ring.size = HTT_RX_RING_SIZE;
	htt->rx_ring.size_mask = htt->rx_ring.size - 1;
	htt->rx_ring.fill_level = HTT_RX_RING_FILL_LEVEL;

	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn(ar, "htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.netbufs_ring =
		kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring);

	vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
	if (!vaddr)
		goto err_dma_ring;

	htt->rx_ring.paddrs_ring = vaddr;
	htt->rx_ring.base_paddr = paddr;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_KERNEL);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);

	spin_lock_init(&htt->rx_ring.lock);

	htt->rx_ring.fill_cnt = 0;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	hash_init(htt->rx_ring.skb_table);

	skb_queue_head_init(&htt->rx_compl_q);
	skb_queue_head_init(&htt->rx_in_ord_compl_q);
	skb_queue_head_init(&htt->tx_fetch_ind_q);
	atomic_set(&htt->num_mpdus_ready, 0);

	tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
		     (unsigned long)htt);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);

err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);

err_netbuf:
	return -ENOMEM;
}

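/* Per-cipher lengths of the security header (IV/crypto param) in front of
 * the payload and of the trailer (ICV/MIC) behind it. Used when stripping
 * crypto parameters from decrypted frames.
 */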
static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
					  enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

#define MICHAEL_MIC_LEN 8

static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
					 enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

{
596 #define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)
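/* Decode the rate information found in the rx descriptor's ppdu_start
 * words (legacy, HT or VHT preamble) into the mac80211 rx status.
 */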
static void ath10k_htt_rx_h_rates(struct ath10k *ar,
				  struct ieee80211_rx_status *status,
				  struct htt_rx_desc *rxd)
{
	struct ieee80211_supported_band *sband;
	u8 cck, rate, bw, sgi, mcs, nss;
	u8 preamble = 0;
	u8 group_id;
	u32 info1, info2, info3;

	info1 = __le32_to_cpu(rxd->ppdu_start.info1);
	info2 = __le32_to_cpu(rxd->ppdu_start.info2);
	info3 = __le32_to_cpu(rxd->ppdu_start.info3);

	preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);

	switch (preamble) {
	case HTT_RX_LEGACY:
		/* To get legacy rate index band is required. Since band can't
		 * be undefined check if freq is non-zero.
		 */
		if (!status->freq)
			return;

		cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
		rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
		rate &= ~RX_PPDU_START_RATE_FLAG;

		sband = &ar->mac.sbands[status->band];
		status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
		break;
	case HTT_RX_HT:
	case HTT_RX_HT_WITH_TXBF:
		/* HT-SIG - Table 20-11 in info2 and info3 */
		mcs = info2 & 0x1F;
		nss = mcs >> 3;
		bw = (info2 >> 7) & 1;
		sgi = (info3 >> 7) & 1;

		status->rate_idx = mcs;
		status->flag |= RX_FLAG_HT;
		if (sgi)
			status->flag |= RX_FLAG_SHORT_GI;
		if (bw)
			status->flag |= RX_FLAG_40MHZ;
		break;
	case HTT_RX_VHT:
	case HTT_RX_VHT_WITH_TXBF:
		/* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3 */
		bw = info2 & 3;
		sgi = info3 & 1;
		group_id = (info2 >> 4) & 0x3F;

		if (GROUP_ID_IS_SU_MIMO(group_id)) {
			mcs = (info3 >> 4) & 0x0F;
			nss = ((info2 >> 10) & 0x07) + 1;
		} else {
			/* Hardware doesn't decode VHT-SIG-B into Rx descriptor
			 * so it's impossible to decode MCS. Also since
			 * firmware consumes Group Id Management frames host
			 * has no knowledge regarding group/user position
			 * mapping so it's impossible to pick the correct Nsts
			 * from VHT-SIG-A1.
			 *
			 * Bandwidth and SGI are valid so report the rateinfo
			 * on best-effort basis.
			 */
			mcs = 0;
			nss = 1;
		}

		if (mcs > 0x09) {
			ath10k_warn(ar, "invalid MCS received %u\n", mcs);
			ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
				    __le32_to_cpu(rxd->attention.flags),
				    __le32_to_cpu(rxd->mpdu_start.info0),
				    __le32_to_cpu(rxd->mpdu_start.info1),
				    __le32_to_cpu(rxd->msdu_start.common.info0),
				    __le32_to_cpu(rxd->msdu_start.common.info1),
				    rxd->ppdu_start.info0,
				    __le32_to_cpu(rxd->ppdu_start.info1),
				    __le32_to_cpu(rxd->ppdu_start.info2),
				    __le32_to_cpu(rxd->ppdu_start.info3),
				    __le32_to_cpu(rxd->ppdu_start.info4));

			ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
				    __le32_to_cpu(rxd->msdu_end.common.info0),
				    __le32_to_cpu(rxd->mpdu_end.info0));

			ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
					"rx desc msdu payload: ",
					rxd->msdu_payload, 50);
		}

		status->rate_idx = mcs;
		status->vht_nss = nss;

		if (sgi)
			status->flag |= RX_FLAG_SHORT_GI;

		switch (bw) {
		/* 20MHZ */
		case 0:
			break;
		/* 40MHZ */
		case 1:
			status->flag |= RX_FLAG_40MHZ;
			break;
		/* 80MHZ */
		case 2:
			status->vht_flag |= RX_VHT_FLAG_80MHZ;
			break;
		}

		status->flag |= RX_FLAG_VHT;
		break;
	default:
		break;
	}
}

static struct ieee80211_channel *
ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
{
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;
	u16 peer_id;

	lockdep_assert_held(&ar->data_lock);

	if (!rxd)
		return NULL;

	if (rxd->attention.flags &
	    __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
		return NULL;

	if (!(rxd->msdu_end.common.info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
		return NULL;

	peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_PEER_IDX);

	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer)
		return NULL;

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (WARN_ON_ONCE(!arvif))
		return NULL;

	if (ath10k_mac_vif_chan(arvif->vif, &def))
		return NULL;

	return def.chan;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
{
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;

	lockdep_assert_held(&ar->data_lock);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->vdev_id == vdev_id &&
		    ath10k_mac_vif_chan(arvif->vif, &def) == 0)
			return def.chan;
	}

	return NULL;
}

static void
ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
			      struct ieee80211_chanctx_conf *conf,
			      void *data)
{
	struct cfg80211_chan_def *def = data;

	*def = conf->def;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_any_channel(struct ath10k *ar)
{
	struct cfg80211_chan_def def = {};

	ieee80211_iter_chan_contexts_atomic(ar->hw,
					    ath10k_htt_rx_h_any_chan_iter,
					    &def);

	return def.chan;
}

static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd,
				    u32 vdev_id)
{
	struct ieee80211_channel *ch;

	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
	if (!ch)
		ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
	if (!ch)
		ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
	if (!ch)
		ch = ath10k_htt_rx_h_any_channel(ar);
	if (!ch)
		ch = ar->tgt_oper_chan;
	spin_unlock_bh(&ar->data_lock);

	if (!ch)
		return false;

	status->band = ch->band;
	status->freq = ch->center_freq;

	return true;
}

static void ath10k_htt_rx_h_signal(struct ath10k *ar,
				   struct ieee80211_rx_status *status,
				   struct htt_rx_desc *rxd)
{
	/* FIXME: Get real NF */
	status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
			 rxd->ppdu_start.rssi_comb;
	status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}

static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd)
{
	/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
	 * means all prior MSDUs in a PPDU are reported to mac80211 without the
	 * TSF. Is it worth holding frames until end of PPDU is known?
	 *
	 * FIXME: Can we get/compute 64bit TSF?
	 */
	status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
	status->flag |= RX_FLAG_MACTIME_END;
}

static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 u32 vdev_id)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	bool is_first_ppdu;
	bool is_last_ppdu;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_first_ppdu = !!(rxd->attention.flags &
			   __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
	is_last_ppdu = !!(rxd->attention.flags &
			  __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));

	if (is_first_ppdu) {
		/* New PPDU starts so clear out the old per-PPDU status. */
		status->freq = 0;
		status->rate_idx = 0;
		status->vht_nss = 0;
		status->vht_flag &= ~RX_VHT_FLAG_80MHZ;
		status->flag &= ~(RX_FLAG_HT |
				  RX_FLAG_VHT |
				  RX_FLAG_SHORT_GI |
				  RX_FLAG_40MHZ |
				  RX_FLAG_MACTIME_END);
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		ath10k_htt_rx_h_signal(ar, status, rxd);
		ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
		ath10k_htt_rx_h_rates(ar, status, rxd);
	}

	if (is_last_ppdu)
		ath10k_htt_rx_h_mactime(ar, status, rxd);
}

static const char * const tid_to_ac[] = {
	"BE",
	"BK",
	"BK",
	"BE",
	"VI",
	"VI",
	"VO",
	"VO",
};

static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
{
	u8 *qc;
	int tid;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return "";

	qc = ieee80211_get_qos_ctl(hdr);
	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
	if (tid < 8)
		snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
	else
		snprintf(out, size, "tid %d", tid);

	return out;
}

static void ath10k_process_rx(struct ath10k *ar,
			      struct ieee80211_rx_status *rx_status,
			      struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	char tid[32];

	status = IEEE80211_SKB_RXCB(skb);
	*status = *rx_status;

	ath10k_dbg(ar, ATH10K_DBG_DATA,
		   "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%llx fcs-err %i mic-err %i amsdu-more %i\n",
		   skb,
		   skb->len,
		   ieee80211_get_SA(hdr),
		   ath10k_get_tid(hdr, tid, sizeof(tid)),
		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
							"mcast" : "ucast",
		   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
		   (status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) == 0 ?
							"legacy" : "",
		   status->flag & RX_FLAG_HT ? "ht" : "",
		   status->flag & RX_FLAG_VHT ? "vht" : "",
		   status->flag & RX_FLAG_40MHZ ? "40" : "",
		   status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
		   status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->vht_nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
			skb->data, skb->len);
	trace_ath10k_rx_hdr(ar, skb->data, skb->len);
	trace_ath10k_rx_payload(ar, skb->data, skb->len);

	ieee80211_rx(ar->hw, skb);
}

static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
				      struct ieee80211_hdr *hdr)
{
	int len = ieee80211_hdrlen(hdr->frame_control);

	if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
		      ar->running_fw->fw_file.fw_features))
		len = round_up(len, 4);

	return len;
}

static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					enum htt_rx_mpdu_encrypt_type enctype,
					bool is_decrypted)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len;
	size_t crypto_len;
	bool is_first;
	bool is_last;

	rxd = (void *)msdu->data - sizeof(*rxd);
	is_first = !!(rxd->msdu_end.common.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.common.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));

	/* Delivered decapped frame:
	 * [802.11 header]
	 * [crypto param] <-- can be trimmed if !fcs_err &&
	 *                    !decrypt_err && !peer_idx_invalid
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 * [payload]
	 * [FCS] <-- at end, needs to be trimmed
	 */

	/* This probably shouldn't happen but warn just in case */
	if (unlikely(WARN_ON_ONCE(!is_first)))
		return;

	/* This probably shouldn't happen but warn just in case */
	if (unlikely(WARN_ON_ONCE(!(is_first && is_last))))
		return;

	skb_trim(msdu, msdu->len - FCS_LEN);

	/* In most cases this will be true for sniffed frames. It makes sense
	 * to deliver them as-is without stripping the crypto param. This is
	 * necessary for software based decryption.
	 *
	 * If there's no error then the frame is decrypted. At least that is
	 * the case for frames that come in via fragmented rx indication.
	 */
	if (!is_decrypted)
		return;

	/* The payload is decrypted so strip crypto params. Start from tail
	 * since hdr is used to compute some stuff.
	 */

	hdr = (void *)msdu->data;

	if (status->flag & RX_FLAG_IV_STRIPPED)
		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_tail_len(ar, enctype));

	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		skb_trim(msdu, msdu->len - 8);

	if (status->flag & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		memmove((void *)msdu->data + crypto_len,
			(void *)msdu->data, hdr_len);
		skb_pull(msdu, crypto_len);
	}
}

static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
					  struct sk_buff *msdu,
					  struct ieee80211_rx_status *status,
					  const u8 first_hdr[64])
{
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];

	/* Delivered decapped frame:
	 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 *
	 * Note: The nwifi header doesn't have QoS Control and is
	 * (always?) a 3addr frame.
	 *
	 * Note2: There's no A-MSDU subframe header. Even if it's part
	 * of an A-MSDU.
	 */

	/* pull decapped header and copy SA & DA */
	if ((ar->hw_params.hw_4addr_pad == ATH10K_HW_4ADDR_PAD_BEFORE) &&
	    ieee80211_has_a4(((struct ieee80211_hdr *)first_hdr)->frame_control)) {
		/* The QCA99X0 4 address mode pad 2 bytes at the
		 * beginning of MSDU
		 */
		hdr = (struct ieee80211_hdr *)(msdu->data + 2);
		/* The skb length need be extended 2 as the 2 bytes at the tail
		 * be excluded due to the padding
		 */
		skb_put(msdu, 2);
	} else {
		hdr = (struct ieee80211_hdr *)(msdu->data);
	}

	hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
	skb_pull(msdu, hdr_len);

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
					   struct sk_buff *msdu,
					   enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len, crypto_len;
	void *rfc1042;
	bool is_first, is_last, is_amsdu;

	rxd = (void *)msdu->data - sizeof(*rxd);
	hdr = (void *)rxd->rx_hdr_status;

	is_first = !!(rxd->msdu_end.common.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.common.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
	is_amsdu = !(is_first && is_last);

	rfc1042 = hdr;

	if (is_first) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		rfc1042 += round_up(hdr_len, 4) +
			   round_up(crypto_len, 4);
	}

	if (is_amsdu)
		rfc1042 += sizeof(struct amsdu_subframe_hdr);

	return rfc1042;
}

static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					const u8 first_hdr[64],
					enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct ethhdr *eth;
	size_t hdr_len;
	void *rfc1042;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];

	/* Delivered decapped frame:
	 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
	 * [payload]
	 */

	rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
	if (WARN_ON_ONCE(!rfc1042))
		return;

	/* pull decapped header and copy SA & DA */
	eth = (struct ethhdr *)msdu->data;
	ether_addr_copy(da, eth->h_dest);
	ether_addr_copy(sa, eth->h_source);
	skb_pull(msdu, sizeof(struct ethhdr));

	/* push rfc1042/llc/snap */
	memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
	       sizeof(struct rfc1042_hdr));

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
					 struct sk_buff *msdu,
					 struct ieee80211_rx_status *status,
					 const u8 first_hdr[64])
{
	struct ieee80211_hdr *hdr;
	size_t hdr_len;

	/* Delivered decapped frame:
	 * [amsdu header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 * [payload]
	 */

	skb_pull(msdu, sizeof(struct amsdu_subframe_hdr));

	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
}

static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
				    struct sk_buff *msdu,
				    struct ieee80211_rx_status *status,
				    u8 first_hdr[64],
				    enum htt_rx_mpdu_encrypt_type enctype,
				    bool is_decrypted)
{
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	/* First msdu's decapped header:
	 * [802.11 header] <-- padded to 4 bytes long
	 * [crypto param] <-- padded to 4 bytes long
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 *
	 * Other (2nd, 3rd, ..) msdu's decapped header:
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 */

	rxd = (void *)msdu->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	switch (decap) {
	case RX_MSDU_DECAP_RAW:
		ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
					    is_decrypted);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr);
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr);
		break;
	}
}

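/* Translate the hardware's IP/TCP/UDP checksum attention bits into a
 * CHECKSUM_UNNECESSARY / CHECKSUM_NONE hint for the network stack.
 */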
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags, info;
	bool is_ip4, is_ip6;
	bool is_tcp, is_udp;
	bool ip_csum_ok, tcpudp_csum_ok;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);
	info = __le32_to_cpu(rxd->msdu_start.common.info1);

	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

	if (!is_ip4 && !is_ip6)
		return CHECKSUM_NONE;
	if (!is_tcp && !is_udp)
		return CHECKSUM_NONE;
	if (!ip_csum_ok)
		return CHECKSUM_NONE;
	if (!tcpudp_csum_ok)
		return CHECKSUM_NONE;

	return CHECKSUM_UNNECESSARY;
}

static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
{
	msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
}

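/* Per-MPDU rx processing: derive encryption/error state from the first and
 * last MSDU's rx descriptors, set the corresponding mac80211 flags and
 * undecap every MSDU in the A-MSDU list.
 */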
static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status)
{
	struct sk_buff *first;
	struct sk_buff *last;
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;
	struct ieee80211_hdr *hdr;
	enum htt_rx_mpdu_encrypt_type enctype;
	u8 first_hdr[64];
	u8 *qos;
	size_t hdr_len;
	bool has_fcs_err;
	bool has_crypto_err;
	bool has_tkip_err;
	bool has_peer_idx_invalid;
	bool is_decrypted;
	bool is_mgmt;
	u32 attention;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_mgmt = !!(rxd->attention.flags &
		     __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));

	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	/* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
	 * decapped header. It'll be used for undecapping of each MSDU.
	 */
	hdr = (void *)rxd->rx_hdr_status;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(first_hdr, hdr, hdr_len);

	/* Each A-MSDU subframe will use the original header as the base and be
	 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
	 */
	hdr = (void *)first_hdr;
	qos = ieee80211_get_qos_ctl(hdr);
	qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;

	/* Some attention flags are valid only in the last MSDU. */
	last = skb_peek_tail(amsdu);
	rxd = (void *)last->data - sizeof(*rxd);
	attention = __le32_to_cpu(rxd->attention.flags);

	has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
	has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
	has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);

	/* Note: If hardware captures an encrypted frame that it can't decrypt,
	 * e.g. due to fcs error, missing peer or invalid key data it will
	 * report the frame as raw.
	 */
	is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
			!has_fcs_err &&
			!has_crypto_err &&
			!has_peer_idx_invalid);

	/* Clear per-MPDU flags while leaving per-PPDU flags intact. */
	status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
			  RX_FLAG_MMIC_ERROR |
			  RX_FLAG_DECRYPTED |
			  RX_FLAG_IV_STRIPPED |
			  RX_FLAG_ONLY_MONITOR |
			  RX_FLAG_MMIC_STRIPPED);

	if (has_fcs_err)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (has_tkip_err)
		status->flag |= RX_FLAG_MMIC_ERROR;

	/* Firmware reports all necessary management frames via WMI already.
	 * They are not reported to monitor interfaces at all so pass the ones
	 * coming via HTT to monitor interfaces instead. This simplifies
	 * matters a lot.
	 */
	if (is_mgmt)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	if (is_decrypted) {
		status->flag |= RX_FLAG_DECRYPTED;

		if (likely(!is_mgmt))
			status->flag |= RX_FLAG_IV_STRIPPED |
					RX_FLAG_MMIC_STRIPPED;
	}

	skb_queue_walk(amsdu, msdu) {
		ath10k_htt_rx_h_csum_offload(msdu);
		ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
					is_decrypted);

		/* Undecapping involves copying the original 802.11 header back
		 * to sk_buff. If frame is protected and hardware has decrypted
		 * it then remove the protected bit.
		 */
		if (!is_decrypted)
			continue;
		if (is_mgmt)
			continue;

		hdr = (void *)msdu->data;
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	}
}

static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    struct ieee80211_rx_status *status)
{
	struct sk_buff *msdu;

	while ((msdu = __skb_dequeue(amsdu))) {
		/* Setup per-MSDU flags */
		if (skb_queue_empty(amsdu))
			status->flag &= ~RX_FLAG_AMSDU_MORE;
		else
			status->flag |= RX_FLAG_AMSDU_MORE;

		ath10k_process_rx(ar, status, msdu);
	}
}

static int ath10k_unchain_msdu(struct sk_buff_head *amsdu)
{
	struct sk_buff *skb, *first;
	int space;
	int total_len = 0;

	/* TODO: Might could optimize this by using
	 * skb_try_coalesce or similar method to
	 * decrease copying, or maybe get mac80211 to
	 * provide a way to just receive a list of
	 * skb?
	 */

	first = __skb_dequeue(amsdu);

	/* Allocate total length all at once. */
	skb_queue_walk(amsdu, skb)
		total_len += skb->len;

	space = total_len - skb_tailroom(first);
	if ((space > 0) &&
	    (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
		/* TODO: bump some rx-oom error stat */
		/* put it back together so we can free the
		 * whole list at once.
		 */
		__skb_queue_head(amsdu, first);
		return -1;
	}

	/* Walk list again, copying contents into
	 * msdu.
	 */
	while ((skb = __skb_dequeue(amsdu))) {
		skb_copy_from_linear_data(skb, skb_put(first, skb->len),
					  skb->len);
		dev_kfree_skb_any(skb);
	}

	__skb_queue_head(amsdu, first);
	return 0;
}

static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    bool chained)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	if (!chained)
		return;

	/* FIXME: Current unchaining logic can only handle simple case of raw
	 * msdu chaining. If decapping is other than raw the chaining may be
	 * more complex and this isn't handled by the current code. Don't even
	 * try re-constructing such frames - it'll be pretty much garbage.
	 */
	if (decap != RX_MSDU_DECAP_RAW ||
	    skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
		__skb_queue_purge(amsdu);
		return;
	}

	ath10k_unchain_msdu(amsdu);
}

static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
					struct sk_buff_head *amsdu,
					struct ieee80211_rx_status *rx_status)
{
	/* FIXME: It might be a good idea to do some fuzzy-testing to drop
	 * invalid/dangerous frames.
	 */

	if (!rx_status->freq) {
		ath10k_warn(ar, "no channel configured; ignoring frame(s)!\n");
		return false;
	}

	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
		return false;
	}

	return true;
}

static void ath10k_htt_rx_h_filter(struct ath10k *ar,
				   struct sk_buff_head *amsdu,
				   struct ieee80211_rx_status *rx_status)
{
	if (skb_queue_empty(amsdu))
		return;

	if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
		return;

	__skb_queue_purge(amsdu);
}

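/* Pop one (A-)MSDU from the rx ring and run it through the rx processing
 * pipeline: PPDU status, unchaining, filtering, MPDU handling and finally
 * delivery to mac80211.
 */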
static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct ieee80211_rx_status *rx_status = &htt->rx_status;
	struct sk_buff_head amsdu;
	int ret;

	__skb_queue_head_init(&amsdu);

	spin_lock_bh(&htt->rx_ring.lock);
	if (htt->rx_confused) {
		spin_unlock_bh(&htt->rx_ring.lock);
		return -EIO;
	}
	ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
	spin_unlock_bh(&htt->rx_ring.lock);

	if (ret < 0) {
		ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
		__skb_queue_purge(&amsdu);
		/* FIXME: It's probably a good idea to reboot the
		 * device instead of leaving it inoperable.
		 */
		htt->rx_confused = true;
		return ret;
	}

	ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
	ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
	ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
	ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);

	return 0;
}

static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
				      struct htt_rx_indication *rx)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	int num_mpdu_ranges;
	int i, mpdu_count = 0;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
			rx, sizeof(*rx) +
			(sizeof(struct htt_rx_indication_mpdu_range) *
				num_mpdu_ranges));

	for (i = 0; i < num_mpdu_ranges; i++)
		mpdu_count += mpdu_ranges[i].mpdu_count;

	atomic_add(mpdu_count, &htt->num_mpdus_ready);

	tasklet_schedule(&htt->txrx_compl_task);
}

static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt)
{
	atomic_inc(&htt->num_mpdus_ready);

	tasklet_schedule(&htt->txrx_compl_task);
}

static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
				       struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_done tx_done = {};
	int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
	__le16 msdu_id;
	int i;

	switch (status) {
	case HTT_DATA_TX_STATUS_NO_ACK:
		tx_done.status = HTT_TX_COMPL_STATE_NOACK;
		break;
	case HTT_DATA_TX_STATUS_OK:
		tx_done.status = HTT_TX_COMPL_STATE_ACK;
		break;
	case HTT_DATA_TX_STATUS_DISCARD:
	case HTT_DATA_TX_STATUS_POSTPONE:
	case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
		break;
	default:
		ath10k_warn(ar, "unhandled tx completion status %d\n", status);
		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
		break;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
		   resp->data_tx_completion.num_msdus);

	for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
		msdu_id = resp->data_tx_completion.msdus[i];
		tx_done.msdu_id = __le16_to_cpu(msdu_id);

		/* kfifo_put: In practice firmware shouldn't fire off per-CE
		 * interrupt and main interrupt (MSI/-X range case) for the same
		 * HTC service so it should be safe to use kfifo_put w/o lock.
		 *
		 * From kfifo_put() documentation:
		 *  Note that with only one concurrent reader and one concurrent
		 *  writer, you don't need extra locking to use these macro.
		 */
		if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
			ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
				    tx_done.msdu_id, tx_done.status);
			ath10k_txrx_tx_unref(htt, &tx_done);
		}
	}
}

static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_addba *ev = &resp->rx_addba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx addba tid %hu peer_id %hu size %hhu\n",
		   tid, peer_id, ev->window_size);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
		   peer->addr, tid, ev->window_size);

	ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}

static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_delba *ev = &resp->rx_delba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx delba tid %hu peer_id %hu\n",
		   tid, peer_id);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx stop rx ba session sta %pM tid %hu\n",
		   peer->addr, tid);

	ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}

static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
				       struct sk_buff_head *amsdu)
{
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;

	if (skb_queue_empty(list))
		return -ENOBUFS;

	if (WARN_ON(!skb_queue_empty(amsdu)))
		return -EINVAL;

	while ((msdu = __skb_dequeue(list))) {
		__skb_queue_tail(amsdu, msdu);

		rxd = (void *)msdu->data - sizeof(*rxd);
		if (rxd->msdu_end.common.info0 &
		    __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
			break;
	}

	msdu = skb_peek_tail(amsdu);
	rxd = (void *)msdu->data - sizeof(*rxd);
	if (!(rxd->msdu_end.common.info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
		skb_queue_splice_init(amsdu, list);
		return -EAGAIN;
	}

	return 0;
}

static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
					    struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (!ieee80211_has_protected(hdr->frame_control))
		return;

	/* Offloaded frames are already decrypted but firmware insists they are
	 * protected in the 802.11 header. Strip the flag. Otherwise mac80211
	 * will drop the frame.
	 */

	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	status->flag |= RX_FLAG_DECRYPTED |
			RX_FLAG_IV_STRIPPED |
			RX_FLAG_MMIC_STRIPPED;
}

static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
				       struct sk_buff_head *list)
{
	struct ath10k_htt *htt = &ar->htt;
	struct ieee80211_rx_status *status = &htt->rx_status;
	struct htt_rx_offload_msdu *rx;
	struct sk_buff *msdu;
	size_t offset;

	while ((msdu = __skb_dequeue(list))) {
		/* Offloaded frames don't have Rx descriptor. Instead they have
		 * a short meta information header.
		 */

		rx = (void *)msdu->data;

		skb_put(msdu, sizeof(*rx));
		skb_pull(msdu, sizeof(*rx));

		if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
			ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
			dev_kfree_skb_any(msdu);
			continue;
		}

		skb_put(msdu, __le16_to_cpu(rx->msdu_len));

		/* Offloaded rx header length isn't multiple of 2 nor 4 so the
		 * actual payload is unaligned. Align the frame. Otherwise
		 * mac80211 complains. This shouldn't reduce performance much
		 * because these offloaded frames are rare.
		 */
		offset = 4 - ((unsigned long)msdu->data & 3);
		skb_put(msdu, offset);
		memmove(msdu->data + offset, msdu->data, msdu->len);
		skb_pull(msdu, offset);

		/* FIXME: The frame is NWifi. Re-construct QoS Control
		 * if possible later.
		 */

		memset(status, 0, sizeof(*status));
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		ath10k_htt_rx_h_rx_offload_prot(status, msdu);
		ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
		ath10k_process_rx(ar, status, msdu);
	}
}

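/* Handle an HTT "rx in order" indication (full rx reorder firmware). The
 * event carries a list of buffer physical addresses which may contain
 * offloaded frames and/or several A-MSDUs that are extracted and processed
 * one by one.
 */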
static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (void *)skb->data;
	struct ieee80211_rx_status *status = &htt->rx_status;
	struct sk_buff_head list;
	struct sk_buff_head amsdu;
	u16 peer_id;
	u16 msdu_count;
	u8 vdev_id;
	u8 tid;
	bool offload;
	bool frag;
	int ret;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_confused)
		return;

	skb_pull(skb, sizeof(resp->hdr));
	skb_pull(skb, sizeof(resp->rx_in_ord_ind));

	peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
	msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
	vdev_id = resp->rx_in_ord_ind.vdev_id;
	tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
	offload = !!(resp->rx_in_ord_ind.info &
			HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
	frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
		   vdev_id, peer_id, tid, offload, frag, msdu_count);

	if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) {
		ath10k_warn(ar, "dropping invalid in order rx indication\n");
		return;
	}

	/* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
	 * extracted and processed.
	 */
	__skb_queue_head_init(&list);
	ret = ath10k_htt_rx_pop_paddr_list(htt, &resp->rx_in_ord_ind, &list);
	if (ret < 0) {
		ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
		htt->rx_confused = true;
		return;
	}

	/* Offloaded frames are very different and need to be handled
	 * separately.
	 */
	if (offload)
		ath10k_htt_rx_h_rx_offload(ar, &list);

	while (!skb_queue_empty(&list)) {
		__skb_queue_head_init(&amsdu);
		ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
		switch (ret) {
		case 0:
			/* Note: The in-order indication may report interleaved
			 * frames from different PPDUs meaning reported rx rate
			 * to mac80211 isn't accurate/reliable. It's still
			 * better to report something than nothing though. This
			 * should still give an idea about rx rate to the user.
			 */
			ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
			ath10k_htt_rx_h_filter(ar, &amsdu, status);
			ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
			ath10k_htt_rx_h_deliver(ar, &amsdu, status);
			break;
		default:
			/* Should not happen. */
			ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
			htt->rx_confused = true;
			__skb_queue_purge(&list);
			return;
		}
	}
}

static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
						   const __le32 *resp_ids,
						   int num_resp_ids)
{
	int i;
	u32 resp_id;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
		   num_resp_ids);

	for (i = 0; i < num_resp_ids; i++) {
		resp_id = le32_to_cpu(resp_ids[i]);

		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
			   resp_id);

		/* TODO: free resp_id */
	}
}

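/* Handle an HTT tx fetch indication (push/pull tx mode). For each record
 * the firmware tells the host how many MSDUs/bytes it may push for a given
 * peer/tid; the host pushes what it can, updates the record and sends a
 * tx fetch response back.
 */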
static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_hw *hw = ar->hw;
	struct ieee80211_txq *txq;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_fetch_record *record;
	size_t len;
	size_t max_num_bytes;
	size_t max_num_msdus;
	size_t num_bytes;
	size_t num_msdus;
	const __le32 *resp_ids;
	u16 num_records;
	u16 num_resp_ids;
	u16 peer_id;
	u8 tid;
	int ret;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
		return;
	}

	num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
	num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);

	len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
	len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n",
		   num_records, num_resp_ids,
		   le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));

	if (!ar->htt.tx_q_state.enabled) {
		ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
		return;
	}

	if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
		ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
		return;
	}

	rcu_read_lock();

	for (i = 0; i < num_records; i++) {
		record = &resp->tx_fetch_ind.records[i];
		peer_id = MS(le16_to_cpu(record->info),
			     HTT_TX_FETCH_RECORD_INFO_PEER_ID);
		tid = MS(le16_to_cpu(record->info),
			 HTT_TX_FETCH_RECORD_INFO_TID);
		max_num_msdus = le16_to_cpu(record->num_msdus);
		max_num_bytes = le32_to_cpu(record->num_bytes);

		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n",
			   i, peer_id, tid, max_num_msdus, max_num_bytes);

		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
			ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->data_lock);
		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
		spin_unlock_bh(&ar->data_lock);

		/* It is okay to release the lock and use txq because RCU read
		 * lock is held.
		 */

		if (unlikely(!txq)) {
			ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		num_msdus = 0;
		num_bytes = 0;

		while (num_msdus < max_num_msdus &&
		       num_bytes < max_num_bytes) {
			ret = ath10k_mac_tx_push_txq(hw, txq);
			if (ret < 0)
				break;

			num_msdus++;
			num_bytes += ret;
		}

		record->num_msdus = cpu_to_le16(num_msdus);
		record->num_bytes = cpu_to_le32(num_bytes);

		ath10k_htt_tx_txq_recalc(hw, txq);
	}

	rcu_read_unlock();

	resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);

	ret = ath10k_htt_tx_fetch_resp(ar,
				       resp->tx_fetch_ind.token,
				       resp->tx_fetch_ind.fetch_seq_num,
				       resp->tx_fetch_ind.records,
				       num_records);
	if (unlikely(ret)) {
		ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
			    le32_to_cpu(resp->tx_fetch_ind.token), ret);
		/* FIXME: request fw restart */
	}

	ath10k_htt_tx_txq_sync(ar);
}

static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
					   struct sk_buff *skb)
{
	const struct htt_resp *resp = (void *)skb->data;
	size_t len;
	int num_resp_ids;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
		return;
	}

	num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
	len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
		return;
	}

	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
					       resp->tx_fetch_confirm.resp_ids,
					       num_resp_ids);
}

static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
					     struct sk_buff *skb)
{
	const struct htt_resp *resp = (void *)skb->data;
	const struct htt_tx_mode_switch_record *record;
	struct ieee80211_txq *txq;
	struct ath10k_txq *artxq;
	size_t len;
	size_t num_records;
	enum htt_tx_mode_switch_mode mode;
	bool enable;
	u16 info0;
	u16 info1;
	u16 threshold;
	u16 peer_id;
	u8 tid;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
		return;
	}

	info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
	info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);

	enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
	num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);
	mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
	threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n",
		   info0, info1, enable, num_records, mode, threshold);

	len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n");
		return;
	}

	switch (mode) {
	case HTT_TX_MODE_SWITCH_PUSH:
	case HTT_TX_MODE_SWITCH_PUSH_PULL:
		break;
	default:
		ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n",
			    mode);
		return;
	}

	ar->htt.tx_q_state.enabled = enable;
	ar->htt.tx_q_state.mode = mode;
	ar->htt.tx_q_state.num_push_allowed = threshold;

	rcu_read_lock();

	for (i = 0; i < num_records; i++) {
		record = &resp->tx_mode_switch_ind.records[i];
		info0 = le16_to_cpu(record->info0);
		peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
		tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);

		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
			ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->data_lock);
		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
		spin_unlock_bh(&ar->data_lock);

		/* It is okay to release the lock and use txq because RCU read
		 * lock is held.
		 */

		if (unlikely(!txq)) {
			ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->htt.tx_lock);
		artxq = (void *)txq->drv_priv;
		artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
		spin_unlock_bh(&ar->htt.tx_lock);
	}

	rcu_read_unlock();

	ath10k_mac_tx_push_pending(ar);
}

void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	bool release;

	release = ath10k_htt_t2h_msg_handler(ar, skb);

	/* Free the indication buffer */
	if (release)
		dev_kfree_skb_any(skb);
}

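/* Main T2H dispatcher. The firmware-specific msg_type from the HTT
 * header is translated through ar->htt.t2h_msg_types[] into the
 * abstract enum htt_t2h_msg_type before being switched on. Returns
 * true when the caller may free the skb, false when the skb has been
 * queued for later processing.
 */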
bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	enum htt_t2h_msg_type type;

	/* confirm alignment */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn(ar, "unaligned htt message, expect trouble\n");

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
		   resp->hdr.msg_type);

	if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X",
			   resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
		return true;
	}
	type = ar->htt.t2h_msg_types[resp->hdr.msg_type];

	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
		htt->target_version_major = resp->ver_resp.major;
		htt->target_version_minor = resp->ver_resp.minor;
		complete(&htt->target_version_received);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IND:
		ath10k_htt_rx_proc_rx_ind(htt, &resp->rx_ind);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP: {
		struct htt_peer_map_event ev = {
			.vdev_id = resp->peer_map.vdev_id,
			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
		};
		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
		ath10k_peer_map_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
		struct htt_peer_unmap_event ev = {
			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
		};
		ath10k_peer_unmap_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
		struct htt_tx_done tx_done = {};
		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);

		tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);

		switch (status) {
		case HTT_MGMT_TX_STATUS_OK:
			tx_done.status = HTT_TX_COMPL_STATE_ACK;
			break;
		case HTT_MGMT_TX_STATUS_RETRY:
			tx_done.status = HTT_TX_COMPL_STATE_NOACK;
			break;
		case HTT_MGMT_TX_STATUS_DROP:
			tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
			break;
		}

		status = ath10k_txrx_tx_unref(htt, &tx_done);
		if (!status) {
			spin_lock_bh(&htt->tx_lock);
			ath10k_htt_tx_mgmt_dec_pending(htt);
			spin_unlock_bh(&htt->tx_lock);
		}
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
		ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
		tasklet_schedule(&htt->txrx_compl_task);
		break;
	case HTT_T2H_MSG_TYPE_SEC_IND: {
		struct ath10k *ar = htt->ar;
		struct htt_security_indication *ev = &resp->security_indication;

		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "sec ind peer_id %d unicast %d type %d\n",
			   __le16_to_cpu(ev->peer_id),
			   !!(ev->flags & HTT_SECURITY_IS_UNICAST),
			   MS(ev->flags, HTT_SECURITY_TYPE));
		complete(&ar->install_key_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		ath10k_htt_rx_frag_handler(htt);
		break;
	}
	case HTT_T2H_MSG_TYPE_TEST:
		break;
	case HTT_T2H_MSG_TYPE_STATS_CONF:
		trace_ath10k_htt_stats(ar, skb->data, skb->len);
		break;
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
		/* Firmware can return tx frames if it's unable to fully
		 * process them and suspects host may be able to fix it. ath10k
		 * sends all tx frames as already inspected so this shouldn't
		 * happen unless fw has a bug.
		 */
		ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
		break;
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
		ath10k_htt_rx_addba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_RX_DELBA:
		ath10k_htt_rx_delba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG: {
		trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
					skb->len -
					offsetof(struct htt_resp,
						 pktlog_msg.payload));
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
		/* Ignore this event because mac80211 takes care of Rx
		 * aggregation reordering.
		 */
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
		skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
		tasklet_schedule(&htt->txrx_compl_task);
		return false;
	}
	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
		break;
	case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
		u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
		u32 freq = __le32_to_cpu(resp->chan_change.freq);

		ar->tgt_oper_chan =
			__ieee80211_get_channel(ar->hw->wiphy, freq);
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt chan change freq %u phymode %s\n",
			   freq, ath10k_wmi_phymode_str(phymode));
		break;
	}
	case HTT_T2H_MSG_TYPE_AGGR_CONF:
		break;
	case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
		struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);

		if (!tx_fetch_ind) {
			ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
			break;
		}
		skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
		tasklet_schedule(&htt->txrx_compl_task);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
		ath10k_htt_rx_tx_fetch_confirm(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
		ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_EN_STATS:
	default:
		ath10k_warn(ar, "htt event (%d) not handled\n",
			    resp->hdr.msg_type);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		break;
	}
	return true;
}
EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);

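/* Pktlog completion payloads are only traced; the skb is always
 * consumed (freed) here.
 */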
void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
					     struct sk_buff *skb)
{
	trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);

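/* Bottom half for tx/rx completions. Pending in-order rx indications and
 * tx fetch indications are first spliced onto local lists under the
 * queue locks and then processed without holding those locks. Tx
 * completions are drained from the txdone kfifo and the rx ring is
 * replenished at the end.
 */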
static void ath10k_htt_txrx_compl_task(unsigned long ptr)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
	struct ath10k *ar = htt->ar;
	struct htt_tx_done tx_done = {};
	struct sk_buff_head rx_ind_q;
	struct sk_buff_head tx_ind_q;
	struct sk_buff *skb;
	unsigned long flags;
	int num_mpdus;

	__skb_queue_head_init(&rx_ind_q);
	__skb_queue_head_init(&tx_ind_q);

	spin_lock_irqsave(&htt->rx_in_ord_compl_q.lock, flags);
	skb_queue_splice_init(&htt->rx_in_ord_compl_q, &rx_ind_q);
	spin_unlock_irqrestore(&htt->rx_in_ord_compl_q.lock, flags);

	spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
	skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
	spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);

	/* kfifo_get: called only within txrx_tasklet so it's neatly serialized.
	 * From kfifo_get() documentation:
	 *  Note that with only one concurrent reader and one concurrent writer,
	 *  you don't need extra locking to use these macro.
	 */
	while (kfifo_get(&htt->txdone_fifo, &tx_done))
		ath10k_txrx_tx_unref(htt, &tx_done);

	while ((skb = __skb_dequeue(&tx_ind_q))) {
		ath10k_htt_rx_tx_fetch_ind(ar, skb);
		dev_kfree_skb_any(skb);
	}

	num_mpdus = atomic_read(&htt->num_mpdus_ready);

	while (num_mpdus) {
		if (ath10k_htt_rx_handle_amsdu(htt))
			break;

		num_mpdus--;
		atomic_dec(&htt->num_mpdus_ready);
	}

	while ((skb = __skb_dequeue(&rx_ind_q))) {
		spin_lock_bh(&htt->rx_ring.lock);
		ath10k_htt_rx_in_ord_ind(ar, skb);
		spin_unlock_bh(&htt->rx_ring.lock);
		dev_kfree_skb_any(skb);
	}

	ath10k_htt_rx_msdu_buff_replenish(htt);
}