/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"

#include <linux/log2.h>

/* slightly larger than one large A-MPDU */
#define HTT_RX_RING_SIZE_MIN 128

/* roughly 20 ms @ 1 Gbps of 1500B MSDUs */
#define HTT_RX_RING_SIZE_MAX 2048

#define HTT_RX_AVG_FRM_BYTES 1000

/* ms, very conservative */
#define HTT_RX_HOST_LATENCY_MAX_MS 20

/* ms, conservative */
#define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);

static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
{
	int size;

	/*
	 * It is expected that the host CPU will typically be able to
	 * service the rx indication from one A-MPDU before the rx
	 * indication from the subsequent A-MPDU happens, roughly 1-2 ms
	 * later. However, the rx ring should be sized very conservatively,
	 * to accommodate the worst reasonable delay before the host CPU
	 * services a rx indication interrupt.
	 *
	 * The rx ring need not be kept full of empty buffers. In theory,
	 * the htt host SW can dynamically track the low-water mark in the
	 * rx ring, and dynamically adjust the level to which the rx ring
	 * is filled with empty buffers, to dynamically meet the desired
	 * low-water mark.
	 *
	 * In contrast, it's difficult to resize the rx ring itself, once
	 * it's in use. Thus, the ring itself should be sized very
	 * conservatively, while the degree to which the ring is filled
	 * with empty buffers should be sized moderately conservatively.
	 */

	/* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
	size =
	    htt->max_throughput_mbps +
	    1000 /
	    (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;

	if (size < HTT_RX_RING_SIZE_MIN)
		size = HTT_RX_RING_SIZE_MIN;

	if (size > HTT_RX_RING_SIZE_MAX)
		size = HTT_RX_RING_SIZE_MAX;

	size = roundup_pow_of_two(size);

	return size;
}
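
/*
 * Worked example (editorial, not from the original source): with a
 * hypothetical max_throughput_mbps of 800, the integer arithmetic above
 * evaluates to 800 + (1000 / 8000) * 20 = 800 + 0 = 800 entries, which
 * roundup_pow_of_two() then raises to a 1024-entry ring.
 */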

static int ath10k_htt_rx_ring_fill_level(struct ath10k_htt *htt)
{
	int size;

	/* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
	size =
	    htt->max_throughput_mbps *
	    1000 /
	    (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;

	/*
	 * Make sure the fill level is at least 1 less than the ring size.
	 * Leaving 1 element empty allows the SW to easily distinguish
	 * between a full ring vs. an empty ring.
	 */
	if (size >= htt->rx_ring.size)
		size = htt->rx_ring.size - 1;

	return size;
}
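
/*
 * Worked example (editorial): with a hypothetical max_throughput_mbps of
 * 800, 800 * 1000 / (8 * 1000) * 10 = 1000 buffers, i.e. roughly 10 ms
 * worth of 1000-byte frames at 800 Mbps. With a 1024-entry ring this is
 * kept as-is; with anything smaller it is clamped to size - 1.
 */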

static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct ath10k_skb_cb *cb;
	int i;

	for (i = 0; i < htt->rx_ring.fill_cnt; i++) {
		skb = htt->rx_ring.netbufs_ring[i];
		cb = ATH10K_SKB_CB(skb);
		dma_unmap_single(htt->ar->dev, cb->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}

	htt->rx_ring.fill_cnt = 0;
}

static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct htt_rx_desc *rx_desc;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	idx = __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr));
	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = (struct htt_rx_desc *)skb->data;
		rx_desc->attention.flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		ATH10K_SKB_CB(skb)->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
		htt->rx_ring.fill_cnt++;

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	*(htt->rx_ring.alloc_idx.vaddr) = __cpu_to_le32(idx);
	return ret;
}
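
/*
 * Editorial note: because the ring size is always a power of two,
 * "idx++; idx &= htt->rx_ring.size_mask;" above is equivalent to
 * "idx = (idx + 1) % htt->rx_ring.size" without a division. E.g. with
 * size 1024 (mask 0x3ff), index 1023 increments to 1024 and masks back
 * to 0.
 */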

static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_deficit, num_to_fill;

	/* Refilling the whole RX ring buffer proves to be a bad idea. The
	 * reason is RX may take up a significant amount of CPU cycles and
	 * starve other tasks, e.g. TX on an ethernet device while acting
	 * as a bridge with the ath10k wlan interface. This ended up with
	 * very poor performance once the host CPU was overwhelmed with RX
	 * on ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turn makes use of the fact that tasklets
	 * are processed in FIFO order. This means actual RX processing can
	 * starve out refilling. If there are not enough buffers on the RX
	 * ring the FW will not report RX until the ring is refilled with
	 * enough buffers. This automatically balances load wrt to CPU
	 * power.
	 *
	 * This probably comes at a cost of lower maximum throughput but
	 * improves the average and stability. */
	spin_lock_bh(&htt->rx_ring.lock);
	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
	num_deficit -= num_to_fill;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	} else if (num_deficit > 0) {
		tasklet_schedule(&htt->rx_replenish_task);
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}
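
/*
 * Editorial sketch of the flow above, assuming a deficit of 300 buffers
 * and a hypothetical ATH10K_HTT_MAX_NUM_REFILL of 100 (the real value
 * lives in htt.h): the first pass fills 100 buffers and reschedules the
 * tasklet, letting previously queued RX tasklets run in between; two
 * further passes drain the remaining deficit. Only an -ENOMEM result
 * falls back to the 50 ms retry timer.
 */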

static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)arg;
	ath10k_htt_rx_msdu_buff_replenish(htt);
}

static unsigned ath10k_htt_rx_ring_elems(struct ath10k_htt *htt)
{
	return (__le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr) -
		htt->rx_ring.sw_rd_idx.msdu_payld) & htt->rx_ring.size_mask;
}
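
/*
 * Worked example (editorial): with a 1024-entry ring, a write (alloc)
 * index of 5 and a read index of 1020, (5 - 1020) & 0x3ff == 9, so the
 * masked difference counts the filled elements correctly across the
 * wrap-around.
 */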

void ath10k_htt_rx_detach(struct ath10k_htt *htt)
{
	int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;

	del_timer_sync(&htt->rx_ring.refill_retry_timer);
	tasklet_kill(&htt->rx_replenish_task);

	while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
		struct sk_buff *skb =
				htt->rx_ring.netbufs_ring[sw_rd_idx];
		struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);

		dma_unmap_single(htt->ar->dev, cb->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(htt->rx_ring.netbufs_ring[sw_rd_idx]);
		sw_rd_idx++;
		sw_rd_idx &= htt->rx_ring.size_mask;
	}

	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);

	kfree(htt->rx_ring.netbufs_ring);
}

static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	int idx;
	struct sk_buff *msdu;

	spin_lock_bh(&htt->rx_ring.lock);

	if (ath10k_htt_rx_ring_elems(htt) == 0)
		ath10k_warn("htt rx ring is empty!\n");

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	spin_unlock_bh(&htt->rx_ring.lock);
	return msdu;
}

static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb)
{
	struct sk_buff *next;

	while (skb) {
		next = skb->next;
		dev_kfree_skb_any(skb);
		skb = next;
	}
}

static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   u8 **fw_desc, int *fw_desc_len,
				   struct sk_buff **head_msdu,
				   struct sk_buff **tail_msdu)
{
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;

	if (ath10k_htt_rx_ring_elems(htt) == 0)
		ath10k_warn("htt rx ring is empty!\n");

	if (htt->rx_confused) {
		ath10k_warn("htt is confused. refusing rx\n");
		return 0;
	}

	msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt);
	while (msdu) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		dma_unmap_single(htt->ar->dev,
				 ATH10K_SKB_CB(msdu)->paddr,
				 msdu->len + skb_tailroom(msdu),
				 DMA_FROM_DEVICE);

		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ",
				msdu->data, msdu->len + skb_tailroom(msdu));

		rx_desc = (struct htt_rx_desc *)msdu->data;

		/* FIXME: we must report msdu payload since this is what caller
		 *        expects now */
		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc->attention.flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			ath10k_htt_rx_free_msdu_chain(*head_msdu);
			*head_msdu = NULL;
			msdu = NULL;
			ath10k_err("htt rx stopped. cannot recover\n");
			htt->rx_confused = true;
			break;
		}

		/*
		 * Copy the FW rx descriptor for this MSDU from the rx
		 * indication message into the MSDU's netbuf. HL uses the
		 * same rx indication message definition as LL, and simply
		 * appends new info (fields from the HW rx desc, and the
		 * MSDU payload itself). So, the offset into the rx
		 * indication message only has to account for the standard
		 * offset of the per-MSDU FW rx desc info within the
		 * message, and how many bytes of the per-MSDU FW rx desc
		 * info have already been consumed. (And the endianness of
		 * the host, since for a big-endian host, the rx ind
		 * message contents, including the per-MSDU rx desc bytes,
		 * were byteswapped during upload.)
		 */
		if (*fw_desc_len > 0) {
			rx_desc->fw_desc.info0 = **fw_desc;
			/*
			 * The target is expected to only provide the basic
			 * per-MSDU rx descriptors. Just to be sure, verify
			 * that the target has not attached extension data
			 * (e.g. LRO flow ID).
			 */

			/* or more, if there's extension data */
			(*fw_desc)++;
			(*fw_desc_len)--;
		} else {
			/*
			 * When an oversized A-MSDU happens, the FW will lose
			 * some of the MSDU status - in this case, the FW
			 * descriptors provided will be fewer than the
			 * actual MSDUs inside this MPDU. Mark the FW
			 * descriptors so that it will still deliver to
			 * upper stack, if no CRC error for this MPDU.
			 *
			 * FIX THIS - the FW descriptors are actually for
			 * MSDUs at the end of this A-MSDU instead of the
			 * beginning.
			 */
			rx_desc->fw_desc.info0 = 0;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc->frag_info.ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_trim(msdu, 0);
		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
		msdu_len -= msdu->len;

		/* FIXME: Do chained buffers include htt_rx_desc or not? */
		while (msdu_chained--) {
			struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);

			dma_unmap_single(htt->ar->dev,
					 ATH10K_SKB_CB(next)->paddr,
					 next->len + skb_tailroom(next),
					 DMA_FROM_DEVICE);

			ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ",
					next->data,
					next->len + skb_tailroom(next));

			skb_trim(next, 0);
			skb_put(next, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= next->len;

			msdu->next = next;
			msdu = next;
			msdu_chaining = 1;
		}

		if (msdu_len > 0) {
			/* This may suggest FW bug? */
			ath10k_warn("htt rx msdu len not consumed (%d)\n",
				    msdu_len);
		}

		last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		if (last_msdu) {
			msdu->next = NULL;
			break;
		} else {
			struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);
			msdu->next = next;
			msdu = next;
		}
	}
	*tail_msdu = msdu;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */

	return msdu_chaining;
}
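
/*
 * Editorial note: as reconstructed here the return value follows the
 * apparent convention of the callers below - 0 when the popped frame fit
 * in a single rx buffer, non-zero (msdu_chaining) when it spans several
 * chained buffers, with the resulting skb list returned via
 * head_msdu/tail_msdu.
 */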

static void ath10k_htt_rx_replenish_task(unsigned long ptr)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
	ath10k_htt_rx_msdu_buff_replenish(htt);
}

int ath10k_htt_rx_attach(struct ath10k_htt *htt)
{
	dma_addr_t paddr;
	void *vaddr;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	htt->rx_ring.size = ath10k_htt_rx_ring_size(htt);
	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn("htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.size_mask = htt->rx_ring.size - 1;

	/*
	 * Set the initial value for the level to which the rx ring
	 * should be filled, based on the max throughput and the
	 * worst likely latency for the host to fill the rx ring
	 * with new buffers. In theory, this fill level can be
	 * dynamically adjusted from the initial value set here, to
	 * reflect the actual host latency rather than a
	 * conservative assumption about the host latency.
	 */
	htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt);

	htt->rx_ring.netbufs_ring =
		kmalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   (htt->rx_ring.size *
				    sizeof(htt->rx_ring.paddrs_ring)),
				   &paddr, GFP_DMA);
	if (!vaddr)
		goto err_dma_ring;

	htt->rx_ring.paddrs_ring = vaddr;
	htt->rx_ring.base_paddr = paddr;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_DMA);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);

	spin_lock_init(&htt->rx_ring.lock);

	htt->rx_ring.fill_cnt = 0;
	if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
		goto err_fill_ring;

	tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
		     (unsigned long)htt);

	ath10k_dbg(ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_fill_ring:
	ath10k_htt_rx_ring_free(htt);
	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);
err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);
err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
	return -ENOMEM;
}

static int ath10k_htt_rx_crypto_param_len(enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return 4;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_WEP128: /* not tested */
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
	case HTT_RX_MPDU_ENCRYPT_WAPI: /* not tested */
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return 8;
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	}

	ath10k_warn("unknown encryption type %d\n", type);
	return 0;
}

static int ath10k_htt_rx_crypto_tail_len(enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return 4;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return 8;
	}

	ath10k_warn("unknown encryption type %d\n", type);
	return 0;
}
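
/*
 * Editorial note on the lengths above (standard 802.11 crypto framing):
 * CCMP (AES_CCM_WPA2) prepends an 8-byte PN/key-id header and appends an
 * 8-byte MIC, while TKIP prepends an 8-byte IV/extended-IV and appends a
 * 4-byte ICV - matching the param/tail values returned by these two
 * helpers.
 */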

/* Applies for first msdu in chain, before altering it. */
static struct ieee80211_hdr *ath10k_htt_rx_skb_get_hdr(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format fmt;

	rxd = (void *)skb->data - sizeof(*rxd);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
		 RX_MSDU_START_INFO1_DECAP_FORMAT);

	if (fmt == RX_MSDU_DECAP_RAW)
		return (void *)skb->data;

	return (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
}

/* This function only applies for first msdu in an msdu chain */
static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr)
{
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		if (qc[0] & 0x80)
			return true;
	}
	return false;
}

struct amsdu_subframe_hdr {
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	__be16 len;
} __packed;
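
/*
 * Editorial sketch (assumes the standard 802.11 A-MSDU subframe layout):
 * each subframe carries DA (6 bytes), SA (6 bytes) and a big-endian
 * length field (2 bytes) ahead of the LLC/SNAP header and payload, which
 * is what amsdu_subframe_hdr above models.
 */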

static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
				struct htt_rx_info *info)
{
	struct htt_rx_desc *rxd;
	struct sk_buff *first;
	struct sk_buff *skb = info->skb;
	enum rx_msdu_decap_format fmt;
	enum htt_rx_mpdu_encrypt_type enctype;
	struct ieee80211_hdr *hdr;
	u8 hdr_buf[64], addr[ETH_ALEN], *qos;
	unsigned int hdr_len;

	rxd = (void *)skb->data - sizeof(*rxd);
	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(hdr_buf, hdr, hdr_len);
	hdr = (struct ieee80211_hdr *)hdr_buf;

	first = skb;
	while (skb) {
		void *decap_hdr;
		int len;

		rxd = (void *)skb->data - sizeof(*rxd);
		fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
			 RX_MSDU_START_INFO1_DECAP_FORMAT);
		decap_hdr = (void *)rxd->rx_hdr_status;

		skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);

		/* First frame in an A-MSDU chain has more decapped data. */
		if (skb == first) {
			len = round_up(ieee80211_hdrlen(hdr->frame_control), 4);
			len += round_up(ath10k_htt_rx_crypto_param_len(enctype),
					4);
			decap_hdr += len;
		}

		switch (fmt) {
		case RX_MSDU_DECAP_RAW:
			/* remove trailing FCS */
			skb_trim(skb, skb->len - FCS_LEN);
			break;
		case RX_MSDU_DECAP_NATIVE_WIFI:
			/* pull decapped header and copy DA */
			hdr = (struct ieee80211_hdr *)skb->data;
			hdr_len = ieee80211_hdrlen(hdr->frame_control);
			memcpy(addr, ieee80211_get_DA(hdr), ETH_ALEN);
			skb_pull(skb, hdr_len);

			/* push original 802.11 header */
			hdr = (struct ieee80211_hdr *)hdr_buf;
			hdr_len = ieee80211_hdrlen(hdr->frame_control);
			memcpy(skb_push(skb, hdr_len), hdr, hdr_len);

			/* original A-MSDU header has the bit set but we're
			 * not including A-MSDU subframe header */
			hdr = (struct ieee80211_hdr *)skb->data;
			qos = ieee80211_get_qos_ctl(hdr);
			qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;

			/* original 802.11 header has a different DA */
			memcpy(ieee80211_get_DA(hdr), addr, ETH_ALEN);
			break;
		case RX_MSDU_DECAP_ETHERNET2_DIX:
			/* strip ethernet header and insert decapped 802.11
			 * header, amsdu subframe header and rfc1042 header */

			len = 0;
			len += sizeof(struct rfc1042_hdr);
			len += sizeof(struct amsdu_subframe_hdr);

			skb_pull(skb, sizeof(struct ethhdr));
			memcpy(skb_push(skb, len), decap_hdr, len);
			memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
			break;
		case RX_MSDU_DECAP_8023_SNAP_LLC:
			/* insert decapped 802.11 header making a single
			 * A-MSDU */
			memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
			break;
		}

		info->skb = skb;
		info->encrypt_type = enctype;
		skb = skb->next;
		info->skb->next = NULL;

		if (skb)
			info->amsdu_more = true;

		ath10k_process_rx(htt->ar, info);
	}

	/* FIXME: It might be nice to re-assemble the A-MSDU when there's a
	 * monitor interface active for sniffing purposes. */
}

static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
{
	struct sk_buff *skb = info->skb;
	struct htt_rx_desc *rxd;
	struct ieee80211_hdr *hdr;
	enum rx_msdu_decap_format fmt;
	enum htt_rx_mpdu_encrypt_type enctype;
	int hdr_len;
	void *rfc1042;

	/* This shouldn't happen. If it does then it may be a FW bug. */
	if (skb->next) {
		ath10k_warn("received chained non A-MSDU frame\n");
		ath10k_htt_rx_free_msdu_chain(skb->next);
		skb->next = NULL;
	}

	rxd = (void *)skb->data - sizeof(*rxd);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
		 RX_MSDU_START_INFO1_DECAP_FORMAT);
	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);
	hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);

	switch (fmt) {
	case RX_MSDU_DECAP_RAW:
		/* remove trailing FCS */
		skb_trim(skb, skb->len - FCS_LEN);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		/* Pull decapped header */
		hdr = (struct ieee80211_hdr *)skb->data;
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		skb_pull(skb, hdr_len);

		/* Push original header */
		hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		/* strip ethernet header and insert decapped 802.11 header and
		 * rfc1042 header */

		rfc1042 = hdr;
		rfc1042 += roundup(hdr_len, 4);
		rfc1042 += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);

		skb_pull(skb, sizeof(struct ethhdr));
		memcpy(skb_push(skb, sizeof(struct rfc1042_hdr)),
		       rfc1042, sizeof(struct rfc1042_hdr));
		memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		/* remove A-MSDU subframe header and insert
		 * decapped 802.11 header. rfc1042 header is already there */

		skb_pull(skb, sizeof(struct amsdu_subframe_hdr));
		memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
		break;
	}

	info->skb = skb;
	info->encrypt_type = enctype;

	ath10k_process_rx(htt->ar, info);
}

static bool ath10k_htt_rx_has_decrypt_err(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);

	if (flags & RX_ATTENTION_FLAGS_DECRYPT_ERR)
		return true;

	return false;
}

static bool ath10k_htt_rx_has_fcs_err(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);

	if (flags & RX_ATTENTION_FLAGS_FCS_ERR)
		return true;

	return false;
}

static bool ath10k_htt_rx_has_mic_err(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);

	if (flags & RX_ATTENTION_FLAGS_TKIP_MIC_ERR)
		return true;

	return false;
}

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags, info;
	bool is_ip4, is_ip6;
	bool is_tcp, is_udp;
	bool ip_csum_ok, tcpudp_csum_ok;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);
	info = __le32_to_cpu(rxd->msdu_start.info1);

	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

	if (!is_ip4 && !is_ip6)
		return CHECKSUM_NONE;
	if (!is_tcp && !is_udp)
		return CHECKSUM_NONE;
	if (!ip_csum_ok)
		return CHECKSUM_NONE;
	if (!tcpudp_csum_ok)
		return CHECKSUM_NONE;

	return CHECKSUM_UNNECESSARY;
}
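
/*
 * Editorial decision-table for the helper above: the HW verdict is only
 * trusted for TCP/UDP over IPv4/IPv6. For example an ARP frame (neither
 * IPv4 nor IPv6) or an ICMP packet (neither TCP nor UDP) yields
 * CHECKSUM_NONE so the network stack verifies checksums itself;
 * CHECKSUM_UNNECESSARY is returned only when both the IP and TCP/UDP HW
 * checks passed.
 */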

static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
				  struct htt_rx_indication *rx)
{
	struct htt_rx_info info;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	struct ieee80211_hdr *hdr;
	int num_mpdu_ranges;
	int fw_desc_len;
	u8 *fw_desc;
	int i, j;

	memset(&info, 0, sizeof(info));

	fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
	fw_desc = (u8 *)&rx->fw_desc;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
			rx, sizeof(*rx) +
			(sizeof(struct htt_rx_indication_mpdu_range) *
				num_mpdu_ranges));

	for (i = 0; i < num_mpdu_ranges; i++) {
		info.status = mpdu_ranges[i].mpdu_range_status;

		for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {
			struct sk_buff *msdu_head, *msdu_tail;
			enum htt_rx_mpdu_status status;
			int msdu_chaining;

			msdu_head = NULL;
			msdu_tail = NULL;
			msdu_chaining = ath10k_htt_rx_amsdu_pop(htt,
								&fw_desc,
								&fw_desc_len,
								&msdu_head,
								&msdu_tail);

			if (!msdu_head) {
				ath10k_warn("htt rx no data!\n");
				continue;
			}

			if (msdu_head->len == 0) {
				ath10k_dbg(ATH10K_DBG_HTT,
					   "htt rx dropping due to zero-len\n");
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			if (ath10k_htt_rx_has_decrypt_err(msdu_head)) {
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			status = info.status;

			/* Skip mgmt frames while we handle this in WMI */
			if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL) {
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			if (status != HTT_RX_IND_MPDU_STATUS_OK &&
			    status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
			    !htt->ar->monitor_enabled) {
				ath10k_dbg(ATH10K_DBG_HTT,
					   "htt rx ignoring frame w/ status %d\n",
					   status);
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			if (test_bit(ATH10K_CAC_RUNNING,
				     &htt->ar->dev_flags)) {
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			/* FIXME: we do not support chaining yet.
			 * this needs investigation */
			if (msdu_chaining) {
				ath10k_warn("msdu_chaining is true\n");
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			info.skb     = msdu_head;
			info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
			info.mic_err = ath10k_htt_rx_has_mic_err(msdu_head);
			info.signal  = ATH10K_DEFAULT_NOISE_FLOOR;
			info.signal += rx->ppdu.combined_rssi;

			info.rate.info0 = rx->ppdu.info0;
			info.rate.info1 = __le32_to_cpu(rx->ppdu.info1);
			info.rate.info2 = __le32_to_cpu(rx->ppdu.info2);

			hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);

			if (ath10k_htt_rx_hdr_is_amsdu(hdr))
				ath10k_htt_rx_amsdu(htt, &info);
			else
				ath10k_htt_rx_msdu(htt, &info);
		}
	}

	tasklet_schedule(&htt->rx_replenish_task);
}

static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
				       struct htt_rx_fragment_indication *frag)
{
	struct sk_buff *msdu_head, *msdu_tail;
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format fmt;
	struct htt_rx_info info = {};
	struct ieee80211_hdr *hdr;
	int msdu_chaining;
	bool tkip_mic_err;
	bool decrypt_err;
	u8 *fw_desc;
	int fw_desc_len, hdrlen, paramlen;
	int trim;

	fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
	fw_desc = (u8 *)frag->fw_msdu_rx_desc;

	msdu_head = NULL;
	msdu_tail = NULL;
	msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
						&msdu_head, &msdu_tail);

	ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");

	if (!msdu_head) {
		ath10k_warn("htt rx frag no data\n");
		return;
	}

	if (msdu_chaining || msdu_head != msdu_tail) {
		ath10k_warn("aggregation with fragmentation?!\n");
		ath10k_htt_rx_free_msdu_chain(msdu_head);
		return;
	}

	/* FIXME: implement signal strength */

	hdr = (struct ieee80211_hdr *)msdu_head->data;
	rxd = (void *)msdu_head->data - sizeof(*rxd);
	tkip_mic_err = !!(__le32_to_cpu(rxd->attention.flags) &
			  RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	decrypt_err = !!(__le32_to_cpu(rxd->attention.flags) &
			 RX_ATTENTION_FLAGS_DECRYPT_ERR);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
		 RX_MSDU_START_INFO1_DECAP_FORMAT);

	if (fmt != RX_MSDU_DECAP_RAW) {
		ath10k_warn("we don't support non-raw fragmented rx yet\n");
		dev_kfree_skb_any(msdu_head);
		goto end;
	}

	info.skb = msdu_head;
	info.status = HTT_RX_IND_MPDU_STATUS_OK;
	info.encrypt_type = MS(__le32_to_cpu(rxd->mpdu_start.info0),
			       RX_MPDU_START_INFO0_ENCRYPT_TYPE);
	info.skb->ip_summed = ath10k_htt_rx_get_csum_state(info.skb);

	if (tkip_mic_err) {
		ath10k_warn("tkip mic error\n");
		info.status = HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR;
	}

	if (decrypt_err) {
		ath10k_warn("decryption err in fragmented rx\n");
		dev_kfree_skb_any(info.skb);
		goto end;
	}

	if (info.encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) {
		hdrlen = ieee80211_hdrlen(hdr->frame_control);
		paramlen = ath10k_htt_rx_crypto_param_len(info.encrypt_type);

		/* It is more efficient to move the header than the payload */
		memmove((void *)info.skb->data + paramlen,
			(void *)info.skb->data,
			hdrlen);
		skb_pull(info.skb, paramlen);
		hdr = (struct ieee80211_hdr *)info.skb->data;
	}

	/* remove trailing FCS */
	trim  = 4;

	/* remove crypto trailer */
	trim += ath10k_htt_rx_crypto_tail_len(info.encrypt_type);

	/* last fragment of TKIP frags has MIC */
	if (!ieee80211_has_morefrags(hdr->frame_control) &&
	    info.encrypt_type == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		trim += 8;

	if (trim > info.skb->len) {
		ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n");
		dev_kfree_skb_any(info.skb);
		goto end;
	}

	skb_trim(info.skb, info.skb->len - trim);

	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt frag mpdu: ",
			info.skb->data, info.skb->len);
	ath10k_process_rx(htt->ar, &info);

end:
	if (fw_desc_len > 0) {
		ath10k_dbg(ATH10K_DBG_HTT,
			   "expecting more fragmented rx in one indication %d\n",
			   fw_desc_len);
	}
}

void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;

	/* confirm alignment */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn("unaligned htt message, expect trouble\n");

	ath10k_dbg(ATH10K_DBG_HTT, "HTT RX, msg_type: 0x%0X\n",
		   resp->hdr.msg_type);
	switch (resp->hdr.msg_type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
		htt->target_version_major = resp->ver_resp.major;
		htt->target_version_minor = resp->ver_resp.minor;
		complete(&htt->target_version_received);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IND: {
		ath10k_htt_rx_handler(htt, &resp->rx_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_MAP: {
		struct htt_peer_map_event ev = {
			.vdev_id = resp->peer_map.vdev_id,
			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
		};
		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
		ath10k_peer_map_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
		struct htt_peer_unmap_event ev = {
			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
		};
		ath10k_peer_unmap_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
		struct htt_tx_done tx_done = {};
		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);

		tx_done.msdu_id =
			__le32_to_cpu(resp->mgmt_tx_completion.desc_id);

		switch (status) {
		case HTT_MGMT_TX_STATUS_OK:
			break;
		case HTT_MGMT_TX_STATUS_RETRY:
			tx_done.no_ack = true;
			break;
		case HTT_MGMT_TX_STATUS_DROP:
			tx_done.discard = true;
			break;
		}

		ath10k_txrx_tx_unref(htt, &tx_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND: {
		struct htt_tx_done tx_done = {};
		int status = MS(resp->data_tx_completion.flags,
				HTT_DATA_TX_STATUS);
		__le16 msdu_id;
		int i;

		switch (status) {
		case HTT_DATA_TX_STATUS_NO_ACK:
			tx_done.no_ack = true;
			break;
		case HTT_DATA_TX_STATUS_OK:
			break;
		case HTT_DATA_TX_STATUS_DISCARD:
		case HTT_DATA_TX_STATUS_POSTPONE:
		case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
			tx_done.discard = true;
			break;
		default:
			ath10k_warn("unhandled tx completion status %d\n",
				    status);
			tx_done.discard = true;
			break;
		}

		ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
			   resp->data_tx_completion.num_msdus);

		for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
			msdu_id = resp->data_tx_completion.msdus[i];
			tx_done.msdu_id = __le16_to_cpu(msdu_id);
			ath10k_txrx_tx_unref(htt, &tx_done);
		}
		break;
	}
	case HTT_T2H_MSG_TYPE_SEC_IND: {
		struct ath10k *ar = htt->ar;
		struct htt_security_indication *ev = &resp->security_indication;

		ath10k_dbg(ATH10K_DBG_HTT,
			   "sec ind peer_id %d unicast %d type %d\n",
			   __le16_to_cpu(ev->peer_id),
			   !!(ev->flags & HTT_SECURITY_IS_UNICAST),
			   MS(ev->flags, HTT_SECURITY_TYPE));
		complete(&ar->install_key_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_TEST:
		/* FIX THIS */
		break;
	case HTT_T2H_MSG_TYPE_STATS_CONF:
		trace_ath10k_htt_stats(skb->data, skb->len);
		break;
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
	case HTT_T2H_MSG_TYPE_RX_DELBA:
	case HTT_T2H_MSG_TYPE_RX_FLUSH:
	default:
		ath10k_dbg(ATH10K_DBG_HTT, "htt event (%d) not handled\n",
			   resp->hdr.msg_type);
		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		break;
	}

	/* Free the indication buffer */
	dev_kfree_skb_any(skb);
}