/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/log2.h>

/* slightly larger than one large A-MPDU */
#define HTT_RX_RING_SIZE_MIN 128

/* roughly 20 ms @ 1 Gbps of 1500B MSDUs */
#define HTT_RX_RING_SIZE_MAX 2048

#define HTT_RX_AVG_FRM_BYTES 1000

/* ms, very conservative */
#define HTT_RX_HOST_LATENCY_MAX_MS 20

/* ms, conservative */
#define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50
static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
{
	int size;

	/*
	 * It is expected that the host CPU will typically be able to
	 * service the rx indication from one A-MPDU before the rx
	 * indication from the subsequent A-MPDU happens, roughly 1-2 ms
	 * later. However, the rx ring should be sized very conservatively,
	 * to accommodate the worst reasonable delay before the host CPU
	 * services a rx indication interrupt.
	 *
	 * The rx ring need not be kept full of empty buffers. In theory,
	 * the htt host SW can dynamically track the low-water mark in the
	 * rx ring, and dynamically adjust the level to which the rx ring
	 * is filled with empty buffers, to dynamically meet the desired
	 * low-water mark.
	 *
	 * In contrast, it's difficult to resize the rx ring itself, once
	 * it's in use. Thus, the ring itself should be sized very
	 * conservatively, while the degree to which the ring is filled
	 * with empty buffers should be sized moderately conservatively.
	 */

	/* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
	size =
	    htt->max_throughput_mbps +
	    1000 /
	    (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;
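
	/*
	 * Illustrative arithmetic only, working the units from the comment
	 * above with an assumed throughput (1000 Mbps is an example, not a
	 * value read from the driver): 1000 Mbps * 1000 / (8 * 1000 B) is
	 * 125 frames per ms, so a 20 ms worst-case host latency corresponds
	 * to roughly 2500 buffers; the clamps and roundup_pow_of_two()
	 * below bound that to HTT_RX_RING_SIZE_MAX (2048).
	 */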
	if (size < HTT_RX_RING_SIZE_MIN)
		size = HTT_RX_RING_SIZE_MIN;

	if (size > HTT_RX_RING_SIZE_MAX)
		size = HTT_RX_RING_SIZE_MAX;

	size = roundup_pow_of_two(size);

	return size;
}
static int ath10k_htt_rx_ring_fill_level(struct ath10k_htt *htt)
{
	int size;

	/* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
	size =
	    htt->max_throughput_mbps *
	    1000 /
	    (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;
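
	/*
	 * Continuing the illustrative example above (assumed 1000 Mbps of
	 * 1000-byte frames): 125 frames per ms over the 10 ms "worst
	 * likely" latency gives a fill level of roughly 1250 buffers,
	 * which is then capped below to one less than the ring size.
	 */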
	/*
	 * Make sure the fill level is at least 1 less than the ring size.
	 * Leaving 1 element empty allows the SW to easily distinguish
	 * between a full ring vs. an empty ring.
	 */
	if (size >= htt->rx_ring.size)
		size = htt->rx_ring.size - 1;

	return size;
}
static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct ath10k_skb_cb *cb;
	int i;

	for (i = 0; i < htt->rx_ring.fill_cnt; i++) {
		skb = htt->rx_ring.netbufs_ring[i];
		cb = ATH10K_SKB_CB(skb);
		dma_unmap_single(htt->ar->dev, cb->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}

	htt->rx_ring.fill_cnt = 0;
}
static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct htt_rx_desc *rx_desc;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	idx = __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr));
	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = (struct htt_rx_desc *)skb->data;
		rx_desc->attention.flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		ATH10K_SKB_CB(skb)->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
		htt->rx_ring.fill_cnt++;

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	*(htt->rx_ring.alloc_idx.vaddr) = __cpu_to_le32(idx);
	return ret;
}
static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}
static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_to_fill;

	spin_lock_bh(&htt->rx_ring.lock);
	num_to_fill = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}
static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)arg;
	ath10k_htt_rx_msdu_buff_replenish(htt);
}
static unsigned ath10k_htt_rx_ring_elems(struct ath10k_htt *htt)
{
	return (__le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr) -
		htt->rx_ring.sw_rd_idx.msdu_payld) & htt->rx_ring.size_mask;
}
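
/*
 * Example of the index arithmetic in ath10k_htt_rx_ring_elems() above,
 * using an assumed ring size of 2048: with an alloc index of 5 and a SW
 * read index of 2040, (5 - 2040) & 2047 wraps to 13, i.e. 13 filled
 * elements. This is why the ring size must be a power of two.
 */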
void ath10k_htt_rx_detach(struct ath10k_htt *htt)
{
	int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;

	del_timer_sync(&htt->rx_ring.refill_retry_timer);

	while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
		struct sk_buff *skb =
				htt->rx_ring.netbufs_ring[sw_rd_idx];
		struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);

		dma_unmap_single(htt->ar->dev, cb->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(htt->rx_ring.netbufs_ring[sw_rd_idx]);
		sw_rd_idx++;
		sw_rd_idx &= htt->rx_ring.size_mask;
	}

	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);

	kfree(htt->rx_ring.netbufs_ring);
}
static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	int idx;
	struct sk_buff *msdu;

	spin_lock_bh(&htt->rx_ring.lock);

	if (ath10k_htt_rx_ring_elems(htt) == 0)
		ath10k_warn("htt rx ring is empty!\n");

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	spin_unlock_bh(&htt->rx_ring.lock);
	return msdu;
}
static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb)
{
	struct sk_buff *next;

	while (skb) {
		next = skb->next;
		dev_kfree_skb_any(skb);
		skb = next;
	}
}
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   u8 **fw_desc, int *fw_desc_len,
				   struct sk_buff **head_msdu,
				   struct sk_buff **tail_msdu)
{
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;

	if (ath10k_htt_rx_ring_elems(htt) == 0)
		ath10k_warn("htt rx ring is empty!\n");

	if (htt->rx_confused) {
		ath10k_warn("htt is confused. refusing rx\n");
		return 0;
	}

	msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt);
	while (msdu) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		dma_unmap_single(htt->ar->dev,
				 ATH10K_SKB_CB(msdu)->paddr,
				 msdu->len + skb_tailroom(msdu),
				 DMA_FROM_DEVICE);

		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ",
				msdu->data, msdu->len + skb_tailroom(msdu));

		rx_desc = (struct htt_rx_desc *)msdu->data;

		/* FIXME: we must report msdu payload since this is what caller
		 *	  expects now */
		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc->attention.flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			ath10k_htt_rx_free_msdu_chain(*head_msdu);
			*head_msdu = NULL;
			msdu = NULL;
			ath10k_err("htt rx stopped. cannot recover\n");
			htt->rx_confused = true;
			break;
		}

		/*
		 * Copy the FW rx descriptor for this MSDU from the rx
		 * indication message into the MSDU's netbuf. HL uses the
		 * same rx indication message definition as LL, and simply
		 * appends new info (fields from the HW rx desc, and the
		 * MSDU payload itself). So, the offset into the rx
		 * indication message only has to account for the standard
		 * offset of the per-MSDU FW rx desc info within the
		 * message, and how many bytes of the per-MSDU FW rx desc
		 * info have already been consumed. (And the endianness of
		 * the host, since for a big-endian host, the rx ind
		 * message contents, including the per-MSDU rx desc bytes,
		 * were byteswapped during upload.)
		 */
		if (*fw_desc_len > 0) {
			rx_desc->fw_desc.info0 = **fw_desc;
			/*
			 * The target is expected to only provide the basic
			 * per-MSDU rx descriptors. Just to be sure, verify
			 * that the target has not attached extension data
			 * (e.g. LRO flow ID).
			 */

			/* or more, if there's extension data */
			(*fw_desc)++;
			(*fw_desc_len)--;
		} else {
			/*
			 * When an oversized A-MSDU happens, the FW will lose
			 * some of the MSDU status - in this case, the FW
			 * descriptors provided will be less than the
			 * actual MSDUs inside this MPDU. Mark the FW
			 * descriptors so that it will still deliver to
			 * upper stack, if no CRC error for this MPDU.
			 *
			 * FIX THIS - the FW descriptors are actually for
			 * MSDUs in the end of this A-MSDU instead of the
			 * beginning.
			 */
			rx_desc->fw_desc.info0 = 0;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc->frag_info.ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_trim(msdu, 0);
		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
		msdu_len -= msdu->len;

		/* FIXME: Do chained buffers include htt_rx_desc or not? */
		while (msdu_chained--) {
			struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);

			dma_unmap_single(htt->ar->dev,
					 ATH10K_SKB_CB(next)->paddr,
					 next->len + skb_tailroom(next),
					 DMA_FROM_DEVICE);

			ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ",
					next->data,
					next->len + skb_tailroom(next));

			skb_trim(next, 0);
			skb_put(next, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= next->len;

			msdu->next = next;
			msdu = next;
			msdu_chaining = 1;
		}

		if (msdu_len > 0) {
			/* This may suggest FW bug? */
			ath10k_warn("htt rx msdu len not consumed (%d)\n",
				    msdu_len);
		}

		last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		if (last_msdu) {
			msdu->next = NULL;
			break;
		} else {
			struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);
			msdu->next = next;
			msdu = next;
		}
	}
	*tail_msdu = msdu;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */

	return msdu_chaining;
}
int ath10k_htt_rx_attach(struct ath10k_htt *htt)
{
	dma_addr_t paddr;
	void *vaddr;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	htt->rx_ring.size = ath10k_htt_rx_ring_size(htt);
	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn("htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.size_mask = htt->rx_ring.size - 1;

	/*
	 * Set the initial value for the level to which the rx ring
	 * should be filled, based on the max throughput and the
	 * worst likely latency for the host to fill the rx ring
	 * with new buffers. In theory, this fill level can be
	 * dynamically adjusted from the initial value set here, to
	 * reflect the actual host latency rather than a
	 * conservative assumption about the host latency.
	 */
	htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt);

	htt->rx_ring.netbufs_ring =
		kmalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	vaddr = dma_alloc_coherent(htt->ar->dev,
		   (htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring)),
		   &paddr, GFP_DMA);
	if (!vaddr)
		goto err_dma_ring;

	htt->rx_ring.paddrs_ring = vaddr;
	htt->rx_ring.base_paddr = paddr;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_DMA);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);

	spin_lock_init(&htt->rx_ring.lock);

	htt->rx_ring.fill_cnt = 0;
	if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
		goto err_fill_ring;

	ath10k_dbg(ATH10K_DBG_HTT, "HTT RX ring size: %d, fill_level: %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_fill_ring:
	ath10k_htt_rx_ring_free(htt);
	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);
err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);
err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
	return -ENOMEM;
}
static int ath10k_htt_rx_crypto_param_len(enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return 4;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_WEP128: /* not tested */
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
	case HTT_RX_MPDU_ENCRYPT_WAPI: /* not tested */
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return 8;
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	}

	ath10k_warn("unknown encryption type %d\n", type);
	return 0;
}
static int ath10k_htt_rx_crypto_tail_len(enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return 4;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return 8;
	}

	ath10k_warn("unknown encryption type %d\n", type);
	return 0;
}
/* Applies for first msdu in chain, before altering it. */
static struct ieee80211_hdr *ath10k_htt_rx_skb_get_hdr(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format fmt;

	rxd = (void *)skb->data - sizeof(*rxd);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
			RX_MSDU_START_INFO1_DECAP_FORMAT);

	if (fmt == RX_MSDU_DECAP_RAW)
		return (void *)skb->data;
	else
		return (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
}
/* This function only applies for first msdu in an msdu chain */
static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr)
{
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		if (qc[0] & 0x80)
			return true;
	}
	return false;
}
static int ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
			       struct htt_rx_info *info)
{
	struct htt_rx_desc *rxd;
	struct sk_buff *amsdu;
	struct sk_buff *first;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb = info->skb;
	enum rx_msdu_decap_format fmt;
	enum htt_rx_mpdu_encrypt_type enctype;
	unsigned int hdr_len;
	int crypto_len;

	rxd = (void *)skb->data - sizeof(*rxd);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
			RX_MSDU_START_INFO1_DECAP_FORMAT);
	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
			RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	/* FIXME: No idea what assumptions are safe here. Need logs */
	if ((fmt == RX_MSDU_DECAP_RAW && skb->next) ||
	    (fmt == RX_MSDU_DECAP_8023_SNAP_LLC)) {
		ath10k_htt_rx_free_msdu_chain(skb->next);
		return -ENOTSUPP;
	}

	/* A-MSDU max is a little less than 8K */
	amsdu = dev_alloc_skb(8*1024);
	if (!amsdu) {
		ath10k_warn("A-MSDU allocation failed\n");
		ath10k_htt_rx_free_msdu_chain(skb->next);
		return -ENOMEM;
	}

	if (fmt >= RX_MSDU_DECAP_NATIVE_WIFI) {
		int hdrlen;

		hdr = (void *)rxd->rx_hdr_status;
		hdrlen = ieee80211_hdrlen(hdr->frame_control);
		memcpy(skb_put(amsdu, hdrlen), hdr, hdrlen);
	}

	first = skb;
	while (skb) {
		void *decap_hdr;
		int decap_len = 0;

		rxd = (void *)skb->data - sizeof(*rxd);
		fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
				RX_MSDU_START_INFO1_DECAP_FORMAT);
		decap_hdr = (void *)rxd->rx_hdr_status;

		if (skb == first) {
			/* We receive linked A-MSDU subframe skbuffs. The
			 * first one contains the original 802.11 header (and
			 * possible crypto param) in the RX descriptor. The
			 * A-MSDU subframe header follows that. Each part is
			 * aligned to 4 byte boundary. */
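
			/* Illustrative layout of what is walked over here for
			 * the first subframe (field sizes are examples, not
			 * values taken from the descriptor):
			 *
			 *   rx_hdr_status:
			 *   [802.11 hdr][pad to 4][crypto param][pad to 4]
			 *   [A-MSDU subframe hdr: DA(6) SA(6) len(2)][LLC/SNAP]
			 *
			 * decap_hdr is advanced past the 802.11 header and
			 * the crypto param below so that it points at the
			 * A-MSDU subframe header. */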
			hdr = (void *)amsdu->data;
			hdr_len = ieee80211_hdrlen(hdr->frame_control);
			crypto_len = ath10k_htt_rx_crypto_param_len(enctype);

			decap_hdr += roundup(hdr_len, 4);
			decap_hdr += roundup(crypto_len, 4);
		}

		if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) {
			/* Ethernet2 decap inserts ethernet header in place of
			 * A-MSDU subframe header. */
			skb_pull(skb, 6 + 6 + 2);

			/* A-MSDU subframe header length */
			decap_len += 6 + 6 + 2;

			/* Ethernet2 decap also strips the LLC/SNAP so we need
			 * to re-insert it. The LLC/SNAP follows the A-MSDU
			 * subframe header. */
			/* FIXME: Not all LLCs are 8 bytes long */
			decap_len += 8;

			memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len);
		}

		if (fmt == RX_MSDU_DECAP_NATIVE_WIFI) {
			/* Native Wifi decap inserts regular 802.11 header
			 * in place of A-MSDU subframe header. */
			hdr = (struct ieee80211_hdr *)skb->data;
			skb_pull(skb, ieee80211_hdrlen(hdr->frame_control));

			/* A-MSDU subframe header length */
			decap_len += 6 + 6 + 2;

			memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len);
		}

		if (fmt == RX_MSDU_DECAP_RAW)
			skb_trim(skb, skb->len - 4); /* remove FCS */

		memcpy(skb_put(amsdu, skb->len), skb->data, skb->len);

		/* A-MSDU subframes are padded to 4 bytes
		 * but relative to the first subframe, not the whole MPDU */
		if (skb->next && ((decap_len + skb->len) & 3)) {
			int padlen = 4 - ((decap_len + skb->len) & 3);
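			/* For example (illustrative numbers only): a 14 byte
			 * decap header plus a 1472 byte payload is 1486
			 * bytes; 1486 & 3 == 2, so padlen is 2. */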
			memset(skb_put(amsdu, padlen), 0, padlen);
		}

		skb = skb->next;
	}

	info->skb = amsdu;
	info->encrypt_type = enctype;

	ath10k_htt_rx_free_msdu_chain(first);

	return 0;
}
static int ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
{
	struct sk_buff *skb = info->skb;
	struct htt_rx_desc *rxd;
	struct ieee80211_hdr *hdr;
	enum rx_msdu_decap_format fmt;
	enum htt_rx_mpdu_encrypt_type enctype;

	/* This shouldn't happen. If it does then it may be a FW bug. */
	if (skb->next) {
		ath10k_warn("received chained non A-MSDU frame\n");
		ath10k_htt_rx_free_msdu_chain(skb->next);
		skb->next = NULL;
	}

	rxd = (void *)skb->data - sizeof(*rxd);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
			RX_MSDU_START_INFO1_DECAP_FORMAT);
	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
			RX_MPDU_START_INFO0_ENCRYPT_TYPE);
	hdr = (void *)skb->data - RX_HTT_HDR_STATUS_LEN;

	switch (fmt) {
	case RX_MSDU_DECAP_RAW:
		/* remove trailing FCS */
		skb_trim(skb, skb->len - 4);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		/* nothing to do here */
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		/* macaddr[6] + macaddr[6] + ethertype[2] */
		skb_pull(skb, 6 + 6 + 2);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		/* macaddr[6] + macaddr[6] + len[2] */
		/* we don't need this for non-A-MSDU */
		skb_pull(skb, 6 + 6 + 2);
		break;
	}

	if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) {
		void *llc;
		int llclen;

		llclen = 8;
		llc  = hdr;
		llc += roundup(ieee80211_hdrlen(hdr->frame_control), 4);
		llc += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);

		skb_push(skb, llclen);
		memcpy(skb->data, llc, llclen);
	}

	if (fmt >= RX_MSDU_DECAP_ETHERNET2_DIX) {
		int len = ieee80211_hdrlen(hdr->frame_control);
		skb_push(skb, len);
		memcpy(skb->data, hdr, len);
	}

	info->skb = skb;
	info->encrypt_type = enctype;

	return 0;
}
static bool ath10k_htt_rx_has_decrypt_err(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);

	if (flags & RX_ATTENTION_FLAGS_DECRYPT_ERR)
		return true;

	return false;
}
static bool ath10k_htt_rx_has_fcs_err(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);

	if (flags & RX_ATTENTION_FLAGS_FCS_ERR)
		return true;

	return false;
}
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags, info;
	bool is_ip4, is_ip6;
	bool is_tcp, is_udp;
	bool ip_csum_ok, tcpudp_csum_ok;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);
	info = __le32_to_cpu(rxd->msdu_start.info1);

	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

	if (!is_ip4 && !is_ip6)
		return CHECKSUM_NONE;
	if (!is_tcp && !is_udp)
		return CHECKSUM_NONE;
	if (!ip_csum_ok)
		return CHECKSUM_NONE;
	if (!tcpudp_csum_ok)
		return CHECKSUM_NONE;

	return CHECKSUM_UNNECESSARY;
}
static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
				  struct htt_rx_indication *rx)
{
	struct htt_rx_info info;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	struct ieee80211_hdr *hdr;
	int num_mpdu_ranges;
	int fw_desc_len;
	u8 *fw_desc;
	int i, j;
	int ret;
	int ip_summed;

	memset(&info, 0, sizeof(info));

	fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
	fw_desc = (u8 *)&rx->fw_desc;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
			rx, sizeof(*rx) +
			(sizeof(struct htt_rx_indication_mpdu_range) *
				num_mpdu_ranges));

	for (i = 0; i < num_mpdu_ranges; i++) {
		info.status = mpdu_ranges[i].mpdu_range_status;

		for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {
			struct sk_buff *msdu_head, *msdu_tail;
			enum htt_rx_mpdu_status status;
			int msdu_chaining;

			msdu_head = NULL;
			msdu_tail = NULL;
			msdu_chaining = ath10k_htt_rx_amsdu_pop(htt,
								&fw_desc,
								&fw_desc_len,
								&msdu_head,
								&msdu_tail);

			if (!msdu_head) {
				ath10k_warn("htt rx no data!\n");
				continue;
			}

			if (msdu_head->len == 0) {
				ath10k_dbg(ATH10K_DBG_HTT,
					   "htt rx dropping due to zero-len\n");
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			if (ath10k_htt_rx_has_decrypt_err(msdu_head)) {
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			status = info.status;

			/* Skip mgmt frames while we handle this in WMI */
			if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL) {
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			if (status != HTT_RX_IND_MPDU_STATUS_OK &&
			    status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
			    !htt->ar->monitor_enabled) {
				ath10k_dbg(ATH10K_DBG_HTT,
					   "htt rx ignoring frame w/ status %d\n",
					   status);
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			/* FIXME: we do not support chaining yet.
			 * this needs investigation */
			if (msdu_chaining) {
				ath10k_warn("msdu_chaining is true\n");
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			/* The skb is not yet processed and it may be
			 * reallocated. Since the offload is in the original
			 * skb extract the checksum now and assign it later */
			ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);

			info.skb = msdu_head;
			info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
			info.signal = ATH10K_DEFAULT_NOISE_FLOOR;
			info.signal += rx->ppdu.combined_rssi;

			info.rate.info0 = rx->ppdu.info0;
			info.rate.info1 = __le32_to_cpu(rx->ppdu.info1);
			info.rate.info2 = __le32_to_cpu(rx->ppdu.info2);

			hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);

			if (ath10k_htt_rx_hdr_is_amsdu(hdr))
				ret = ath10k_htt_rx_amsdu(htt, &info);
			else
				ret = ath10k_htt_rx_msdu(htt, &info);

			if (ret && !info.fcs_err) {
				ath10k_warn("error processing msdus %d\n", ret);
				dev_kfree_skb_any(info.skb);
				continue;
			}

			if (ath10k_htt_rx_hdr_is_amsdu((void *)info.skb->data))
				ath10k_dbg(ATH10K_DBG_HTT, "htt mpdu is amsdu\n");

			info.skb->ip_summed = ip_summed;

			ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt mpdu: ",
					info.skb->data, info.skb->len);
			ath10k_process_rx(htt->ar, &info);
		}
	}

	ath10k_htt_rx_msdu_buff_replenish(htt);
}
static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
				       struct htt_rx_fragment_indication *frag)
{
	struct sk_buff *msdu_head, *msdu_tail;
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format fmt;
	struct htt_rx_info info = {};
	struct ieee80211_hdr *hdr;
	int msdu_chaining;
	bool tkip_mic_err;
	bool decrypt_err;
	u8 *fw_desc;
	int fw_desc_len, hdrlen, paramlen;
	int trim;

	fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
	fw_desc = (u8 *)frag->fw_msdu_rx_desc;

	msdu_head = NULL;
	msdu_tail = NULL;
	msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
						&msdu_head, &msdu_tail);

	ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");

	if (!msdu_head) {
		ath10k_warn("htt rx frag no data\n");
		return;
	}

	if (msdu_chaining || msdu_head != msdu_tail) {
		ath10k_warn("aggregation with fragmentation?!\n");
		ath10k_htt_rx_free_msdu_chain(msdu_head);
		return;
	}

	/* FIXME: implement signal strength */

	hdr = (struct ieee80211_hdr *)msdu_head->data;
	rxd = (void *)msdu_head->data - sizeof(*rxd);
	tkip_mic_err = !!(__le32_to_cpu(rxd->attention.flags) &
				RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	decrypt_err = !!(__le32_to_cpu(rxd->attention.flags) &
				RX_ATTENTION_FLAGS_DECRYPT_ERR);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
			RX_MSDU_START_INFO1_DECAP_FORMAT);

	if (fmt != RX_MSDU_DECAP_RAW) {
		ath10k_warn("we dont support non-raw fragmented rx yet\n");
		dev_kfree_skb_any(msdu_head);
		goto end;
	}

	info.skb = msdu_head;
	info.status = HTT_RX_IND_MPDU_STATUS_OK;
	info.encrypt_type = MS(__le32_to_cpu(rxd->mpdu_start.info0),
				RX_MPDU_START_INFO0_ENCRYPT_TYPE);
	info.skb->ip_summed = ath10k_htt_rx_get_csum_state(info.skb);

	if (tkip_mic_err) {
		ath10k_warn("tkip mic error\n");
		info.status = HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR;
	}

	if (decrypt_err) {
		ath10k_warn("decryption err in fragmented rx\n");
		dev_kfree_skb_any(info.skb);
		goto end;
	}

	if (info.encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) {
		hdrlen = ieee80211_hdrlen(hdr->frame_control);
		paramlen = ath10k_htt_rx_crypto_param_len(info.encrypt_type);

		/* It is more efficient to move the header than the payload */
		memmove((void *)info.skb->data + paramlen,
			(void *)info.skb->data,
			hdrlen);
		skb_pull(info.skb, paramlen);
		hdr = (struct ieee80211_hdr *)info.skb->data;
	}

	/* remove trailing FCS */
	trim  = 4;

	/* remove crypto trailer */
	trim += ath10k_htt_rx_crypto_tail_len(info.encrypt_type);

	/* last fragment of TKIP frags has MIC */
	if (!ieee80211_has_morefrags(hdr->frame_control) &&
	    info.encrypt_type == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		trim += 8;

	if (trim > info.skb->len) {
		ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n");
		dev_kfree_skb_any(info.skb);
		goto end;
	}

	skb_trim(info.skb, info.skb->len - trim);

	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt frag mpdu: ",
			info.skb->data, info.skb->len);
	ath10k_process_rx(htt->ar, &info);

end:
	if (fw_desc_len > 0) {
		ath10k_dbg(ATH10K_DBG_HTT,
			   "expecting more fragmented rx in one indication %d\n",
			   fw_desc_len);
	}
}
void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;

	/* confirm alignment */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn("unaligned htt message, expect trouble\n");

	ath10k_dbg(ATH10K_DBG_HTT, "HTT RX, msg_type: 0x%0X\n",
		   resp->hdr.msg_type);
	switch (resp->hdr.msg_type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
		htt->target_version_major = resp->ver_resp.major;
		htt->target_version_minor = resp->ver_resp.minor;
		complete(&htt->target_version_received);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IND: {
		ath10k_htt_rx_handler(htt, &resp->rx_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_MAP: {
		struct htt_peer_map_event ev = {
			.vdev_id = resp->peer_map.vdev_id,
			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
		};
		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
		ath10k_peer_map_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
		struct htt_peer_unmap_event ev = {
			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
		};
		ath10k_peer_unmap_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
		struct htt_tx_done tx_done = {};
		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);

		tx_done.msdu_id =
			__le32_to_cpu(resp->mgmt_tx_completion.desc_id);

		switch (status) {
		case HTT_MGMT_TX_STATUS_OK:
			break;
		case HTT_MGMT_TX_STATUS_RETRY:
			tx_done.no_ack = true;
			break;
		case HTT_MGMT_TX_STATUS_DROP:
			tx_done.discard = true;
			break;
		}

		ath10k_txrx_tx_completed(htt, &tx_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND: {
		struct htt_tx_done tx_done = {};
		int status = MS(resp->data_tx_completion.flags,
				HTT_DATA_TX_STATUS);
		__le16 msdu_id;
		int i;

		switch (status) {
		case HTT_DATA_TX_STATUS_NO_ACK:
			tx_done.no_ack = true;
			break;
		case HTT_DATA_TX_STATUS_OK:
			break;
		case HTT_DATA_TX_STATUS_DISCARD:
		case HTT_DATA_TX_STATUS_POSTPONE:
		case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
			tx_done.discard = true;
			break;
		default:
			ath10k_warn("unhandled tx completion status %d\n",
				    status);
			tx_done.discard = true;
			break;
		}

		ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
			   resp->data_tx_completion.num_msdus);

		for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
			msdu_id = resp->data_tx_completion.msdus[i];
			tx_done.msdu_id = __le16_to_cpu(msdu_id);
			ath10k_txrx_tx_completed(htt, &tx_done);
		}
		break;
	}
	case HTT_T2H_MSG_TYPE_SEC_IND: {
		struct ath10k *ar = htt->ar;
		struct htt_security_indication *ev = &resp->security_indication;

		ath10k_dbg(ATH10K_DBG_HTT,
			   "sec ind peer_id %d unicast %d type %d\n",
			   __le16_to_cpu(ev->peer_id),
			   !!(ev->flags & HTT_SECURITY_IS_UNICAST),
			   MS(ev->flags, HTT_SECURITY_TYPE));
		complete(&ar->install_key_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_TEST:
		break;
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
	case HTT_T2H_MSG_TYPE_STATS_CONF:
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
	case HTT_T2H_MSG_TYPE_RX_DELBA:
	case HTT_T2H_MSG_TYPE_RX_FLUSH:
	default:
		ath10k_dbg(ATH10K_DBG_HTT, "htt event (%d) not handled\n",
			   resp->hdr.msg_type);
		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		break;
	};

	/* Free the indication buffer */
	dev_kfree_skb_any(skb);
}