/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"

#include <linux/log2.h>
/* slightly larger than one large A-MPDU */
#define HTT_RX_RING_SIZE_MIN 128

/* roughly 20 ms @ 1 Gbps of 1500B MSDUs */
#define HTT_RX_RING_SIZE_MAX 2048

#define HTT_RX_AVG_FRM_BYTES 1000

/* ms, very conservative */
#define HTT_RX_HOST_LATENCY_MAX_MS 20

/* ms, conservative */
#define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50
static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
{
	int size;

	/*
	 * It is expected that the host CPU will typically be able to
	 * service the rx indication from one A-MPDU before the rx
	 * indication from the subsequent A-MPDU happens, roughly 1-2 ms
	 * later. However, the rx ring should be sized very conservatively,
	 * to accommodate the worst reasonable delay before the host CPU
	 * services a rx indication interrupt.
	 *
	 * The rx ring need not be kept full of empty buffers. In theory,
	 * the htt host SW can dynamically track the low-water mark in the
	 * rx ring, and dynamically adjust the level to which the rx ring
	 * is filled with empty buffers, to dynamically meet the desired
	 * low-water mark.
	 *
	 * In contrast, it's difficult to resize the rx ring itself, once
	 * it's in use. Thus, the ring itself should be sized very
	 * conservatively, while the degree to which the ring is filled
	 * with empty buffers should be sized moderately conservatively.
	 */

	/* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
	size = htt->max_throughput_mbps +
	       1000 / (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;

	if (size < HTT_RX_RING_SIZE_MIN)
		size = HTT_RX_RING_SIZE_MIN;

	if (size > HTT_RX_RING_SIZE_MAX)
		size = HTT_RX_RING_SIZE_MAX;

	size = roundup_pow_of_two(size);

	return size;
}
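
/*
 * Worked example (illustrative, not from the original source): assuming
 * htt->max_throughput_mbps == 800, the integer expression above evaluates
 * to 800 + 1000 / 8000 * 20 == 800; that lies within
 * [HTT_RX_RING_SIZE_MIN, HTT_RX_RING_SIZE_MAX], and roundup_pow_of_two()
 * then yields a ring of 1024 entries.
 */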
static int ath10k_htt_rx_ring_fill_level(struct ath10k_htt *htt)
{
	int size;

	/* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
	size = htt->max_throughput_mbps *
	       1000 / (8 * HTT_RX_AVG_FRM_BYTES) *
	       HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;

	/*
	 * Make sure the fill level is at least 1 less than the ring size.
	 * Leaving 1 element empty allows the SW to easily distinguish
	 * between a full ring vs. an empty ring.
	 */
	if (size >= htt->rx_ring.size)
		size = htt->rx_ring.size - 1;

	return size;
}
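
/*
 * Worked example (illustrative, assuming max_throughput_mbps == 800):
 * 800 * 1000 / 8000 * 10 == 1000 buffers, comfortably below size - 1 for
 * a 1024-entry ring. The clamp above only matters when the computed level
 * would reach the ring size, e.g. at throughputs high enough that the
 * ring is already capped at HTT_RX_RING_SIZE_MAX.
 */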
static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct ath10k_skb_cb *cb;
	int i;

	for (i = 0; i < htt->rx_ring.fill_cnt; i++) {
		skb = htt->rx_ring.netbufs_ring[i];
		cb = ATH10K_SKB_CB(skb);
		dma_unmap_single(htt->ar->dev, cb->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}

	htt->rx_ring.fill_cnt = 0;
}
static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct htt_rx_desc *rx_desc;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	idx = __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr));
	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = (struct htt_rx_desc *)skb->data;
		rx_desc->attention.flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		ATH10K_SKB_CB(skb)->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
		htt->rx_ring.fill_cnt++;

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	*(htt->rx_ring.alloc_idx.vaddr) = __cpu_to_le32(idx);
	return ret;
}
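
/*
 * Note: idx shadows *htt->rx_ring.alloc_idx.vaddr, the shared location
 * through which the host publishes how far the ring has been filled.
 * It is written back even on the -ENOMEM path above, so the published
 * count always matches the buffers that were actually posted.
 */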
static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_to_fill;

	spin_lock_bh(&htt->rx_ring.lock);
	num_to_fill = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}

static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)arg;

	ath10k_htt_rx_msdu_buff_replenish(htt);
}
static unsigned ath10k_htt_rx_ring_elems(struct ath10k_htt *htt)
{
	return (__le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr) -
		htt->rx_ring.sw_rd_idx.msdu_payld) & htt->rx_ring.size_mask;
}
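
/*
 * Example of the masked-difference idiom above (illustrative numbers):
 * with size = 1024 (size_mask = 1023), alloc_idx = 10 and
 * sw_rd_idx.msdu_payld = 1020, the ring holds (10 - 1020) & 1023 = 14
 * filled entries; the power-of-two mask makes the wrap-around implicit.
 */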
void ath10k_htt_rx_detach(struct ath10k_htt *htt)
{
	int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;

	del_timer_sync(&htt->rx_ring.refill_retry_timer);

	while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
		struct sk_buff *skb = htt->rx_ring.netbufs_ring[sw_rd_idx];
		struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);

		dma_unmap_single(htt->ar->dev, cb->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(htt->rx_ring.netbufs_ring[sw_rd_idx]);
		sw_rd_idx++;
		sw_rd_idx &= htt->rx_ring.size_mask;
	}

	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);

	kfree(htt->rx_ring.netbufs_ring);
}
static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	int idx;
	struct sk_buff *msdu;

	spin_lock_bh(&htt->rx_ring.lock);

	if (ath10k_htt_rx_ring_elems(htt) == 0)
		ath10k_warn("htt rx ring is empty!\n");

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	spin_unlock_bh(&htt->rx_ring.lock);
	return msdu;
}
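
/*
 * Note: the fill_cnt decrement above pairs with the increment in
 * __ath10k_htt_rx_ring_fill_n(); ath10k_htt_rx_msdu_buff_replenish()
 * later tops the ring back up to fill_level based on that counter.
 */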
static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb)
{
	struct sk_buff *next;

	while (skb) {
		next = skb->next;
		dev_kfree_skb_any(skb);
		skb = next;
	}
}
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   u8 **fw_desc, int *fw_desc_len,
				   struct sk_buff **head_msdu,
				   struct sk_buff **tail_msdu)
{
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;

	if (ath10k_htt_rx_ring_elems(htt) == 0)
		ath10k_warn("htt rx ring is empty!\n");

	if (htt->rx_confused) {
		ath10k_warn("htt is confused. refusing rx\n");
		return 0;
	}

	msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt);
	while (msdu) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		dma_unmap_single(htt->ar->dev,
				 ATH10K_SKB_CB(msdu)->paddr,
				 msdu->len + skb_tailroom(msdu),
				 DMA_FROM_DEVICE);

		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ",
				msdu->data, msdu->len + skb_tailroom(msdu));

		rx_desc = (struct htt_rx_desc *)msdu->data;

		/* FIXME: we must report msdu payload since this is what caller
		 *        expects now */
		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc->attention.flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			ath10k_htt_rx_free_msdu_chain(*head_msdu);
			*head_msdu = NULL;
			msdu = NULL;
			ath10k_err("htt rx stopped. cannot recover\n");
			htt->rx_confused = true;
			break;
		}

		/*
		 * Copy the FW rx descriptor for this MSDU from the rx
		 * indication message into the MSDU's netbuf. HL uses the
		 * same rx indication message definition as LL, and simply
		 * appends new info (fields from the HW rx desc, and the
		 * MSDU payload itself). So, the offset into the rx
		 * indication message only has to account for the standard
		 * offset of the per-MSDU FW rx desc info within the
		 * message, and how many bytes of the per-MSDU FW rx desc
		 * info have already been consumed. (And the endianness of
		 * the host, since for a big-endian host, the rx ind
		 * message contents, including the per-MSDU rx desc bytes,
		 * were byteswapped during upload.)
		 */
		if (*fw_desc_len > 0) {
			rx_desc->fw_desc.info0 = **fw_desc;
			/*
			 * The target is expected to only provide the basic
			 * per-MSDU rx descriptors. Just to be sure, verify
			 * that the target has not attached extension data
			 * (e.g. LRO flow ID).
			 */

			/* or more, if there's extension data */
			(*fw_desc)++;
			(*fw_desc_len)--;
		} else {
			/*
			 * When an oversized A-MSDU happens, the FW will lose
			 * some of the MSDU status - in this case, the FW
			 * descriptors provided will be fewer than the
			 * actual MSDUs inside this MPDU. Mark the FW
			 * descriptors so that it will still deliver to
			 * upper stack, if no CRC error for this MPDU.
			 *
			 * FIX THIS - the FW descriptors are actually for
			 * MSDUs in the end of this A-MSDU instead of the
			 * beginning.
			 */
			rx_desc->fw_desc.info0 = 0;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc->frag_info.ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_trim(msdu, 0);
		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
		msdu_len -= msdu->len;

		/* FIXME: Do chained buffers include htt_rx_desc or not? */
		while (msdu_chained--) {
			struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);

			dma_unmap_single(htt->ar->dev,
					 ATH10K_SKB_CB(next)->paddr,
					 next->len + skb_tailroom(next),
					 DMA_FROM_DEVICE);

			ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ",
					next->data,
					next->len + skb_tailroom(next));

			skb_trim(next, 0);
			skb_put(next, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= next->len;

			msdu->next = next;
			msdu = next;
			msdu_chaining = 1;
		}

		if (msdu_len > 0) {
			/* This may suggest FW bug? */
			ath10k_warn("htt rx msdu len not consumed (%d)\n",
				    msdu_len);
		}

		last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		if (last_msdu) {
			msdu->next = NULL;
			break;
		} else {
			struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);

			msdu->next = next;
			msdu = next;
		}
	}
	*tail_msdu = msdu;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */

	return msdu_chaining;
}
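
/*
 * Note: the return value is the msdu_chaining flag set in the loop above
 * (non-zero when an MSDU spilled into additional ring buffers). The rx
 * handler currently drops such MPDUs - see the "msdu_chaining is true"
 * warning below.
 */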
int ath10k_htt_rx_attach(struct ath10k_htt *htt)
{
	dma_addr_t paddr;
	void *vaddr;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	htt->rx_ring.size = ath10k_htt_rx_ring_size(htt);
	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn("htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.size_mask = htt->rx_ring.size - 1;

	/*
	 * Set the initial value for the level to which the rx ring
	 * should be filled, based on the max throughput and the
	 * worst likely latency for the host to fill the rx ring
	 * with new buffers. In theory, this fill level can be
	 * dynamically adjusted from the initial value set here, to
	 * reflect the actual host latency rather than a
	 * conservative assumption about the host latency.
	 */
	htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt);

	htt->rx_ring.netbufs_ring =
		kmalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	vaddr = dma_alloc_coherent(htt->ar->dev,
		   (htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring)),
		   &paddr, GFP_DMA);
	if (!vaddr)
		goto err_dma_ring;

	htt->rx_ring.paddrs_ring = vaddr;
	htt->rx_ring.base_paddr = paddr;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_DMA);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);

	spin_lock_init(&htt->rx_ring.lock);

	htt->rx_ring.fill_cnt = 0;
	if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
		goto err_fill_ring;

	ath10k_dbg(ATH10K_DBG_HTT, "HTT RX ring size: %d, fill_level: %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_fill_ring:
	ath10k_htt_rx_ring_free(htt);
	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);
err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);
err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
	return -ENOMEM;
}
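
/*
 * Note: the error paths above unwind the allocations in reverse order -
 * posted rx buffers, the alloc_idx coherent word, the paddrs ring and
 * finally the netbufs pointer array - so a failure at any step releases
 * exactly what had been set up before it.
 */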
static int ath10k_htt_rx_crypto_param_len(enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return 4;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_WEP128: /* not tested */
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
	case HTT_RX_MPDU_ENCRYPT_WAPI: /* not tested */
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return 8;
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	}

	ath10k_warn("unknown encryption type %d\n", type);
	return 0;
}
static int ath10k_htt_rx_crypto_tail_len(enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return 4;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return 8;
	}

	ath10k_warn("unknown encryption type %d\n", type);
	return 0;
}
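
/*
 * The head/tail lengths above match the usual 802.11 crypto overheads for
 * the tested modes: a 4 byte WEP IV, an 8 byte TKIP/CCMP IV+Ext-IV, a
 * 4 byte TKIP ICV and an 8 byte CCMP MIC. Modes for which no trailer is
 * stripped here (e.g. WEP, WAPI) simply return 0.
 */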
/* Applies for first msdu in chain, before altering it. */
static struct ieee80211_hdr *ath10k_htt_rx_skb_get_hdr(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format fmt;

	rxd = (void *)skb->data - sizeof(*rxd);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
		 RX_MSDU_START_INFO1_DECAP_FORMAT);

	if (fmt == RX_MSDU_DECAP_RAW)
		return (void *)skb->data;

	return (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
}

/* This function only applies for first msdu in an msdu chain */
static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr)
{
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);

		/* A-MSDU present subfield of the QoS Control field */
		if (qc[0] & 0x80)
			return true;
	}

	return false;
}
static int ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
			       struct htt_rx_info *info)
{
	struct htt_rx_desc *rxd;
	struct sk_buff *amsdu;
	struct sk_buff *first;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb = info->skb;
	enum rx_msdu_decap_format fmt;
	enum htt_rx_mpdu_encrypt_type enctype;
	unsigned int hdr_len;
	int crypto_len;

	rxd = (void *)skb->data - sizeof(*rxd);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
		 RX_MSDU_START_INFO1_DECAP_FORMAT);
	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	/* FIXME: No idea what assumptions are safe here. Need logs */
	if ((fmt == RX_MSDU_DECAP_RAW && skb->next) ||
	    (fmt == RX_MSDU_DECAP_8023_SNAP_LLC)) {
		ath10k_htt_rx_free_msdu_chain(skb->next);
		skb->next = NULL;
		return -ENOTSUPP;
	}

	/* A-MSDU max is a little less than 8K */
	amsdu = dev_alloc_skb(8*1024);
	if (!amsdu) {
		ath10k_warn("A-MSDU allocation failed\n");
		ath10k_htt_rx_free_msdu_chain(skb->next);
		skb->next = NULL;
		return -ENOMEM;
	}

	if (fmt >= RX_MSDU_DECAP_NATIVE_WIFI) {
		int hdrlen;

		hdr = (void *)rxd->rx_hdr_status;
		hdrlen = ieee80211_hdrlen(hdr->frame_control);
		memcpy(skb_put(amsdu, hdrlen), hdr, hdrlen);
	}

	first = skb;
	while (skb) {
		void *decap_hdr;
		int decap_len = 0;

		rxd = (void *)skb->data - sizeof(*rxd);
		fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
			 RX_MSDU_START_INFO1_DECAP_FORMAT);
		decap_hdr = (void *)rxd->rx_hdr_status;

		if (skb == first) {
			/* We receive linked A-MSDU subframe skbuffs. The
			 * first one contains the original 802.11 header (and
			 * possible crypto param) in the RX descriptor. The
			 * A-MSDU subframe header follows that. Each part is
			 * aligned to 4 byte boundary. */
			hdr = (void *)amsdu->data;
			hdr_len = ieee80211_hdrlen(hdr->frame_control);
			crypto_len = ath10k_htt_rx_crypto_param_len(enctype);

			decap_hdr += roundup(hdr_len, 4);
			decap_hdr += roundup(crypto_len, 4);
		}

		if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) {
			/* Ethernet2 decap inserts ethernet header in place of
			 * A-MSDU subframe header. */
			skb_pull(skb, 6 + 6 + 2);

			/* A-MSDU subframe header length */
			decap_len += 6 + 6 + 2;

			/* Ethernet2 decap also strips the LLC/SNAP so we need
			 * to re-insert it. The LLC/SNAP follows A-MSDU
			 * subframe header. */
			/* FIXME: Not all LLCs are 8 bytes long */
			decap_len += 8;

			memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len);
		}

		if (fmt == RX_MSDU_DECAP_NATIVE_WIFI) {
			/* Native Wifi decap inserts regular 802.11 header
			 * in place of A-MSDU subframe header. */
			hdr = (struct ieee80211_hdr *)skb->data;
			skb_pull(skb, ieee80211_hdrlen(hdr->frame_control));

			/* A-MSDU subframe header length */
			decap_len += 6 + 6 + 2;

			memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len);
		}

		if (fmt == RX_MSDU_DECAP_RAW)
			skb_trim(skb, skb->len - 4); /* remove FCS */

		memcpy(skb_put(amsdu, skb->len), skb->data, skb->len);

		/* A-MSDU subframes are padded to 4 bytes
		 * but relative to first subframe, not the whole MPDU */
		if (skb->next && ((decap_len + skb->len) & 3)) {
			int padlen = 4 - ((decap_len + skb->len) & 3);
			memset(skb_put(amsdu, padlen), 0, padlen);
		}

		skb = skb->next;
	}

	info->skb = amsdu;
	info->encrypt_type = enctype;

	ath10k_htt_rx_free_msdu_chain(first);

	return 0;
}
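
/*
 * Resulting buffer layout (sketch based on the copies above): the new
 * amsdu skb starts with the original 802.11 header taken from
 * rx_hdr_status, followed for each subframe by the re-inserted A-MSDU
 * subframe header (plus LLC/SNAP for Ethernet2 decap), the subframe
 * payload, and zero padding to a 4 byte boundary between subframes.
 */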
static int ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
{
	struct sk_buff *skb = info->skb;
	struct htt_rx_desc *rxd;
	struct ieee80211_hdr *hdr;
	enum rx_msdu_decap_format fmt;
	enum htt_rx_mpdu_encrypt_type enctype;

	/* This shouldn't happen. If it does then it may be a FW bug. */
	if (skb->next) {
		ath10k_warn("received chained non A-MSDU frame\n");
		ath10k_htt_rx_free_msdu_chain(skb->next);
		skb->next = NULL;
	}

	rxd = (void *)skb->data - sizeof(*rxd);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
		 RX_MSDU_START_INFO1_DECAP_FORMAT);
	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);
	hdr = (void *)skb->data - RX_HTT_HDR_STATUS_LEN;

	switch (fmt) {
	case RX_MSDU_DECAP_RAW:
		/* remove trailing FCS */
		skb_trim(skb, skb->len - 4);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		/* nothing to do here */
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		/* macaddr[6] + macaddr[6] + ethertype[2] */
		skb_pull(skb, 6 + 6 + 2);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		/* macaddr[6] + macaddr[6] + len[2] */
		/* we don't need this for non-A-MSDU */
		skb_pull(skb, 6 + 6 + 2);
		break;
	}

	if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) {
		void *llc;
		int llclen;

		llclen = 8;
		llc = hdr;
		llc += roundup(ieee80211_hdrlen(hdr->frame_control), 4);
		llc += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);

		skb_push(skb, llclen);
		memcpy(skb->data, llc, llclen);
	}

	if (fmt >= RX_MSDU_DECAP_ETHERNET2_DIX) {
		int len = ieee80211_hdrlen(hdr->frame_control);

		skb_push(skb, len);
		memcpy(skb->data, hdr, len);
	}

	info->skb = skb;
	info->encrypt_type = enctype;

	return 0;
}
static bool ath10k_htt_rx_has_decrypt_err(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);

	if (flags & RX_ATTENTION_FLAGS_DECRYPT_ERR)
		return true;

	return false;
}

static bool ath10k_htt_rx_has_fcs_err(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);

	if (flags & RX_ATTENTION_FLAGS_FCS_ERR)
		return true;

	return false;
}
static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
				  struct htt_rx_indication *rx)
{
	struct htt_rx_info info;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	struct ieee80211_hdr *hdr;
	int num_mpdu_ranges;
	int fw_desc_len;
	u8 *fw_desc;
	int i, j;
	int ret;

	memset(&info, 0, sizeof(info));

	fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
	fw_desc = (u8 *)&rx->fw_desc;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
			rx, sizeof(*rx) +
			(sizeof(struct htt_rx_indication_mpdu_range) *
			 num_mpdu_ranges));

	for (i = 0; i < num_mpdu_ranges; i++) {
		info.status = mpdu_ranges[i].mpdu_range_status;

		for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {
			struct sk_buff *msdu_head, *msdu_tail;
			enum htt_rx_mpdu_status status;
			int msdu_chaining;

			msdu_head = NULL;
			msdu_tail = NULL;
			msdu_chaining = ath10k_htt_rx_amsdu_pop(htt,
								&fw_desc,
								&fw_desc_len,
								&msdu_head,
								&msdu_tail);

			if (!msdu_head) {
				ath10k_warn("htt rx no data!\n");
				continue;
			}

			if (msdu_head->len == 0) {
				ath10k_dbg(ATH10K_DBG_HTT,
					   "htt rx dropping due to zero-len\n");
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			if (ath10k_htt_rx_has_decrypt_err(msdu_head)) {
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			status = info.status;

			/* Skip mgmt frames while we handle this in WMI */
			if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL) {
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			if (status != HTT_RX_IND_MPDU_STATUS_OK &&
			    status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
			    !htt->ar->monitor_enabled) {
				ath10k_dbg(ATH10K_DBG_HTT,
					   "htt rx ignoring frame w/ status %d\n",
					   status);
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			/* FIXME: we do not support chaining yet.
			 * this needs investigation */
			if (msdu_chaining) {
				ath10k_warn("msdu_chaining is true\n");
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			info.skb     = msdu_head;
			info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
			info.signal  = ATH10K_DEFAULT_NOISE_FLOOR;
			info.signal += rx->ppdu.combined_rssi;

			info.rate.info0 = rx->ppdu.info0;
			info.rate.info1 = __le32_to_cpu(rx->ppdu.info1);
			info.rate.info2 = __le32_to_cpu(rx->ppdu.info2);

			hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);

			if (ath10k_htt_rx_hdr_is_amsdu(hdr))
				ret = ath10k_htt_rx_amsdu(htt, &info);
			else
				ret = ath10k_htt_rx_msdu(htt, &info);

			if (ret && !info.fcs_err) {
				ath10k_warn("error processing msdus %d\n", ret);
				dev_kfree_skb_any(info.skb);
				continue;
			}

			if (ath10k_htt_rx_hdr_is_amsdu((void *)info.skb->data))
				ath10k_dbg(ATH10K_DBG_HTT, "htt mpdu is amsdu\n");

			ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt mpdu: ",
					info.skb->data, info.skb->len);
			ath10k_process_rx(htt->ar, &info);
		}
	}

	ath10k_htt_rx_msdu_buff_replenish(htt);
}
static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
				       struct htt_rx_fragment_indication *frag)
{
	struct sk_buff *msdu_head, *msdu_tail;
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format fmt;
	struct htt_rx_info info = {};
	struct ieee80211_hdr *hdr;
	int msdu_chaining;
	bool tkip_mic_err;
	bool decrypt_err;
	u8 *fw_desc;
	int fw_desc_len, hdrlen, paramlen;
	int trim;

	fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
	fw_desc = (u8 *)frag->fw_msdu_rx_desc;

	msdu_head = NULL;
	msdu_tail = NULL;
	msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
						&msdu_head, &msdu_tail);

	ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");

	if (!msdu_head) {
		ath10k_warn("htt rx frag no data\n");
		return;
	}

	if (msdu_chaining || msdu_head != msdu_tail) {
		ath10k_warn("aggregation with fragmentation?!\n");
		ath10k_htt_rx_free_msdu_chain(msdu_head);
		return;
	}

	/* FIXME: implement signal strength */

	hdr = (struct ieee80211_hdr *)msdu_head->data;
	rxd = (void *)msdu_head->data - sizeof(*rxd);
	tkip_mic_err = !!(__le32_to_cpu(rxd->attention.flags) &
			  RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	decrypt_err = !!(__le32_to_cpu(rxd->attention.flags) &
			 RX_ATTENTION_FLAGS_DECRYPT_ERR);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
		 RX_MSDU_START_INFO1_DECAP_FORMAT);

	if (fmt != RX_MSDU_DECAP_RAW) {
		ath10k_warn("we dont support non-raw fragmented rx yet\n");
		dev_kfree_skb_any(msdu_head);
		goto end;
	}

	info.skb = msdu_head;
	info.status = HTT_RX_IND_MPDU_STATUS_OK;
	info.encrypt_type = MS(__le32_to_cpu(rxd->mpdu_start.info0),
			       RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	if (tkip_mic_err) {
		ath10k_warn("tkip mic error\n");
		info.status = HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR;
	}

	if (decrypt_err) {
		ath10k_warn("decryption err in fragmented rx\n");
		dev_kfree_skb_any(info.skb);
		goto end;
	}

	if (info.encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) {
		hdrlen = ieee80211_hdrlen(hdr->frame_control);
		paramlen = ath10k_htt_rx_crypto_param_len(info.encrypt_type);

		/* It is more efficient to move the header than the payload */
		memmove((void *)info.skb->data + paramlen,
			(void *)info.skb->data,
			hdrlen);
		skb_pull(info.skb, paramlen);
		hdr = (struct ieee80211_hdr *)info.skb->data;
	}

	/* remove trailing FCS */
	trim  = 4;

	/* remove crypto trailer */
	trim += ath10k_htt_rx_crypto_tail_len(info.encrypt_type);

	/* last fragment of TKIP frags has MIC */
	if (!ieee80211_has_morefrags(hdr->frame_control) &&
	    info.encrypt_type == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		trim += 8;

	if (trim > info.skb->len) {
		ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n");
		dev_kfree_skb_any(info.skb);
		goto end;
	}

	skb_trim(info.skb, info.skb->len - trim);

	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt frag mpdu: ",
			info.skb->data, info.skb->len);
	ath10k_process_rx(htt->ar, &info);

end:
	if (fw_desc_len > 0) {
		ath10k_dbg(ATH10K_DBG_HTT,
			   "expecting more fragmented rx in one indication %d\n",
			   fw_desc_len);
	}
}
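
/*
 * Trim example (illustrative): for a CCMP-protected fragment the code
 * above removes 4 bytes of FCS plus an 8 byte MIC, i.e. 12 bytes; a
 * final TKIP fragment additionally loses its 8 byte Michael MIC on top
 * of the 4 byte ICV and the FCS.
 */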
void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;

	/* confirm alignment */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn("unaligned htt message, expect trouble\n");

	ath10k_dbg(ATH10K_DBG_HTT, "HTT RX, msg_type: 0x%0X\n",
		   resp->hdr.msg_type);
	switch (resp->hdr.msg_type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
		htt->target_version_major = resp->ver_resp.major;
		htt->target_version_minor = resp->ver_resp.minor;
		complete(&htt->target_version_received);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IND: {
		ath10k_htt_rx_handler(htt, &resp->rx_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_MAP: {
		struct htt_peer_map_event ev = {
			.vdev_id = resp->peer_map.vdev_id,
			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
		};
		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
		ath10k_peer_map_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
		struct htt_peer_unmap_event ev = {
			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
		};
		ath10k_peer_unmap_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
		struct htt_tx_done tx_done = {};
		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);

		tx_done.msdu_id =
			__le32_to_cpu(resp->mgmt_tx_completion.desc_id);

		switch (status) {
		case HTT_MGMT_TX_STATUS_OK:
			break;
		case HTT_MGMT_TX_STATUS_RETRY:
			tx_done.no_ack = true;
			break;
		case HTT_MGMT_TX_STATUS_DROP:
			tx_done.discard = true;
			break;
		}

		ath10k_txrx_tx_completed(htt, &tx_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND: {
		struct htt_tx_done tx_done = {};
		int status = MS(resp->data_tx_completion.flags,
				HTT_DATA_TX_STATUS);
		__le16 msdu_id;
		int i;

		switch (status) {
		case HTT_DATA_TX_STATUS_NO_ACK:
			tx_done.no_ack = true;
			break;
		case HTT_DATA_TX_STATUS_OK:
			break;
		case HTT_DATA_TX_STATUS_DISCARD:
		case HTT_DATA_TX_STATUS_POSTPONE:
		case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
			tx_done.discard = true;
			break;
		default:
			ath10k_warn("unhandled tx completion status %d\n",
				    status);
			tx_done.discard = true;
			break;
		}

		ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
			   resp->data_tx_completion.num_msdus);

		for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
			msdu_id = resp->data_tx_completion.msdus[i];
			tx_done.msdu_id = __le16_to_cpu(msdu_id);
			ath10k_txrx_tx_completed(htt, &tx_done);
		}
		break;
	}
	case HTT_T2H_MSG_TYPE_SEC_IND: {
		struct ath10k *ar = htt->ar;
		struct htt_security_indication *ev = &resp->security_indication;

		ath10k_dbg(ATH10K_DBG_HTT,
			   "sec ind peer_id %d unicast %d type %d\n",
			   __le16_to_cpu(ev->peer_id),
			   !!(ev->flags & HTT_SECURITY_IS_UNICAST),
			   MS(ev->flags, HTT_SECURITY_TYPE));
		complete(&ar->install_key_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_TEST:
		break;
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
	case HTT_T2H_MSG_TYPE_STATS_CONF:
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
	case HTT_T2H_MSG_TYPE_RX_DELBA:
	case HTT_T2H_MSG_TYPE_RX_FLUSH:
	default:
		ath10k_dbg(ATH10K_DBG_HTT, "htt event (%d) not handled\n",
			   resp->hdr.msg_type);
		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		break;
	};

	/* Free the indication buffer */
	dev_kfree_skb_any(skb);
}