/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>
#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"
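
/* Helpers that track how many frames are currently queued towards the
 * firmware. The __ variant below expects the caller to hold htt->tx_lock;
 * the locked wrapper that follows takes the lock itself.
 */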
void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt,
				 bool limit_mgmt_desc)
{
	if (limit_mgmt_desc)
		htt->num_pending_mgmt_tx--;

	htt->num_pending_tx--;
	if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
		ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
}
static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt,
				       bool limit_mgmt_desc)
{
	spin_lock_bh(&htt->tx_lock);
	__ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
	spin_unlock_bh(&htt->tx_lock);
}
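
/* Reserve a pending-tx slot. When mgmt descriptors are limited by the
 * hw_params threshold, excess probe responses are refused so they cannot
 * exhaust the shared descriptor pool; the mac80211 queues are stopped once
 * the pool is full.
 */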
static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt,
				     bool limit_mgmt_desc, bool is_probe_resp)
{
	struct ath10k *ar = htt->ar;
	int ret = 0;

	spin_lock_bh(&htt->tx_lock);

	if (htt->num_pending_tx >= htt->max_num_pending_tx) {
		ret = -EBUSY;
		goto exit;
	}

	if (limit_mgmt_desc) {
		if (is_probe_resp && (htt->num_pending_mgmt_tx >
		    ar->hw_params.max_probe_resp_desc_thres)) {
			ret = -EBUSY;
			goto exit;
		}
		htt->num_pending_mgmt_tx++;
	}

	htt->num_pending_tx++;
	if (htt->num_pending_tx == htt->max_num_pending_tx)
		ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);

exit:
	spin_unlock_bh(&htt->tx_lock);
	return ret;
}
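
/* Map an outgoing skb to a firmware msdu_id via the pending_tx IDR.
 * Caller must hold htt->tx_lock.
 */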
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
{
	struct ath10k *ar = htt->ar;
	int ret;

	lockdep_assert_held(&htt->tx_lock);

	ret = idr_alloc(&htt->pending_tx, skb, 0,
			htt->max_num_pending_tx, GFP_ATOMIC);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);

	return ret;
}
void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);

	idr_remove(&htt->pending_tx, msdu_id);
}
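
/* Allocate the DMA-coherent HTT tx descriptor pool (and, on hardware with
 * continuous_frag_desc, the MSDU extension descriptor pool), each sized for
 * max_num_pending_tx frames.
 */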
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int ret, size;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
		   htt->max_num_pending_tx);

	spin_lock_init(&htt->tx_lock);
	idr_init(&htt->pending_tx);

	size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
	htt->txbuf.vaddr = dma_alloc_coherent(ar->dev, size,
					      &htt->txbuf.paddr,
					      GFP_DMA);
	if (!htt->txbuf.vaddr) {
		ath10k_err(ar, "failed to alloc tx buffer\n");
		ret = -ENOMEM;
		goto free_idr_pending_tx;
	}

	if (!ar->hw_params.continuous_frag_desc)
		goto skip_frag_desc_alloc;

	size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
	htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size,
						  &htt->frag_desc.paddr,
						  GFP_DMA);
	if (!htt->frag_desc.vaddr) {
		ath10k_warn(ar, "failed to alloc fragment desc memory\n");
		ret = -ENOMEM;
		goto free_txbuf;
	}

skip_frag_desc_alloc:
	return 0;

free_txbuf:
	size = htt->max_num_pending_tx *
	       sizeof(struct ath10k_htt_txbuf);
	dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
			  htt->txbuf.paddr);
free_idr_pending_tx:
	idr_destroy(&htt->pending_tx);
	return ret;
}
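
/* IDR iterator used at teardown: complete every still-pending msdu_id with
 * a discard status so the associated skbs and DMA mappings are released.
 */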
static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
{
	struct ath10k *ar = ctx;
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {0};

	ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);

	tx_done.discard = 1;
	tx_done.msdu_id = msdu_id;

	ath10k_txrx_tx_unref(htt, &tx_done);

	return 0;
}
void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
	int size;

	idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
	idr_destroy(&htt->pending_tx);

	if (htt->txbuf.vaddr) {
		size = htt->max_num_pending_tx *
		       sizeof(struct ath10k_htt_txbuf);
		dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
				  htt->txbuf.paddr);
	}

	if (htt->frag_desc.vaddr) {
		size = htt->max_num_pending_tx *
		       sizeof(struct htt_msdu_ext_desc);
		dma_free_coherent(htt->ar->dev, size, htt->frag_desc.vaddr,
				  htt->frag_desc.paddr);
	}
}
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_hif_tx_complete);
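
/* Send a HTT_H2T_MSG_TYPE_VERSION_REQ command; the firmware answers with the
 * HTT interface version it implements.
 */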
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0;
	int ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->ver_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
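
/* Request firmware statistics uploads. The mask selects which stat types to
 * upload and reset; the 64-bit cookie is carried back in the firmware's
 * response so the caller can match it.
 */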
int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
{
	struct ath10k *ar = htt->ar;
	struct htt_stats_req *req;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0, ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->stats_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;

	req = &cmd->stats_req;

	memset(req, 0, sizeof(*req));

	/* currently we support only max 8 bit masks so no need to worry
	 * about endian support
	 */
	req->upload_types[0] = mask;
	req->reset_types[0] = mask;
	req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
	req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
	req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send htt type stats request: %d",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
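
/* Tell the firmware where the host-resident MSDU extension (fragment)
 * descriptor bank lives. Only used when hw_params.continuous_frag_desc is
 * set.
 */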
int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int ret, size;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	if (!htt->frag_desc.paddr) {
		ath10k_warn(ar, "invalid frag desc memory\n");
		return -EINVAL;
	}

	size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg);
	skb = ath10k_htc_alloc_skb(ar, size);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, size);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;
	cmd->frag_desc_bank_cfg.info = 0;
	cmd->frag_desc_bank_cfg.num_banks = 1;
	cmd->frag_desc_bank_cfg.desc_size = sizeof(struct htt_msdu_ext_desc);
	cmd->frag_desc_bank_cfg.bank_base_addrs[0] =
				__cpu_to_le32(htt->frag_desc.paddr);
	cmd->frag_desc_bank_cfg.bank_id[0].bank_min_id = 0;
	cmd->frag_desc_bank_cfg.bank_id[0].bank_max_id =
				__cpu_to_le16(htt->max_num_pending_tx - 1);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
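
/* Describe the host rx ring to the firmware: base address, length, buffer
 * size, the shadow index register and the per-field offsets inside
 * struct htt_rx_desc (expressed in 4-byte words).
 */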
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/*
	 * the HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup.hdr.num_rings = 1;

	/* FIXME: do we need all of this? */
	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr =
		__cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)

	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));

#undef desc_offset

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
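
/* Configure the firmware's A-MPDU/A-MSDU aggregation limits. Values are
 * validated against the ranges the firmware accepts before the command is
 * sent.
 */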
int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
				u8 max_subfrms_ampdu,
				u8 max_subfrms_amsdu)
{
	struct ath10k *ar = htt->ar;
	struct htt_aggr_conf *aggr_conf;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len;
	int ret;

	/* Firmware defaults are: amsdu = 3 and ampdu = 64 */

	if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
		return -EINVAL;

	if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
		return -EINVAL;

	len = sizeof(cmd->hdr);
	len += sizeof(cmd->aggr_conf);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

	aggr_conf = &cmd->aggr_conf;
	aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
	aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
		   aggr_conf->max_num_amsdu_subframes,
		   aggr_conf->max_num_ampdu_subframes);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
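
/* Transmit a management frame via the HTT_H2T_MSG_TYPE_MGMT_TX command. The
 * frame is DMA-mapped and referenced by address; only the first
 * HTT_MGMT_FRM_HDR_DOWNLOAD_LEN bytes of the header are copied into the
 * command itself.
 */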
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct sk_buff *txdesc = NULL;
	struct htt_cmd *cmd;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	u8 vdev_id = skb_cb->vdev_id;
	int len = 0;
	int msdu_id = -1;
	int res;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	bool limit_mgmt_desc = false;
	bool is_probe_resp = false;

	if (ar->hw_params.max_probe_resp_desc_thres) {
		limit_mgmt_desc = true;

		if (ieee80211_is_probe_resp(hdr->frame_control))
			is_probe_resp = true;
	}

	res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp);

	if (res)
		goto err;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->mgmt_tx);

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	spin_unlock_bh(&htt->tx_lock);
	if (res < 0)
		goto err_tx_dec;

	msdu_id = res;

	txdesc = ath10k_htc_alloc_skb(ar, len);
	if (!txdesc) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_txdesc;
	}

	skb_put(txdesc, len);
	cmd = (struct htt_cmd *)txdesc->data;

	cmd->hdr.msg_type       = HTT_H2T_MSG_TYPE_MGMT_TX;
	cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
	cmd->mgmt_tx.len        = __cpu_to_le32(msdu->len);
	cmd->mgmt_tx.desc_id    = __cpu_to_le32(msdu_id);
	cmd->mgmt_tx.vdev_id    = __cpu_to_le32(vdev_id);
	memcpy(cmd->mgmt_tx.hdr, msdu->data,
	       min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

	skb_cb->htt.txbuf = NULL;

	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
	dev_kfree_skb_any(txdesc);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
	ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
err:
	return res;
}
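
/* Data path tx: build the per-msdu HTT tx descriptor in the preallocated
 * txbuf, point it at the DMA-mapped payload and hand both to the HIF layer
 * as a scatter-gather pair, bypassing HTC (see the comment in the function
 * body).
 */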
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct ath10k_hif_sg_item sg_items[2];
	struct htt_data_tx_desc_frag *frags;
	u8 vdev_id = skb_cb->vdev_id;
	u8 tid = skb_cb->htt.tid;
	int prefetch_len;
	int res;
	u8 flags0 = 0;
	u16 msdu_id, flags1 = 0;
	u32 frags_paddr = 0;
	struct htt_msdu_ext_desc *ext_desc = NULL;
	bool limit_mgmt_desc = false;
	bool is_probe_resp = false;

	if (unlikely(ieee80211_is_mgmt(hdr->frame_control)) &&
	    ar->hw_params.max_probe_resp_desc_thres) {
		limit_mgmt_desc = true;

		if (ieee80211_is_probe_resp(hdr->frame_control))
			is_probe_resp = true;
	}

	res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp);

	if (res)
		goto err;

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	spin_unlock_bh(&htt->tx_lock);
	if (res < 0)
		goto err_tx_dec;

	msdu_id = res;

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	skb_cb->htt.txbuf = &htt->txbuf.vaddr[msdu_id];
	skb_cb->htt.txbuf_paddr = htt->txbuf.paddr +
		(sizeof(struct ath10k_htt_txbuf) * msdu_id);

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	} else if (!skb_cb->htt.nohwcrypt &&
		   skb_cb->txmode == ATH10K_HW_TXRX_RAW &&
		   ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_msdu_id;
	}

	switch (skb_cb->txmode) {
	case ATH10K_HW_TXRX_RAW:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		/* pass through */
	case ATH10K_HW_TXRX_ETHERNET:
		if (ar->hw_params.continuous_frag_desc) {
			memset(&htt->frag_desc.vaddr[msdu_id], 0,
			       sizeof(struct htt_msdu_ext_desc));
			frags = (struct htt_data_tx_desc_frag *)
				&htt->frag_desc.vaddr[msdu_id].frags;
			ext_desc = &htt->frag_desc.vaddr[msdu_id];
			frags[0].tword_addr.paddr_lo =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].tword_addr.paddr_hi = 0;
			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);

			frags_paddr = htt->frag_desc.paddr +
				(sizeof(struct htt_msdu_ext_desc) * msdu_id);
		} else {
			frags = skb_cb->htt.txbuf->frags;
			frags[0].dword_addr.paddr =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
			frags[1].dword_addr.paddr = 0;
			frags[1].dword_addr.len = 0;

			frags_paddr = skb_cb->htt.txbuf_paddr;
		}
		flags0 |= SM(skb_cb->txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		break;
	case ATH10K_HW_TXRX_MGMT:
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

		frags_paddr = skb_cb->paddr;
		break;
	}

	/* Normally all commands go through HTC which manages tx credits for
	 * each endpoint and notifies when tx is completed.
	 *
	 * The HTT endpoint is creditless so there's no need to care about
	 * HTC flags. In that case it is trivial to fill the HTC header here.
	 *
	 * MSDU transmission is considered completed upon HTT event. This
	 * implies no relevant resources can be freed until after the event is
	 * received. That's why the HTC tx completion handler itself is
	 * ignored by setting transfer_context to NULL for all sg items.
	 *
	 * There is simply no point in pushing HTT TX_FRM through the HTC tx
	 * path as it's a waste of resources. By bypassing HTC it is possible
	 * to avoid extra memory allocations, compress data structures and
	 * thus improve performance.
	 */
	skb_cb->htt.txbuf->htc_hdr.eid = htt->eid;
	skb_cb->htt.txbuf->htc_hdr.len = __cpu_to_le16(
			sizeof(skb_cb->htt.txbuf->cmd_hdr) +
			sizeof(skb_cb->htt.txbuf->cmd_tx) +
			prefetch_len);
	skb_cb->htt.txbuf->htc_hdr.flags = 0;

	if (skb_cb->htt.nohwcrypt)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	if (!skb_cb->is_protected)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
		if (ar->hw_params.continuous_frag_desc)
			ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
	}

	/* Prevent firmware from sending up tx inspection requests. There's
	 * nothing ath10k can do with frames requested for inspection so force
	 * it to simply rely on a regular tx completion with discard status.
	 */
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

	skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	skb_cb->htt.txbuf->cmd_tx.flags0 = flags0;
	skb_cb->htt.txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
	skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
	skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
	skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
	skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le16(HTT_INVALID_PEERID);
	skb_cb->htt.txbuf->cmd_tx.freq = __cpu_to_le16(skb_cb->htt.freq);

	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
		   flags0, flags1, msdu->len, msdu_id, frags_paddr,
		   (u32)skb_cb->paddr, vdev_id, tid, skb_cb->htt.freq);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
			msdu->data, msdu->len);
	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
	trace_ath10k_tx_payload(ar, msdu->data, msdu->len);

	sg_items[0].transfer_id = 0;
	sg_items[0].transfer_context = NULL;
	sg_items[0].vaddr = &skb_cb->htt.txbuf->htc_hdr;
	sg_items[0].paddr = skb_cb->htt.txbuf_paddr +
			    sizeof(skb_cb->htt.txbuf->frags);
	sg_items[0].len = sizeof(skb_cb->htt.txbuf->htc_hdr) +
			  sizeof(skb_cb->htt.txbuf->cmd_hdr) +
			  sizeof(skb_cb->htt.txbuf->cmd_tx);

	sg_items[1].transfer_id = 0;
	sg_items[1].transfer_context = NULL;
	sg_items[1].vaddr = msdu->data;
	sg_items[1].paddr = skb_cb->paddr;
	sg_items[1].len = prefetch_len;

	res = ath10k_hif_tx_sg(htt->ar,
			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
			       sg_items, ARRAY_SIZE(sg_items));
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
	ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
err:
	return res;
}