/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>

/* driver-local headers providing the HTT, HTC, HIF and debug helpers
 * used below
 */
#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"
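
/* Release one pending-tx slot. When the count drops back below the
 * maximum, the mac80211 queues are woken so new frames can flow again.
 * Caller must hold htt->tx_lock.
 */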
void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
        htt->num_pending_tx--;
        if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
                ieee80211_wake_queues(htt->ar->hw);
}
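
/* Locked wrapper around __ath10k_htt_tx_dec_pending(). */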
static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
        spin_lock_bh(&htt->tx_lock);
        __ath10k_htt_tx_dec_pending(htt);
        spin_unlock_bh(&htt->tx_lock);
}
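
/* Claim one pending-tx slot. Fails with -EBUSY when the queue is
 * already full; stops the mac80211 queues when the last free slot is
 * taken.
 */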
static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
{
        int ret = 0;

        spin_lock_bh(&htt->tx_lock);

        if (htt->num_pending_tx >= htt->max_num_pending_tx) {
                ret = -EBUSY;
                goto exit;
        }

        htt->num_pending_tx++;
        if (htt->num_pending_tx == htt->max_num_pending_tx)
                ieee80211_stop_queues(htt->ar->hw);

exit:
        spin_unlock_bh(&htt->tx_lock);
        return ret;
}
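
/* Allocate a free MSDU id from the used_msdu_ids bitmap. Returns the
 * id, or -ENOBUFS when every id is in use. Caller must hold
 * htt->tx_lock.
 */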
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt)
{
        int msdu_id;

        lockdep_assert_held(&htt->tx_lock);

        msdu_id = find_first_zero_bit(htt->used_msdu_ids,
                                      htt->max_num_pending_tx);
        if (msdu_id == htt->max_num_pending_tx)
                return -ENOBUFS;

        ath10k_dbg(ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", msdu_id);
        __set_bit(msdu_id, htt->used_msdu_ids);
        return msdu_id;
}
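
/* Return an MSDU id to the pool, warning if it was never allocated.
 * Caller must hold htt->tx_lock.
 */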
void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
        lockdep_assert_held(&htt->tx_lock);

        if (!test_bit(msdu_id, htt->used_msdu_ids))
                ath10k_warn("trying to free unallocated msdu_id %d\n",
                            msdu_id);

        ath10k_dbg(ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);
        __clear_bit(msdu_id, htt->used_msdu_ids);
}
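
/* Set up TX bookkeeping: the pending_tx descriptor array and the
 * used_msdu_ids bitmap, both sized by the free queue count of the HIF
 * uplink pipe backing this HTT endpoint.
 */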
int ath10k_htt_tx_attach(struct ath10k_htt *htt)
{
        u8 pipe;

        spin_lock_init(&htt->tx_lock);
        init_waitqueue_head(&htt->empty_tx_wq);

        /* At the beginning free queue number should hint us the maximum
         * queue length */
        pipe = htt->ar->htc->endpoint[htt->eid].ul_pipe_id;
        htt->max_num_pending_tx = ath10k_hif_get_free_queue_number(htt->ar,
                                                                   pipe);

        ath10k_dbg(ATH10K_DBG_HTT, "htt tx max num pending tx %d\n",
                   htt->max_num_pending_tx);

        htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) *
                                  htt->max_num_pending_tx, GFP_KERNEL);
        if (!htt->pending_tx)
                return -ENOMEM;

        htt->used_msdu_ids = kzalloc(sizeof(unsigned long) *
                                     BITS_TO_LONGS(htt->max_num_pending_tx),
                                     GFP_KERNEL);
        if (!htt->used_msdu_ids) {
                kfree(htt->pending_tx);
                return -ENOMEM;
        }

        return 0;
}
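
/* Force-complete every descriptor still pending. Each one is marked as
 * discarded and pushed through the regular unref path so its DMA
 * mappings and msdu id get released.
 */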
static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
{
        struct sk_buff *txdesc;
        int msdu_id;

        /* No locks needed. Called after communication with the device has
         * been stopped. */

        for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) {
                if (!test_bit(msdu_id, htt->used_msdu_ids))
                        continue;

                txdesc = htt->pending_tx[msdu_id];
                if (!txdesc)
                        continue;

                ath10k_dbg(ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
                           msdu_id);

                if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0)
                        ATH10K_SKB_CB(txdesc)->htt.refcount = 1;

                ATH10K_SKB_CB(txdesc)->htt.discard = true;
                ath10k_txrx_tx_unref(htt, txdesc);
        }
}
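
/* Undo ath10k_htt_tx_attach() after flushing anything still in flight. */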
void ath10k_htt_tx_detach(struct ath10k_htt *htt)
{
        ath10k_htt_tx_cleanup_pending(htt);
        kfree(htt->pending_tx);
        kfree(htt->used_msdu_ids);
}
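
/* HTC-level TX completion. Configuration skbs are simply freed; for
 * everything else the refcount of an aborted frame is capped at 1 so
 * the single unref below frees it even if the HTT completion already
 * ran.
 */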
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
        struct ath10k_htt *htt = ar->htt;

        if (skb_cb->htt.is_conf) {
                dev_kfree_skb_any(skb);
                return;
        }

        if (skb_cb->is_aborted) {
                skb_cb->htt.discard = true;

                /* if the skbuff is aborted we need to make sure we'll free up
                 * the tx resources, we can't simply run tx_unref() 2 times
                 * because if htt tx completion came in earlier we'd access
                 * unallocated memory */
                if (skb_cb->htt.refcount > 1)
                        skb_cb->htt.refcount = 1;
        }

        ath10k_txrx_tx_unref(htt, skb);
}
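
/* Send an HTT VERSION_REQ command to the target firmware. */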
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        int len = 0;
        int ret;

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->ver_req);

        skb = ath10k_htc_alloc_skb(len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

        ATH10K_SKB_CB(skb)->htt.is_conf = true;

        ret = ath10k_htc_send(htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}
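
/* Describe the RX ring to the target firmware for low-latency devices:
 * physical addresses, ring and buffer sizes, which rx descriptor
 * sections the firmware should fill in, and each section's offset in
 * 4-byte words.
 */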
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
{
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        struct htt_rx_ring_setup_ring *ring;
        const int num_rx_ring = 1;
        u16 flags;
        u32 fw_idx;
        int len;
        int ret;

        /*
         * the HW expects the buffer to be an integral number of 4-byte
         * "words"
         */
        BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
        BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

        len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
            + (sizeof(*ring) * num_rx_ring);
        skb = ath10k_htc_alloc_skb(len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);

        cmd = (struct htt_cmd *)skb->data;
        ring = &cmd->rx_setup.rings[0];

        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
        cmd->rx_setup.hdr.num_rings = 1;

        /* FIXME: do we need all of this? */
        flags = 0;
        flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
        flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
        flags |= HTT_RX_RING_FLAGS_PPDU_START;
        flags |= HTT_RX_RING_FLAGS_PPDU_END;
        flags |= HTT_RX_RING_FLAGS_MPDU_START;
        flags |= HTT_RX_RING_FLAGS_MPDU_END;
        flags |= HTT_RX_RING_FLAGS_MSDU_START;
        flags |= HTT_RX_RING_FLAGS_MSDU_END;
        flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
        flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
        flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
        flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
        flags |= HTT_RX_RING_FLAGS_CTRL_RX;
        flags |= HTT_RX_RING_FLAGS_MGMT_RX;
        flags |= HTT_RX_RING_FLAGS_NULL_RX;
        flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

        fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

        ring->fw_idx_shadow_reg_paddr =
                __cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
        ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
        ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
        ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
        ring->flags = __cpu_to_le16(flags);
        ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)

        ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
        ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
        ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
        ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
        ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
        ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
        ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
        ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
        ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
        ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));

#undef desc_offset

        ATH10K_SKB_CB(skb)->htt.is_conf = true;

        ret = ath10k_htc_send(htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}
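
/* Transmit a management frame. The frame itself is DMA-mapped and a
 * separate HTT MGMT_TX command skb (txdesc) carries its physical
 * address, length, descriptor id and vdev id, plus a copy of the frame
 * header for the firmware.
 */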
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
        struct device *dev = htt->ar->dev;
        struct ath10k_skb_cb *skb_cb;
        struct sk_buff *txdesc = NULL;
        struct htt_cmd *cmd;
        u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id;
        int len = 0;
        int msdu_id = -1;
        int res;

        res = ath10k_htt_tx_inc_pending(htt);
        if (res)
                return res;

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->mgmt_tx);

        txdesc = ath10k_htc_alloc_skb(len);
        if (!txdesc) {
                res = -ENOMEM;
                goto err;
        }

        spin_lock_bh(&htt->tx_lock);
        msdu_id = ath10k_htt_tx_alloc_msdu_id(htt);
        if (msdu_id < 0) {
                spin_unlock_bh(&htt->tx_lock);
                res = msdu_id;
                goto err;
        }
        htt->pending_tx[msdu_id] = txdesc;
        spin_unlock_bh(&htt->tx_lock);

        res = ath10k_skb_map(dev, msdu);
        if (res)
                goto err;

        skb_put(txdesc, len);
        cmd = (struct htt_cmd *)txdesc->data;
        cmd->hdr.msg_type       = HTT_H2T_MSG_TYPE_MGMT_TX;
        cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
        cmd->mgmt_tx.len        = __cpu_to_le32(msdu->len);
        cmd->mgmt_tx.desc_id    = __cpu_to_le32(msdu_id);
        cmd->mgmt_tx.vdev_id    = __cpu_to_le32(vdev_id);
        memcpy(cmd->mgmt_tx.hdr, msdu->data,
               min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

        /* refcount is decremented by HTC and HTT completions until it reaches
         * zero and is freed */
        skb_cb = ATH10K_SKB_CB(txdesc);
        skb_cb->htt.msdu_id = msdu_id;
        skb_cb->htt.refcount = 2;
        skb_cb->htt.msdu = msdu;

        res = ath10k_htc_send(htt->ar->htc, htt->eid, txdesc);
        if (res)
                goto err;

        return 0;

err:
        ath10k_skb_unmap(dev, msdu);

        if (txdesc)
                dev_kfree_skb_any(txdesc);
        if (msdu_id >= 0) {
                spin_lock_bh(&htt->tx_lock);
                htt->pending_tx[msdu_id] = NULL;
                ath10k_htt_tx_free_msdu_id(htt, msdu_id);
                spin_unlock_bh(&htt->tx_lock);
        }
        ath10k_htt_tx_dec_pending(htt);
        return res;
}
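
/* Transmit a data frame. The frame and a two-entry fragment list (one
 * data fragment plus a zero terminator) are DMA-mapped separately; the
 * HTT TX_FRM command points at the fragment list and prefetches the
 * first bytes of the frame.
 */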
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
        struct device *dev = htt->ar->dev;
        struct htt_cmd *cmd;
        struct htt_data_tx_desc_frag *tx_frags;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
        struct ath10k_skb_cb *skb_cb;
        struct sk_buff *txdesc = NULL;
        struct sk_buff *txfrag = NULL;
        u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id;
        u8 tid;
        int prefetch_len, desc_len, frag_len;
        dma_addr_t frags_paddr;
        int msdu_id = -1;
        int res;
        u8 flags0 = 0;
        u16 flags1 = 0;

        res = ath10k_htt_tx_inc_pending(htt);
        if (res)
                return res;

        prefetch_len = min(htt->prefetch_len, msdu->len);
        prefetch_len = roundup(prefetch_len, 4);

        desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len;
        frag_len = sizeof(*tx_frags) * 2;

        txdesc = ath10k_htc_alloc_skb(desc_len);
        if (!txdesc) {
                res = -ENOMEM;
                goto err;
        }

        txfrag = dev_alloc_skb(frag_len);
        if (!txfrag) {
                res = -ENOMEM;
                goto err;
        }

        if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) {
                ath10k_warn("htt alignment check failed. dropping packet.\n");
                res = -EIO;
                goto err;
        }

        spin_lock_bh(&htt->tx_lock);
        msdu_id = ath10k_htt_tx_alloc_msdu_id(htt);
        if (msdu_id < 0) {
                spin_unlock_bh(&htt->tx_lock);
                res = msdu_id;
                goto err;
        }
        htt->pending_tx[msdu_id] = txdesc;
        spin_unlock_bh(&htt->tx_lock);

        res = ath10k_skb_map(dev, msdu);
        if (res)
                goto err;

        /* tx fragment list must be terminated with zero-entry */
        skb_put(txfrag, frag_len);
        tx_frags = (struct htt_data_tx_desc_frag *)txfrag->data;
        tx_frags[0].paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
        tx_frags[0].len   = __cpu_to_le32(msdu->len);
        tx_frags[1].paddr = __cpu_to_le32(0);
        tx_frags[1].len   = __cpu_to_le32(0);

        res = ath10k_skb_map(dev, txfrag);
        if (res)
                goto err;

        ath10k_dbg(ATH10K_DBG_HTT, "txfrag 0x%llx msdu 0x%llx\n",
                   (unsigned long long) ATH10K_SKB_CB(txfrag)->paddr,
                   (unsigned long long) ATH10K_SKB_CB(msdu)->paddr);
        ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "txfrag: ",
                        txfrag->data, frag_len);
        ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "msdu: ",
                        msdu->data, msdu->len);

        skb_put(txdesc, desc_len);
        cmd = (struct htt_cmd *)txdesc->data;
        memset(cmd, 0, desc_len);

        tid = ATH10K_SKB_CB(msdu)->htt.tid;

        ath10k_dbg(ATH10K_DBG_HTT, "htt data tx using tid %hhu\n", tid);

        if (!ieee80211_has_protected(hdr->frame_control))
                flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
        flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
        flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
                     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);

        flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
        flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);

        frags_paddr = ATH10K_SKB_CB(txfrag)->paddr;

        cmd->hdr.msg_type        = HTT_H2T_MSG_TYPE_TX_FRM;
        cmd->data_tx.flags0      = flags0;
        cmd->data_tx.flags1      = __cpu_to_le16(flags1);
        cmd->data_tx.len         = __cpu_to_le16(msdu->len);
        cmd->data_tx.id          = __cpu_to_le16(msdu_id);
        cmd->data_tx.frags_paddr = __cpu_to_le32(frags_paddr);
        cmd->data_tx.peerid      = __cpu_to_le32(HTT_INVALID_PEERID);

        memcpy(cmd->data_tx.prefetch, msdu->data, prefetch_len);

        /* refcount is decremented by HTC and HTT completions until it reaches
         * zero and is freed */
        skb_cb = ATH10K_SKB_CB(txdesc);
        skb_cb->htt.msdu_id = msdu_id;
        skb_cb->htt.refcount = 2;
        skb_cb->htt.txfrag = txfrag;
        skb_cb->htt.msdu = msdu;

        res = ath10k_htc_send(htt->ar->htc, htt->eid, txdesc);
        if (res)
                goto err;

        return 0;

err:
        if (txfrag)
                ath10k_skb_unmap(dev, txfrag);
        if (txdesc)
                dev_kfree_skb_any(txdesc);
        if (txfrag)
                dev_kfree_skb_any(txfrag);
        if (msdu_id >= 0) {
                spin_lock_bh(&htt->tx_lock);
                htt->pending_tx[msdu_id] = NULL;
                ath10k_htt_tx_free_msdu_id(htt, msdu_id);
                spin_unlock_bh(&htt->tx_lock);
        }
        ath10k_htt_tx_dec_pending(htt);
        ath10k_skb_unmap(dev, msdu);
        return res;
}