/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>
#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"
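
/* Unlocked variant of the pending-tx decrement; callers are expected to
 * hold htt->tx_lock. */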
void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
	htt->num_pending_tx--;
	if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
		ieee80211_wake_queues(htt->ar->hw);
}

static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
	spin_lock_bh(&htt->tx_lock);
	__ath10k_htt_tx_dec_pending(htt);
	spin_unlock_bh(&htt->tx_lock);
}
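
/* Reserve a pending-tx slot. When the pool fills up, the mac80211 queues
 * are stopped until tx completions drain them again. */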
static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
{
	int ret = 0;

	spin_lock_bh(&htt->tx_lock);

	if (htt->num_pending_tx >= htt->max_num_pending_tx) {
		ret = -EBUSY;
		goto exit;
	}

	htt->num_pending_tx++;
	if (htt->num_pending_tx == htt->max_num_pending_tx)
		ieee80211_stop_queues(htt->ar->hw);

exit:
	spin_unlock_bh(&htt->tx_lock);
	return ret;
}
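
/* Find and claim a free MSDU id in the used_msdu_ids bitmap. The id indexes
 * the pending_tx table and is echoed back by the firmware on completion. */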
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt)
{
	int msdu_id;

	lockdep_assert_held(&htt->tx_lock);

	msdu_id = find_first_zero_bit(htt->used_msdu_ids,
				      htt->max_num_pending_tx);
	if (msdu_id == htt->max_num_pending_tx)
		return -ENOBUFS;

	ath10k_dbg(ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", msdu_id);
	__set_bit(msdu_id, htt->used_msdu_ids);
	return msdu_id;
}

void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
	lockdep_assert_held(&htt->tx_lock);

	if (!test_bit(msdu_id, htt->used_msdu_ids))
		ath10k_warn("trying to free unallocated msdu_id %d\n",
			    msdu_id);

	ath10k_dbg(ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);
	__clear_bit(msdu_id, htt->used_msdu_ids);
}
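
/* Set up the tx bookkeeping: the pending_tx skb table and the msdu_id
 * bitmap. The pool size depends on which firmware flavour is running. */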
int ath10k_htt_tx_attach(struct ath10k_htt *htt)
{
	spin_lock_init(&htt->tx_lock);
	init_waitqueue_head(&htt->empty_tx_wq);

	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, htt->ar->fw_features))
		htt->max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
	else
		htt->max_num_pending_tx = TARGET_NUM_MSDU_DESC;

	ath10k_dbg(ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
		   htt->max_num_pending_tx);

	htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) *
				  htt->max_num_pending_tx, GFP_KERNEL);
	if (!htt->pending_tx)
		return -ENOMEM;

	htt->used_msdu_ids = kzalloc(sizeof(unsigned long) *
				     BITS_TO_LONGS(htt->max_num_pending_tx),
				     GFP_KERNEL);
	if (!htt->used_msdu_ids) {
		kfree(htt->pending_tx);
		return -ENOMEM;
	}

	return 0;
}
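
/* Complete every still-claimed msdu_id as if the firmware had reported it,
 * so the associated skbs get unmapped and freed during teardown. */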
static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
{
	struct htt_tx_done tx_done = {0};
	int msdu_id;

	/* No locks needed. Called after communication with the device has
	 * been stopped. */

	for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) {
		if (!test_bit(msdu_id, htt->used_msdu_ids))
			continue;

		ath10k_dbg(ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
			   msdu_id);

		tx_done.discard = 1;
		tx_done.msdu_id = msdu_id;

		ath10k_txrx_tx_unref(htt, &tx_done);
	}
}

void ath10k_htt_tx_detach(struct ath10k_htt *htt)
{
	ath10k_htt_tx_cleanup_pending(htt);
	kfree(htt->pending_tx);
	kfree(htt->used_msdu_ids);
}
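
/* HTC is done with an HTT command buffer; nothing to track, just free the
 * skb. */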
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}
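
/* Ask the target which HTT interface version it implements; the reply is
 * handled in the HTT rx path. */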
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0;
	int ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->ver_req);

	skb = ath10k_htc_alloc_skb(len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
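
/* Request firmware statistics. The 8-bit mask selects which stat groups to
 * upload and which to reset; the 64-bit cookie is echoed back so the reply
 * can be matched to the request. */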
int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
{
	struct htt_stats_req *req;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0;
	int ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->stats_req);

	skb = ath10k_htc_alloc_skb(len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;

	req = &cmd->stats_req;

	memset(req, 0, sizeof(*req));

	/* currently we support only 8-bit masks, so there is no need to
	 * worry about endianness */
	req->upload_types[0] = mask;
	req->reset_types[0] = mask;
	req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
	req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
	req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn("failed to send htt type stats request: %d", ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
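
/* Describe the host rx ring to the firmware: physical addresses, buffer
 * size and which htt_rx_desc sections it should DMA into each buffer. */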
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/*
	 * the HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup.hdr.num_rings = 1;

	/* FIXME: do we need all of this? */
	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr =
		__cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)

	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));

#undef desc_offset

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
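
/* Management frame tx path used with pre-3.0 HTT firmware: the frame is
 * described by a dedicated HTT_H2T_MSG_TYPE_MGMT_TX command rather than
 * the regular TX_FRM descriptor. */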
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct device *dev = htt->ar->dev;
	struct sk_buff *txdesc = NULL;
	struct htt_cmd *cmd;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	u8 vdev_id = skb_cb->vdev_id;
	int len = 0;
	int msdu_id = -1;
	int res;

	res = ath10k_htt_tx_inc_pending(htt);
	if (res)
		goto err;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->mgmt_tx);

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt);
	if (res < 0) {
		spin_unlock_bh(&htt->tx_lock);
		goto err_tx_dec;
	}
	msdu_id = res;
	htt->pending_tx[msdu_id] = msdu;
	spin_unlock_bh(&htt->tx_lock);

	txdesc = ath10k_htc_alloc_skb(len);
	if (!txdesc) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}

	res = ath10k_skb_map(dev, msdu);
	if (res)
		goto err_free_txdesc;

	skb_put(txdesc, len);
	cmd = (struct htt_cmd *)txdesc->data;
	cmd->hdr.msg_type       = HTT_H2T_MSG_TYPE_MGMT_TX;
	cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
	cmd->mgmt_tx.len        = __cpu_to_le32(msdu->len);
	cmd->mgmt_tx.desc_id    = __cpu_to_le32(msdu_id);
	cmd->mgmt_tx.vdev_id    = __cpu_to_le32(vdev_id);
	memcpy(cmd->mgmt_tx.hdr, msdu->data,
	       min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

	skb_cb->htt.frag_len = 0;
	skb_cb->htt.pad_len = 0;

	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	ath10k_skb_unmap(dev, msdu);
err_free_txdesc:
	dev_kfree_skb_any(txdesc);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	htt->pending_tx[msdu_id] = NULL;
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
	ath10k_htt_tx_dec_pending(htt);
err:
	return res;
}
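
/* Regular data (and, on HTT >= 3.0, management) tx path: frames are handed
 * to the firmware via a TX_FRM descriptor with an optional fragment list. */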
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct device *dev = htt->ar->dev;
	struct htt_cmd *cmd;
	struct htt_data_tx_desc_frag *tx_frags;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct sk_buff *txdesc = NULL;
	bool use_frags;
	u8 vdev_id = ATH10K_SKB_CB(msdu)->vdev_id;
	u8 tid;
	int prefetch_len, desc_len;
	int msdu_id = -1;
	int res;
	u8 flags0;
	u16 flags1;

	res = ath10k_htt_tx_inc_pending(htt);
	if (res)
		goto err;

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt);
	if (res < 0) {
		spin_unlock_bh(&htt->tx_lock);
		goto err_tx_dec;
	}
	msdu_id = res;
	htt->pending_tx[msdu_id] = msdu;
	spin_unlock_bh(&htt->tx_lock);

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len;

	txdesc = ath10k_htc_alloc_skb(desc_len);
	if (!txdesc) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}

	/* Since HTT 3.0 there is no separate mgmt tx command. However, mgmt
	 * frames sent via TX_FRM carry no tx fragment list; the host driver
	 * instead passes the frame pointer directly. */
	use_frags = htt->target_version_major < 3 ||
		    !ieee80211_is_mgmt(hdr->frame_control);

	if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) {
		ath10k_warn("htt alignment check failed. dropping packet.\n");
		res = -EIO;
		goto err_free_txdesc;
	}

	if (use_frags) {
		skb_cb->htt.frag_len = sizeof(*tx_frags) * 2;
		skb_cb->htt.pad_len = (unsigned long)msdu->data -
				      round_down((unsigned long)msdu->data, 4);

		skb_push(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
	} else {
		skb_cb->htt.frag_len = 0;
		skb_cb->htt.pad_len = 0;
	}

	res = ath10k_skb_map(dev, msdu);
	if (res)
		goto err_pull_txfrag;

	if (use_frags) {
		dma_sync_single_for_cpu(dev, skb_cb->paddr, msdu->len,
					DMA_TO_DEVICE);

		/* tx fragment list must be terminated with zero-entry */
		tx_frags = (struct htt_data_tx_desc_frag *)msdu->data;
		tx_frags[0].paddr = __cpu_to_le32(skb_cb->paddr +
						  skb_cb->htt.frag_len +
						  skb_cb->htt.pad_len);
		tx_frags[0].len   = __cpu_to_le32(msdu->len -
						  skb_cb->htt.frag_len -
						  skb_cb->htt.pad_len);
		tx_frags[1].paddr = __cpu_to_le32(0);
		tx_frags[1].len   = __cpu_to_le32(0);

		dma_sync_single_for_device(dev, skb_cb->paddr, msdu->len,
					   DMA_TO_DEVICE);
	}

	ath10k_dbg(ATH10K_DBG_HTT, "msdu 0x%llx\n",
		   (unsigned long long) ATH10K_SKB_CB(msdu)->paddr);
	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "msdu: ",
			msdu->data, msdu->len);

	skb_put(txdesc, desc_len);
	cmd = (struct htt_cmd *)txdesc->data;

	tid = ATH10K_SKB_CB(msdu)->htt.tid;

	ath10k_dbg(ATH10K_DBG_HTT, "htt data tx using tid %hhu\n", tid);

	flags0 = 0;
	if (!ieee80211_has_protected(hdr->frame_control))
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
	flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

	if (use_frags)
		flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
	else
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);

	flags1 = 0;
	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;

	cmd->hdr.msg_type        = HTT_H2T_MSG_TYPE_TX_FRM;
	cmd->data_tx.flags0      = flags0;
	cmd->data_tx.flags1      = __cpu_to_le16(flags1);
	cmd->data_tx.len         = __cpu_to_le16(msdu->len -
						 skb_cb->htt.frag_len -
						 skb_cb->htt.pad_len);
	cmd->data_tx.id          = __cpu_to_le16(msdu_id);
	cmd->data_tx.frags_paddr = __cpu_to_le32(skb_cb->paddr);
	cmd->data_tx.peerid      = __cpu_to_le32(HTT_INVALID_PEERID);

	memcpy(cmd->data_tx.prefetch, hdr, prefetch_len);

	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	ath10k_skb_unmap(dev, msdu);
err_pull_txfrag:
	skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
err_free_txdesc:
	dev_kfree_skb_any(txdesc);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	htt->pending_tx[msdu_id] = NULL;
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
	ath10k_htt_tx_dec_pending(htt);
err:
	return res;
}