/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
24 static void ath10k_report_offchan_tx(struct ath10k
*ar
, struct sk_buff
*skb
)
26 if (!ATH10K_SKB_CB(skb
)->htt
.is_offchan
)
29 /* If the original wait_for_completion() timed out before
30 * {data,mgmt}_tx_completed() was called then we could complete
31 * offchan_tx_completed for a different skb. Prevent this by using
33 spin_lock_bh(&ar
->data_lock
);
34 if (ar
->offchan_tx_skb
!= skb
) {
35 ath10k_warn(ar
, "completed old offchannel frame\n");
39 complete(&ar
->offchan_tx_completed
);
40 ar
->offchan_tx_skb
= NULL
; /* just for sanity */
42 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "completed offchannel skb %p\n", skb
);
44 spin_unlock_bh(&ar
->data_lock
);
47 void ath10k_txrx_tx_unref(struct ath10k_htt
*htt
,
48 const struct htt_tx_done
*tx_done
)
50 struct ath10k
*ar
= htt
->ar
;
51 struct device
*dev
= ar
->dev
;
52 struct ieee80211_tx_info
*info
;
53 struct ath10k_skb_cb
*skb_cb
;
56 lockdep_assert_held(&htt
->tx_lock
);
58 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "htt tx completion msdu_id %u discard %d no_ack %d\n",
59 tx_done
->msdu_id
, !!tx_done
->discard
, !!tx_done
->no_ack
);
61 if (tx_done
->msdu_id
>= htt
->max_num_pending_tx
) {
62 ath10k_warn(ar
, "warning: msdu_id %d too big, ignoring\n",
67 msdu
= idr_find(&htt
->pending_tx
, tx_done
->msdu_id
);
69 ath10k_warn(ar
, "received tx completion for invalid msdu_id: %d\n",
74 skb_cb
= ATH10K_SKB_CB(msdu
);
76 dma_unmap_single(dev
, skb_cb
->paddr
, msdu
->len
, DMA_TO_DEVICE
);
78 if (skb_cb
->htt
.txbuf
)
79 dma_pool_free(htt
->tx_pool
,
81 skb_cb
->htt
.txbuf_paddr
);
83 ath10k_report_offchan_tx(htt
->ar
, msdu
);
85 info
= IEEE80211_SKB_CB(msdu
);
86 memset(&info
->status
, 0, sizeof(info
->status
));
87 trace_ath10k_txrx_tx_unref(ar
, tx_done
->msdu_id
);
89 if (tx_done
->discard
) {
90 ieee80211_free_txskb(htt
->ar
->hw
, msdu
);
94 if (!(info
->flags
& IEEE80211_TX_CTL_NO_ACK
))
95 info
->flags
|= IEEE80211_TX_STAT_ACK
;
98 info
->flags
&= ~IEEE80211_TX_STAT_ACK
;
100 ieee80211_tx_status(htt
->ar
->hw
, msdu
);
101 /* we do not own the msdu anymore */
104 ath10k_htt_tx_free_msdu_id(htt
, tx_done
->msdu_id
);
105 __ath10k_htt_tx_dec_pending(htt
);
106 if (htt
->num_pending_tx
== 0)
107 wake_up(&htt
->empty_tx_wq
);
110 struct ath10k_peer
*ath10k_peer_find(struct ath10k
*ar
, int vdev_id
,
113 struct ath10k_peer
*peer
;
115 lockdep_assert_held(&ar
->data_lock
);
117 list_for_each_entry(peer
, &ar
->peers
, list
) {
118 if (peer
->vdev_id
!= vdev_id
)
120 if (memcmp(peer
->addr
, addr
, ETH_ALEN
))
129 struct ath10k_peer
*ath10k_peer_find_by_id(struct ath10k
*ar
, int peer_id
)
131 struct ath10k_peer
*peer
;
133 lockdep_assert_held(&ar
->data_lock
);
135 list_for_each_entry(peer
, &ar
->peers
, list
)
136 if (test_bit(peer_id
, peer
->peer_ids
))
142 static int ath10k_wait_for_peer_common(struct ath10k
*ar
, int vdev_id
,
143 const u8
*addr
, bool expect_mapped
)
147 ret
= wait_event_timeout(ar
->peer_mapping_wq
, ({
150 spin_lock_bh(&ar
->data_lock
);
151 mapped
= !!ath10k_peer_find(ar
, vdev_id
, addr
);
152 spin_unlock_bh(&ar
->data_lock
);
154 (mapped
== expect_mapped
||
155 test_bit(ATH10K_FLAG_CRASH_FLUSH
, &ar
->dev_flags
));
164 int ath10k_wait_for_peer_created(struct ath10k
*ar
, int vdev_id
, const u8
*addr
)
166 return ath10k_wait_for_peer_common(ar
, vdev_id
, addr
, true);
169 int ath10k_wait_for_peer_deleted(struct ath10k
*ar
, int vdev_id
, const u8
*addr
)
171 return ath10k_wait_for_peer_common(ar
, vdev_id
, addr
, false);
174 void ath10k_peer_map_event(struct ath10k_htt
*htt
,
175 struct htt_peer_map_event
*ev
)
177 struct ath10k
*ar
= htt
->ar
;
178 struct ath10k_peer
*peer
;
180 spin_lock_bh(&ar
->data_lock
);
181 peer
= ath10k_peer_find(ar
, ev
->vdev_id
, ev
->addr
);
183 peer
= kzalloc(sizeof(*peer
), GFP_ATOMIC
);
187 peer
->vdev_id
= ev
->vdev_id
;
188 ether_addr_copy(peer
->addr
, ev
->addr
);
189 list_add(&peer
->list
, &ar
->peers
);
190 wake_up(&ar
->peer_mapping_wq
);
193 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "htt peer map vdev %d peer %pM id %d\n",
194 ev
->vdev_id
, ev
->addr
, ev
->peer_id
);
196 set_bit(ev
->peer_id
, peer
->peer_ids
);
198 spin_unlock_bh(&ar
->data_lock
);
201 void ath10k_peer_unmap_event(struct ath10k_htt
*htt
,
202 struct htt_peer_unmap_event
*ev
)
204 struct ath10k
*ar
= htt
->ar
;
205 struct ath10k_peer
*peer
;
207 spin_lock_bh(&ar
->data_lock
);
208 peer
= ath10k_peer_find_by_id(ar
, ev
->peer_id
);
210 ath10k_warn(ar
, "peer-unmap-event: unknown peer id %d\n",
215 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "htt peer unmap vdev %d peer %pM id %d\n",
216 peer
->vdev_id
, peer
->addr
, ev
->peer_id
);
218 clear_bit(ev
->peer_id
, peer
->peer_ids
);
220 if (bitmap_empty(peer
->peer_ids
, ATH10K_MAX_NUM_PEER_IDS
)) {
221 list_del(&peer
->list
);
223 wake_up(&ar
->peer_mapping_wq
);
227 spin_unlock_bh(&ar
->data_lock
);