// SPDX-License-Identifier: GPL-2.0-only
/*
 * O(1) TX queue with built-in allocator.
 *
 * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
 * Copyright (c) 2010, ST-Ericsson
 */
#include <linux/sched.h>
#include <net/mac80211.h>

#include "queue.h"
#include "wfx.h"
#include "sta.h"
#include "data_tx.h"
#include "traces.h"
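
// wdev->tx_lock is a counter, not a mutex: several call sites can pause the
// TX path at once, and the bottom half is only kicked again when the last
// holder releases the lock.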
void wfx_tx_lock(struct wfx_dev *wdev)
{
	atomic_inc(&wdev->tx_lock);
}
void wfx_tx_unlock(struct wfx_dev *wdev)
{
	int tx_lock = atomic_dec_return(&wdev->tx_lock);

	WARN(tx_lock < 0, "inconsistent tx_lock value");
	if (!tx_lock)
		wfx_bh_request_tx(wdev);
}
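
// Wait (up to 3 seconds) for the firmware to return all the TX buffers it
// currently holds. On timeout, the device is considered unrecoverable and
// is marked frozen so that later calls do not block again.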
void wfx_tx_flush(struct wfx_dev *wdev)
{
	int ret;

	// Do not wait for any reply if chip is frozen
	if (wdev->chip_frozen)
		return;

	wfx_tx_lock(wdev);
	mutex_lock(&wdev->hif_cmd.lock);
	ret = wait_event_timeout(wdev->hif.tx_buffers_empty,
				 !wdev->hif.tx_buffers_used,
				 msecs_to_jiffies(3000));
	if (!ret) {
		dev_warn(wdev->dev, "cannot flush tx buffers (%d still busy)\n",
			 wdev->hif.tx_buffers_used);
		wfx_pending_dump_old_frames(wdev, 3000);
		// FIXME: drop pending frames here
		wdev->chip_frozen = true;
	}
	mutex_unlock(&wdev->hif_cmd.lock);
	wfx_tx_unlock(wdev);
}
void wfx_tx_lock_flush(struct wfx_dev *wdev)
{
	wfx_tx_lock(wdev);
	wfx_tx_flush(wdev);
}
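
// Each access category owns two queues: ->normal for regular traffic and
// ->cab ("content after beacon") for multicast frames that must go out right
// after the DTIM beacon. A pause/resume sequence from the rest of the driver
// is expected to look like:
//	wfx_tx_lock_flush(wdev);	// pause TX and drain the firmware
//	/* ... reconfigure the device ... */
//	wfx_tx_unlock(wdev);		// resume TX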
void wfx_tx_queues_init(struct wfx_dev *wdev)
{
	int i;

	skb_queue_head_init(&wdev->tx_pending);
	init_waitqueue_head(&wdev->tx_dequeue);
	for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
		skb_queue_head_init(&wdev->tx_queue[i].normal);
		skb_queue_head_init(&wdev->tx_queue[i].cab);
	}
}
void wfx_tx_queues_check_empty(struct wfx_dev *wdev)
{
	int i;

	WARN_ON(!skb_queue_empty_lockless(&wdev->tx_pending));
	for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
		WARN_ON(atomic_read(&wdev->tx_queue[i].pending_frames));
		WARN_ON(!skb_queue_empty_lockless(&wdev->tx_queue[i].normal));
		WARN_ON(!skb_queue_empty_lockless(&wdev->tx_queue[i].cab));
	}
}
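
// In the helpers below, a negative vif_id acts as a wildcard and matches
// frames queued for any interface.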
static bool __wfx_tx_queue_empty(struct wfx_dev *wdev,
				 struct sk_buff_head *skb_queue, int vif_id)
{
	struct hif_msg *hif_msg;
	struct sk_buff *skb;

	spin_lock_bh(&skb_queue->lock);
	skb_queue_walk(skb_queue, skb) {
		hif_msg = (struct hif_msg *)skb->data;
		if (vif_id < 0 || hif_msg->interface == vif_id) {
			spin_unlock_bh(&skb_queue->lock);
			return false;
		}
	}
	spin_unlock_bh(&skb_queue->lock);
	return true;
}
bool wfx_tx_queue_empty(struct wfx_dev *wdev,
			struct wfx_queue *queue, int vif_id)
{
	return __wfx_tx_queue_empty(wdev, &queue->normal, vif_id) &&
	       __wfx_tx_queue_empty(wdev, &queue->cab, vif_id);
}
static void __wfx_tx_queue_drop(struct wfx_dev *wdev,
				struct sk_buff_head *skb_queue, int vif_id,
				struct sk_buff_head *dropped)
{
	struct sk_buff *skb, *tmp;
	struct hif_msg *hif_msg;

	spin_lock_bh(&skb_queue->lock);
	skb_queue_walk_safe(skb_queue, skb, tmp) {
		hif_msg = (struct hif_msg *)skb->data;
		if (vif_id < 0 || hif_msg->interface == vif_id) {
			__skb_unlink(skb, skb_queue);
			skb_queue_head(dropped, skb);
		}
	}
	spin_unlock_bh(&skb_queue->lock);
}
void wfx_tx_queue_drop(struct wfx_dev *wdev, struct wfx_queue *queue,
		       int vif_id, struct sk_buff_head *dropped)
{
	__wfx_tx_queue_drop(wdev, &queue->cab, vif_id, dropped);
	__wfx_tx_queue_drop(wdev, &queue->normal, vif_id, dropped);
	wake_up(&wdev->tx_dequeue);
}
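
// Frames are routed to the queue matching their mac80211 queue mapping;
// frames flagged for transmission after the DTIM beacon go to the CAB queue
// of that access category instead.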
void wfx_tx_queues_put(struct wfx_dev *wdev, struct sk_buff *skb)
{
	struct wfx_queue *queue = &wdev->tx_queue[skb_get_queue_mapping(skb)];
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);

	if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
		skb_queue_tail(&queue->cab, skb);
	else
		skb_queue_tail(&queue->normal, skb);
}
void wfx_pending_drop(struct wfx_dev *wdev, struct sk_buff_head *dropped)
{
	struct wfx_queue *queue;
	struct sk_buff *skb;

	WARN(!wdev->chip_frozen, "%s should only be used to recover a frozen device",
	     __func__);
	while ((skb = skb_dequeue(&wdev->tx_pending)) != NULL) {
		queue = &wdev->tx_queue[skb_get_queue_mapping(skb)];
		WARN_ON(skb_get_queue_mapping(skb) > 3);
		WARN_ON(!atomic_read(&queue->pending_frames));
		atomic_dec(&queue->pending_frames);
		skb_queue_head(dropped, skb);
	}
}
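
// tx_pending holds the frames currently owned by the firmware. When the
// firmware reports a TX confirmation, the matching frame is looked up by the
// packet_id it carried in its HIF request.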
struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id)
{
	struct wfx_queue *queue;
	struct hif_req_tx *req;
	struct sk_buff *skb;

	spin_lock_bh(&wdev->tx_pending.lock);
	skb_queue_walk(&wdev->tx_pending, skb) {
		req = wfx_skb_txreq(skb);
		if (req->packet_id == packet_id) {
			spin_unlock_bh(&wdev->tx_pending.lock);
			queue = &wdev->tx_queue[skb_get_queue_mapping(skb)];
			WARN_ON(skb_get_queue_mapping(skb) > 3);
			WARN_ON(!atomic_read(&queue->pending_frames));
			atomic_dec(&queue->pending_frames);
			skb_unlink(skb, &wdev->tx_pending);
			return skb;
		}
	}
	spin_unlock_bh(&wdev->tx_pending.lock);
	WARN(1, "cannot find packet in pending queue");
	return NULL;
}
void wfx_pending_dump_old_frames(struct wfx_dev *wdev, unsigned int limit_ms)
{
	ktime_t now = ktime_get();
	struct wfx_tx_priv *tx_priv;
	struct hif_req_tx *req;
	struct sk_buff *skb;
	bool first = true;

	spin_lock_bh(&wdev->tx_pending.lock);
	skb_queue_walk(&wdev->tx_pending, skb) {
		tx_priv = wfx_skb_tx_priv(skb);
		req = wfx_skb_txreq(skb);
		if (ktime_after(now, ktime_add_ms(tx_priv->xmit_timestamp,
						  limit_ms))) {
			if (first) {
				dev_info(wdev->dev, "frames stuck in firmware since %dms or more:\n",
					 limit_ms);
				first = false;
			}
			dev_info(wdev->dev, "   id %08x sent %lldms ago\n",
				 req->packet_id,
				 ktime_ms_delta(now, tx_priv->xmit_timestamp));
		}
	}
	spin_unlock_bh(&wdev->tx_pending.lock);
}
unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev,
					  struct sk_buff *skb)
{
	ktime_t now = ktime_get();
	struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);

	return ktime_us_delta(now, tx_priv->xmit_timestamp);
}
bool wfx_tx_queues_has_cab(struct wfx_vif *wvif)
{
	struct wfx_dev *wdev = wvif->wdev;
	int i;

	if (wvif->vif->type != NL80211_IFTYPE_AP)
		return false;
	for (i = 0; i < IEEE80211_NUM_ACS; ++i)
		// Note: since only AP can have mcast frames in queue and only
		// one vif can be AP, all queued frames have the same interface id
		if (!skb_queue_empty_lockless(&wdev->tx_queue[i].cab))
			return true;
	return false;
}
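
// Pick the next frame to hand to the firmware. The access categories are
// first insertion-sorted by their pending_frames counters so the dequeue
// loops below probe the queues in a load-dependent order; CAB traffic is
// served before normal traffic while a vif is in its after-DTIM
// transmission window.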
static struct sk_buff *wfx_tx_queues_get_skb(struct wfx_dev *wdev)
{
	struct wfx_queue *sorted_queues[IEEE80211_NUM_ACS];
	struct wfx_vif *wvif;
	struct hif_msg *hif;
	struct sk_buff *skb;
	int i, j;

	// sort the queues by the number of pending frames
	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		sorted_queues[i] = &wdev->tx_queue[i];
		for (j = i; j > 0; j--)
			if (atomic_read(&sorted_queues[j]->pending_frames) >
			    atomic_read(&sorted_queues[j - 1]->pending_frames))
				swap(sorted_queues[j - 1], sorted_queues[j]);
	}
	wvif = NULL;
	while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
		if (!wvif->after_dtim_tx_allowed)
			continue;
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			skb = skb_dequeue(&sorted_queues[i]->cab);
			if (!skb)
				continue;
			// Note: since only AP can have mcast frames in queue
			// and only one vif can be AP, all queued frames have
			// the same interface id
			hif = (struct hif_msg *)skb->data;
			WARN_ON(hif->interface != wvif->id);
			WARN_ON(sorted_queues[i] !=
				&wdev->tx_queue[skb_get_queue_mapping(skb)]);
			atomic_inc(&sorted_queues[i]->pending_frames);
			return skb;
		}
		// No more multicast to send
		wvif->after_dtim_tx_allowed = false;
		schedule_work(&wvif->update_tim_work);
	}
	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		skb = skb_dequeue(&sorted_queues[i]->normal);
		if (skb) {
			WARN_ON(sorted_queues[i] !=
				&wdev->tx_queue[skb_get_queue_mapping(skb)]);
			atomic_inc(&sorted_queues[i]->pending_frames);
			return skb;
		}
	}
	return NULL;
}
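
// Entry point used by the bottom half: returns the HIF message of the next
// frame to send, or NULL if TX is locked or every queue is empty. The frame
// is moved to tx_pending and timestamped so that stuck frames can later be
// spotted by wfx_pending_dump_old_frames().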
struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev)
{
	struct wfx_tx_priv *tx_priv;
	struct sk_buff *skb;

	if (atomic_read(&wdev->tx_lock))
		return NULL;

	skb = wfx_tx_queues_get_skb(wdev);
	if (!skb)
		return NULL;
	skb_queue_tail(&wdev->tx_pending, skb);
	wake_up(&wdev->tx_dequeue);
	tx_priv = wfx_skb_tx_priv(skb);
	tx_priv->xmit_timestamp = ktime_get();
	return (struct hif_msg *)skb->data;
}