// SPDX-License-Identifier: GPL-2.0-only
/*
 * O(1) TX queue with built-in allocator.
 *
 * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
 * Copyright (c) 2010, ST-Ericsson
 */
#include <linux/sched.h>
#include <net/mac80211.h>

/* driver-local definitions (wfx_dev, wfx_queue, HIF messages) */
#include "queue.h"
#include "wfx.h"
#include "sta.h"
#include "data_tx.h"
#include "traces.h"
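
/*
 * TX to the chip is paused with a simple reference counter: wfx_tx_lock()
 * increments wdev->tx_lock and wfx_tx_unlock() decrements it, kicking the
 * bottom half again once the counter drops back to zero.
 * wfx_tx_queues_get() refuses to dequeue anything while the counter is
 * non-zero.
 */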
void wfx_tx_lock(struct wfx_dev *wdev)
{
	atomic_inc(&wdev->tx_lock);
}

void wfx_tx_unlock(struct wfx_dev *wdev)
{
	int tx_lock = atomic_dec_return(&wdev->tx_lock);

	WARN(tx_lock < 0, "inconsistent tx_lock value");
	if (!tx_lock)
		wfx_bh_request_tx(wdev);
}
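
/*
 * Wait (up to 3 seconds) for the device to return all of its TX buffers.
 * If the firmware does not answer in time, the stuck frames are dumped and
 * the chip is declared frozen, which turns further flushes into no-ops.
 */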
void wfx_tx_flush(struct wfx_dev *wdev)
{
	int ret;

	// Do not wait for any reply if chip is frozen
	if (wdev->chip_frozen)
		return;

	mutex_lock(&wdev->hif_cmd.lock);
	ret = wait_event_timeout(wdev->hif.tx_buffers_empty,
				 !wdev->hif.tx_buffers_used,
				 msecs_to_jiffies(3000));
	if (!ret) {
		dev_warn(wdev->dev, "cannot flush tx buffers (%d still busy)\n",
			 wdev->hif.tx_buffers_used);
		wfx_pending_dump_old_frames(wdev, 3000);
		// FIXME: drop pending frames here
		wdev->chip_frozen = 1;
	}
	mutex_unlock(&wdev->hif_cmd.lock);
}
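
/* Pause TX, then wait for the device to drain its buffers. */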
void wfx_tx_lock_flush(struct wfx_dev *wdev)
{
	wfx_tx_lock(wdev);
	wfx_tx_flush(wdev);
}
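
/*
 * Per-AC counterpart of the lock above: stop/wake the mac80211 queues, with
 * a nesting counter (tx_locked_cnt) so that lock/unlock pairs can nest.
 */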
void wfx_tx_queues_lock(struct wfx_dev *wdev)
{
	int i;
	struct wfx_queue *queue;

	for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
		queue = &wdev->tx_queue[i];
		spin_lock_bh(&queue->queue.lock);
		if (queue->tx_locked_cnt++ == 0)
			ieee80211_stop_queue(wdev->hw, queue->queue_id);
		spin_unlock_bh(&queue->queue.lock);
	}
}

void wfx_tx_queues_unlock(struct wfx_dev *wdev)
{
	int i;
	struct wfx_queue *queue;

	for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
		queue = &wdev->tx_queue[i];
		spin_lock_bh(&queue->queue.lock);
		WARN(!queue->tx_locked_cnt, "queue already unlocked");
		if (--queue->tx_locked_cnt == 0)
			ieee80211_wake_queue(wdev->hw, queue->queue_id);
		spin_unlock_bh(&queue->queue.lock);
	}
}

/* If successful, LOCKS the TX queue! */
void wfx_tx_queues_wait_empty_vif(struct wfx_vif *wvif)
{
	int i;
	bool done;
	struct wfx_queue *queue;
	struct sk_buff *item;
	struct wfx_dev *wdev = wvif->wdev;
	struct hif_msg *hif;

	if (wvif->wdev->chip_frozen) {
		wfx_tx_lock_flush(wdev);
		wfx_tx_queues_clear(wdev);
		return;
	}

	/* Retry until no frame for this vif remains queued; drop the TX
	 * lock between attempts so the queues can drain.
	 */
	do {
		done = true;
		wfx_tx_lock_flush(wdev);
		for (i = 0; i < IEEE80211_NUM_ACS && done; ++i) {
			queue = &wdev->tx_queue[i];
			spin_lock_bh(&queue->queue.lock);
			skb_queue_walk(&queue->queue, item) {
				hif = (struct hif_msg *)item->data;
				if (hif->interface == wvif->id)
					done = false;
			}
			spin_unlock_bh(&queue->queue.lock);
		}
		if (!done) {
			wfx_tx_unlock(wdev);
			msleep(20);
		}
	} while (!done);
}
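
/*
 * Move every queued frame onto gc_list (destroyed by the caller outside the
 * lock) and zero this queue's per-link counters, mirroring the update into
 * the global stats.
 */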
static void wfx_tx_queue_clear(struct wfx_dev *wdev, struct wfx_queue *queue,
			       struct sk_buff_head *gc_list)
{
	int i;
	struct sk_buff *item;
	struct wfx_queue_stats *stats = &wdev->tx_queue_stats;

	spin_lock_bh(&queue->queue.lock);
	while ((item = __skb_dequeue(&queue->queue)) != NULL)
		skb_queue_head(gc_list, item);
	spin_lock_bh(&stats->pending.lock);
	for (i = 0; i < ARRAY_SIZE(stats->link_map_cache); ++i) {
		stats->link_map_cache[i] -= queue->link_map_cache[i];
		queue->link_map_cache[i] = 0;
	}
	spin_unlock_bh(&stats->pending.lock);
	spin_unlock_bh(&queue->queue.lock);
}

void wfx_tx_queues_clear(struct wfx_dev *wdev)
{
	int i;
	struct sk_buff *item;
	struct sk_buff_head gc_list;
	struct wfx_queue_stats *stats = &wdev->tx_queue_stats;

	skb_queue_head_init(&gc_list);
	for (i = 0; i < IEEE80211_NUM_ACS; ++i)
		wfx_tx_queue_clear(wdev, &wdev->tx_queue[i], &gc_list);
	wake_up(&stats->wait_link_id_empty);
	while ((item = skb_dequeue(&gc_list)) != NULL)
		wfx_skb_dtor(wdev, item);
}
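
/* One wfx_queue is instantiated per 802.11 access category. */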
void wfx_tx_queues_init(struct wfx_dev *wdev)
{
	int i;

	memset(&wdev->tx_queue_stats, 0, sizeof(wdev->tx_queue_stats));
	memset(wdev->tx_queue, 0, sizeof(wdev->tx_queue));
	skb_queue_head_init(&wdev->tx_queue_stats.pending);
	init_waitqueue_head(&wdev->tx_queue_stats.wait_link_id_empty);

	for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
		wdev->tx_queue[i].queue_id = i;
		skb_queue_head_init(&wdev->tx_queue[i].queue);
	}
}

void wfx_tx_queues_deinit(struct wfx_dev *wdev)
{
	WARN_ON(!skb_queue_empty(&wdev->tx_queue_stats.pending));
	wfx_tx_queues_clear(wdev);
}
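
/*
 * Count frames queued for the links selected by link_id_map; the special
 * value -1 means "all links" and short-circuits to the queue length.
 */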
int wfx_tx_queue_get_num_queued(struct wfx_queue *queue, u32 link_id_map)
{
	int ret, i;

	if (!link_id_map)
		return 0;

	spin_lock_bh(&queue->queue.lock);
	if (link_id_map == (u32)-1) {
		ret = skb_queue_len(&queue->queue);
	} else {
		ret = 0;
		for (i = 0; i < ARRAY_SIZE(queue->link_map_cache); i++)
			if (link_id_map & BIT(i))
				ret += queue->link_map_cache[i];
	}
	spin_unlock_bh(&queue->queue.lock);
	return ret;
}
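
/*
 * Enqueue a frame and account it against its link id, both in the per-queue
 * and in the global link_map_cache counters.
 */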
void wfx_tx_queue_put(struct wfx_dev *wdev, struct wfx_queue *queue,
		      struct sk_buff *skb)
{
	struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
	struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);

	WARN(tx_priv->link_id >= ARRAY_SIZE(stats->link_map_cache),
	     "invalid link-id value");
	spin_lock_bh(&queue->queue.lock);
	__skb_queue_tail(&queue->queue, skb);

	++queue->link_map_cache[tx_priv->link_id];

	spin_lock_bh(&stats->pending.lock);
	++stats->link_map_cache[tx_priv->link_id];
	spin_unlock_bh(&stats->pending.lock);
	spin_unlock_bh(&queue->queue.lock);
}
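
/*
 * Dequeue the first frame whose link id is allowed by link_id_map and move
 * it to the "pending" queue, timestamping it so that stuck frames can be
 * spotted later by wfx_pending_dump_old_frames().
 */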
static struct sk_buff *wfx_tx_queue_get(struct wfx_dev *wdev,
					struct wfx_queue *queue,
					u32 link_id_map)
{
	struct sk_buff *skb = NULL;
	struct sk_buff *item;
	struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
	struct wfx_tx_priv *tx_priv;
	bool wakeup_stats = false;

	spin_lock_bh(&queue->queue.lock);
	skb_queue_walk(&queue->queue, item) {
		tx_priv = wfx_skb_tx_priv(item);
		if (link_id_map & BIT(tx_priv->link_id)) {
			skb = item;
			break;
		}
	}
	WARN_ON(!skb);
	if (skb) {
		tx_priv = wfx_skb_tx_priv(skb);
		tx_priv->xmit_timestamp = ktime_get();
		__skb_unlink(skb, &queue->queue);
		--queue->link_map_cache[tx_priv->link_id];

		spin_lock_bh(&stats->pending.lock);
		__skb_queue_tail(&stats->pending, skb);
		if (!--stats->link_map_cache[tx_priv->link_id])
			wakeup_stats = true;
		spin_unlock_bh(&stats->pending.lock);
	}
	spin_unlock_bh(&queue->queue.lock);
	if (wakeup_stats)
		wake_up(&stats->wait_link_id_empty);
	return skb;
}
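
/*
 * Give a frame back to its original AC queue, presumably when the device
 * could not accept it; the inverse of the pending-queue move done in
 * wfx_tx_queue_get().
 */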
int wfx_pending_requeue(struct wfx_dev *wdev, struct sk_buff *skb)
{
	struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
	struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);
	struct wfx_queue *queue = &wdev->tx_queue[skb_get_queue_mapping(skb)];

	WARN_ON(skb_get_queue_mapping(skb) > 3);
	spin_lock_bh(&queue->queue.lock);
	++queue->link_map_cache[tx_priv->link_id];

	spin_lock_bh(&stats->pending.lock);
	++stats->link_map_cache[tx_priv->link_id];
	__skb_unlink(skb, &stats->pending);
	spin_unlock_bh(&stats->pending.lock);
	__skb_queue_tail(&queue->queue, skb);
	spin_unlock_bh(&queue->queue.lock);
	return 0;
}

int wfx_pending_remove(struct wfx_dev *wdev, struct sk_buff *skb)
{
	struct wfx_queue_stats *stats = &wdev->tx_queue_stats;

	spin_lock_bh(&stats->pending.lock);
	__skb_unlink(skb, &stats->pending);
	spin_unlock_bh(&stats->pending.lock);
	wfx_skb_dtor(wdev, skb);

	return 0;
}
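
/*
 * Look up a pending frame from its packet_id, as reported back by the
 * firmware (e.g. in TX confirmations).
 */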
struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id)
{
	struct sk_buff *skb;
	struct hif_req_tx *req;
	struct wfx_queue_stats *stats = &wdev->tx_queue_stats;

	spin_lock_bh(&stats->pending.lock);
	skb_queue_walk(&stats->pending, skb) {
		req = wfx_skb_txreq(skb);
		if (req->packet_id == packet_id) {
			spin_unlock_bh(&stats->pending.lock);
			return skb;
		}
	}
	spin_unlock_bh(&stats->pending.lock);
	WARN(1, "cannot find packet in pending queue");
	return NULL;
}
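
/*
 * Log every pending frame older than limit_ms; used by wfx_tx_flush() when
 * the device stops returning its TX buffers.
 */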
void wfx_pending_dump_old_frames(struct wfx_dev *wdev, unsigned int limit_ms)
{
	struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
	ktime_t now = ktime_get();
	struct wfx_tx_priv *tx_priv;
	struct hif_req_tx *req;
	struct sk_buff *skb;
	bool first = true;

	spin_lock_bh(&stats->pending.lock);
	skb_queue_walk(&stats->pending, skb) {
		tx_priv = wfx_skb_tx_priv(skb);
		req = wfx_skb_txreq(skb);
		if (ktime_after(now, ktime_add_ms(tx_priv->xmit_timestamp,
						  limit_ms))) {
			if (first) {
				dev_info(wdev->dev,
					 "frames stuck in firmware since %dms or more:\n",
					 limit_ms);
				first = false;
			}
			dev_info(wdev->dev, "   id %08x sent %lldms ago\n",
				 req->packet_id,
				 ktime_ms_delta(now, tx_priv->xmit_timestamp));
		}
	}
	spin_unlock_bh(&stats->pending.lock);
}
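
/* Return how long a frame has been in the pending queue, in microseconds. */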
unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev,
					  struct sk_buff *skb)
{
	ktime_t now = ktime_get();
	struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);

	return ktime_us_delta(now, tx_priv->xmit_timestamp);
}

bool wfx_tx_queues_is_empty(struct wfx_dev *wdev)
{
	int i;
	struct sk_buff_head *queue;
	bool ret = true;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		queue = &wdev->tx_queue[i].queue;
		spin_lock_bh(&queue->lock);
		if (!skb_queue_empty(queue))
			ret = false;
		spin_unlock_bh(&queue->lock);
	}
	return ret;
}
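
/*
 * Last-minute mangling of outgoing frames: tag NULL-func frames used for
 * BSS-loss confirmation and divert frames that need a WEP default-key
 * change. Returns true when the frame was taken over here and must not be
 * sent as-is.
 */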
static bool hif_handle_tx_data(struct wfx_vif *wvif, struct sk_buff *skb,
			       struct wfx_queue *queue)
{
	bool handled = false;
	struct hif_req_tx *req = wfx_skb_txreq(skb);
	struct ieee80211_key_conf *hw_key = wfx_skb_tx_priv(skb)->hw_key;
	struct ieee80211_hdr *frame =
		(struct ieee80211_hdr *)(req->frame + req->data_flags.fc_offset);

	// FIXME: mac80211 is smart enough to handle BSS loss. Driver should not
	// try to do anything about that.
	if (ieee80211_is_nullfunc(frame->frame_control)) {
		mutex_lock(&wvif->bss_loss_lock);
		if (wvif->bss_loss_state) {
			wvif->bss_loss_confirm_id = req->packet_id;
			req->queue_id.queue_id = HIF_QUEUE_ID_VOICE;
		}
		mutex_unlock(&wvif->bss_loss_lock);
	}

	// FIXME: identify the exact scenario matched by this condition.
	if (ieee80211_has_protected(frame->frame_control) &&
	    hw_key && hw_key->keyidx != wvif->wep_default_key_id &&
	    (hw_key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     hw_key->cipher == WLAN_CIPHER_SUITE_WEP104)) {
		// Update the default WEP key asynchronously; keep the frame
		// aside and hold the TX lock until the key work has run.
		wfx_tx_lock(wvif->wdev);
		WARN_ON(wvif->wep_pending_skb);
		wvif->wep_default_key_id = hw_key->keyidx;
		wvif->wep_pending_skb = skb;
		if (!schedule_work(&wvif->wep_key_work))
			wfx_tx_unlock(wvif->wdev);
		handled = true;
	}
	return handled;
}
static int wfx_get_prio_queue(struct wfx_vif *wvif,
			      u32 tx_allowed_mask, int *total)
{
	static const int urgent = BIT(WFX_LINK_ID_AFTER_DTIM) |
				  BIT(WFX_LINK_ID_UAPSD);
	const struct ieee80211_tx_queue_params *edca;
	unsigned int score, best = -1;
	int winner = -1;
	int queued;
	int i;

	/* search for a winner using edca params */
	for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
		edca = &wvif->edca_params[i];
		queued = wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[i],
						     tx_allowed_mask);
		if (!queued)
			continue;
		*total += queued;
		score = ((edca->aifs + edca->cw_min) << 16) +
			((edca->cw_max - edca->cw_min) *
			 (get_random_int() & 0xFFFF));
		if (score < best && (winner < 0 || i != 3)) {
			best = score;
			winner = i;
		}
	}

	/* override winner if bursting */
	if (winner >= 0 && wvif->wdev->tx_burst_idx >= 0 &&
	    winner != wvif->wdev->tx_burst_idx &&
	    !wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[winner],
					 tx_allowed_mask & urgent) &&
	    wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[wvif->wdev->tx_burst_idx],
					tx_allowed_mask))
		winner = wvif->wdev->tx_burst_idx;

	return winner;
}

static int wfx_tx_queue_mask_get(struct wfx_vif *wvif,
				 struct wfx_queue **queue_p,
				 u32 *tx_allowed_mask_p)
{
	int idx;
	u32 tx_allowed_mask;
	int total = 0;

	/* Search for unicast traffic */
	tx_allowed_mask = ~wvif->sta_asleep_mask;
	tx_allowed_mask |= BIT(WFX_LINK_ID_UAPSD);
	if (wvif->sta_asleep_mask)
		tx_allowed_mask &= ~BIT(WFX_LINK_ID_AFTER_DTIM);
	else
		tx_allowed_mask |= BIT(WFX_LINK_ID_AFTER_DTIM);
	idx = wfx_get_prio_queue(wvif, tx_allowed_mask, &total);
	if (idx < 0)
		return -ENOENT;

	*queue_p = &wvif->wdev->tx_queue[idx];
	*tx_allowed_mask_p = tx_allowed_mask;
	return 0;
}
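
/*
 * Peek (without dequeuing) a frame flagged IEEE80211_TX_CTL_SEND_AFTER_DTIM
 * for this vif, if any is queued.
 */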
struct hif_msg *wfx_tx_queues_get_after_dtim(struct wfx_vif *wvif)
{
	struct wfx_dev *wdev = wvif->wdev;
	struct ieee80211_tx_info *tx_info;
	struct hif_msg *hif;
	struct sk_buff *skb;
	int i;

	for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
		skb_queue_walk(&wdev->tx_queue[i].queue, skb) {
			tx_info = IEEE80211_SKB_CB(skb);
			hif = (struct hif_msg *)skb->data;
			if ((tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) &&
			    (hif->interface == wvif->id))
				return (struct hif_msg *)skb->data;
		}
	}
	return NULL;
}
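
/*
 * Main dequeue entry point, called from the bottom half: serve frames
 * flagged for transmission after DTIM first, then let every vif vote on the
 * highest-priority AC and pull from it, updating tx_burst_idx so the next
 * round can keep bursting on the same queue while its TXOP allows it.
 */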
struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev)
{
	struct sk_buff *skb;
	struct hif_msg *hif = NULL;
	struct wfx_queue *queue = NULL;
	struct wfx_queue *vif_queue = NULL;
	u32 tx_allowed_mask = 0;
	u32 vif_tx_allowed_mask = 0;
	const struct wfx_tx_priv *tx_priv = NULL;
	struct wfx_vif *wvif;
	int not_found;
	int burst;
	int i;

	if (atomic_read(&wdev->tx_lock))
		return NULL;

	wvif = NULL;
	while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
		if (wvif->after_dtim_tx_allowed) {
			for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
				skb = wfx_tx_queue_get(wvif->wdev,
						       &wdev->tx_queue[i],
						       BIT(WFX_LINK_ID_AFTER_DTIM));
				if (skb) {
					hif = (struct hif_msg *)skb->data;
					// Cannot happen since only one vif can
					// be AP at a time
					WARN_ON(wvif->id != hif->interface);
					return hif;
				}
			}
			// No more multicast to send
			wvif->after_dtim_tx_allowed = false;
			schedule_work(&wvif->update_tim_work);
		}
	}

	for (;;) {
		int ret = -ENOENT;
		int queue_num;

		wvif = NULL;
		while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
			spin_lock_bh(&wvif->ps_state_lock);

			not_found = wfx_tx_queue_mask_get(wvif, &vif_queue,
							  &vif_tx_allowed_mask);

			spin_unlock_bh(&wvif->ps_state_lock);

			if (!not_found) {
				if (queue && queue != vif_queue)
					dev_info(wdev->dev, "vifs disagree about queue priority\n");
				tx_allowed_mask |= vif_tx_allowed_mask;
				queue = vif_queue;
				ret = 0;
			}
		}

		if (ret)
			return NULL;

		queue_num = queue - wdev->tx_queue;

		skb = wfx_tx_queue_get(wdev, queue, tx_allowed_mask);
		if (!skb)
			continue;
		tx_priv = wfx_skb_tx_priv(skb);
		hif = (struct hif_msg *)skb->data;
		wvif = wdev_to_wvif(wdev, hif->interface);
		WARN_ON(!wvif);

		if (hif_handle_tx_data(wvif, skb, queue))
			continue; /* Handled by WSM */

		/* allow bursting if txop is set */
		if (wvif->edca_params[queue_num].txop)
			burst = wfx_tx_queue_get_num_queued(queue, tx_allowed_mask) + 1;
		else
			burst = 1;

		/* store index of bursting queue */
		if (burst > 1)
			wdev->tx_burst_idx = queue_num;
		else
			wdev->tx_burst_idx = -1;

		return hif;
	}
}