// SPDX-License-Identifier: GPL-2.0-only
/*
 * O(1) TX queue with built-in allocator.
 *
 * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
 * Copyright (c) 2010, ST-Ericsson
 */
#include <linux/sched.h>
#include <net/mac80211.h>

#include "queue.h"
#include "wfx.h"
#include "sta.h"
#include "data_tx.h"
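
/* TX towards the chip is allowed only while the tx_lock counter is zero.
 * Lock/unlock pairs may nest; the bottom half is kicked again once the
 * count drops back to zero.
 */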
void wfx_tx_lock(struct wfx_dev *wdev)
{
        atomic_inc(&wdev->tx_lock);
}

void wfx_tx_unlock(struct wfx_dev *wdev)
{
        int tx_lock = atomic_dec_return(&wdev->tx_lock);

        WARN(tx_lock < 0, "inconsistent tx_lock value");
        if (!tx_lock)
                wfx_bh_request_tx(wdev);
}
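
/* Wait (up to 3 seconds) for the chip to release all the TX buffers it
 * holds. On timeout, dump the stuck frames and mark the chip frozen so
 * nobody waits on it again.
 */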
void wfx_tx_flush(struct wfx_dev *wdev)
{
        int ret;

        WARN(!atomic_read(&wdev->tx_lock), "tx_lock is not locked");

        // Do not wait for any reply if chip is frozen
        if (wdev->chip_frozen)
                return;

        mutex_lock(&wdev->hif_cmd.lock);
        ret = wait_event_timeout(wdev->hif.tx_buffers_empty,
                                 !wdev->hif.tx_buffers_used,
                                 msecs_to_jiffies(3000));
        if (!ret) {
                dev_warn(wdev->dev, "cannot flush tx buffers (%d still busy)\n",
                         wdev->hif.tx_buffers_used);
                wfx_pending_dump_old_frames(wdev, 3000);
                // FIXME: drop pending frames here
                wdev->chip_frozen = 1;
        }
        mutex_unlock(&wdev->hif_cmd.lock);
}

void wfx_tx_lock_flush(struct wfx_dev *wdev)
{
        wfx_tx_lock(wdev);
        wfx_tx_flush(wdev);
}
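
/* Per-AC queue (un)locking. Each queue keeps a nesting counter; the mac80211
 * queue is stopped on the first lock and woken when the last one is released.
 */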
void wfx_tx_queues_lock(struct wfx_dev *wdev)
{
        int i;
        struct wfx_queue *queue;

        for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
                queue = &wdev->tx_queue[i];
                spin_lock_bh(&queue->queue.lock);
                if (queue->tx_locked_cnt++ == 0)
                        ieee80211_stop_queue(wdev->hw, queue->queue_id);
                spin_unlock_bh(&queue->queue.lock);
        }
}

void wfx_tx_queues_unlock(struct wfx_dev *wdev)
{
        int i;
        struct wfx_queue *queue;

        for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
                queue = &wdev->tx_queue[i];
                spin_lock_bh(&queue->queue.lock);
                WARN(!queue->tx_locked_cnt, "queue already unlocked");
                if (--queue->tx_locked_cnt == 0)
                        ieee80211_wake_queue(wdev->hw, queue->queue_id);
                spin_unlock_bh(&queue->queue.lock);
        }
}

/* If successful, LOCKS the TX queue! */
void wfx_tx_queues_wait_empty_vif(struct wfx_vif *wvif)
{
        int i;
        bool done;
        struct wfx_queue *queue;
        struct sk_buff *item;
        struct wfx_dev *wdev = wvif->wdev;
        struct hif_msg *hif;

        if (wvif->wdev->chip_frozen) {
                wfx_tx_lock_flush(wdev);
                wfx_tx_queues_clear(wdev);
                return;
        }

        do {
                done = true;
                wfx_tx_lock_flush(wdev);
                for (i = 0; i < IEEE80211_NUM_ACS && done; ++i) {
                        queue = &wdev->tx_queue[i];
                        spin_lock_bh(&queue->queue.lock);
                        skb_queue_walk(&queue->queue, item) {
                                hif = (struct hif_msg *) item->data;
                                if (hif->interface == wvif->id)
                                        done = false;
                        }
                        spin_unlock_bh(&queue->queue.lock);
                }
                if (!done) {
                        wfx_tx_unlock(wdev);
                        msleep(20);
                }
        } while (!done);
}
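
/* Move all frames of one AC queue to gc_list and remove their contribution
 * from the global per-link counters.
 */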
static void wfx_tx_queue_clear(struct wfx_dev *wdev, struct wfx_queue *queue,
                               struct sk_buff_head *gc_list)
{
        int i;
        struct sk_buff *item;
        struct wfx_queue_stats *stats = &wdev->tx_queue_stats;

        spin_lock_bh(&queue->queue.lock);
        while ((item = __skb_dequeue(&queue->queue)) != NULL)
                skb_queue_head(gc_list, item);
        spin_lock_bh(&stats->pending.lock);
        for (i = 0; i < ARRAY_SIZE(stats->link_map_cache); ++i) {
                stats->link_map_cache[i] -= queue->link_map_cache[i];
                queue->link_map_cache[i] = 0;
        }
        spin_unlock_bh(&stats->pending.lock);
        spin_unlock_bh(&queue->queue.lock);
}

void wfx_tx_queues_clear(struct wfx_dev *wdev)
{
        int i;
        struct sk_buff *item;
        struct sk_buff_head gc_list;
        struct wfx_queue_stats *stats = &wdev->tx_queue_stats;

        skb_queue_head_init(&gc_list);
        for (i = 0; i < IEEE80211_NUM_ACS; ++i)
                wfx_tx_queue_clear(wdev, &wdev->tx_queue[i], &gc_list);
        wake_up(&stats->wait_link_id_empty);
        while ((item = skb_dequeue(&gc_list)) != NULL)
                wfx_skb_dtor(wdev, item);
}

void wfx_tx_queues_init(struct wfx_dev *wdev)
{
        int i;

        memset(&wdev->tx_queue_stats, 0, sizeof(wdev->tx_queue_stats));
        memset(wdev->tx_queue, 0, sizeof(wdev->tx_queue));
        skb_queue_head_init(&wdev->tx_queue_stats.pending);
        init_waitqueue_head(&wdev->tx_queue_stats.wait_link_id_empty);

        for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
                wdev->tx_queue[i].queue_id = i;
                skb_queue_head_init(&wdev->tx_queue[i].queue);
        }
}

void wfx_tx_queues_deinit(struct wfx_dev *wdev)
{
        WARN_ON(!skb_queue_empty(&wdev->tx_queue_stats.pending));
        wfx_tx_queues_clear(wdev);
}
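
/* Count frames queued for the links selected by link_id_map. The special
 * map value -1 short-circuits to the raw queue length.
 */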
size_t wfx_tx_queue_get_num_queued(struct wfx_queue *queue,
                                   u32 link_id_map)
{
        size_t ret;
        int i, bit;

        if (!link_id_map)
                return 0;

        spin_lock_bh(&queue->queue.lock);
        if (link_id_map == (u32)-1) {
                ret = skb_queue_len(&queue->queue);
        } else {
                ret = 0;
                for (i = 0, bit = 1; i < ARRAY_SIZE(queue->link_map_cache);
                     ++i, bit <<= 1) {
                        if (link_id_map & bit)
                                ret += queue->link_map_cache[i];
                }
        }
        spin_unlock_bh(&queue->queue.lock);
        return ret;
}
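
/* Enqueue a frame and update both the per-queue and the global per-link
 * counters under the queue lock.
 */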
void wfx_tx_queue_put(struct wfx_dev *wdev, struct wfx_queue *queue,
                      struct sk_buff *skb)
{
        struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
        struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);

        WARN(tx_priv->link_id >= ARRAY_SIZE(stats->link_map_cache), "invalid link-id value");
        spin_lock_bh(&queue->queue.lock);
        __skb_queue_tail(&queue->queue, skb);

        ++queue->link_map_cache[tx_priv->link_id];

        spin_lock_bh(&stats->pending.lock);
        ++stats->link_map_cache[tx_priv->link_id];
        spin_unlock_bh(&stats->pending.lock);
        spin_unlock_bh(&queue->queue.lock);
}
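
/* Dequeue the first frame matching link_id_map: timestamp it and park it on
 * the global "pending" queue until the firmware confirms the transmission.
 */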
static struct sk_buff *wfx_tx_queue_get(struct wfx_dev *wdev,
                                        struct wfx_queue *queue,
                                        u32 link_id_map)
{
        struct sk_buff *skb = NULL;
        struct sk_buff *item;
        struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
        struct wfx_tx_priv *tx_priv;
        bool wakeup_stats = false;

        spin_lock_bh(&queue->queue.lock);
        skb_queue_walk(&queue->queue, item) {
                tx_priv = wfx_skb_tx_priv(item);
                if (link_id_map & BIT(tx_priv->link_id)) {
                        skb = item;
                        break;
                }
        }
        if (skb) {
                tx_priv = wfx_skb_tx_priv(skb);
                tx_priv->xmit_timestamp = ktime_get();
                __skb_unlink(skb, &queue->queue);
                --queue->link_map_cache[tx_priv->link_id];

                spin_lock_bh(&stats->pending.lock);
                __skb_queue_tail(&stats->pending, skb);
                if (!--stats->link_map_cache[tx_priv->link_id])
                        wakeup_stats = true;
                spin_unlock_bh(&stats->pending.lock);
        }
        spin_unlock_bh(&queue->queue.lock);
        if (wakeup_stats)
                wake_up(&stats->wait_link_id_empty);
        return skb;
}
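
/* Put a frame back on its AC queue, for instance when the firmware could not
 * transmit it yet; it leaves "pending" and becomes eligible again.
 */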
int wfx_pending_requeue(struct wfx_dev *wdev, struct sk_buff *skb)
{
        struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
        struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);
        struct wfx_queue *queue = &wdev->tx_queue[skb_get_queue_mapping(skb)];

        WARN_ON(skb_get_queue_mapping(skb) > 3);
        spin_lock_bh(&queue->queue.lock);
        ++queue->link_map_cache[tx_priv->link_id];

        spin_lock_bh(&stats->pending.lock);
        ++stats->link_map_cache[tx_priv->link_id];
        __skb_unlink(skb, &stats->pending);
        spin_unlock_bh(&stats->pending.lock);
        __skb_queue_tail(&queue->queue, skb);
        spin_unlock_bh(&queue->queue.lock);
        return 0;
}

int wfx_pending_remove(struct wfx_dev *wdev, struct sk_buff *skb)
{
        struct wfx_queue_stats *stats = &wdev->tx_queue_stats;

        spin_lock_bh(&stats->pending.lock);
        __skb_unlink(skb, &stats->pending);
        spin_unlock_bh(&stats->pending.lock);
        wfx_skb_dtor(wdev, skb);

        return 0;
}
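
/* Look up a pending frame from the packet_id carried in the firmware TX
 * confirmation.
 */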
struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id)
{
        struct sk_buff *skb;
        struct hif_req_tx *req;
        struct wfx_queue_stats *stats = &wdev->tx_queue_stats;

        spin_lock_bh(&stats->pending.lock);
        skb_queue_walk(&stats->pending, skb) {
                req = wfx_skb_txreq(skb);
                if (req->packet_id == packet_id) {
                        spin_unlock_bh(&stats->pending.lock);
                        return skb;
                }
        }
        spin_unlock_bh(&stats->pending.lock);
        WARN(1, "cannot find packet in pending queue");
        return NULL;
}
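
/* Debug helper: list every pending frame older than limit_ms, i.e. frames
 * the firmware has kept for a suspiciously long time.
 */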
void wfx_pending_dump_old_frames(struct wfx_dev *wdev, unsigned int limit_ms)
{
        struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
        ktime_t now = ktime_get();
        struct wfx_tx_priv *tx_priv;
        struct hif_req_tx *req;
        struct sk_buff *skb;
        bool first = true;

        spin_lock_bh(&stats->pending.lock);
        skb_queue_walk(&stats->pending, skb) {
                tx_priv = wfx_skb_tx_priv(skb);
                req = wfx_skb_txreq(skb);
                if (ktime_after(now, ktime_add_ms(tx_priv->xmit_timestamp,
                                                  limit_ms))) {
                        if (first) {
                                dev_info(wdev->dev, "frames stuck in firmware since %dms or more:\n",
                                         limit_ms);
                                first = false;
                        }
                        dev_info(wdev->dev, "   id %08x sent %lldms ago\n",
                                 req->packet_id,
                                 ktime_ms_delta(now, tx_priv->xmit_timestamp));
                }
        }
        spin_unlock_bh(&stats->pending.lock);
}

unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev,
                                          struct sk_buff *skb)
{
        ktime_t now = ktime_get();
        struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);

        return ktime_us_delta(now, tx_priv->xmit_timestamp);
}

bool wfx_tx_queues_is_empty(struct wfx_dev *wdev)
{
        int i;
        struct sk_buff_head *queue;
        bool ret = true;

        for (i = 0; i < IEEE80211_NUM_ACS; i++) {
                queue = &wdev->tx_queue[i].queue;
                spin_lock_bh(&queue->lock);
                if (!skb_queue_empty(queue))
                        ret = false;
                spin_unlock_bh(&queue->lock);
        }
        return ret;
}
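
/* Last-chance filtering of a dequeued frame. Returns true when the frame was
 * consumed here (dropped, or held back while the default WEP key changes)
 * and must not be sent as-is.
 */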
static bool hif_handle_tx_data(struct wfx_vif *wvif, struct sk_buff *skb,
                               struct wfx_queue *queue)
{
        bool handled = false;
        struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);
        struct hif_req_tx *req = wfx_skb_txreq(skb);
        struct ieee80211_hdr *frame = (struct ieee80211_hdr *) (req->frame + req->data_flags.fc_offset);

        enum {
                do_probe,
                do_drop,
                do_wep,
                do_tx,
        } action = do_tx;

        switch (wvif->vif->type) {
        case NL80211_IFTYPE_STATION:
                if (wvif->state < WFX_STATE_PRE_STA)
                        action = do_drop;
                break;
        case NL80211_IFTYPE_AP:
                if (!wvif->state) {
                        action = do_drop;
                } else if (!(BIT(tx_priv->raw_link_id) &
                             (BIT(0) | wvif->link_id_map))) {
                        dev_warn(wvif->wdev->dev, "a frame with expired link-id is dropped\n");
                        action = do_drop;
                }
                break;
        case NL80211_IFTYPE_ADHOC:
                if (wvif->state != WFX_STATE_IBSS)
                        action = do_drop;
                break;
        case NL80211_IFTYPE_MONITOR:
        default:
                action = do_drop;
                break;
        }

        if (action == do_tx) {
                if (ieee80211_is_nullfunc(frame->frame_control)) {
                        mutex_lock(&wvif->bss_loss_lock);
                        if (wvif->bss_loss_state) {
                                wvif->bss_loss_confirm_id = req->packet_id;
                                req->queue_id.queue_id = HIF_QUEUE_ID_VOICE;
                        }
                        mutex_unlock(&wvif->bss_loss_lock);
                } else if (ieee80211_has_protected(frame->frame_control) &&
                           tx_priv->hw_key &&
                           tx_priv->hw_key->keyidx != wvif->wep_default_key_id &&
                           (tx_priv->hw_key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
                            tx_priv->hw_key->cipher == WLAN_CIPHER_SUITE_WEP104)) {
                        action = do_wep;
                }
        }

        switch (action) {
        case do_drop:
                wfx_pending_remove(wvif->wdev, skb);
                handled = true;
                break;
        case do_wep:
                wfx_tx_lock(wvif->wdev);
                wvif->wep_default_key_id = tx_priv->hw_key->keyidx;
                wvif->wep_pending_skb = skb;
                if (!schedule_work(&wvif->wep_key_work))
                        wfx_tx_unlock(wvif->wdev);
                handled = true;
                break;
        case do_tx:
                break;
        default:
                /* Do nothing */
                break;
        }
        return handled;
}
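
/* Elect the AC queue to serve next. Every non-empty queue gets a score from
 * its EDCA parameters plus a random component (lower wins), roughly mimicking
 * the 802.11 contention procedure in software.
 */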
static int wfx_get_prio_queue(struct wfx_vif *wvif,
                              u32 tx_allowed_mask, int *total)
{
        static const int urgent = BIT(WFX_LINK_ID_AFTER_DTIM) |
                                  BIT(WFX_LINK_ID_UAPSD);
        struct hif_req_edca_queue_params *edca;
        unsigned int score, best = -1;
        int winner = -1;
        int i;

        /* search for a winner using edca params */
        for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
                int queued;

                edca = &wvif->edca.params[i];
                queued = wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[i],
                                                     tx_allowed_mask);
                if (!queued)
                        continue;
                *total += queued;
                score = ((edca->aifsn + edca->cw_min) << 16) +
                        ((edca->cw_max - edca->cw_min) *
                         (get_random_int() & 0xFFFF));
                if (score < best && (winner < 0 || i != 3)) {
                        best = score;
                        winner = i;
                }
        }

        /* override winner if bursting */
        if (winner >= 0 && wvif->wdev->tx_burst_idx >= 0 &&
            winner != wvif->wdev->tx_burst_idx &&
            !wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[winner],
                                         tx_allowed_mask & urgent) &&
            wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[wvif->wdev->tx_burst_idx], tx_allowed_mask))
                winner = wvif->wdev->tx_burst_idx;

        return winner;
}
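
/* Build the set of links allowed to transmit for this vif: multicast after
 * DTIM takes precedence, then unicast towards awake (or U-APSD/PS-Poll)
 * stations.
 */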
static int wfx_tx_queue_mask_get(struct wfx_vif *wvif,
                                 struct wfx_queue **queue_p,
                                 u32 *tx_allowed_mask_p,
                                 bool *more)
{
        int idx;
        u32 tx_allowed_mask;
        int total = 0;

        /* Search for a queue with multicast frames buffered */
        if (wvif->mcast_tx) {
                tx_allowed_mask = BIT(WFX_LINK_ID_AFTER_DTIM);
                idx = wfx_get_prio_queue(wvif, tx_allowed_mask, &total);
                if (idx >= 0) {
                        *more = total > 1;
                        goto found;
                }
        }

        /* Search for unicast traffic */
        tx_allowed_mask = ~wvif->sta_asleep_mask;
        tx_allowed_mask |= BIT(WFX_LINK_ID_UAPSD);
        if (wvif->sta_asleep_mask) {
                tx_allowed_mask |= wvif->pspoll_mask;
                tx_allowed_mask &= ~BIT(WFX_LINK_ID_AFTER_DTIM);
        } else {
                tx_allowed_mask |= BIT(WFX_LINK_ID_AFTER_DTIM);
        }
        idx = wfx_get_prio_queue(wvif, tx_allowed_mask, &total);
        if (idx < 0)
                return -ENOENT;

found:
        *queue_p = &wvif->wdev->tx_queue[idx];
        *tx_allowed_mask_p = tx_allowed_mask;
        return 0;
}
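
/* Main dequeue function, called from the bottom half: merge the per-vif
 * verdicts, pop the next frame, filter it, and handle bursting and the
 * MoreData flag for power-save stations.
 */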
struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev)
{
        struct sk_buff *skb;
        struct hif_msg *hif = NULL;
        struct hif_req_tx *req = NULL;
        struct wfx_queue *queue = NULL;
        struct wfx_queue *vif_queue = NULL;
        u32 tx_allowed_mask = 0;
        u32 vif_tx_allowed_mask = 0;
        const struct wfx_tx_priv *tx_priv = NULL;
        struct wfx_vif *wvif;
        /* More is used only for broadcasts. */
        bool more = false;
        bool vif_more = false;
        int not_found;
        int burst;

        for (;;) {
                int ret = -ENOENT;
                int queue_num;
                struct ieee80211_hdr *hdr;

                if (atomic_read(&wdev->tx_lock))
                        return NULL;

                wvif = NULL;
                while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
                        spin_lock_bh(&wvif->ps_state_lock);

                        not_found = wfx_tx_queue_mask_get(wvif, &vif_queue,
                                                          &vif_tx_allowed_mask,
                                                          &vif_more);

                        if (wvif->mcast_buffered && (not_found || !vif_more) &&
                            (wvif->mcast_tx ||
                             !wvif->sta_asleep_mask)) {
                                wvif->mcast_buffered = false;
                                if (wvif->mcast_tx) {
                                        wvif->mcast_tx = false;
                                        schedule_work(&wvif->mcast_stop_work);
                                }
                        }

                        spin_unlock_bh(&wvif->ps_state_lock);

                        if (vif_more) {
                                more = true;
                                tx_allowed_mask = vif_tx_allowed_mask;
                                queue = vif_queue;
                                ret = 0;
                                break;
                        } else if (!not_found) {
                                if (queue && queue != vif_queue)
                                        dev_info(wdev->dev, "vifs disagree about queue priority\n");
                                tx_allowed_mask |= vif_tx_allowed_mask;
                                queue = vif_queue;
                                ret = 0;
                        }
                }

                if (ret)
                        return NULL;

                queue_num = queue - wdev->tx_queue;

                skb = wfx_tx_queue_get(wdev, queue, tx_allowed_mask);
                if (!skb)
                        continue;
                tx_priv = wfx_skb_tx_priv(skb);
                hif = (struct hif_msg *) skb->data;
                wvif = wdev_to_wvif(wdev, hif->interface);

                if (hif_handle_tx_data(wvif, skb, queue))
                        continue; /* Handled by WSM */

                wvif->pspoll_mask &= ~BIT(tx_priv->raw_link_id);

                /* allow bursting if txop is set */
                if (wvif->edca.params[queue_num].tx_op_limit)
                        burst = (int)wfx_tx_queue_get_num_queued(queue, tx_allowed_mask) + 1;
                else
                        burst = 1;

                /* store index of bursting queue */
                if (burst > 1)
                        wdev->tx_burst_idx = queue_num;
                else
                        wdev->tx_burst_idx = -1;

                /* more buffered multicast/broadcast frames
                 * ==> set MoreData flag in IEEE 802.11 header
                 * to inform PS STAs
                 */
                if (more) {
                        req = (struct hif_req_tx *) hif->body;
                        hdr = (struct ieee80211_hdr *) (req->frame + req->data_flags.fc_offset);
                        hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);