// SPDX-License-Identifier: GPL-2.0-only
/*
 * O(1) TX queue with built-in allocator for ST-Ericsson CW1200 drivers
 *
 * Copyright (c) 2010, ST-Ericsson
 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
 */

#include <net/mac80211.h>
#include <linux/sched.h>
#include "queue.h"
#include "cw1200.h"
#include "debug.h"
/* private */ struct cw1200_queue_item
{
        struct list_head        head;
        struct sk_buff          *skb;
        u32                     packet_id;
        unsigned long           queue_timestamp;
        unsigned long           xmit_timestamp;
        struct cw1200_txpriv    txpriv;
        u8                      generation;
};
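
/*
 * __cw1200_queue_lock()/__cw1200_queue_unlock() keep a per-queue
 * tx_locked_cnt reference count: the underlying mac80211 queue is
 * stopped on the first lock and woken again only when the last lock
 * is dropped.  In this file both helpers are always called with
 * queue->lock held.
 */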
static inline void __cw1200_queue_lock(struct cw1200_queue *queue)
{
        struct cw1200_queue_stats *stats = queue->stats;
        if (queue->tx_locked_cnt++ == 0) {
                pr_debug("[TX] Queue %d is locked.\n",
                         queue->queue_id);
                ieee80211_stop_queue(stats->priv->hw, queue->queue_id);
        }
}
static inline void __cw1200_queue_unlock(struct cw1200_queue *queue)
{
        struct cw1200_queue_stats *stats = queue->stats;
        BUG_ON(!queue->tx_locked_cnt);
        if (--queue->tx_locked_cnt == 0) {
                pr_debug("[TX] Queue %d is unlocked.\n",
                         queue->queue_id);
                ieee80211_wake_queue(stats->priv->hw, queue->queue_id);
        }
}
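
/*
 * A packet ID packs four 8-bit fields into a u32:
 *
 *   bits  0..7   item index within the queue pool
 *   bits  8..15  item generation
 *   bits 16..23  queue ID
 *   bits 24..31  queue generation
 *
 * The generation fields let completion and requeue paths detect stale
 * IDs after an item or the whole queue has been recycled.
 */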
static inline void cw1200_queue_parse_id(u32 packet_id, u8 *queue_generation,
                                         u8 *queue_id, u8 *item_generation,
                                         u8 *item_id)
{
        *item_id                = (packet_id >>  0) & 0xFF;
        *item_generation        = (packet_id >>  8) & 0xFF;
        *queue_id               = (packet_id >> 16) & 0xFF;
        *queue_generation       = (packet_id >> 24) & 0xFF;
}
static inline u32 cw1200_queue_mk_packet_id(u8 queue_generation, u8 queue_id,
                                            u8 item_generation, u8 item_id)
{
        return ((u32)item_id << 0) |
               ((u32)item_generation << 8) |
               ((u32)queue_id << 16) |
               ((u32)queue_generation << 24);
}
static void cw1200_queue_post_gc(struct cw1200_queue_stats *stats,
                                 struct list_head *gc_list)
{
        struct cw1200_queue_item *item, *tmp;

        list_for_each_entry_safe(item, tmp, gc_list, head) {
                list_del(&item->head);
                stats->skb_dtor(stats->priv, item->skb, &item->txpriv);
                kfree(item);
        }
}
static void cw1200_queue_register_post_gc(struct list_head *gc_list,
                                          struct cw1200_queue_item *item)
{
        struct cw1200_queue_item *gc_item;
        gc_item = kmemdup(item, sizeof(struct cw1200_queue_item),
                          GFP_ATOMIC);
        BUG_ON(!gc_item);
        list_add_tail(&gc_item->head, gc_list);
}
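
/*
 * Drop frames that have been queued for longer than queue->ttl.
 * Expired items go back to the free pool; copies are collected on
 * @head so the skb destructor can run after queue->lock is released.
 * While the queue stays overfull the GC timer is re-armed for the
 * next expiry.
 */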
static void __cw1200_queue_gc(struct cw1200_queue *queue,
                              struct list_head *head,
                              bool unlock)
{
        struct cw1200_queue_stats *stats = queue->stats;
        struct cw1200_queue_item *item = NULL, *tmp;
        bool wakeup_stats = false;

        list_for_each_entry_safe(item, tmp, &queue->queue, head) {
                if (jiffies - item->queue_timestamp < queue->ttl)
                        break;
                --queue->num_queued;
                --queue->link_map_cache[item->txpriv.link_id];
                spin_lock_bh(&stats->lock);
                --stats->num_queued;
                if (!--stats->link_map_cache[item->txpriv.link_id])
                        wakeup_stats = true;
                spin_unlock_bh(&stats->lock);
                cw1200_debug_tx_ttl(stats->priv);
                cw1200_queue_register_post_gc(head, item);
                item->skb = NULL;
                list_move_tail(&item->head, &queue->free_pool);
        }

        if (wakeup_stats)
                wake_up(&stats->wait_link_id_empty);

        if (queue->overfull) {
                if (queue->num_queued <= (queue->capacity >> 1)) {
                        queue->overfull = false;
                        if (unlock)
                                __cw1200_queue_unlock(queue);
                } else if (item) {
                        unsigned long tmo = item->queue_timestamp + queue->ttl;
                        mod_timer(&queue->gc, tmo);
                        cw1200_pm_stay_awake(&stats->priv->pm_state,
                                             tmo - jiffies);
                }
        }
}
static void cw1200_queue_gc(struct timer_list *t)
{
        LIST_HEAD(list);
        struct cw1200_queue *queue =
                from_timer(queue, t, gc);

        spin_lock_bh(&queue->lock);
        __cw1200_queue_gc(queue, &list, true);
        spin_unlock_bh(&queue->lock);
        cw1200_queue_post_gc(queue->stats, &list);
}
int cw1200_queue_stats_init(struct cw1200_queue_stats *stats,
                            size_t map_capacity,
                            cw1200_queue_skb_dtor_t skb_dtor,
                            struct cw1200_common *priv)
{
        memset(stats, 0, sizeof(*stats));
        stats->map_capacity = map_capacity;
        stats->skb_dtor = skb_dtor;
        stats->priv = priv;
        spin_lock_init(&stats->lock);
        init_waitqueue_head(&stats->wait_link_id_empty);

        stats->link_map_cache = kcalloc(map_capacity, sizeof(int),
                                        GFP_KERNEL);
        if (!stats->link_map_cache)
                return -ENOMEM;

        return 0;
}
int cw1200_queue_init(struct cw1200_queue *queue,
                      struct cw1200_queue_stats *stats,
                      u8 queue_id,
                      size_t capacity,
                      unsigned long ttl)
{
        size_t i;

        memset(queue, 0, sizeof(*queue));
        queue->stats = stats;
        queue->capacity = capacity;
        queue->queue_id = queue_id;
        queue->ttl = ttl;
        INIT_LIST_HEAD(&queue->queue);
        INIT_LIST_HEAD(&queue->pending);
        INIT_LIST_HEAD(&queue->free_pool);
        spin_lock_init(&queue->lock);
        timer_setup(&queue->gc, cw1200_queue_gc, 0);

        queue->pool = kcalloc(capacity, sizeof(struct cw1200_queue_item),
                              GFP_KERNEL);
        if (!queue->pool)
                return -ENOMEM;

        queue->link_map_cache = kcalloc(stats->map_capacity, sizeof(int),
                                        GFP_KERNEL);
        if (!queue->link_map_cache) {
                kfree(queue->pool);
                queue->pool = NULL;
                return -ENOMEM;
        }

        for (i = 0; i < capacity; ++i)
                list_add_tail(&queue->pool[i].head, &queue->free_pool);

        return 0;
}
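
/*
 * Flush every queued and pending frame: all items return to the free
 * pool, the per-link counters are rolled back out of the shared stats,
 * and the queue generation is bumped so packet IDs handed out before
 * the flush are rejected as stale.
 */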
int cw1200_queue_clear(struct cw1200_queue *queue)
{
        int i;
        LIST_HEAD(gc_list);
        struct cw1200_queue_stats *stats = queue->stats;
        struct cw1200_queue_item *item, *tmp;

        spin_lock_bh(&queue->lock);
        queue->generation++;
        list_splice_tail_init(&queue->queue, &queue->pending);
        list_for_each_entry_safe(item, tmp, &queue->pending, head) {
                WARN_ON(!item->skb);
                cw1200_queue_register_post_gc(&gc_list, item);
                item->skb = NULL;
                list_move_tail(&item->head, &queue->free_pool);
        }
        queue->num_queued = 0;
        queue->num_pending = 0;

        spin_lock_bh(&stats->lock);
        for (i = 0; i < stats->map_capacity; ++i) {
                stats->num_queued -= queue->link_map_cache[i];
                stats->link_map_cache[i] -= queue->link_map_cache[i];
                queue->link_map_cache[i] = 0;
        }
        spin_unlock_bh(&stats->lock);
        if (queue->overfull) {
                queue->overfull = false;
                __cw1200_queue_unlock(queue);
        }
        spin_unlock_bh(&queue->lock);
        wake_up(&stats->wait_link_id_empty);
        cw1200_queue_post_gc(stats, &gc_list);
        return 0;
}
void cw1200_queue_stats_deinit(struct cw1200_queue_stats *stats)
{
        kfree(stats->link_map_cache);
        stats->link_map_cache = NULL;
}
void cw1200_queue_deinit(struct cw1200_queue *queue)
{
        cw1200_queue_clear(queue);
        del_timer_sync(&queue->gc);
        INIT_LIST_HEAD(&queue->free_pool);
        kfree(queue->pool);
        kfree(queue->link_map_cache);
        queue->pool = NULL;
        queue->link_map_cache = NULL;
        queue->capacity = 0;
}
size_t cw1200_queue_get_num_queued(struct cw1200_queue *queue,
                                   u32 link_id_map)
{
        size_t ret;
        int i, bit;
        size_t map_capacity = queue->stats->map_capacity;

        if (!link_id_map)
                return 0;

        spin_lock_bh(&queue->lock);
        if (link_id_map == (u32)-1) {
                ret = queue->num_queued - queue->num_pending;
        } else {
                ret = 0;
                for (i = 0, bit = 1; i < map_capacity; ++i, bit <<= 1) {
                        if (link_id_map & bit)
                                ret += queue->link_map_cache[i];
                }
        }
        spin_unlock_bh(&queue->lock);
        return ret;
}
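
/*
 * Enqueue a frame.  When the queue gets close to capacity (a few slots
 * are held back because TX may run on several CPUs in parallel) it is
 * marked overfull: the mac80211 queue is stopped and the GC timer is
 * fired immediately so expired frames can make room again.
 */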
int cw1200_queue_put(struct cw1200_queue *queue,
                     struct sk_buff *skb,
                     struct cw1200_txpriv *txpriv)
{
        int ret = 0;
        struct cw1200_queue_stats *stats = queue->stats;

        if (txpriv->link_id >= queue->stats->map_capacity)
                return -EINVAL;

        spin_lock_bh(&queue->lock);
        if (!WARN_ON(list_empty(&queue->free_pool))) {
                struct cw1200_queue_item *item = list_first_entry(
                        &queue->free_pool, struct cw1200_queue_item, head);
                BUG_ON(item->skb);

                list_move_tail(&item->head, &queue->queue);
                item->skb = skb;
                item->txpriv = *txpriv;
                item->generation = 0;
                item->packet_id = cw1200_queue_mk_packet_id(queue->generation,
                                                            queue->queue_id,
                                                            item->generation,
                                                            item - queue->pool);
                item->queue_timestamp = jiffies;

                ++queue->num_queued;
                ++queue->link_map_cache[txpriv->link_id];

                spin_lock_bh(&stats->lock);
                ++stats->num_queued;
                ++stats->link_map_cache[txpriv->link_id];
                spin_unlock_bh(&stats->lock);

                /* TX may happen in parallel sometimes.
                 * Leave extra queue slots so we don't overflow.
                 */
                if (queue->overfull == false &&
                    queue->num_queued >=
                    (queue->capacity - (num_present_cpus() - 1))) {
                        queue->overfull = true;
                        __cw1200_queue_lock(queue);
                        mod_timer(&queue->gc, jiffies);
                }
        } else {
                ret = -ENOENT;
        }
        spin_unlock_bh(&queue->lock);
        return ret;
}
int cw1200_queue_get(struct cw1200_queue *queue,
                     u32 link_id_map,
                     struct wsm_tx **tx,
                     struct ieee80211_tx_info **tx_info,
                     const struct cw1200_txpriv **txpriv)
{
        int ret = -ENOENT;
        struct cw1200_queue_item *item;
        struct cw1200_queue_stats *stats = queue->stats;
        bool wakeup_stats = false;

        spin_lock_bh(&queue->lock);
        list_for_each_entry(item, &queue->queue, head) {
                if (link_id_map & BIT(item->txpriv.link_id)) {
                        ret = 0;
                        break;
                }
        }

        if (!WARN_ON(ret)) {
                *tx = (struct wsm_tx *)item->skb->data;
                *tx_info = IEEE80211_SKB_CB(item->skb);
                *txpriv = &item->txpriv;
                (*tx)->packet_id = item->packet_id;
                list_move_tail(&item->head, &queue->pending);
                ++queue->num_pending;
                --queue->link_map_cache[item->txpriv.link_id];
                item->xmit_timestamp = jiffies;

                spin_lock_bh(&stats->lock);
                --stats->num_queued;
                if (!--stats->link_map_cache[item->txpriv.link_id])
                        wakeup_stats = true;
                spin_unlock_bh(&stats->lock);
        }
        spin_unlock_bh(&queue->lock);
        if (wakeup_stats)
                wake_up(&stats->wait_link_id_empty);
        return ret;
}
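
/*
 * Return a pending frame to the TX queue.  The queue and item
 * generations encoded in @packet_id must still match; otherwise the
 * frame has already been recycled and -ENOENT is returned.  A fresh
 * packet ID with a bumped item generation is assigned on requeue.
 */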
int cw1200_queue_requeue(struct cw1200_queue *queue, u32 packet_id)
{
        int ret = 0;
        u8 queue_generation, queue_id, item_generation, item_id;
        struct cw1200_queue_item *item;
        struct cw1200_queue_stats *stats = queue->stats;

        cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
                              &item_generation, &item_id);

        item = &queue->pool[item_id];

        spin_lock_bh(&queue->lock);
        BUG_ON(queue_id != queue->queue_id);
        if (queue_generation != queue->generation) {
                ret = -ENOENT;
        } else if (item_id >= (unsigned) queue->capacity) {
                WARN_ON(1);
                ret = -EINVAL;
        } else if (item->generation != item_generation) {
                WARN_ON(1);
                ret = -ENOENT;
        } else {
                --queue->num_pending;
                ++queue->link_map_cache[item->txpriv.link_id];

                spin_lock_bh(&stats->lock);
                ++stats->num_queued;
                ++stats->link_map_cache[item->txpriv.link_id];
                spin_unlock_bh(&stats->lock);

                item->generation = ++item_generation;
                item->packet_id = cw1200_queue_mk_packet_id(queue_generation,
                                                            queue_id,
                                                            item_generation,
                                                            item_id);
                list_move(&item->head, &queue->queue);
        }
        spin_unlock_bh(&queue->lock);
        return ret;
}
int cw1200_queue_requeue_all(struct cw1200_queue *queue)
{
        struct cw1200_queue_item *item, *tmp;
        struct cw1200_queue_stats *stats = queue->stats;
        spin_lock_bh(&queue->lock);

        list_for_each_entry_safe_reverse(item, tmp, &queue->pending, head) {
                --queue->num_pending;
                ++queue->link_map_cache[item->txpriv.link_id];

                spin_lock_bh(&stats->lock);
                ++stats->num_queued;
                ++stats->link_map_cache[item->txpriv.link_id];
                spin_unlock_bh(&stats->lock);

                ++item->generation;
                item->packet_id = cw1200_queue_mk_packet_id(queue->generation,
                                                            queue->queue_id,
                                                            item->generation,
                                                            item - queue->pool);
                list_move(&item->head, &queue->queue);
        }
        spin_unlock_bh(&queue->lock);

        return 0;
}
int cw1200_queue_remove(struct cw1200_queue *queue, u32 packet_id)
{
        int ret = 0;
        u8 queue_generation, queue_id, item_generation, item_id;
        struct cw1200_queue_item *item;
        struct cw1200_queue_stats *stats = queue->stats;
        struct sk_buff *gc_skb = NULL;
        struct cw1200_txpriv gc_txpriv;

        cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
                              &item_generation, &item_id);

        item = &queue->pool[item_id];

        spin_lock_bh(&queue->lock);
        BUG_ON(queue_id != queue->queue_id);
        if (queue_generation != queue->generation) {
                ret = -ENOENT;
        } else if (item_id >= (unsigned) queue->capacity) {
                WARN_ON(1);
                ret = -EINVAL;
        } else if (item->generation != item_generation) {
                WARN_ON(1);
                ret = -ENOENT;
        } else {
                gc_txpriv = item->txpriv;
                gc_skb = item->skb;
                item->skb = NULL;
                --queue->num_pending;
                --queue->num_queued;
                ++queue->num_sent;
                ++item->generation;
                /* Do not use list_move_tail here, but list_move:
                 * try to utilize cache row.
                 */
                list_move(&item->head, &queue->free_pool);

                if (queue->overfull &&
                    (queue->num_queued <= (queue->capacity >> 1))) {
                        queue->overfull = false;
                        __cw1200_queue_unlock(queue);
                }
        }
        spin_unlock_bh(&queue->lock);

        if (gc_skb)
                stats->skb_dtor(stats->priv, gc_skb, &gc_txpriv);

        return ret;
}
int cw1200_queue_get_skb(struct cw1200_queue *queue, u32 packet_id,
                         struct sk_buff **skb,
                         const struct cw1200_txpriv **txpriv)
{
        int ret = 0;
        u8 queue_generation, queue_id, item_generation, item_id;
        struct cw1200_queue_item *item;

        cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
                              &item_generation, &item_id);

        item = &queue->pool[item_id];

        spin_lock_bh(&queue->lock);
        BUG_ON(queue_id != queue->queue_id);
        if (queue_generation != queue->generation) {
                ret = -ENOENT;
        } else if (item_id >= (unsigned) queue->capacity) {
                WARN_ON(1);
                ret = -EINVAL;
        } else if (item->generation != item_generation) {
                WARN_ON(1);
                ret = -ENOENT;
        } else {
                *skb = item->skb;
                *txpriv = &item->txpriv;
        }
        spin_unlock_bh(&queue->lock);
        return ret;
}
void cw1200_queue_lock(struct cw1200_queue *queue)
{
        spin_lock_bh(&queue->lock);
        __cw1200_queue_lock(queue);
        spin_unlock_bh(&queue->lock);
}
void cw1200_queue_unlock(struct cw1200_queue *queue)
{
        spin_lock_bh(&queue->lock);
        __cw1200_queue_unlock(queue);
        spin_unlock_bh(&queue->lock);
}
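
/*
 * Lower *timestamp to the oldest xmit timestamp found among pending
 * frames, skipping the frame identified by @pending_frame_id.
 * Returns true if the pending list is not empty.
 */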
bool cw1200_queue_get_xmit_timestamp(struct cw1200_queue *queue,
                                     unsigned long *timestamp,
                                     u32 pending_frame_id)
{
        struct cw1200_queue_item *item;
        bool ret;

        spin_lock_bh(&queue->lock);
        ret = !list_empty(&queue->pending);
        if (ret) {
                list_for_each_entry(item, &queue->pending, head) {
                        if (item->packet_id != pending_frame_id)
                                if (time_before(item->xmit_timestamp,
                                                *timestamp))
                                        *timestamp = item->xmit_timestamp;
                }
        }
        spin_unlock_bh(&queue->lock);
        return ret;
}
bool cw1200_queue_stats_is_empty(struct cw1200_queue_stats *stats,
                                 u32 link_id_map)
{
        bool empty = true;

        spin_lock_bh(&stats->lock);
        if (link_id_map == (u32)-1) {
                empty = stats->num_queued == 0;
        } else {
                int i;
                for (i = 0; i < stats->map_capacity; ++i) {
                        if (link_id_map & BIT(i)) {
                                if (stats->link_map_cache[i]) {
                                        empty = false;
                                        break;
                                }
                        }
                }
        }
        spin_unlock_bh(&stats->lock);
        return empty;
}