// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#include <linux/sched.h>
#include <linux/random.h>
#include <linux/sbitmap.h>
#include <linux/seq_file.h>

static int init_alloc_hint(struct sbitmap *sb, gfp_t flags)
{
	unsigned depth = sb->depth;

	sb->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
	if (!sb->alloc_hint)
		return -ENOMEM;

	if (depth && !sb->round_robin) {
		int i;

		for_each_possible_cpu(i)
			*per_cpu_ptr(sb->alloc_hint, i) = get_random_u32_below(depth);
	}
	return 0;
}

static inline unsigned update_alloc_hint_before_get(struct sbitmap *sb,
						    unsigned int depth)
{
	unsigned hint;

	hint = this_cpu_read(*sb->alloc_hint);
	if (unlikely(hint >= depth)) {
		hint = depth ? get_random_u32_below(depth) : 0;
		this_cpu_write(*sb->alloc_hint, hint);
	}

	return hint;
}

static inline void update_alloc_hint_after_get(struct sbitmap *sb,
					       unsigned int depth,
					       unsigned int hint,
					       unsigned int nr)
{
	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sb->alloc_hint, 0);
	} else if (nr == hint || unlikely(sb->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sb->alloc_hint, hint);
	}
}

/*
 * See if we have deferred clears that we can batch move
 */
static inline bool sbitmap_deferred_clear(struct sbitmap_word *map,
		unsigned int depth, unsigned int alloc_hint, bool wrap)
{
	unsigned long mask, word_mask;

	guard(raw_spinlock_irqsave)(&map->swap_lock);

	if (!map->cleared) {
		if (depth == 0)
			return false;

		word_mask = (~0UL) >> (BITS_PER_LONG - depth);
		/*
		 * The current behavior is to always retry after moving
		 * ->cleared to word, and we change it to retry in case
		 * of any free bits. To avoid an infinite loop, we need
		 * to take wrap & alloc_hint into account, otherwise a
		 * soft lockup may occur.
		 */
		if (!wrap && alloc_hint)
			word_mask &= ~((1UL << alloc_hint) - 1);

		return (READ_ONCE(map->word) & word_mask) != word_mask;
	}

	/*
	 * First get a stable cleared mask, setting the old mask to 0.
	 */
	mask = xchg(&map->cleared, 0);

	/*
	 * Now clear the masked bits in our free word
	 */
	atomic_long_andnot(mask, (atomic_long_t *)&map->word);
	BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(map->word));
	return true;
}

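/*
 * Worked example (illustrative, not part of the original source): with
 * depth = 8, alloc_hint = 3 and wrap = false, word_mask starts as 0xff and
 * the low three bits are masked off, leaving 0xf8. The check above then
 * reports free bits only at or above the hint, i.e. only bits the caller's
 * next non-wrapping scan could actually claim, which is what breaks the
 * retry loop and avoids the soft lockup mentioned in the comment.
 */
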
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node, bool round_robin,
		      bool alloc_hint)
{
	unsigned int bits_per_word;
	int i;

	if (shift < 0)
		shift = sbitmap_calculate_shift(depth);

	bits_per_word = 1U << shift;
	if (bits_per_word > BITS_PER_LONG)
		return -EINVAL;

	sb->shift = shift;
	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
	sb->round_robin = round_robin;

	if (depth == 0) {
		sb->map = NULL;
		return 0;
	}

	if (alloc_hint) {
		if (init_alloc_hint(sb, flags))
			return -ENOMEM;
	} else {
		sb->alloc_hint = NULL;
	}

	sb->map = kvzalloc_node(sb->map_nr * sizeof(*sb->map), flags, node);
	if (!sb->map) {
		free_percpu(sb->alloc_hint);
		return -ENOMEM;
	}

	for (i = 0; i < sb->map_nr; i++)
		raw_spin_lock_init(&sb->map[i].swap_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);

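/*
 * Sizing example (illustrative, not part of the original source): depth = 100
 * with shift = 6 gives bits_per_word = 64, so map_nr = DIV_ROUND_UP(100, 64)
 * = 2; the first word covers bits 0..63 and the second, partial word covers
 * the remaining 36 bits (see __map_depth() in include/linux/sbitmap.h).
 */
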
void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
{
	unsigned int bits_per_word = 1U << sb->shift;
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++)
		sbitmap_deferred_clear(&sb->map[i], 0, 0, 0);

	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
}
EXPORT_SYMBOL_GPL(sbitmap_resize);

static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
			      unsigned int hint, bool wrap)
{
	int nr;

	/* don't wrap if starting from 0 */
	wrap = wrap && hint;

	while (1) {
		nr = find_next_zero_bit(word, depth, hint);
		if (unlikely(nr >= depth)) {
			/*
			 * We started with an offset, and we didn't reset the
			 * offset to 0 in a failure case, so start from 0 to
			 * exhaust the map.
			 */
			if (hint && wrap) {
				hint = 0;
				continue;
			}
			return -1;
		}

		if (!test_and_set_bit_lock(nr, word))
			break;

		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
	}

	return nr;
}

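/*
 * Illustrative example (not part of the original source): with depth = 8,
 * hint = 5 and bits 5..7 already set, find_next_zero_bit() returns 8. When
 * wrap is true the search restarts once from bit 0, so a free low bit can
 * still be found; when wrap is false (or the hint was 0 to begin with) the
 * function gives up and returns -1 instead of rescanning.
 */
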
static int sbitmap_find_bit_in_word(struct sbitmap_word *map,
				    unsigned int depth,
				    unsigned int alloc_hint,
				    bool wrap)
{
	int nr;

	do {
		nr = __sbitmap_get_word(&map->word, depth,
					alloc_hint, wrap);
		if (nr != -1)
			break;
		if (!sbitmap_deferred_clear(map, depth, alloc_hint, wrap))
			break;
	} while (1);

	return nr;
}

static int sbitmap_find_bit(struct sbitmap *sb,
			    unsigned int depth,
			    unsigned int index,
			    unsigned int alloc_hint,
			    bool wrap)
{
	unsigned int i;
	int nr = -1;

	for (i = 0; i < sb->map_nr; i++) {
		nr = sbitmap_find_bit_in_word(&sb->map[index],
					      min_t(unsigned int,
						    __map_depth(sb, index),
						    depth),
					      alloc_hint, wrap);

		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		/* Jump to next index. */
		alloc_hint = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}

	return nr;
}

static int __sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint)
{
	unsigned int index;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	/*
	 * Unless we're doing round robin tag allocation, just use the
	 * alloc_hint to find the right word index. No point in looping
	 * twice in find_next_zero_bit() for that case.
	 */
	if (sb->round_robin)
		alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
	else
		alloc_hint = 0;

	return sbitmap_find_bit(sb, UINT_MAX, index, alloc_hint,
				!sb->round_robin);
}

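/*
 * Illustrative example (not part of the original source): with shift = 6 and
 * alloc_hint = 70, the search starts in word 1. In the default mode the
 * intra-word hint is dropped and the whole word is scanned with wrapping
 * allowed; in round-robin mode the scan starts at bit 6 of that word and
 * does not wrap, which keeps tags handed out in roughly ascending order.
 */
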
int sbitmap_get(struct sbitmap *sb)
{
	int nr;
	unsigned int hint, depth;

	if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
		return -1;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);
	nr = __sbitmap_get(sb, hint);
	update_alloc_hint_after_get(sb, depth, hint, nr);

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get);

static int __sbitmap_get_shallow(struct sbitmap *sb,
				 unsigned int alloc_hint,
				 unsigned long shallow_depth)
{
	unsigned int index;

	index = SB_NR_TO_INDEX(sb, alloc_hint);
	alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);

	return sbitmap_find_bit(sb, shallow_depth, index, alloc_hint, true);
}

int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth)
{
	int nr;
	unsigned int hint, depth;

	if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
		return -1;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);
	nr = __sbitmap_get_shallow(sb, hint, shallow_depth);
	update_alloc_hint_after_get(sb, depth, hint, nr);

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get_shallow);

bool sbitmap_any_bit_set(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		if (sb->map[i].word & ~sb->map[i].cleared)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);

static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
{
	unsigned int i, weight = 0;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];
		unsigned int word_depth = __map_depth(sb, i);

		if (set)
			weight += bitmap_weight(&word->word, word_depth);
		else
			weight += bitmap_weight(&word->cleared, word_depth);
	}
	return weight;
}

static unsigned int sbitmap_cleared(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, false);
}

unsigned int sbitmap_weight(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, true) - sbitmap_cleared(sb);
}
EXPORT_SYMBOL_GPL(sbitmap_weight);

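/*
 * Illustrative example (not part of the original source): if a word has five
 * bits set and two of those are also marked in ->cleared, the set weight is 5
 * and the cleared weight is 2, so sbitmap_weight() reports 3 genuinely busy
 * bits.
 */
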
void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	seq_printf(m, "depth=%u\n", sb->depth);
	seq_printf(m, "busy=%u\n", sbitmap_weight(sb));
	seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
	seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
	seq_printf(m, "map_nr=%u\n", sb->map_nr);
}
EXPORT_SYMBOL_GPL(sbitmap_show);

static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte)
{
	if ((offset & 0xf) == 0) {
		if (offset != 0)
			seq_putc(m, '\n');
		seq_printf(m, "%08x:", offset);
	}
	if ((offset & 0x1) == 0)
		seq_putc(m, ' ');
	seq_printf(m, "%02x", byte);
}

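/*
 * Output sketch (illustrative, not part of the original source): the bytes
 * are rendered as a conventional hex dump, 16 per row with a space before
 * every even byte, e.g.
 *
 *	00000000: ffff ffff 0f00 ...
 *
 * where the leading field is the byte offset printed by "%08x:" above.
 */
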
void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	u8 byte = 0;
	unsigned int byte_bits = 0;
	unsigned int offset = 0;
	int i;

	for (i = 0; i < sb->map_nr; i++) {
		unsigned long word = READ_ONCE(sb->map[i].word);
		unsigned long cleared = READ_ONCE(sb->map[i].cleared);
		unsigned int word_bits = __map_depth(sb, i);

		word &= ~cleared;

		while (word_bits > 0) {
			unsigned int bits = min(8 - byte_bits, word_bits);

			byte |= (word & (BIT(bits) - 1)) << byte_bits;
			byte_bits += bits;
			if (byte_bits == 8) {
				emit_byte(m, offset, byte);
				byte = 0;
				byte_bits = 0;
				offset++;
			}
			word >>= bits;
			word_bits -= bits;
		}
	}
	if (byte_bits) {
		emit_byte(m, offset, byte);
		offset++;
	}
	if (offset)
		seq_putc(m, '\n');
}
EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);

static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
					unsigned int depth)
{
	unsigned int wake_batch;
	unsigned int shallow_depth;

	/*
	 * Each full word of the bitmap has bits_per_word bits, and there might
	 * be a partial word. There are depth / bits_per_word full words and
	 * depth % bits_per_word bits left over. In bitwise arithmetic:
	 *
	 * bits_per_word = 1 << shift
	 * depth / bits_per_word = depth >> shift
	 * depth % bits_per_word = depth & ((1 << shift) - 1)
	 *
	 * Each word can be limited to sbq->min_shallow_depth bits.
	 */
	shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
	depth = ((depth >> sbq->sb.shift) * shallow_depth +
		 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
	wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
			     SBQ_WAKE_BATCH);

	return wake_batch;
}

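/*
 * Worked example (illustrative, not part of the original source), assuming
 * SBQ_WAIT_QUEUES == 8 and SBQ_WAKE_BATCH == 8: with sb.shift = 6 and
 * depth = 100, an unlimited min_shallow_depth gives shallow_depth = 64, so
 * the usable depth stays (100 >> 6) * 64 + min(100 & 63, 64) = 100 and
 * wake_batch = clamp(100 / 8, 1, 8) = 8. With min_shallow_depth = 8 the
 * usable depth shrinks to 1 * 8 + min(36, 8) = 16 and wake_batch becomes
 * clamp(16 / 8, 1, 8) = 2.
 */
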
int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node)
{
	int ret;
	int i;

	ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node,
				round_robin, true);
	if (ret)
		return ret;

	sbq->min_shallow_depth = UINT_MAX;
	sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
	atomic_set(&sbq->wake_index, 0);
	atomic_set(&sbq->ws_active, 0);
	atomic_set(&sbq->completion_cnt, 0);
	atomic_set(&sbq->wakeup_cnt, 0);

	sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
	if (!sbq->ws) {
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	for (i = 0; i < SBQ_WAIT_QUEUES; i++)
		init_waitqueue_head(&sbq->ws[i].wait);

	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);

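/*
 * Usage sketch (illustrative, not part of the original source). The helpers
 * below besides sbitmap_queue_init_node() are declared in
 * include/linux/sbitmap.h:
 *
 *	struct sbitmap_queue sbq;
 *	unsigned int cpu;
 *	int tag;
 *
 *	if (sbitmap_queue_init_node(&sbq, 128, -1, false, GFP_KERNEL,
 *				    NUMA_NO_NODE))
 *		return -ENOMEM;
 *
 *	tag = sbitmap_queue_get(&sbq, &cpu);	// -1 if no bit is free
 *	if (tag >= 0)
 *		sbitmap_queue_clear(&sbq, tag, cpu);
 *
 *	sbitmap_queue_free(&sbq);
 */
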
static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int depth)
{
	unsigned int wake_batch;

	wake_batch = sbq_calc_wake_batch(sbq, depth);
	if (sbq->wake_batch != wake_batch)
		WRITE_ONCE(sbq->wake_batch, wake_batch);
}

void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int users)
{
	unsigned int wake_batch;
	unsigned int depth = (sbq->sb.depth + users - 1) / users;

	wake_batch = clamp_val(depth / SBQ_WAIT_QUEUES,
			1, SBQ_WAKE_BATCH);

	WRITE_ONCE(sbq->wake_batch, wake_batch);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_recalculate_wake_batch);

void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
{
	sbitmap_queue_update_wake_batch(sbq, depth);
	sbitmap_resize(&sbq->sb, depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);

int __sbitmap_queue_get(struct sbitmap_queue *sbq)
{
	return sbitmap_get(&sbq->sb);
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);

unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
					unsigned int *offset)
{
	struct sbitmap *sb = &sbq->sb;
	unsigned int hint, depth;
	unsigned long index, nr;
	int i;

	if (unlikely(sb->round_robin))
		return 0;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);

	index = SB_NR_TO_INDEX(sb, hint);

	for (i = 0; i < sb->map_nr; i++) {
		struct sbitmap_word *map = &sb->map[index];
		unsigned long get_mask;
		unsigned int map_depth = __map_depth(sb, index);
		unsigned long val;

		sbitmap_deferred_clear(map, 0, 0, 0);
		val = READ_ONCE(map->word);
		if (val == (1UL << (map_depth - 1)) - 1)
			goto next;

		nr = find_first_zero_bit(&val, map_depth);
		if (nr + nr_tags <= map_depth) {
			atomic_long_t *ptr = (atomic_long_t *) &map->word;

			get_mask = ((1UL << nr_tags) - 1) << nr;
			while (!atomic_long_try_cmpxchg(ptr, &val,
							  get_mask | val))
				;
			get_mask = (get_mask & ~val) >> nr;

			*offset = nr + (index << sb->shift);
			update_alloc_hint_after_get(sb, depth, hint,
						*offset + nr_tags - 1);
			return get_mask;
		}
next:
		/* Jump to next index. */
		if (++index >= sb->map_nr)
			index = 0;
	}

	return 0;
}

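/*
 * Worked example (illustrative, not part of the original source): for
 * nr_tags = 3 starting at free bit nr = 4, get_mask is 0b111 << 4 = 0x70.
 * If a racing allocator grabbed bit 5 before the cmpxchg succeeded, the
 * final val has 0x20 set, so (get_mask & ~val) >> nr == 0x5: the caller
 * owns tags *offset + 0 and *offset + 2, but not *offset + 1.
 */
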
int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
			      unsigned int shallow_depth)
{
	WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);

	return sbitmap_get_shallow(&sbq->sb, shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_get_shallow);

void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth)
{
	sbq->min_shallow_depth = min_shallow_depth;
	sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);

static void __sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
{
	int i, wake_index, woken;

	if (!atomic_read(&sbq->ws_active))
		return;

	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		/*
		 * Advance the index before checking the current queue.
		 * It improves fairness, by ensuring the queue doesn't
		 * need to be fully emptied before trying to wake up
		 * from the next one.
		 */
		wake_index = sbq_index_inc(wake_index);

		if (waitqueue_active(&ws->wait)) {
			woken = wake_up_nr(&ws->wait, nr);
			if (woken == nr)
				break;
			nr -= woken;
		}
	}

	if (wake_index != atomic_read(&sbq->wake_index))
		atomic_set(&sbq->wake_index, wake_index);
}

void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
{
	unsigned int wake_batch = READ_ONCE(sbq->wake_batch);
	unsigned int wakeups;

	if (!atomic_read(&sbq->ws_active))
		return;

	atomic_add(nr, &sbq->completion_cnt);
	wakeups = atomic_read(&sbq->wakeup_cnt);

	do {
		if (atomic_read(&sbq->completion_cnt) - wakeups < wake_batch)
			return;
	} while (!atomic_try_cmpxchg(&sbq->wakeup_cnt,
				     &wakeups, wakeups + wake_batch));

	__sbitmap_queue_wake_up(sbq, wake_batch);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);

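/*
 * Illustrative example (not part of the original source): with
 * wake_batch = 4 and single-bit completions, the first three calls only
 * advance completion_cnt (to 1, 2, 3) and return early because the
 * difference to wakeup_cnt is still below the batch. The fourth call sees a
 * difference of 4, bumps wakeup_cnt by wake_batch via the cmpxchg loop and
 * wakes up to four waiters in one go.
 */
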
static inline void sbitmap_update_cpu_hint(struct sbitmap *sb, int cpu, int tag)
{
	if (likely(!sb->round_robin && tag < sb->depth))
		data_race(*per_cpu_ptr(sb->alloc_hint, cpu) = tag);
}

void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
				int *tags, int nr_tags)
{
	struct sbitmap *sb = &sbq->sb;
	unsigned long *addr = NULL;
	unsigned long mask = 0;
	int i;

	smp_mb__before_atomic();
	for (i = 0; i < nr_tags; i++) {
		const int tag = tags[i] - offset;
		unsigned long *this_addr;

		/* since we're clearing a batch, skip the deferred map */
		this_addr = &sb->map[SB_NR_TO_INDEX(sb, tag)].word;
		if (!addr) {
			addr = this_addr;
		} else if (addr != this_addr) {
			atomic_long_andnot(mask, (atomic_long_t *) addr);
			mask = 0;
			addr = this_addr;
		}
		mask |= (1UL << SB_NR_TO_BIT(sb, tag));
	}

	if (mask)
		atomic_long_andnot(mask, (atomic_long_t *) addr);

	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq, nr_tags);
	sbitmap_update_cpu_hint(&sbq->sb, raw_smp_processor_id(),
					tags[nr_tags - 1] - offset);
}

void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu)
{
	/*
	 * Once the clear bit is set, the bit may be allocated out.
	 *
	 * Orders READ/WRITE on the associated instance (such as request
	 * of blk_mq) by this bit for avoiding race with re-allocation,
	 * and its pair is the memory barrier implied in __sbitmap_get_word.
	 *
	 * One invariant is that the clear bit has to be zero when the bit
	 * is in use.
	 */
	smp_mb__before_atomic();
	sbitmap_deferred_clear_bit(&sbq->sb, nr);

	/*
	 * Pairs with the memory barrier in set_current_state() to ensure the
	 * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker
	 * and test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the
	 * waiter. See the comment on waitqueue_active().
	 */
	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq, 1);
	sbitmap_update_cpu_hint(&sbq->sb, cpu, nr);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);

void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	/*
	 * Pairs with the memory barrier in set_current_state() like in
	 * sbitmap_queue_wake_up().
	 */
	smp_mb();
	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait))
			wake_up(&ws->wait);

		wake_index = sbq_index_inc(wake_index);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);

void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
{
	bool first;
	int i;

	sbitmap_show(&sbq->sb, m);

	seq_puts(m, "alloc_hint={");
	first = true;
	for_each_possible_cpu(i) {
		if (!first)
			seq_puts(m, ", ");
		first = false;
		seq_printf(m, "%u", *per_cpu_ptr(sbq->sb.alloc_hint, i));
	}
	seq_puts(m, "}\n");

	seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
	seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
	seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));

	seq_puts(m, "ws={\n");
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[i];

		seq_printf(m, "\t{.wait=%s},\n",
			   waitqueue_active(&ws->wait) ? "active" : "inactive");
	}
	seq_puts(m, "}\n");

	seq_printf(m, "round_robin=%d\n", sbq->sb.round_robin);
	seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_show);

void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
			    struct sbq_wait_state *ws,
			    struct sbq_wait *sbq_wait)
{
	if (!sbq_wait->sbq) {
		sbq_wait->sbq = sbq;
		atomic_inc(&sbq->ws_active);
		add_wait_queue(&ws->wait, &sbq_wait->wait);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue);

void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait)
{
	list_del_init(&sbq_wait->wait.entry);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq_wait->sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_del_wait_queue);

void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
			     struct sbq_wait_state *ws,
			     struct sbq_wait *sbq_wait, int state)
{
	if (!sbq_wait->sbq) {
		atomic_inc(&sbq->ws_active);
		sbq_wait->sbq = sbq;
	}
	prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
}
EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait);

void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
			 struct sbq_wait *sbq_wait)
{
	finish_wait(&ws->wait, &sbq_wait->wait);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_finish_wait);