// SPDX-License-Identifier: GPL-2.0
/*
 * A fast, small, non-recursive O(n log n) sort for the Linux kernel
 *
 * This performs n*log2(n) + 0.37*n + o(n) comparisons on average,
 * and 1.5*n*log2(n) + O(n) in the (very contrived) worst case.
 *
 * Quicksort manages n*log2(n) - 1.26*n for random inputs (1.63*n
 * better) at the expense of stack usage and much larger code to avoid
 * quicksort's O(n^2) worst case.
 */

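/*
 * As a rough worked example of those bounds (derived from the formulas
 * above, not measured): sorting n = 1000 random elements averages about
 * 1000*log2(1000) + 0.37*1000 ~= 10,300 comparisons here, while
 * quicksort's n*log2(n) - 1.26*n works out to about 8,700.
 */
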
#include <linux/types.h>
#include <linux/export.h>
#include <linux/sort.h>

/**
 * is_aligned - is this pointer & size okay for word-wide copying?
 * @base: pointer to data
 * @size: size of each element
 * @align: required alignment (typically 4 or 8)
 *
 * Returns true if elements can be copied using word loads and stores.
 * The size must be a multiple of the alignment, and the base address
 * must be suitably aligned as well unless
 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is set.
 *
 * For some reason, gcc doesn't know to optimize "if (a & mask || b & mask)"
 * to "if ((a | b) & mask)", so we do that by hand.
 */
__attribute_const__ __always_inline
static bool is_aligned(const void *base, size_t size, unsigned char align)
{
	unsigned char lsbits = (unsigned char)size;

	(void)base;	/* unused when unaligned access is efficient */
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	lsbits |= (unsigned char)(uintptr_t)base;
#endif
	return (lsbits & (align - 1)) == 0;
}

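/*
 * Illustrative behaviour (values chosen as examples): is_aligned(base, 6, 4)
 * is always false, because size 6 sets bit 1 of lsbits regardless of the
 * base address; size 8 with an 8-byte-aligned base passes the
 * (lsbits & 7) test, so swap_words_64() below may be used.
 */
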
/**
 * swap_words_32 - swap two elements in 32-bit chunks
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size (must be a multiple of 4)
 *
 * Exchange the two objects in memory.  This exploits base+index addressing,
 * which basically all CPUs have, to minimize loop overhead computations.
 *
 * For some reason, on x86 gcc 7.3.0 adds a redundant test of n at the
 * bottom of the loop, even though the zero flag is still valid from the
 * subtract (since the intervening mov instructions don't alter the flags).
 * Gcc 8.1.0 doesn't have that problem.
 */
static void swap_words_32(void *a, void *b, size_t n)
{
	do {
		u32 t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;
	} while (n);
}

/**
 * swap_words_64 - swap two elements in 64-bit chunks
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size (must be a multiple of 8)
 *
 * Exchange the two objects in memory.  This exploits base+index
 * addressing, which basically all CPUs have, to minimize loop overhead
 * computations.
 *
 * We'd like to use 64-bit loads if possible.  If they're not available,
 * emulating one requires base+index+4 addressing which x86 has but most
 * other processors do not.  If CONFIG_64BIT, we definitely have 64-bit
 * loads, but it's possible to have 64-bit loads without 64-bit pointers
 * (e.g. x32 ABI).  Are there any cases the kernel needs to worry about?
 */
static void swap_words_64(void *a, void *b, size_t n)
{
	do {
#ifdef CONFIG_64BIT
		u64 t = *(u64 *)(a + (n -= 8));
		*(u64 *)(a + n) = *(u64 *)(b + n);
		*(u64 *)(b + n) = t;
#else
		/* Use two 32-bit transfers to avoid base+index+4 addressing */
		u32 t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;

		t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;
#endif
	} while (n);
}

/**
 * swap_bytes - swap two elements a byte at a time
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size
 *
 * This is the fallback if alignment doesn't allow using larger chunks.
 */
static void swap_bytes(void *a, void *b, size_t n)
{
	do {
		char t = ((char *)a)[--n];
		((char *)a)[n] = ((char *)b)[n];
		((char *)b)[n] = t;
	} while (n);
}

/*
 * The values are arbitrary as long as they can't be confused with
 * a pointer, but small integers make for the smallest compare
 * instructions.
 */
#define SWAP_WORDS_64 (swap_r_func_t)0
#define SWAP_WORDS_32 (swap_r_func_t)1
#define SWAP_BYTES (swap_r_func_t)2
#define SWAP_WRAPPER (swap_r_func_t)3

struct wrapper {
	cmp_func_t cmp;
	swap_func_t swap;
};

/*
 * The function pointer is last to make tail calls most efficient if the
 * compiler decides not to inline this function.
 */
static void do_swap(void *a, void *b, size_t size, swap_r_func_t swap_func, const void *priv)
{
	if (swap_func == SWAP_WRAPPER) {
		((const struct wrapper *)priv)->swap(a, b, (int)size);
		return;
	}

	if (swap_func == SWAP_WORDS_64)
		swap_words_64(a, b, size);
	else if (swap_func == SWAP_WORDS_32)
		swap_words_32(a, b, size);
	else if (swap_func == SWAP_BYTES)
		swap_bytes(a, b, size);
	else
		swap_func(a, b, (int)size, priv);
}

#define _CMP_WRAPPER ((cmp_r_func_t)0L)

static int do_cmp(const void *a, const void *b, cmp_r_func_t cmp, const void *priv)
{
	if (cmp == _CMP_WRAPPER)
		return ((const struct wrapper *)priv)->cmp(a, b);
	return cmp(a, b, priv);
}

/**
 * parent - given the offset of the child, find the offset of the parent.
 * @i: the offset of the heap element whose parent is sought.  Non-zero.
 * @lsbit: a precomputed 1-bit mask, equal to "size & -size"
 * @size: size of each element
 *
 * In terms of array indexes, the parent of element j = @i/@size is simply
 * (j-1)/2.  But when working in byte offsets, we can't use implicit
 * truncation of integer divides.
 *
 * Fortunately, we only need one bit of the quotient, not the full divide.
 * @size has a least significant bit.  That bit will be clear if @i is
 * an even multiple of @size, and set if it's an odd multiple.
 *
 * Logically, we're doing "if (i & lsbit) i -= size;", but since the
 * branch is unpredictable, it's done with a bit of clever branch-free
 * code instead.
 */
__attribute_const__ __always_inline
static size_t parent(size_t i, unsigned int lsbit, size_t size)
{
	i -= size;
	i -= size & -(i & lsbit);
	return i / 2;
}

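/*
 * Worked example (values chosen for illustration): size == 12, so
 * lsbit == 4.  The element at index 4 sits at byte offset i == 48; its
 * parent should be index (4-1)/2 == 1, i.e. offset 12.  After i -= size,
 * i == 36; 36 & lsbit is nonzero (index 3 is odd), so size is subtracted
 * once more branch-free, and 24/2 == 12 as required.
 */
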
/**
 * sort_r - sort an array of elements
 * @base: pointer to data to sort
 * @num: number of elements
 * @size: size of each element
 * @cmp_func: pointer to comparison function
 * @swap_func: pointer to swap function or NULL
 * @priv: third argument passed to comparison function
 *
 * This function does a heapsort on the given array.  You may provide
 * a swap_func if you need to do something more than a memory copy
 * (e.g. fix up pointers or auxiliary data), but the built-in swap
 * avoids a slow retpoline and so is significantly faster.
 *
 * Sorting time is O(n log n) both on average and worst-case.  While
 * quicksort is slightly faster on average, it suffers from exploitable
 * O(n*n) worst-case behavior and extra memory requirements that make
 * it less suitable for kernel use.
 */
void sort_r(void *base, size_t num, size_t size,
	    cmp_r_func_t cmp_func,
	    swap_r_func_t swap_func,
	    const void *priv)
{
	/* pre-scale counters for performance */
	size_t n = num * size, a = (num/2) * size;
	const unsigned int lsbit = size & -size;	/* Used to find parent */
	size_t shift = 0;

	if (!a)		/* num < 2 || size == 0 */
		return;

	/* called from 'sort' without swap function, let's pick the default */
	if (swap_func == SWAP_WRAPPER && !((struct wrapper *)priv)->swap)
		swap_func = NULL;

	if (!swap_func) {
		if (is_aligned(base, size, 8))
			swap_func = SWAP_WORDS_64;
		else if (is_aligned(base, size, 4))
			swap_func = SWAP_WORDS_32;
		else
			swap_func = SWAP_BYTES;
	}

	/*
	 * Loop invariants:
	 * 1. elements [a,n) satisfy the heap property (compare greater than
	 *    all of their children),
	 * 2. elements [n,num*size) are sorted, and
	 * 3. a <= b <= c <= d <= n (whenever they are valid).
	 */
	for (;;) {
		size_t b, c, d;

		if (a)			/* Building heap: sift down a */
			a -= size << shift;
		else if (n > 3 * size) { /* Sorting: Extract two largest elements */
			n -= size;
			do_swap(base, base + n, size, swap_func, priv);
			shift = do_cmp(base + size, base + 2 * size, cmp_func, priv) <= 0;
			a = size << shift;
			n -= size;
			do_swap(base + a, base + n, size, swap_func, priv);
		} else {		/* Sort complete */
			break;
		}

		/*
		 * Sift element at "a" down into heap.  This is the
		 * "bottom-up" variant, which significantly reduces
		 * calls to cmp_func(): we find the sift-down path all
		 * the way to the leaves (one compare per level), then
		 * backtrack to find where to insert the target element.
		 *
		 * Because elements tend to sift down close to the leaves,
		 * this uses fewer compares than doing two per level
		 * on the way down.  (A bit more than half as many on
		 * average, 3/4 worst-case.)
		 */
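		/*
		 * Rough illustration of that claim (not from the original
		 * source): on a path 10 levels deep, a top-down sift pays
		 * up to 2 compares per level, ~20 total; this variant pays
		 * 10 on the way down plus a backtrack that usually ends
		 * within a level or two of the leaves.
		 */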
		for (b = a; c = 2*b + size, (d = c + size) < n;)
			b = do_cmp(base + c, base + d, cmp_func, priv) >= 0 ? c : d;
		if (d == n)	/* Special case last leaf with no sibling */
			b = c;

		/* Now backtrack from "b" to the correct location for "a" */
		while (b != a && do_cmp(base + a, base + b, cmp_func, priv) >= 0)
			b = parent(b, lsbit, size);
		c = b;			/* Where "a" belongs */
		while (b != a) {	/* Shift it into place */
			b = parent(b, lsbit, size);
			do_swap(base + b, base + c, size, swap_func, priv);
		}
	}

	/* Place the final two or three elements without further sifting */
	n -= size;
	do_swap(base, base + n, size, swap_func, priv);
	if (n == size * 2 && do_cmp(base, base + size, cmp_func, priv) > 0)
		do_swap(base, base + size, size, swap_func, priv);
}
EXPORT_SYMBOL(sort_r);

/**
 * sort - sort an array of elements
 * @base: pointer to data to sort
 * @num: number of elements
 * @size: size of each element
 * @cmp_func: pointer to comparison function
 * @swap_func: pointer to swap function or NULL
 *
 * This is a wrapper around sort_r() for callers whose comparison and
 * swap callbacks do not need a @priv argument.
 */
void sort(void *base, size_t num, size_t size,
	  cmp_func_t cmp_func,
	  swap_func_t swap_func)
{
	struct wrapper w = {
		.cmp  = cmp_func,
		.swap = swap_func,
	};

	return sort_r(base, num, size, _CMP_WRAPPER, SWAP_WRAPPER, &w);
}
EXPORT_SYMBOL(sort);

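/*
 * Usage sketch (illustrative, not part of this file): a caller with a
 * plain array and no need for a custom swap passes NULL for swap_func
 * and lets sort() pick the fastest built-in swap:
 *
 *	static int cmp_int(const void *a, const void *b)
 *	{
 *		int x = *(const int *)a, y = *(const int *)b;
 *
 *		return (x > y) - (x < y);
 *	}
 *
 *	int vals[] = { 3, 1, 2 };
 *	sort(vals, ARRAY_SIZE(vals), sizeof(*vals), cmp_int, NULL);
 */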