lib/list_sort.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/list_sort.h>
#include <linux/list.h>

typedef int __attribute__((nonnull(2,3))) (*cmp_func)(void *,
                struct list_head const *, struct list_head const *);

/*
 * Returns a list organized in an intermediate format suited
 * to chaining of merge() calls: null-terminated, no reserved or
 * sentinel head node, "prev" links not maintained.
 */
__attribute__((nonnull(2,3,4)))
static struct list_head *merge(void *priv, cmp_func cmp,
                               struct list_head *a, struct list_head *b)
{
        struct list_head *head, **tail = &head;

        for (;;) {
                /* if equal, take 'a' -- important for sort stability */
                if (cmp(priv, a, b) <= 0) {
                        *tail = a;
                        tail = &a->next;
                        a = a->next;
                        if (!a) {
                                *tail = b;
                                break;
                        }
                } else {
                        *tail = b;
                        tail = &b->next;
                        b = b->next;
                        if (!b) {
                                *tail = a;
                                break;
                        }
                }
        }
        return head;
}

/*
 * Combine final list merge with restoration of standard doubly-linked
 * list structure. This approach duplicates code from merge(), but
 * runs faster than the tidier alternatives of either a separate final
 * prev-link restoration pass, or maintaining the prev links
 * throughout.
 */
__attribute__((nonnull(2,3,4,5)))
static void merge_final(void *priv, cmp_func cmp, struct list_head *head,
                        struct list_head *a, struct list_head *b)
{
        struct list_head *tail = head;
        u8 count = 0;

        for (;;) {
                /* if equal, take 'a' -- important for sort stability */
                if (cmp(priv, a, b) <= 0) {
                        tail->next = a;
                        a->prev = tail;
                        tail = a;
                        a = a->next;
                        if (!a)
                                break;
                } else {
                        tail->next = b;
                        b->prev = tail;
                        tail = b;
                        b = b->next;
                        if (!b) {
                                b = a;
                                break;
                        }
                }
        }

        /* Finish linking remainder of list b on to tail */
        tail->next = b;
        do {
                /*
                 * If the merge is highly unbalanced (e.g. the input is
                 * already sorted), this loop may run many iterations.
                 * Continue callbacks to the client even though no
                 * element comparison is needed, so the client's cmp()
                 * routine can invoke cond_resched() periodically.
                 */
                if (unlikely(!++count))
                        cmp(priv, b, b);
                b->prev = tail;
                tail = b;
                b = b->next;
        } while (b);

        /* And the final links to make a circular doubly-linked list */
        tail->next = head;
        head->prev = tail;
}

/**
 * list_sort - sort a list
 * @priv: private data, opaque to list_sort(), passed to @cmp
 * @head: the list to sort
 * @cmp: the elements comparison function
 *
 * The comparison function @cmp must return > 0 if @a should sort after
 * @b ("@a > @b" if you want an ascending sort), and <= 0 if @a should
 * sort before @b *or* their original order should be preserved. It is
 * always called with the element that came first in the input in @a,
 * and list_sort is a stable sort, so it is not necessary to distinguish
 * the @a < @b and @a == @b cases.
 *
 * This is compatible with two styles of @cmp function:
 * - The traditional style which returns <0 / =0 / >0, or
 * - Returning a boolean 0/1.
 * The latter offers a chance to save a few cycles in the comparison
 * (which is used by e.g. plug_ctx_cmp() in block/blk-mq.c).
 *
 * A good way to write a multi-word comparison is::
 *
 *        if (a->high != b->high)
 *                return a->high > b->high;
 *        if (a->middle != b->middle)
 *                return a->middle > b->middle;
 *        return a->low > b->low;
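 *
 * As an illustrative sketch (struct item, its key field and the
 * 'items' list below are hypothetical, not part of this file), a
 * boolean-style @cmp and the corresponding call might look like::
 *
 *        struct item {
 *                struct list_head node;
 *                int key;
 *        };
 *
 *        static int item_cmp(void *priv, struct list_head *a,
 *                            struct list_head *b)
 *        {
 *                struct item *ia = list_entry(a, struct item, node);
 *                struct item *ib = list_entry(b, struct item, node);
 *
 *                return ia->key > ib->key;
 *        }
 *
 *        list_sort(NULL, &items, item_cmp);
 *
 * This sorts 'items' ascending by key, preserving the input order of
 * elements with equal keys.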
 *
 * This mergesort is as eager as possible while always performing at least
 * 2:1 balanced merges. Given two pending sublists of size 2^k, they are
 * merged to a size-2^(k+1) list as soon as we have 2^k following elements.
 *
 * Thus, it will avoid cache thrashing as long as 3*2^k elements can
 * fit into the cache. Not quite as good as a fully-eager bottom-up
 * mergesort, but it does use 0.2*n fewer comparisons, so is faster in
 * the common case that everything fits into L1.
 *
 *
 * The merging is controlled by "count", the number of elements in the
 * pending lists. This is beautifully simple code, but rather subtle.
 *
 * Each time we increment "count", we set one bit (bit k) and clear
 * bits k-1 .. 0. Each time this happens (except the very first time
 * for each bit, when count increments to 2^k), we merge two lists of
 * size 2^k into one list of size 2^(k+1).
 *
 * This merge happens exactly when the count reaches an odd multiple of
 * 2^k, which is when we have 2^k elements pending in smaller lists,
 * so it's safe to merge away two lists of size 2^k.
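 *
 * For example, when count goes from 5 (101b) to 6 (110b), bit 1 is set
 * and bit 0 is cleared: the two pending lists of size 2 are merged into
 * one list of size 4, and the element just taken from the input becomes
 * a new size-1 sublist.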
 *
 * After this happens twice, we have created two lists of size 2^(k+1),
 * which will be merged into a list of size 2^(k+2) before we create
 * a third list of size 2^(k+1), so there are never more than two pending.
 *
 * The number of pending lists of size 2^k is determined by the
 * state of bit k of "count" plus two extra pieces of information:
 *
 * - The state of bit k-1 (when k == 0, consider bit -1 always set), and
 * - Whether the higher-order bits are zero or non-zero (i.e.
 *   is count >= 2^(k+1)).
 *
 * There are six states we distinguish. "x" represents some arbitrary
 * bits, and "y" represents some arbitrary non-zero bits:
 * 0:  00x: 0 pending of size 2^k;           x pending of sizes < 2^k
 * 1:  01x: 0 pending of size 2^k; 2^(k-1) + x pending of sizes < 2^k
 * 2: x10x: 0 pending of size 2^k;     2^k + x pending of sizes < 2^k
 * 3: x11x: 1 pending of size 2^k; 2^(k-1) + x pending of sizes < 2^k
 * 4: y00x: 1 pending of size 2^k;     2^k + x pending of sizes < 2^k
 * 5: y01x: 2 pending of size 2^k; 2^(k-1) + x pending of sizes < 2^k
 * (merge and loop back to state 2)
 *
 * We gain lists of size 2^k in the 2->3 and 4->5 transitions (because
 * bit k-1 is set while the more significant bits are non-zero) and
 * merge them away in the 5->2 transition. Note in particular that just
 * before the 5->2 transition, all lower-order bits are 11 (state 3),
 * so there is one list of each smaller size.
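 *
 * As a concrete illustration, consider bit k = 1 as count runs through
 * 2, 3, 4, 5, 6 (binary 010, 011, 100, 101, 110): the states visited
 * are 2, 3, 4, 5 and back to 2. A size-2 sublist appears as count
 * reaches 3 and again as it reaches 5, and the pair is merged into a
 * size-4 list as count reaches 6.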
 *
 * When we reach the end of the input, we merge all the pending
 * lists, from smallest to largest. If you work through cases 2 to
 * 5 above, you can see that the number of elements we merge with a list
 * of size 2^k varies from 2^(k-1) (cases 3 and 5 when x == 0) to
 * 2^(k+1) - 1 (second merge of case 5 when x == 2^(k-1) - 1).
 */
__attribute__((nonnull(2,3)))
void list_sort(void *priv, struct list_head *head,
               int (*cmp)(void *priv, struct list_head *a,
                          struct list_head *b))
{
        struct list_head *list = head->next, *pending = NULL;
        size_t count = 0;                       /* Count of pending */

        if (list == head->prev)                 /* Zero or one elements */
                return;

        /* Convert to a null-terminated singly-linked list. */
        head->prev->next = NULL;

        /*
         * Data structure invariants:
         * - All lists are singly linked and null-terminated; prev
         *   pointers are not maintained.
         * - pending is a prev-linked "list of lists" of sorted
         *   sublists awaiting further merging.
         * - Each of the sorted sublists is power-of-two in size.
         * - Sublists are sorted by size and age, smallest & newest at front.
         * - There are zero to two sublists of each size.
         * - A pair of pending sublists are merged as soon as the number
         *   of following pending elements equals their size (i.e.
         *   each time count reaches an odd multiple of that size).
         *   That ensures each later final merge will be at worst 2:1.
         * - Each round consists of:
         *   - Merging the two sublists selected by the highest bit
         *     which flips when count is incremented, and
         *   - Adding an element from the input as a size-1 sublist.
         */
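        /*
         * For instance, once count has reached 6 (110b), pending holds
         * three sorted sublists of sizes 1, 1 and 4, newest and smallest
         * at the front, consistent with the invariants above.
         */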
        do {
                size_t bits;
                struct list_head **tail = &pending;

                /* Find the least-significant clear bit in count */
                for (bits = count; bits & 1; bits >>= 1)
                        tail = &(*tail)->prev;
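                /*
                 * Each set bit skipped over corresponds to one pending
                 * sublist, so when bits is non-zero, *tail points at the
                 * pair of sublists selected for this round's merge.
                 */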
                /* Do the indicated merge */
                if (likely(bits)) {
                        struct list_head *a = *tail, *b = a->prev;

                        a = merge(priv, (cmp_func)cmp, b, a);
                        /* Install the merged result in place of the inputs */
                        a->prev = b->prev;
                        *tail = a;
                }

                /* Move one element from input list to pending */
                list->prev = pending;
                pending = list;
                list = list->next;
                pending->next = NULL;
                count++;
        } while (list);

        /* End of input; merge together all the pending lists. */
        list = pending;
        pending = pending->prev;
        for (;;) {
                struct list_head *next = pending->prev;

                if (!next)
                        break;
                list = merge(priv, (cmp_func)cmp, pending, list);
                pending = next;
        }
        /* The final merge, rebuilding prev links */
        merge_final(priv, (cmp_func)cmp, head, pending, list);
}
EXPORT_SYMBOL(list_sort);