// SPDX-License-Identifier: GPL-2.0-only
/*
 * Global CPU deadline management
 *
 * Author: Juri Lelli <j.lelli@sssup.it>
 */

static inline int parent(int i)
{
	return (i - 1) >> 1;
}

static inline int left_child(int i)
{
	return (i << 1) + 1;
}

static inline int right_child(int i)
{
	return (i << 1) + 2;
}
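
/*
 * Illustration only (hypothetical helper, not part of the original file):
 * a quick sanity check of the 0-based heap index math above. parent()
 * inverts both child maps, and the right child always directly follows
 * the left one.
 */
static inline void cpudl_index_sanity_example(void)
{
	int i = 5;

	WARN_ON(parent(left_child(i)) != i);
	WARN_ON(parent(right_child(i)) != i);
	WARN_ON(right_child(i) != left_child(i) + 1);
}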

static void cpudl_heapify_down(struct cpudl *cp, int idx)
{
	int l, r, largest;

	int orig_cpu = cp->elements[idx].cpu;
	u64 orig_dl = cp->elements[idx].dl;

	if (left_child(idx) >= cp->size)
		return;

	/* adapted from lib/prio_heap.c */
	while (1) {
		u64 largest_dl;

		l = left_child(idx);
		r = right_child(idx);
		largest = idx;
		largest_dl = orig_dl;

		if ((l < cp->size) && dl_time_before(orig_dl,
						cp->elements[l].dl)) {
			largest = l;
			largest_dl = cp->elements[l].dl;
		}
		if ((r < cp->size) && dl_time_before(largest_dl,
						cp->elements[r].dl))
			largest = r;

		if (largest == idx)
			break;

		/* pull largest child onto idx */
		cp->elements[idx].cpu = cp->elements[largest].cpu;
		cp->elements[idx].dl = cp->elements[largest].dl;
		cp->elements[cp->elements[idx].cpu].idx = idx;
		idx = largest;
	}
	/* actual push down of saved original values orig_* */
	cp->elements[idx].cpu = orig_cpu;
	cp->elements[idx].dl = orig_dl;
	cp->elements[cp->elements[idx].cpu].idx = idx;
}
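
/*
 * Illustration only (hypothetical, not part of the original file): the
 * same "hole" technique on a bare array of u64 deadlines, without the
 * cpu<->idx back-pointers. A plain "<" stands in for dl_time_before().
 * The saved value is written exactly once, after the hole has sunk to
 * its final slot, instead of being swapped at every level.
 */
static inline void sift_down_sketch(u64 *dl, int size, int idx)
{
	u64 orig = dl[idx];

	while (left_child(idx) < size) {
		int child = left_child(idx);

		/* pick the later-deadline child (max-heap order) */
		if (child + 1 < size && dl[child] < dl[child + 1])
			child++;
		if (!(orig < dl[child]))
			break;
		dl[idx] = dl[child];	/* pull the child up into the hole */
		idx = child;
	}
	dl[idx] = orig;			/* single final write */
}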

static void cpudl_heapify_up(struct cpudl *cp, int idx)
{
	int p;

	int orig_cpu = cp->elements[idx].cpu;
	u64 orig_dl = cp->elements[idx].dl;

	if (idx == 0)
		return;

	do {
		p = parent(idx);
		if (dl_time_before(orig_dl, cp->elements[p].dl))
			break;
		/* pull parent onto idx */
		cp->elements[idx].cpu = cp->elements[p].cpu;
		cp->elements[idx].dl = cp->elements[p].dl;
		cp->elements[cp->elements[idx].cpu].idx = idx;
		idx = p;
	} while (idx != 0);
	/* actual push up of saved original values orig_* */
	cp->elements[idx].cpu = orig_cpu;
	cp->elements[idx].dl = orig_dl;
	cp->elements[cp->elements[idx].cpu].idx = idx;
}
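
/*
 * Companion illustration (hypothetical, not part of the original file):
 * the upward variant on the same bare u64 array. Again a plain "<"
 * stands in for dl_time_before().
 */
static inline void sift_up_sketch(u64 *dl, int idx)
{
	u64 orig = dl[idx];

	while (idx > 0 && dl[parent(idx)] < orig) {
		dl[idx] = dl[parent(idx)];	/* pull the parent down */
		idx = parent(idx);
	}
	dl[idx] = orig;				/* single final write */
}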

static void cpudl_heapify(struct cpudl *cp, int idx)
{
	if (idx > 0 && dl_time_before(cp->elements[parent(idx)].dl,
				cp->elements[idx].dl))
		cpudl_heapify_up(cp, idx);
	else
		cpudl_heapify_down(cp, idx);
}
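
/*
 * Illustration (hypothetical, builds on the two sketches above): after an
 * in-place key change, at most one heap property can break. A key that
 * grew can only violate order with its parent; one that shrank, only with
 * its children. A single directional pass therefore restores the
 * invariant, which is exactly the dispatch cpudl_heapify() performs.
 */
static inline void update_key_sketch(u64 *dl, int size, int idx, u64 new_dl)
{
	dl[idx] = new_dl;
	if (idx > 0 && dl[parent(idx)] < dl[idx])
		sift_up_sketch(dl, idx);		/* key grew: may rise */
	else
		sift_down_sketch(dl, size, idx);	/* key shrank: may sink */
}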

static inline int cpudl_maximum(struct cpudl *cp)
{
	return cp->elements[0].cpu;
}

/*
 * cpudl_find - find the best (later-dl) CPU in the system
 * @cp: the cpudl max-heap context
 * @p: the task
 * @later_mask: a mask to fill in with the selected CPUs (or NULL)
 *
 * Returns: 1 if a suitable CPU was found; 0 otherwise.
 */
int cpudl_find(struct cpudl *cp, struct task_struct *p,
	       struct cpumask *later_mask)
{
	const struct sched_dl_entity *dl_se = &p->dl;

	if (later_mask &&
	    cpumask_and(later_mask, cp->free_cpus, p->cpus_ptr)) {
		return 1;
	} else {
		int best_cpu = cpudl_maximum(cp);

		WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));

		if (cpumask_test_cpu(best_cpu, p->cpus_ptr) &&
		    dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
			if (later_mask)
				cpumask_set_cpu(best_cpu, later_mask);

			return 1;
		}
	}
	return 0;
}
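
/*
 * A sketch of how the push path consults this heap, loosely modeled on
 * find_later_rq() in kernel/sched/deadline.c (heavily simplified; the
 * real function also walks sched domains). The helper name is
 * hypothetical, and later_mask is assumed to be a preallocated cpumask.
 */
static inline int find_later_cpu_sketch(struct cpudl *cp,
					struct task_struct *task,
					struct cpumask *later_mask)
{
	if (!cpudl_find(cp, task, later_mask))
		return -1;	/* no CPU offers a later deadline */

	/* prefer the task's current CPU if it is among the candidates */
	if (cpumask_test_cpu(task_cpu(task), later_mask))
		return task_cpu(task);

	return cpumask_any(later_mask);
}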

/*
 * cpudl_clear - remove a CPU from the cpudl max-heap
 * @cp: the cpudl max-heap context
 * @cpu: the target CPU
 *
 * Notes: assumes cpu_rq(cpu)->lock is locked
 */
void cpudl_clear(struct cpudl *cp, int cpu)
{
	int old_idx, new_cpu;
	unsigned long flags;

	WARN_ON(!cpu_present(cpu));

	raw_spin_lock_irqsave(&cp->lock, flags);

	old_idx = cp->elements[cpu].idx;
	if (old_idx == IDX_INVALID) {
		/*
		 * Nothing to remove if old_idx was invalid.
		 * This could happen if rq_offline_dl() is
		 * called for a CPU without -dl tasks running.
		 */
	} else {
		/* move the last element into the vacated slot, then re-heapify */
		new_cpu = cp->elements[cp->size - 1].cpu;
		cp->elements[old_idx].dl = cp->elements[cp->size - 1].dl;
		cp->elements[old_idx].cpu = new_cpu;
		cp->size--;
		cp->elements[new_cpu].idx = old_idx;
		cp->elements[cpu].idx = IDX_INVALID;
		cpudl_heapify(cp, old_idx);

		cpumask_set_cpu(cpu, cp->free_cpus);
	}
	raw_spin_unlock_irqrestore(&cp->lock, flags);
}
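
/*
 * Call-site sketch, loosely modeled on rq_offline_dl() in
 * kernel/sched/deadline.c (simplified; the helper name is hypothetical,
 * and the declarations are assumed to come from cpudeadline.h): when a
 * runqueue leaves the root domain, its CPU is dropped from both the heap
 * and the free mask.
 */
static inline void cpudl_offline_sketch(struct cpudl *cp, int cpu)
{
	cpudl_clear(cp, cpu);		/* safe even without queued -dl tasks */
	cpudl_clear_freecpu(cp, cpu);
}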

/*
 * cpudl_set - update the cpudl max-heap
 * @cp: the cpudl max-heap context
 * @cpu: the target CPU
 * @dl: the new earliest deadline for this CPU
 *
 * Notes: assumes cpu_rq(cpu)->lock is locked
 */
void cpudl_set(struct cpudl *cp, int cpu, u64 dl)
{
	int old_idx;
	unsigned long flags;

	WARN_ON(!cpu_present(cpu));

	raw_spin_lock_irqsave(&cp->lock, flags);

	old_idx = cp->elements[cpu].idx;
	if (old_idx == IDX_INVALID) {
		/* CPU not in the heap yet: append it and sift up */
		int new_idx = cp->size++;

		cp->elements[new_idx].dl = dl;
		cp->elements[new_idx].cpu = cpu;
		cp->elements[cpu].idx = new_idx;
		cpudl_heapify_up(cp, new_idx);
		cpumask_clear_cpu(cpu, cp->free_cpus);
	} else {
		cp->elements[old_idx].dl = dl;
		cpudl_heapify(cp, old_idx);
	}

	raw_spin_unlock_irqrestore(&cp->lock, flags);
}
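
/*
 * Update-path sketch, loosely modeled on inc_dl_deadline() in
 * kernel/sched/deadline.c (simplified; the helper name and the *earliest
 * parameter are hypothetical): the heap is only told about a CPU's
 * earliest queued deadline, and only when that value actually changes.
 */
static inline void advertise_earliest_dl_sketch(struct cpudl *cp, int cpu,
						u64 *earliest, u64 deadline)
{
	if (*earliest == 0 || dl_time_before(deadline, *earliest)) {
		*earliest = deadline;
		cpudl_set(cp, cpu, deadline);
	}
}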

/*
 * cpudl_set_freecpu - Set the cpudl.free_cpus
 * @cp: the cpudl max-heap context
 * @cpu: rd attached CPU
 */
void cpudl_set_freecpu(struct cpudl *cp, int cpu)
{
	cpumask_set_cpu(cpu, cp->free_cpus);
}

/*
 * cpudl_clear_freecpu - Clear the cpudl.free_cpus
 * @cp: the cpudl max-heap context
 * @cpu: rd attached CPU
 */
void cpudl_clear_freecpu(struct cpudl *cp, int cpu)
{
	cpumask_clear_cpu(cpu, cp->free_cpus);
}
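
/*
 * Counterpart sketch to cpudl_offline_sketch() above, loosely modeled on
 * rq_online_dl() in kernel/sched/deadline.c (simplified; the helper name
 * and parameters are hypothetical): a CPU joining the root domain is
 * marked free, and re-inserted into the heap if it already has an
 * earliest deadline to advertise.
 */
static inline void cpudl_online_sketch(struct cpudl *cp, int cpu,
				       bool has_dl, u64 earliest_dl)
{
	cpudl_set_freecpu(cp, cpu);
	if (has_dl)
		cpudl_set(cp, cpu, earliest_dl);
}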

/*
 * cpudl_init - initialize the cpudl structure
 * @cp: the cpudl max-heap context
 */
int cpudl_init(struct cpudl *cp)
{
	int i;

	raw_spin_lock_init(&cp->lock);
	cp->size = 0;

	cp->elements = kcalloc(nr_cpu_ids,
			       sizeof(struct cpudl_item),
			       GFP_KERNEL);
	if (!cp->elements)
		return -ENOMEM;

	if (!zalloc_cpumask_var(&cp->free_cpus, GFP_KERNEL)) {
		kfree(cp->elements);
		return -ENOMEM;
	}

	for_each_possible_cpu(i)
		cp->elements[i].idx = IDX_INVALID;

	return 0;
}

/*
 * cpudl_cleanup - clean up the cpudl structure
 * @cp: the cpudl max-heap context
 */
void cpudl_cleanup(struct cpudl *cp)
{
	free_cpumask_var(cp->free_cpus);
}
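
/*
 * Lifecycle sketch (hypothetical caller, loosely modeled on
 * init_rootdomain() in kernel/sched/topology.c): cpudl_init() can fail
 * with -ENOMEM, so callers must check its return value and pair a
 * successful init with cpudl_cleanup() on teardown.
 */
static inline int cpudl_lifecycle_sketch(struct cpudl *cp)
{
	int err = cpudl_init(cp);

	if (err)
		return err;	/* elements or free_cpus allocation failed */

	/* ... use cpudl_set()/cpudl_find()/cpudl_clear() ... */

	cpudl_cleanup(cp);
	return 0;
}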