/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2016 ARM Limited
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/psci.h>
#include <linux/slab.h>
#include <linux/tick.h>
#include <linux/topology.h>

#include <asm/cpuidle.h>

#include <uapi/linux/psci.h>

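/*
 * Number of suspend cycles each test thread runs; every cycle attempts each
 * cpuidle state above WFI once (see suspend_test_thread() below).
 */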
#define NUM_SUSPEND_CYCLE (10)

static unsigned int nb_available_cpus;
static int tos_resident_cpu = -1;

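/*
 * Synchronization with the suspend test threads: nb_active_threads counts
 * the workers still running, each worker blocks on suspend_threads_started
 * until the main thread releases them all, and the last worker to finish
 * completes suspend_threads_done.
 */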
static atomic_t nb_active_threads;
static struct completion suspend_threads_started =
	COMPLETION_INITIALIZER(suspend_threads_started);
static struct completion suspend_threads_done =
	COMPLETION_INITIALIZER(suspend_threads_done);

/*
 * We assume that PSCI operations are used if they are available. This is not
 * necessarily true on arm64, since the decision is based on the
 * "enable-method" property of each CPU in the DT, but given that there is no
 * arch-specific way to check this, we assume that the DT is sensible.
 */
static int psci_ops_check(void)
{
	int migrate_type = -1;
	int cpu;

	if (!(psci_ops.cpu_off && psci_ops.cpu_on && psci_ops.cpu_suspend)) {
		pr_warn("Missing PSCI operations, aborting tests\n");
		return -EOPNOTSUPP;
	}

	if (psci_ops.migrate_info_type)
		migrate_type = psci_ops.migrate_info_type();

	if (migrate_type == PSCI_0_2_TOS_UP_MIGRATE ||
	    migrate_type == PSCI_0_2_TOS_UP_NO_MIGRATE) {
		/* There is a UP Trusted OS, find on which core it resides. */
		for_each_online_cpu(cpu)
			if (psci_tos_resident_on(cpu)) {
				tos_resident_cpu = cpu;
				break;
			}
		if (tos_resident_cpu == -1)
			pr_warn("UP Trusted OS resides on no online CPU\n");
	}

	return 0;
}

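/*
 * Build the list of topology-level CPU groups covering @cpus. Returns the
 * number of entries written to @cpu_groups, or -ENOMEM if the temporary
 * cpumask cannot be allocated.
 */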
static int find_cpu_groups(const struct cpumask *cpus,
			   const struct cpumask **cpu_groups)
{
	unsigned int nb = 0;
	cpumask_var_t tmp;

	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;
	cpumask_copy(tmp, cpus);

	while (!cpumask_empty(tmp)) {
		const struct cpumask *cpu_group =
			topology_core_cpumask(cpumask_any(tmp));

		cpu_groups[nb++] = cpu_group;
		cpumask_andnot(tmp, tmp, cpu_group);
	}

	free_cpumask_var(tmp);
	return nb;
}

/*
 * offlined_cpus is a temporary array but passing it as an argument avoids
 * multiple allocations.
 */
static unsigned int down_and_up_cpus(const struct cpumask *cpus,
				     struct cpumask *offlined_cpus)
{
	int cpu;
	int err = 0;

	cpumask_clear(offlined_cpus);

	/* Try to power down all CPUs in the mask. */
	for_each_cpu(cpu, cpus) {
		int ret = cpu_down(cpu);

		/*
		 * cpu_down() checks the number of online CPUs before the TOS
		 * resident CPU.
		 */
		if (cpumask_weight(offlined_cpus) + 1 == nb_available_cpus) {
			if (ret != -EBUSY) {
				pr_err("Unexpected return code %d while trying "
				       "to power down last online CPU %d\n",
				       ret, cpu);
				++err;
			}
		} else if (cpu == tos_resident_cpu) {
			if (ret != -EPERM) {
				pr_err("Unexpected return code %d while trying "
				       "to power down TOS resident CPU %d\n",
				       ret, cpu);
				++err;
			}
		} else if (ret != 0) {
			pr_err("Error occurred (%d) while trying "
			       "to power down CPU %d\n", ret, cpu);
			++err;
		}

		if (ret == 0)
			cpumask_set_cpu(cpu, offlined_cpus);
	}

	/* Try to power up all the CPUs that have been offlined. */
	for_each_cpu(cpu, offlined_cpus) {
		int ret = cpu_up(cpu);

		if (ret != 0) {
			pr_err("Error occurred (%d) while trying "
			       "to power up CPU %d\n", ret, cpu);
			++err;
		} else {
			cpumask_clear_cpu(cpu, offlined_cpus);
		}
	}

	/*
	 * Something went bad at some point and some CPUs could not be turned
	 * back on.
	 */
	WARN_ON(!cpumask_empty(offlined_cpus) ||
		num_online_cpus() != nb_available_cpus);

	return err;
}

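/*
 * Hotplug tests: first try to power down and back up every online CPU (the
 * last one must refuse to go down), then repeat the exercise per topology
 * CPU group so that powering down the last CPU of a group shuts the whole
 * group down.
 */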
static int hotplug_tests(void)
{
	int i, nb_cpu_group, err = -ENOMEM;
	cpumask_var_t offlined_cpus;
	const struct cpumask **cpu_groups;
	char *page_buf;

	if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL))
		return err;

	/* We may have up to nb_available_cpus cpu_groups. */
	cpu_groups = kmalloc_array(nb_available_cpus, sizeof(*cpu_groups),
				   GFP_KERNEL);
	if (!cpu_groups)
		goto out_free_cpus;

	page_buf = (char *)__get_free_page(GFP_KERNEL);
	if (!page_buf)
		goto out_free_cpu_groups;

	err = 0;
	nb_cpu_group = find_cpu_groups(cpu_online_mask, cpu_groups);

	/*
	 * Of course the last CPU cannot be powered down and cpu_down() should
	 * refuse doing that.
	 */
	pr_info("Trying to turn off and on again all CPUs\n");
	err += down_and_up_cpus(cpu_online_mask, offlined_cpus);

	/*
	 * Take down CPUs by cpu group this time. When the last CPU is turned
	 * off, the cpu group itself should shut down.
	 */
	for (i = 0; i < nb_cpu_group; ++i) {
		ssize_t len = cpumap_print_to_pagebuf(true, page_buf,
						      cpu_groups[i]);

		/* Remove trailing newline. */
		page_buf[len - 1] = '\0';
		pr_info("Trying to turn off and on again group %d (CPUs %s)\n",
			i, page_buf);
		err += down_and_up_cpus(cpu_groups[i], offlined_cpus);
	}

	free_page((unsigned long)page_buf);
out_free_cpu_groups:
	kfree(cpu_groups);
out_free_cpus:
	free_cpumask_var(offlined_cpus);
	return err;
}

static void dummy_callback(struct timer_list *unused) {}

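/*
 * Suspend the calling CPU into idle state @index, entering the broadcast
 * tick first when @broadcast is set. Returns what the cpuidle enter helper
 * reports: the index of the state actually entered on success or a negative
 * value on failure, which the caller uses to tell a full suspend apart from
 * a shallower sleep.
 */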
static int suspend_cpu(int index, bool broadcast)
{
	int ret;

	arch_cpu_idle_enter();

	if (broadcast) {
		/*
		 * The local timer will be shut down, we need to enter tick
		 * broadcast.
		 */
		ret = tick_broadcast_enter();
		if (ret) {
			/*
			 * In the absence of hardware broadcast mechanism,
			 * this CPU might be used to broadcast wakeups, which
			 * may be why entering tick broadcast has failed.
			 * There is little the kernel can do to work around
			 * that, so enter WFI instead (idle state 0).
			 */
			index = 0;
			broadcast = false;
		}
	}

	/*
	 * Replicate the common ARM cpuidle enter function
	 * (arm_enter_idle_state).
	 */
	ret = CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, index);

	if (broadcast)
		tick_broadcast_exit();

	arch_cpu_idle_exit();

	return ret;
}

static int suspend_test_thread(void *arg)
{
	int cpu = (long)arg;
	int i, nb_suspend = 0, nb_shallow_sleep = 0, nb_err = 0;
	struct sched_param sched_priority = { .sched_priority = MAX_RT_PRIO - 1 };
	struct cpuidle_device *dev;
	struct cpuidle_driver *drv;
	/* No need for an actual callback, we just want to wake up the CPU. */
	struct timer_list wakeup_timer;

	/* Wait for the main thread to give the start signal. */
	wait_for_completion(&suspend_threads_started);

	/* Set maximum priority to preempt all other threads on this CPU. */
	if (sched_setscheduler_nocheck(current, SCHED_FIFO, &sched_priority))
		pr_warn("Failed to set suspend thread scheduler on CPU %d\n",
			cpu);

	dev = this_cpu_read(cpuidle_devices);
	drv = cpuidle_get_cpu_driver(dev);

	pr_info("CPU %d entering suspend cycles, states 1 through %d\n",
		cpu, drv->state_count - 1);

	timer_setup_on_stack(&wakeup_timer, dummy_callback, 0);
	for (i = 0; i < NUM_SUSPEND_CYCLE; ++i) {
		int index;
		/*
		 * Test all possible states, except 0 (which is usually WFI and
		 * doesn't use PSCI).
		 */
		for (index = 1; index < drv->state_count; ++index) {
			int ret;
			struct cpuidle_state *state = &drv->states[index];
			bool broadcast = state->flags & CPUIDLE_FLAG_TIMER_STOP;

			/*
			 * Set the timer to wake this CPU up in some time (which
			 * should be largely sufficient for entering suspend).
			 * If the local tick is disabled when entering suspend,
			 * suspend_cpu() takes care of switching to a broadcast
			 * tick, so the timer will still wake us up.
			 */
			mod_timer(&wakeup_timer, jiffies +
				  usecs_to_jiffies(state->target_residency));

			/* IRQs must be disabled during suspend operations. */
			local_irq_disable();

			ret = suspend_cpu(index, broadcast);

			/*
			 * We have woken up. Re-enable IRQs to handle any
			 * pending interrupt, do not wait until the end of the
			 * loop.
			 */
			local_irq_enable();

			if (ret == index) {
				++nb_suspend;
			} else if (ret >= 0) {
				/* We did not enter the expected state. */
				++nb_shallow_sleep;
			} else {
				pr_err("Failed to suspend CPU %d: error %d "
				       "(requested state %d, cycle %d)\n",
				       cpu, ret, index, i);
				++nb_err;
			}
		}
	}

	/*
	 * Disable the timer to make sure that the timer will not trigger
	 * later.
	 */
	del_timer(&wakeup_timer);
	destroy_timer_on_stack(&wakeup_timer);

	if (atomic_dec_return_relaxed(&nb_active_threads) == 0)
		complete(&suspend_threads_done);

	/* Give up on RT scheduling and wait for termination. */
	sched_priority.sched_priority = 0;
	if (sched_setscheduler_nocheck(current, SCHED_NORMAL, &sched_priority))
		pr_warn("Failed to set suspend thread scheduler on CPU %d\n",
			cpu);
	for (;;) {
		/* Needs to be set first to avoid missing a wakeup. */
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		}
		schedule();
	}

	pr_info("CPU %d suspend test results: success %d, shallow states %d, errors %d\n",
		cpu, nb_suspend, nb_shallow_sleep, nb_err);

	return nb_err;
}

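/*
 * Spawn one suspend test thread per CPU that has a cpuidle driver, release
 * them all at once, wait for them to finish and accumulate their error
 * counts.
 */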
static int suspend_tests(void)
{
	int i, cpu, err = 0;
	struct task_struct **threads;
	int nb_threads = 0;

	threads = kmalloc_array(nb_available_cpus, sizeof(*threads),
				GFP_KERNEL);
	if (!threads)
		return -ENOMEM;

	/*
	 * Stop cpuidle to prevent the idle tasks from entering a deep sleep
	 * mode, as it might interfere with the suspend threads on other CPUs.
	 * This does not prevent the suspend threads from using cpuidle (only
	 * the idle tasks check this status). Take the idle lock so that
	 * the cpuidle driver and device look-up can be carried out safely.
	 */
	cpuidle_pause_and_lock();

	for_each_online_cpu(cpu) {
		struct task_struct *thread;
		/* Check that cpuidle is available on that CPU. */
		struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
		struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

		if (!dev || !drv) {
			pr_warn("cpuidle not available on CPU %d, ignoring\n",
				cpu);
			continue;
		}

		thread = kthread_create_on_cpu(suspend_test_thread,
					       (void *)(long)cpu, cpu,
					       "psci_suspend_test");
		if (IS_ERR(thread)) {
			pr_err("Failed to create kthread on CPU %d\n", cpu);
			continue;
		}

		threads[nb_threads++] = thread;
	}

	if (nb_threads < 1) {
		err = -ENODEV;
		goto out;
	}

	atomic_set(&nb_active_threads, nb_threads);

	/*
	 * Wake up the suspend threads. To avoid the main thread being
	 * preempted before all the threads have been unparked, the suspend
	 * threads will wait for the completion of suspend_threads_started.
	 */
	for (i = 0; i < nb_threads; ++i)
		wake_up_process(threads[i]);
	complete_all(&suspend_threads_started);

	wait_for_completion(&suspend_threads_done);

	/* Stop and destroy all threads, get return status. */
	for (i = 0; i < nb_threads; ++i)
		err += kthread_stop(threads[i]);

out:
	cpuidle_resume_and_unlock();
	kfree(threads);
	return err;
}

static int __init psci_checker(void)
{
	int ret;

	/*
	 * Since we're in an initcall, we assume that all the CPUs that can
	 * be onlined have been onlined.
	 *
	 * The tests assume that hotplug is enabled but nobody else is using
	 * it, otherwise the results will be unpredictable. However, since
	 * there is no userspace yet in initcalls, that should be fine, as
	 * long as no torture test is running at the same time (see Kconfig).
	 */
	nb_available_cpus = num_online_cpus();

	/* Check PSCI operations are set up and working. */
	ret = psci_ops_check();
	if (ret)
		return ret;

	pr_info("PSCI checker started using %u CPUs\n", nb_available_cpus);

	pr_info("Starting hotplug tests\n");
	ret = hotplug_tests();
	if (ret == 0)
		pr_info("Hotplug tests passed OK\n");
	else if (ret > 0)
		pr_err("%d error(s) encountered in hotplug tests\n", ret);
	else {
		pr_err("Out of memory\n");
		return ret;
	}

	pr_info("Starting suspend tests (%d cycles per state)\n",
		NUM_SUSPEND_CYCLE);
	ret = suspend_tests();
	if (ret == 0)
		pr_info("Suspend tests passed OK\n");
	else if (ret > 0)
		pr_err("%d error(s) encountered in suspend tests\n", ret);
	else {
		switch (ret) {
		case -ENOMEM:
			pr_err("Out of memory\n");
			break;
		case -ENODEV:
			pr_warn("Could not start suspend tests on any CPU\n");
			break;
		}
	}

	pr_info("PSCI checker completed\n");
	return ret < 0 ? ret : 0;
}
late_initcall(psci_checker);