/*
 * arch/s390/kernel/idle.c
 * blob a5d4d80d6edeadc4667fa6b1c4e39ce065992192
 * (Unrelated web-viewer page header removed: "io_uring: ensure
 * finish_wait() is always called in __io_uring_task_cancel()" is the
 * commit headline of the page this file was extracted from, not part
 * of this source file.)
 */
// SPDX-License-Identifier: GPL-2.0
/*
 * Idle functions for s390.
 *
 * Copyright IBM Corp. 2014
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
10 #include <linux/kernel.h>
11 #include <linux/kernel_stat.h>
12 #include <linux/notifier.h>
13 #include <linux/init.h>
14 #include <linux/cpu.h>
15 #include <linux/sched/cputime.h>
16 #include <trace/events/power.h>
17 #include <asm/nmi.h>
18 #include <asm/smp.h>
19 #include "entry.h"
21 static DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
23 void arch_cpu_idle(void)
25 struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
26 unsigned long long idle_time;
27 unsigned long psw_mask;
29 /* Wait for external, I/O or machine check interrupt. */
30 psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT | PSW_MASK_DAT |
31 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
32 clear_cpu_flag(CIF_NOHZ_DELAY);
34 /* psw_idle() returns with interrupts disabled. */
35 psw_idle(idle, psw_mask);
37 /* Account time spent with enabled wait psw loaded as idle time. */
38 raw_write_seqcount_begin(&idle->seqcount);
39 idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
40 idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
41 idle->idle_time += idle_time;
42 idle->idle_count++;
43 account_idle_time(cputime_to_nsecs(idle_time));
44 raw_write_seqcount_end(&idle->seqcount);
45 raw_local_irq_enable();
48 static ssize_t show_idle_count(struct device *dev,
49 struct device_attribute *attr, char *buf)
51 struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
52 unsigned long long idle_count;
53 unsigned int seq;
55 do {
56 seq = read_seqcount_begin(&idle->seqcount);
57 idle_count = READ_ONCE(idle->idle_count);
58 if (READ_ONCE(idle->clock_idle_enter))
59 idle_count++;
60 } while (read_seqcount_retry(&idle->seqcount, seq));
61 return sprintf(buf, "%llu\n", idle_count);
63 DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
65 static ssize_t show_idle_time(struct device *dev,
66 struct device_attribute *attr, char *buf)
68 unsigned long long now, idle_time, idle_enter, idle_exit, in_idle;
69 struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
70 unsigned int seq;
72 do {
73 seq = read_seqcount_begin(&idle->seqcount);
74 idle_time = READ_ONCE(idle->idle_time);
75 idle_enter = READ_ONCE(idle->clock_idle_enter);
76 idle_exit = READ_ONCE(idle->clock_idle_exit);
77 } while (read_seqcount_retry(&idle->seqcount, seq));
78 in_idle = 0;
79 now = get_tod_clock();
80 if (idle_enter) {
81 if (idle_exit) {
82 in_idle = idle_exit - idle_enter;
83 } else if (now > idle_enter) {
84 in_idle = now - idle_enter;
87 idle_time += in_idle;
88 return sprintf(buf, "%llu\n", idle_time >> 12);
90 DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
92 u64 arch_cpu_idle_time(int cpu)
94 struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
95 unsigned long long now, idle_enter, idle_exit, in_idle;
96 unsigned int seq;
98 do {
99 seq = read_seqcount_begin(&idle->seqcount);
100 idle_enter = READ_ONCE(idle->clock_idle_enter);
101 idle_exit = READ_ONCE(idle->clock_idle_exit);
102 } while (read_seqcount_retry(&idle->seqcount, seq));
103 in_idle = 0;
104 now = get_tod_clock();
105 if (idle_enter) {
106 if (idle_exit) {
107 in_idle = idle_exit - idle_enter;
108 } else if (now > idle_enter) {
109 in_idle = now - idle_enter;
112 return cputime_to_nsecs(in_idle);
/* Nothing to do on s390 before entering the idle loop. */
void arch_cpu_idle_enter(void)
{
}
/* Nothing to do on s390 after leaving the idle loop. */
void arch_cpu_idle_exit(void)
{
}
/* Called on an offlined CPU; hand over to the s390 CPU-death path. */
void arch_cpu_idle_dead(void)
{
	cpu_die();
}