/* SPDX-License-Identifier: GPL-2.0 */

#ifdef CONFIG_SCHEDSTATS
/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}
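/*
 * Illustrative note (added for clarity, not in the original file): these
 * per-rq counters are the per-CPU side of schedstats; in mainline they are
 * reported by show_schedstat() in kernel/sched/stats.c as the rq_cpu_time,
 * run_delay and pcount fields of each "cpuN" line in /proc/schedstat. Like
 * the helpers above, the readers assume the runqueue lock keeps updates
 * consistent.
 */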
#define schedstat_enabled()		static_branch_unlikely(&sched_schedstats)
#define __schedstat_inc(var)		do { var++; } while (0)
#define schedstat_inc(var)		do { if (schedstat_enabled()) { var++; } } while (0)
#define __schedstat_add(var, amt)	do { var += (amt); } while (0)
#define schedstat_add(var, amt)		do { if (schedstat_enabled()) { var += (amt); } } while (0)
#define __schedstat_set(var, val)	do { var = (val); } while (0)
#define schedstat_set(var, val)		do { if (schedstat_enabled()) { var = (val); } } while (0)
#define schedstat_val(var)		(var)
#define schedstat_val_or_zero(var)	((schedstat_enabled()) ? (var) : 0)
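/*
 * Usage sketch (illustrative, not part of this header): callers wrap each
 * statistics update in these macros so the bookkeeping costs only a static
 * branch (jump label) when schedstats is runtime-disabled. A hypothetical
 * caller tracking wait times might look like:
 *
 *	u64 delta = rq_clock(rq) - schedstat_val(se->statistics.wait_start);
 *
 *	schedstat_inc(se->statistics.wait_count);
 *	schedstat_add(se->statistics.wait_sum, delta);
 *	schedstat_set(se->statistics.wait_start, 0);
 *
 * The double-underscore variants skip the schedstat_enabled() test and are
 * intended for paths that have already checked it once.
 */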

#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
#define schedstat_enabled()		0
#define __schedstat_inc(var)		do { } while (0)
#define schedstat_inc(var)		do { } while (0)
#define __schedstat_add(var, amt)	do { } while (0)
#define schedstat_add(var, amt)		do { } while (0)
#define __schedstat_set(var, val)	do { } while (0)
#define schedstat_set(var, val)		do { } while (0)
#define schedstat_val(var)		0
#define schedstat_val_or_zero(var)	0
#endif /* CONFIG_SCHEDSTATS */

#ifdef CONFIG_SCHED_INFO
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}

/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a cpu. We call this routine
 * from dequeue_task() to account for possible rq->clock skew across cpus:
 * the delta taken on each cpu would annul the skew.
 */
static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (unlikely(sched_info_on()))
		if (t->sched_info.last_queued)
			delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeued(rq, delta);
}
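/*
 * Illustrative note (added for clarity, not in the original file): rq_clock()
 * is in nanoseconds, so the delta charged to run_delay here is the time, in
 * ns, the task spent on a runqueue before being dequeued without having run
 * in between.
 */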

/*
 * Called when a task finally hits the cpu. We can now calculate how
 * long it was waiting to run. We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(rq, delta);
}
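/*
 * Illustrative note (added for clarity, not in the original file): the
 * run_delay and pcount values updated above are what mainline exports per
 * task as the second and third fields of /proc/<pid>/schedstat.
 */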

/*
 * This function is only called from enqueue_task(), and it only updates
 * the timestamp if it is not already set. It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
{
	if (unlikely(sched_info_on()))
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = rq_clock(rq);
}
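/*
 * Lifecycle sketch (derived from the functions above, added for clarity):
 * last_queued is set once per wait period by sched_info_queued() and cleared
 * by either sched_info_arrive() (the task got a cpu) or sched_info_dequeued()
 * (the task left the runqueue without running), so each wait period is
 * charged to run_delay exactly once.
 */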

/*
 * Called when a process ceases being the active-running process involuntarily
 * due, typically, to expiring its time slice (this may also be called when
 * switching to the idle task). Now we can calculate how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = rq_clock(rq) -
					t->sched_info.last_arrival;

	rq_sched_info_depart(rq, delta);

	if (t->state == TASK_RUNNING)
		sched_info_queued(rq, t);
}
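/*
 * Worked example (illustrative, added for clarity): if a task is queued at
 * time t0, first runs at t1 and is switched out at t2 (all taken from
 * rq_clock(), in nanoseconds), then sched_info_arrive() adds t1 - t0 to the
 * task's run_delay and sched_info_depart() adds t2 - t1 to the rq's
 * rq_cpu_time.
 */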

/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice. (This may also be called when switching to or from
 * the idle task.) We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct rq *rq,
		    struct task_struct *prev, struct task_struct *next)
{
	/*
	 * prev now departs the cpu. It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(rq, prev);

	if (next != rq->idle)
		sched_info_arrive(rq, next);
}

static inline void
sched_info_switch(struct rq *rq,
		  struct task_struct *prev, struct task_struct *next)
{
	if (unlikely(sched_info_on()))
		__sched_info_switch(rq, prev, next);
}
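/*
 * Illustrative note (added for clarity, not in the original file): the
 * expected caller is the context-switch path; in mainline,
 * prepare_task_switch() in kernel/sched/core.c invokes
 * sched_info_switch(rq, prev, next) with the runqueue lock held and only
 * when prev != next.
 */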
#else
#define sched_info_queued(rq, t)	do { } while (0)
#define sched_info_reset_dequeued(t)	do { } while (0)
#define sched_info_dequeued(rq, t)	do { } while (0)
#define sched_info_depart(rq, t)	do { } while (0)
#define sched_info_arrive(rq, next)	do { } while (0)
#define sched_info_switch(rq, t, next)	do { } while (0)
#endif /* CONFIG_SCHED_INFO */