#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
        if (rq) {
                rq->rq_sched_info.run_delay += delta;
                rq->rq_sched_info.pcount++;
        }
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
        if (rq)
                rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
        if (rq)
                rq->rq_sched_info.run_delay += delta;
}
# define schedstat_inc(rq, field)       do { (rq)->field++; } while (0)
# define schedstat_add(rq, field, amt)  do { (rq)->field += (amt); } while (0)
# define schedstat_set(var, val)        do { var = (val); } while (0)
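
/*
 * Illustrative usage of the wrappers above (a sketch, not an excerpt from
 * this file; "yld_count", "wait_sum" and "delta" are stand-ins for whichever
 * schedstat field and value a call site actually uses). Because the macros
 * expand to empty statements when CONFIG_SCHEDSTATS is off, call sites pay
 * no cost in that configuration:
 *
 *      schedstat_inc(rq, yld_count);           // count one event on this rq
 *      schedstat_add(rq, wait_sum, delta);     // accumulate a time delta
 */
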
#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
# define schedstat_inc(rq, field)       do { } while (0)
# define schedstat_add(rq, field, amt)  do { } while (0)
# define schedstat_set(var, val)        do { } while (0)
#endif

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
        t->sched_info.last_queued = 0;
}

/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a cpu; we call this routine
 * from dequeue_task() to account for possible rq->clock skew across cpus. The
 * delta taken on each cpu would annul the skew.
 */
static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
{
        unsigned long long now = rq_clock(rq), delta = 0;

        if (unlikely(sched_info_on()))
                if (t->sched_info.last_queued)
                        delta = now - t->sched_info.last_queued;
        sched_info_reset_dequeued(t);
        t->sched_info.run_delay += delta;

        rq_sched_info_dequeued(rq, delta);
}
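
/*
 * Worked example of the skew cancellation claimed above (hypothetical
 * clock values, for illustration only): suppose CPU1's rq clock runs
 * 5ns ahead of CPU0's. A task queued on CPU0 at last_queued = 100 and
 * dequeued there at now = 140 contributes delta = 40; after migration
 * it is re-queued on CPU1 at last_queued = 1000 and first runs at
 * now = 1030, contributing delta = 30. Each delta is taken against a
 * single rq's clock, so the 5ns skew never enters run_delay (= 70 here).
 */
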
/*
 * Called when a task finally hits the cpu. We can now calculate how
 * long it was waiting to run. We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
        unsigned long long now = rq_clock(rq), delta = 0;

        if (t->sched_info.last_queued)
                delta = now - t->sched_info.last_queued;
        sched_info_reset_dequeued(t);
        t->sched_info.run_delay += delta;
        t->sched_info.last_arrival = now;
        t->sched_info.pcount++;

        rq_sched_info_arrive(rq, delta);
}
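
/*
 * Illustrative lifecycle of the timestamps updated above (hypothetical
 * values): a task enqueued at rq_clock = 100 gets last_queued = 100;
 * when it first runs at rq_clock = 130, this function adds 30 to
 * run_delay, sets last_arrival = 130, clears last_queued and bumps
 * pcount, so each queued-to-running transition is counted exactly once.
 */
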
/*
 * This function is only called from enqueue_task(), but also only updates
 * the timestamp if it is not already set. It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
{
        if (unlikely(sched_info_on()))
                if (!t->sched_info.last_queued)
                        t->sched_info.last_queued = rq_clock(rq);
}

/*
 * Called when a process ceases being the active-running process involuntarily
 * due, typically, to expiring its time slice (this may also be called when
 * switching to the idle task). Now we can calculate how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
        unsigned long long delta = rq_clock(rq) -
                                        t->sched_info.last_arrival;

        rq_sched_info_depart(rq, delta);

        if (t->state == TASK_RUNNING)
                sched_info_queued(rq, t);
}

/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice. (This may also be called when switching to or from
 * the idle task.) We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct rq *rq,
                    struct task_struct *prev, struct task_struct *next)
{
        /*
         * prev now departs the cpu. It's not interesting to record
         * stats about how efficient we were at scheduling the idle
         * process, however.
         */
        if (prev != rq->idle)
                sched_info_depart(rq, prev);

        if (next != rq->idle)
                sched_info_arrive(rq, next);
}
static inline void
sched_info_switch(struct rq *rq,
                  struct task_struct *prev, struct task_struct *next)
{
        if (unlikely(sched_info_on()))
                __sched_info_switch(rq, prev, next);
}
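
/*
 * For context, a sketch of the expected call site in the context-switch
 * path (simplified and hypothetical in its details, not a verbatim
 * excerpt from the scheduler core):
 *
 *      static inline void
 *      prepare_task_switch(struct rq *rq, struct task_struct *prev,
 *                          struct task_struct *next)
 *      {
 *              sched_info_switch(rq, prev, next);
 *              ...
 *      }
 *
 * The sched_info_on() test above keeps this per-switch call cheap when
 * neither schedstats nor delay accounting is enabled.
 */
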
#else
#define sched_info_queued(rq, t)                do { } while (0)
#define sched_info_reset_dequeued(t)            do { } while (0)
#define sched_info_dequeued(rq, t)              do { } while (0)
#define sched_info_depart(rq, t)                do { } while (0)
#define sched_info_arrive(rq, next)             do { } while (0)
#define sched_info_switch(rq, t, next)          do { } while (0)
#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */

/*
 * The following are functions that support scheduler-internal time accounting.
 * These functions are generally called at the timer tick. None of this depends
 * on CONFIG_SCHEDSTATS.
 */

/**
 * cputimer_running - return true if cputimer is running
 *
 * @tsk:	Pointer to target task.
 */
static inline bool cputimer_running(struct task_struct *tsk)
{
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

        if (!cputimer->running)
                return false;

        /*
         * After we flush the task's sum_exec_runtime to sig->sum_sched_runtime
         * in __exit_signal(), we won't account to the signal struct further
         * cputime consumed by that task, even though the task can still be
         * ticking after __exit_signal().
         *
         * In order to keep a consistent behaviour between thread group cputime
         * and thread group cputimer accounting, let's also ignore the cputime
         * elapsing after __exit_signal() in any thread group timer running.
         *
         * This makes sure that POSIX CPU clocks and timers are synchronized, so
         * that a POSIX CPU timer won't expire while the corresponding POSIX CPU
         * clock delta is behind the expiring timer value.
         */
        if (unlikely(!tsk->sighand))
                return false;

        return true;
}

/**
 * account_group_user_time - Maintain utime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the utime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the utime field there.
 */
static inline void account_group_user_time(struct task_struct *tsk,
                                           cputime_t cputime)
{
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

        if (!cputimer_running(tsk))
                return;

        raw_spin_lock(&cputimer->lock);
        cputimer->cputime.utime += cputime;
        raw_spin_unlock(&cputimer->lock);
}

/**
 * account_group_system_time - Maintain stime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the stime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the stime field there.
 */
static inline void account_group_system_time(struct task_struct *tsk,
                                             cputime_t cputime)
{
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

        if (!cputimer_running(tsk))
                return;

        raw_spin_lock(&cputimer->lock);
        cputimer->cputime.stime += cputime;
        raw_spin_unlock(&cputimer->lock);
}

/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @ns:		Time value by which to increment the sum_exec_runtime field
 *		of the thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the sum_exec_runtime field there.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
                                              unsigned long long ns)
{
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

        if (!cputimer_running(tsk))
                return;

        raw_spin_lock(&cputimer->lock);
        cputimer->cputime.sum_exec_runtime += ns;
        raw_spin_unlock(&cputimer->lock);
}
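
/*
 * For context, a sketch of how the tick-time accounting path is expected
 * to feed these helpers (simplified and hypothetical in its details, not
 * a verbatim excerpt from the cputime code):
 *
 *      void account_user_time(struct task_struct *p, cputime_t cputime)
 *      {
 *              p->utime += cputime;                    // per-task total
 *              account_group_user_time(p, cputime);    // thread-group total
 *      }
 *
 * Taking cputimer->lock only after the cputimer_running() check keeps the
 * common case, where no thread-group timer is armed, lock-free.
 */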