#ifdef CONFIG_SMP

int __update_load_avg_blocked_se(u64 now, int cpu, struct sched_entity *se);
int __update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entity *se);
int __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq);
int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
int update_irq_load_avg(struct rq *rq, u64 running);
#else
static inline int
update_irq_load_avg(struct rq *rq, u64 running)
{
        return 0;
}
#endif
/*
 * When a task is dequeued, its estimated utilization should not be updated
 * if its util_avg has not been updated at least once.
 * This flag is used to synchronize util_avg updates with util_est updates.
 * We map this information into the LSB of the utilization saved at
 * dequeue time (i.e. util_est.dequeued).
 */
#define UTIL_AVG_UNCHANGED 0x1
static inline void cfs_se_util_change(struct sched_avg *avg)
{
        unsigned int enqueued;

        if (!sched_feat(UTIL_EST))
                return;

        /* Avoid store if the flag has been already set */
        enqueued = avg->util_est.enqueued;
        if (!(enqueued & UTIL_AVG_UNCHANGED))
                return;

        /* Reset flag to report util_avg has been updated */
        enqueued &= ~UTIL_AVG_UNCHANGED;
        WRITE_ONCE(avg->util_est.enqueued, enqueued);
}
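The trick documented above works because utilization values do not need their full resolution, so the least significant bit can be borrowed as a flag: the dequeue path stores the utilization with the LSB set, and cfs_se_util_change() clears it on the first util_avg update that follows. Below is a self-contained sketch of the same pattern, with hypothetical names rather than the kernel's:

#include <stdio.h>

#define VAL_UNCHANGED 0x1

static unsigned int saved;      /* stands in for util_est.enqueued */

/* "Dequeue": remember the value with the unchanged-flag set in its LSB. */
static void save_on_dequeue(unsigned int util)
{
        saved = (util & ~VAL_UNCHANGED) | VAL_UNCHANGED;
}

/* First "update" afterwards: clear the flag, skipping the store if it
 * is already clear (same shape as cfs_se_util_change() above). */
static void mark_updated(void)
{
        unsigned int v = saved;

        if (!(v & VAL_UNCHANGED))
                return;
        saved = v & ~VAL_UNCHANGED;
}

int main(void)
{
        save_on_dequeue(436);
        printf("saved %u, flag %u\n", saved, saved & VAL_UNCHANGED);
        mark_updated();
        printf("saved %u, flag %u\n", saved, saved & VAL_UNCHANGED);
        return 0;
}

The cost is one bit of resolution in the stored value, which is negligible at utilization scale.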
#else /* !CONFIG_SMP */

static inline int
update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
{
        return 0;
}

static inline int
update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
{
        return 0;
}

static inline int
update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
{
        return 0;
}

static inline int
update_irq_load_avg(struct rq *rq, u64 running)
{
        return 0;
}

#endif /* CONFIG_SMP */
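When CONFIG_SMP is not set there is no per-CPU load to track, so the same names collapse to static inline stubs returning 0: call sites can invoke them unconditionally, and the compiler eliminates the dead calls. A minimal sketch of the idiom, with made-up names:

#include <stdio.h>

/* HAVE_WIDGET_STATS would normally come from the build configuration. */
#ifdef HAVE_WIDGET_STATS
int update_widget_stats(unsigned long now);     /* real version elsewhere */
#else
static inline int update_widget_stats(unsigned long now)
{
        return 0;       /* feature off: report "nothing changed" */
}
#endif

int main(void)
{
        /* The caller needs no #ifdef of its own. */
        int changed = update_widget_stats(1024);
        printf("changed = %d\n", changed);
        return 0;
}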