 kernel/sched/fair.c | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 69361c63353a..615861d96357 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -847,13 +847,19 @@ static s64 entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 avruntime)
  * Similarly, check that the entity didn't gain positive lag when DELAY_ZERO
  * is set.
  *
- * Return true if the lag has been adjusted.
+ * Return true if the vlag has been modified. Specifically:
+ *
+ *   se->vlag != avg_vruntime() - se->vruntime
+ *
+ * This can be due to clamping in entity_lag() or clamping due to
+ * sched_delayed. Either way, when vlag is modified and the entity is
+ * retained, the tree needs to be adjusted.
  */
 static __always_inline
 bool update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	s64 vlag = entity_lag(cfs_rq, se, avg_vruntime(cfs_rq));
-	bool ret;
+	u64 avruntime = avg_vruntime(cfs_rq);
+	s64 vlag = entity_lag(cfs_rq, se, avruntime);

 	WARN_ON_ONCE(!se->on_rq);

@@ -863,10 +869,9 @@ bool update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		if (sched_feat(DELAY_ZERO))
 			vlag = min(vlag, 0);
 	}
-	ret = (vlag == se->vlag);
 	se->vlag = vlag;

-	return ret;
+	return avruntime - vlag != se->vruntime;
 }

 /*
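The return-value fix deserves a note. The removed code computed ret = (vlag == se->vlag) against the previously cached se->vlag and returned that, so it tested against a stale cache and reported true when the lag had NOT changed, the inverse of the documented "lag has been adjusted". The new return, avruntime - vlag != se->vruntime, is a rearrangement of the condition se->vlag != avg_vruntime() - se->vruntime once se->vlag has been assigned: it is true exactly when clamping (in entity_lag() or via the DELAY_ZERO min()) moved vlag away from the raw lag.

Below is a minimal user-space sketch of these semantics, not kernel code: struct entity, clamped_lag() and update_lag() are hypothetical stand-ins for sched_entity, entity_lag() and update_entity_lag(), with the cfs_rq plumbing and feature flags stripped out.

/* Hypothetical stand-ins, NOT the kernel's types or helpers. */
#include <stdbool.h>
#include <stdio.h>

struct entity {
	long long vruntime;	/* virtual time received so far */
	long long vlag;		/* cached lag: avg_vruntime - vruntime */
};

/* Stand-in for entity_lag(): clamp the raw lag to +/- limit. */
static long long clamped_lag(long long avruntime, const struct entity *se,
			     long long limit)
{
	long long vlag = avruntime - se->vruntime;

	if (vlag > limit)
		vlag = limit;
	if (vlag < -limit)
		vlag = -limit;
	return vlag;
}

/*
 * Stand-in for the patched update_entity_lag(): store the clamped lag
 * and report whether it was modified, i.e. whether avruntime - vlag no
 * longer lands exactly on vruntime.
 */
static bool update_lag(struct entity *se, long long avruntime, long long limit)
{
	long long vlag = clamped_lag(avruntime, se, limit);

	se->vlag = vlag;
	return avruntime - vlag != se->vruntime;
}

int main(void)
{
	struct entity se = { .vruntime = 100, .vlag = 0 };
	bool modified;

	/* Raw lag is 150 - 100 = 50, clamped to the limit of 20. */
	modified = update_lag(&se, 150, 20);
	printf("modified=%d vlag=%lld\n", modified, se.vlag);

	/* Raw lag is 110 - 100 = 10, within the limit: unmodified. */
	modified = update_lag(&se, 110, 20);
	printf("modified=%d vlag=%lld\n", modified, se.vlag);
	return 0;
}

Run, this prints modified=1 vlag=20 (the raw lag of 50 was clamped, so the caller knows the cached value diverged and the tree position may need fixing up) followed by modified=0 vlag=10 (a raw lag of 10 survives unclamped).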
