diff options
-rw-r--r--  kernel/sched/core.c  | 2 +-
-rw-r--r--  kernel/sched/rt.c    | 2 +-
-rw-r--r--  kernel/sched/sched.h | 2 +-
3 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c15c9865299e..49cd5d217161 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4735,7 +4735,7 @@ void sched_post_fork(struct task_struct *p)
 	scx_post_fork(p);
 }
 
-unsigned long to_ratio(u64 period, u64 runtime)
+u64 to_ratio(u64 period, u64 runtime)
 {
 	if (runtime == RUNTIME_INF)
 		return BW_UNIT;
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 4e5f1957b91b..a48e86794913 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2666,7 +2666,7 @@ static int tg_rt_schedulable(struct task_group *tg, void *data)
 {
 	struct rt_schedulable_data *d = data;
 	struct task_group *child;
-	unsigned long total, sum = 0;
+	u64 total, sum = 0;
 	u64 period, runtime;
 
 	period = ktime_to_ns(tg->rt_bandwidth.rt_period);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 9594355a3681..c95584191d58 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2907,7 +2907,7 @@ extern void init_cfs_throttle_work(struct task_struct *p);
 #define MAX_BW_BITS	(64 - BW_SHIFT)
 #define MAX_BW		((1ULL << MAX_BW_BITS) - 1)
 
-extern unsigned long to_ratio(u64 period, u64 runtime);
+extern u64 to_ratio(u64 period, u64 runtime);
 
 extern void init_entity_runnable_average(struct sched_entity *se);
 extern void post_init_entity_util_avg(struct task_struct *p);
