author		Andrea Righi <arighi@nvidia.com>	2025-06-05 12:30:26 +0300
committer	Tejun Heo <tj@kernel.org>		2025-06-09 19:25:35 +0300
commit		086ed90a6453873d4c5d51a18c26b3548af4fa24
tree		9a40aed67a473df0b5f0021447a5bc43f0dd6b3f
parent		e212743bd727c3fcffcd73b6c1d906546ee83805
sched_ext: Make scx_locked_rq() inline
scx_locked_rq() is used both from ext.c and ext_idle.c, so move it to
ext.h as a static inline function.

No functional changes.

v2: Rename locked_rq to scx_locked_rq_state, expose it and make
scx_locked_rq() inline, as suggested by Tejun.

Signed-off-by: Andrea Righi <arighi@nvidia.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
 kernel/sched/ext.c | 13 ++-----------
 kernel/sched/ext.h | 11 +++++++++++
 2 files changed, 13 insertions(+), 11 deletions(-)
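The change follows the standard kernel idiom for sharing a per-CPU variable
across translation units: DEFINE_PER_CPU() in one .c file allocates the
storage, DECLARE_PER_CPU() in a header makes it visible to other compilation
units, and a static inline accessor in the header lets every includer read it
without a function call. A minimal sketch of that idiom, using hypothetical
foo.c/foo.h names that are not part of this patch:

/* foo.h (hypothetical): declare the per-CPU variable and an inline reader. */
#include <linux/percpu.h>

DECLARE_PER_CPU(int, foo_state);

static inline int foo_get(void)
{
	/*
	 * __this_cpu_read() assumes the caller is pinned to a CPU (e.g.
	 * holds a lock with IRQs or preemption disabled), as the writer
	 * update_locked_rq() is in the patch below.
	 */
	return __this_cpu_read(foo_state);
}

/*
 * foo.c (hypothetical): define the storage exactly once, without "static",
 * so the DECLARE_PER_CPU() in the header can resolve to it.
 */
#include "foo.h"

DEFINE_PER_CPU(int, foo_state);

void foo_set(int v)
{
	__this_cpu_write(foo_state, v);
}

This is exactly the shape of the patch: scx_locked_rq_state drops its static
qualifier in ext.c, gains a DECLARE_PER_CPU() in ext.h, and scx_locked_rq()
becomes a header inline.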
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 3e483138dff6..3623ba98d7d8 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -1247,7 +1247,7 @@ static void scx_kf_disallow(u32 mask)
  * This allows kfuncs to safely operate on rq from any scx ops callback,
  * knowing which rq is already locked.
  */
-static DEFINE_PER_CPU(struct rq *, locked_rq);
+DEFINE_PER_CPU(struct rq *, scx_locked_rq_state);
 
 static inline void update_locked_rq(struct rq *rq)
 {
@@ -1258,16 +1258,7 @@ static inline void update_locked_rq(struct rq *rq)
 	 */
 	if (rq)
 		lockdep_assert_rq_held(rq);
-	__this_cpu_write(locked_rq, rq);
-}
-
-/*
- * Return the rq currently locked from an scx callback, or NULL if no rq is
- * locked.
- */
-static inline struct rq *scx_locked_rq(void)
-{
-	return __this_cpu_read(locked_rq);
+	__this_cpu_write(scx_locked_rq_state, rq);
 }
 
 #define SCX_CALL_OP(sch, mask, op, rq, args...)				\
diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h
index d30f2d1bc00d..6d6d00e9de20 100644
--- a/kernel/sched/ext.h
+++ b/kernel/sched/ext.h
@@ -20,6 +20,17 @@ static inline bool scx_rq_bypassing(struct rq *rq)
 
 DECLARE_STATIC_KEY_FALSE(scx_ops_allow_queued_wakeup);
 
+DECLARE_PER_CPU(struct rq *, scx_locked_rq_state);
+
+/*
+ * Return the rq currently locked from an scx callback, or NULL if no rq is
+ * locked.
+ */
+static inline struct rq *scx_locked_rq(void)
+{
+	return __this_cpu_read(scx_locked_rq_state);
+}
+
 void scx_tick(struct rq *rq);
 void init_scx_entity(struct sched_ext_entity *scx);
 void scx_pre_fork(struct task_struct *p);
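As the comment moved into ext.h says, scx_locked_rq() returns the rq already
locked by the current scx callback, or NULL if none is. A hedged sketch of how
a kfunc might consume it; the helper name and the fallback branch are
illustrative, not part of this patch:

/* Hypothetical kfunc helper: act on task p's rq from an scx callback. */
static void example_touch_task_rq(struct task_struct *p)
{
	struct rq *rq = scx_locked_rq();

	if (rq == task_rq(p)) {
		/* The callback already holds p's rq lock; safe to use it. */
		update_rq_clock(rq);
	} else {
		/*
		 * No rq, or a different rq, is locked: a real caller would
		 * have to take p's rq lock itself (e.g. via task_rq_lock())
		 * before touching it.
		 */
	}
}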