diff options
Diffstat (limited to 'include')
-rw-r--r--  include/linux/rseq.h        |  8 ++++----
-rw-r--r--  include/linux/rseq_types.h  | 18 +++++++++++-----
2 files changed, 17 insertions(+), 9 deletions(-)
diff --git a/include/linux/rseq.h b/include/linux/rseq.h
index bf8a6bf315f3..4c0e8bdd2dd9 100644
--- a/include/linux/rseq.h
+++ b/include/linux/rseq.h
@@ -73,13 +73,13 @@ static __always_inline void rseq_sched_switch_event(struct task_struct *t)
 }
 
 /*
- * Invoked from __set_task_cpu() when a task migrates to enforce an IDs
- * update.
+ * Invoked from __set_task_cpu() when a task migrates or from
+ * mm_cid_schedin() when the CID changes to enforce an IDs update.
  *
  * This does not raise TIF_NOTIFY_RESUME as that happens in
  * rseq_sched_switch_event().
  */
-static __always_inline void rseq_sched_set_task_cpu(struct task_struct *t, unsigned int cpu)
+static __always_inline void rseq_sched_set_ids_changed(struct task_struct *t)
 {
 	t->rseq.event.ids_changed = true;
 }
@@ -168,7 +168,7 @@ static inline void rseq_fork(struct task_struct *t, u64 clone_flags)
 static inline void rseq_handle_slowpath(struct pt_regs *regs) { }
 static inline void rseq_signal_deliver(struct ksignal *ksig, struct pt_regs *regs) { }
 static inline void rseq_sched_switch_event(struct task_struct *t) { }
-static inline void rseq_sched_set_task_cpu(struct task_struct *t, unsigned int cpu) { }
+static inline void rseq_sched_set_ids_changed(struct task_struct *t) { }
 static inline void rseq_sched_set_task_mm_cid(struct task_struct *t, unsigned int cid) { }
 static inline void rseq_force_update(void) { }
 static inline void rseq_virt_userspace_exit(void) { }
diff --git a/include/linux/rseq_types.h b/include/linux/rseq_types.h
index 87854effe1ad..66b1482e1146 100644
--- a/include/linux/rseq_types.h
+++ b/include/linux/rseq_types.h
@@ -119,23 +119,31 @@ struct mm_cid_pcpu {
 /**
  * struct mm_mm_cid - Storage for per MM CID data
  * @pcpu:		Per CPU storage for CIDs associated to a CPU
+ * @percpu:		Set, when CIDs are in per CPU mode
+ * @transit:		Set to MM_CID_TRANSIT during a mode change transition phase
  * @max_cids:		The exclusive maximum CID value for allocation and convergence
+ * @lock:		Spinlock to protect all fields except @pcpu. It also protects
+ *			the MM cid cpumask and the MM cidmask bitmap.
+ * @mutex:		Mutex to serialize forks and exits related to this mm
  * @nr_cpus_allowed:	The number of CPUs in the per MM allowed CPUs map. The map
  *			is growth only.
  * @users:		The number of tasks sharing this MM. Separate from mm::mm_users
  *			as that is modified by mmget()/mm_put() by other entities which
  *			do not actually share the MM.
- * @lock:		Spinlock to protect all fields except @pcpu. It also protects
- *			the MM cid cpumask and the MM cidmask bitmap.
- * @mutex:		Mutex to serialize forks and exits related to this mm
  */
 struct mm_mm_cid {
+	/* Hotpath read mostly members */
 	struct mm_cid_pcpu	__percpu *pcpu;
+	unsigned int		percpu;
+	unsigned int		transit;
 	unsigned int		max_cids;
-	unsigned int		nr_cpus_allowed;
-	unsigned int		users;
+
 	raw_spinlock_t		lock;
 	struct mutex		mutex;
+
+	/* Low frequency modified */
+	unsigned int		nr_cpus_allowed;
+	unsigned int		users;
 } ____cacheline_aligned_in_smp;
 #else /* CONFIG_SCHED_MM_CID */
 struct mm_mm_cid { };
