diff options
Diffstat (limited to 'include/linux/preempt.h')
| -rw-r--r-- | include/linux/preempt.h | 11 | 
1 file changed, 4 insertions, 7 deletions
diff --git a/include/linux/preempt.h b/include/linux/preempt.h index 1fad1c8a4c76..102202185d7a 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -372,7 +372,7 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier,  /*   * Migrate-Disable and why it is undesired.   * - * When a preempted task becomes elegible to run under the ideal model (IOW it + * When a preempted task becomes eligible to run under the ideal model (IOW it   * becomes one of the M highest priority tasks), it might still have to wait   * for the preemptee's migrate_disable() section to complete. Thereby suffering   * a reduction in bandwidth in the exact duration of the migrate_disable() @@ -387,7 +387,7 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier,   * - a lower priority tasks; which under preempt_disable() could've instantly   *   migrated away when another CPU becomes available, is now constrained   *   by the ability to push the higher priority task away, which might itself be - *   in a migrate_disable() section, reducing it's available bandwidth. + *   in a migrate_disable() section, reducing its available bandwidth.   *   * IOW it trades latency / moves the interference term, but it stays in the   * system, and as long as it remains unbounded, the system is not fully @@ -399,7 +399,7 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier,   * PREEMPT_RT breaks a number of assumptions traditionally held. By forcing a   * number of primitives into becoming preemptible, they would also allow   * migration. This turns out to break a bunch of per-cpu usage. To this end, - * all these primitives employ migirate_disable() to restore this implicit + * all these primitives employ migrate_disable() to restore this implicit   * assumption.   *   * This is a 'temporary' work-around at best. 
The correct solution is getting @@ -407,7 +407,7 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier,   * per-cpu locking or short preempt-disable regions.   *   * The end goal must be to get rid of migrate_disable(), alternatively we need - * a schedulability theory that does not depend on abritrary migration. + * a schedulability theory that does not depend on arbitrary migration.   *   *   * Notes on the implementation. @@ -424,8 +424,6 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier,   *       work-conserving schedulers.   *   */ -extern void migrate_disable(void); -extern void migrate_enable(void);  /**   * preempt_disable_nested - Disable preemption inside a normally preempt disabled section @@ -471,7 +469,6 @@ static __always_inline void preempt_enable_nested(void)  DEFINE_LOCK_GUARD_0(preempt, preempt_disable(), preempt_enable())  DEFINE_LOCK_GUARD_0(preempt_notrace, preempt_disable_notrace(), preempt_enable_notrace()) -DEFINE_LOCK_GUARD_0(migrate, migrate_disable(), migrate_enable())  #ifdef CONFIG_PREEMPT_DYNAMIC  | 
