author     Juergen Gross <jgross@suse.com>    2022-11-02 10:47:02 +0300
committer  Borislav Petkov <bp@suse.de>       2022-11-10 15:12:44 +0300
commit     4ad7149e46d048d3a543c96d35c1d255208dd33a
tree       a83d3352e5435a395b70648ae6bcad3f516c63d2
parent     d5f66d5d10611978c3a93cc94a811d74e0cf6cbc
x86/mtrr: Split MTRR-specific handling from cache dis/enabling
Split the MTRR-specific actions from cache_disable() and cache_enable()
into new functions mtrr_disable() and mtrr_enable().
Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lore.kernel.org/r/20221102074713.21493-6-jgross@suse.com
 arch/x86/include/asm/mtrr.h        |  4 ++++
 arch/x86/kernel/cpu/mtrr/generic.c | 26 +++++++++++++++++++-------
 2 files changed, 23 insertions(+), 7 deletions(-)
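In the new mtrr_disable() below, the mask ~0xcff applied to MSR_MTRRdefType is a magic constant. As a reading aid, here is a minimal, self-contained sketch of which bits that mask covers; the macro and function names are illustrative only and are not introduced by this patch, while the bit positions follow the Intel SDM description of IA32_MTRR_DEF_TYPE:

#include <stdint.h>

/* IA32_MTRR_DEF_TYPE (MSR_MTRRdefType) low-word bits, per the Intel SDM. */
#define MTRR_DEFTYPE_TYPE  0x000000ffu  /* bits 7:0 - default memory type      */
#define MTRR_DEFTYPE_FE    0x00000400u  /* bit  10  - fixed-range MTRRs enable */
#define MTRR_DEFTYPE_E     0x00000800u  /* bit  11  - MTRRs enable             */

/* 0xcff == MTRR_DEFTYPE_TYPE | MTRR_DEFTYPE_FE | MTRR_DEFTYPE_E */
static inline uint32_t mtrr_deftype_disabled(uint32_t deftype_lo)
{
        /*
         * Clearing these bits disables all MTRRs and forces the default
         * memory type to 0 (uncachable), which is exactly what
         * mtrr_disable() does via deftype_lo & ~0xcff below.
         */
        return deftype_lo & ~(MTRR_DEFTYPE_TYPE | MTRR_DEFTYPE_FE | MTRR_DEFTYPE_E);
}

Default type 0 is UC (uncachable), so the combined effect is: MTRRs off and all memory treated as uncached while the cache itself is disabled.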
diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index 76d726074c16..12a16caed395 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -48,6 +48,8 @@ extern void mtrr_aps_init(void);
 extern void mtrr_bp_restore(void);
 extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
 extern int amd_special_default_mtrr(void);
+void mtrr_disable(void);
+void mtrr_enable(void);
 # else
 static inline u8 mtrr_type_lookup(u64 addr, u64 end, u8 *uniform)
 {
@@ -87,6 +89,8 @@ static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
 #define set_mtrr_aps_delayed_init() do {} while (0)
 #define mtrr_aps_init() do {} while (0)
 #define mtrr_bp_restore() do {} while (0)
+#define mtrr_disable() do {} while (0)
+#define mtrr_enable() do {} while (0)
 # endif
 
 #ifdef CONFIG_COMPAT
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 2f3fc28e5e02..0db0770e75f6 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -716,6 +716,21 @@ static unsigned long set_mtrr_state(void)
         return change_mask;
 }
 
+void mtrr_disable(void)
+{
+        /* Save MTRR state */
+        rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
+
+        /* Disable MTRRs, and set the default type to uncached */
+        mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);
+}
+
+void mtrr_enable(void)
+{
+        /* Intel (P6) standard MTRRs */
+        mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
+}
+
 /*
  * Disable and enable caches. Needed for changing MTRRs and the PAT MSR.
  *
@@ -764,11 +779,8 @@ void cache_disable(void) __acquires(cache_disable_lock)
         count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
         flush_tlb_local();
 
-        /* Save MTRR state */
-        rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
-
-        /* Disable MTRRs, and set the default type to uncached */
-        mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);
+        if (cpu_feature_enabled(X86_FEATURE_MTRR))
+                mtrr_disable();
 
         /* Again, only flush caches if we have to. */
         if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
@@ -781,8 +793,8 @@ void cache_enable(void) __releases(cache_disable_lock)
         count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
         flush_tlb_local();
 
-        /* Intel (P6) standard MTRRs */
-        mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
+        if (cpu_feature_enabled(X86_FEATURE_MTRR))
+                mtrr_enable();
 
         /* Enable caches */
         write_cr0(read_cr0() & ~X86_CR0_CD);
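For context, this is roughly how the split ends up being used by callers; a sketch only, not code from this series, and the function name is made up. Code that reprograms MTRRs keeps wrapping the update in cache_disable()/cache_enable(), while the MTRR-specific MSR save/restore now happens inside those helpers, and only on CPUs that actually have MTRRs:

/*
 * Illustrative caller shape after this patch: cache_disable() turns off
 * caching and, if X86_FEATURE_MTRR is set, calls mtrr_disable();
 * cache_enable() undoes both in reverse order.
 */
static void example_reprogram_mtrr(unsigned int reg, u64 base, u64 mask)
{
        cache_disable();        /* caches off, MTRRs disabled if present */

        /* ... write the MTRRphysBase/MTRRphysMask MSR pair for 'reg' ... */

        cache_enable();         /* MTRRs re-enabled if present, caches back on */
}

This keeps the cache dis/enable path usable on CPUs without MTRRs, which is what the cpu_feature_enabled(X86_FEATURE_MTRR) checks in the diff are there for.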