summary refs log tree commit diff
diff options
context:
space:
mode:
author Paolo Bonzini <pbonzini@redhat.com> 2026-04-13 12:51:34 +0300
committer Paolo Bonzini <pbonzini@redhat.com> 2026-04-13 12:51:34 +0300
commit 276f81a4912157a018141dfe71ab0f40e575a796 (patch)
tree dd929b93a5af92cf70fc9a3894ed6047e81abfb6
parent ed86b06bb35f6ccd2b81995803b8ec93ab97955b (diff)
parent de0bfdc7137d5132b71dd1fe7aa3ca3df4d68241 (diff)
download linux-276f81a4912157a018141dfe71ab0f40e575a796.tar.xz
Merge tag 'kvm-x86-misc-7.1' of https://github.com/kvm-x86/linux into HEAD
KVM x86 misc changes for 7.1

 - Advertise support for AVX512 Bit Matrix Multiply (BMM) when it's present
   in hardware (no additional emulation/virtualization required).

 - Immediately fail the build if a required #define is missing in one of
   KVM's headers that is included multiple times.

 - Reject SET_GUEST_DEBUG with -EBUSY if there's an already injected
   exception, mostly to prevent syzkaller from abusing the uAPI to trigger
   WARNs, but also because it can help prevent userspace from
   unintentionally crashing the VM.

 - Exempt SMM from CPUID faulting on Intel, as per the spec.

 - Misc hardening and cleanup changes.
-rw-r--r-- arch/x86/include/asm/cpufeatures.h      |  1
-rw-r--r-- arch/x86/include/asm/kvm-x86-ops.h      | 10
-rw-r--r-- arch/x86/include/asm/kvm-x86-pmu-ops.h  |  8
-rw-r--r-- arch/x86/include/asm/kvm_host.h         |  2
-rw-r--r-- arch/x86/kvm/cpuid.c                    |  6
-rw-r--r-- arch/x86/kvm/emulate.c                  | 10
-rw-r--r-- arch/x86/kvm/lapic.c                    | 13
-rw-r--r-- arch/x86/kvm/pmu.c                      |  2
-rw-r--r-- arch/x86/kvm/svm/avic.c                 |  4
-rw-r--r-- arch/x86/kvm/svm/sev.c                  |  8
-rw-r--r-- arch/x86/kvm/svm/svm.c                  | 39
-rw-r--r-- arch/x86/kvm/vmx/vmcs_shadow_fields.h   |  5
-rw-r--r-- arch/x86/kvm/vmx/vmx.c                  |  4
-rw-r--r-- arch/x86/kvm/x86.c                      | 26
-rw-r--r-- virt/kvm/kvm_main.c                     | 12
15 files changed, 81 insertions, 69 deletions
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index dbe104df339b..de7bd88e539d 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -473,6 +473,7 @@
#define X86_FEATURE_GP_ON_USER_CPUID (20*32+17) /* User CPUID faulting */
#define X86_FEATURE_PREFETCHI (20*32+20) /* Prefetch Data/Instruction to Cache Level */
+#define X86_FEATURE_AVX512_BMM (20*32+23) /* AVX512 Bit Matrix Multiply instructions */
#define X86_FEATURE_ERAPS (20*32+24) /* Enhanced Return Address Predictor Security */
#define X86_FEATURE_SBPB (20*32+27) /* Selective Branch Prediction Barrier */
#define X86_FEATURE_IBPB_BRTYPE (20*32+28) /* MSR_PRED_CMD[IBPB] flushes all branch type predictions */
diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index de709fb5bd76..3776cf5382a2 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -1,8 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#if !defined(KVM_X86_OP) || !defined(KVM_X86_OP_OPTIONAL)
-BUILD_BUG_ON(1)
-#endif
-
+#if !defined(KVM_X86_OP) || \
+ !defined(KVM_X86_OP_OPTIONAL) || \
+ !defined(KVM_X86_OP_OPTIONAL_RET0)
+#error Missing one or more KVM_X86_OP #defines
+#else
/*
* KVM_X86_OP() and KVM_X86_OP_OPTIONAL() are used to help generate
* both DECLARE/DEFINE_STATIC_CALL() invocations and
@@ -148,6 +149,7 @@ KVM_X86_OP_OPTIONAL(alloc_apic_backing_page)
KVM_X86_OP_OPTIONAL_RET0(gmem_prepare)
KVM_X86_OP_OPTIONAL_RET0(gmem_max_mapping_level)
KVM_X86_OP_OPTIONAL(gmem_invalidate)
+#endif
#undef KVM_X86_OP
#undef KVM_X86_OP_OPTIONAL
diff --git a/arch/x86/include/asm/kvm-x86-pmu-ops.h b/arch/x86/include/asm/kvm-x86-pmu-ops.h
index f0aa6996811f..d5452b3433b7 100644
--- a/arch/x86/include/asm/kvm-x86-pmu-ops.h
+++ b/arch/x86/include/asm/kvm-x86-pmu-ops.h
@@ -1,7 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#if !defined(KVM_X86_PMU_OP) || !defined(KVM_X86_PMU_OP_OPTIONAL)
-BUILD_BUG_ON(1)
-#endif
+#if !defined(KVM_X86_PMU_OP) || \
+ !defined(KVM_X86_PMU_OP_OPTIONAL)
+#error Missing one or more KVM_X86_PMU_OP #defines
+#else
/*
* KVM_X86_PMU_OP() and KVM_X86_PMU_OP_OPTIONAL() are used to help generate
@@ -26,6 +27,7 @@ KVM_X86_PMU_OP_OPTIONAL(cleanup)
KVM_X86_PMU_OP_OPTIONAL(write_global_ctrl)
KVM_X86_PMU_OP(mediated_load)
KVM_X86_PMU_OP(mediated_put)
+#endif
#undef KVM_X86_PMU_OP
#undef KVM_X86_PMU_OP_OPTIONAL
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 6e4e3ef9b8c7..1d9fff94ac97 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1261,7 +1261,7 @@ struct kvm_x86_pmu_event_filter {
__u32 nr_excludes;
__u64 *includes;
__u64 *excludes;
- __u64 events[];
+ __u64 events[] __counted_by(nevents);
};
enum kvm_apicv_inhibit {
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 8137927e7387..e69156b54cff 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -1246,11 +1246,12 @@ void kvm_initialize_cpu_caps(void)
F(NULL_SEL_CLR_BASE),
/* UpperAddressIgnore */
F(AUTOIBRS),
- F(PREFETCHI),
EMULATED_F(NO_SMM_CTL_MSR),
/* PrefetchCtlMsr */
/* GpOnUserCpuid */
/* EPSF */
+ F(PREFETCHI),
+ F(AVX512_BMM),
F(ERAPS),
SYNTHESIZED_F(SBPB),
SYNTHESIZED_F(IBPB_BRTYPE),
@@ -2160,7 +2161,8 @@ int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
u32 eax, ebx, ecx, edx;
- if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0))
+ if (!is_smm(vcpu) && cpuid_fault_enabled(vcpu) &&
+ !kvm_require_cpl(vcpu, 0))
return 1;
eax = kvm_rax_read(vcpu);
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index c8e292e9a24d..500711c6f069 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -3583,10 +3583,10 @@ static int em_cpuid(struct x86_emulate_ctxt *ctxt)
u64 msr = 0;
ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
- if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
- ctxt->ops->cpl(ctxt)) {
+ if (!ctxt->ops->is_smm(ctxt) &&
+ (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT) &&
+ ctxt->ops->cpl(ctxt))
return emulate_gp(ctxt, 0);
- }
eax = reg_read(ctxt, VCPU_REGS_RAX);
ecx = reg_read(ctxt, VCPU_REGS_RCX);
@@ -3708,7 +3708,7 @@ static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
*/
static int em_fxsave(struct x86_emulate_ctxt *ctxt)
{
- struct fxregs_state fx_state;
+ struct fxregs_state fx_state = {};
int rc;
rc = check_fxsr(ctxt);
@@ -3738,7 +3738,7 @@ static int em_fxsave(struct x86_emulate_ctxt *ctxt)
static noinline int fxregs_fixup(struct fxregs_state *fx_state,
const size_t used_size)
{
- struct fxregs_state fx_tmp;
+ struct fxregs_state fx_tmp = {};
int rc;
rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 9381c58d4c85..e3ec4d8607c1 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -840,16 +840,16 @@ static int __pv_send_ipi(unsigned long *ipi_bitmap, struct kvm_apic_map *map,
{
int i, count = 0;
struct kvm_vcpu *vcpu;
+ size_t map_index;
if (min > map->max_apic_id)
return 0;
- min = array_index_nospec(min, map->max_apic_id + 1);
-
for_each_set_bit(i, ipi_bitmap,
- min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
- if (map->phys_map[min + i]) {
- vcpu = map->phys_map[min + i]->vcpu;
+ min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
+ map_index = array_index_nospec(min + i, map->max_apic_id + 1);
+ if (map->phys_map[map_index]) {
+ vcpu = map->phys_map[map_index]->vcpu;
count += kvm_apic_set_irq(vcpu, irq, NULL);
}
}
@@ -2657,6 +2657,9 @@ void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
{
struct kvm_lapic *apic = vcpu->arch.apic;
+ if (KVM_BUG_ON(!lapic_in_kernel(vcpu), vcpu->kvm))
+ return;
+
/*
* ICR is a single 64-bit register when x2APIC is enabled, all others
* registers hold 32-bit values. For legacy xAPIC, ICR writes need to
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index bd6b785cf261..e218352e3423 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -1256,7 +1256,7 @@ int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
r = -EFAULT;
if (copy_from_user(filter->events, user_filter->events,
- sizeof(filter->events[0]) * filter->nevents))
+ flex_array_size(filter, events, filter->nevents)))
goto cleanup;
r = prepare_filter_lists(filter);
diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index f7ec7914e3c4..2885c5993ebc 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -86,13 +86,13 @@ static const struct kernel_param_ops avic_ops = {
* Enable / disable AVIC. In "auto" mode (default behavior), AVIC is enabled
* for Zen4+ CPUs with x2AVIC (and all other criteria for enablement are met).
*/
-static int avic = AVIC_AUTO_MODE;
+static int __ro_after_init avic = AVIC_AUTO_MODE;
module_param_cb(avic, &avic_ops, &avic, 0444);
__MODULE_PARM_TYPE(avic, "bool");
module_param(enable_ipiv, bool, 0444);
-static bool force_avic;
+static bool __ro_after_init force_avic;
module_param_unsafe(force_avic, bool, 0444);
/* Note:
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 3f9c1aa39a0a..77ebc166abfd 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -52,18 +52,18 @@
#define SNP_GUEST_VMM_ERR_GENERIC (~0U)
/* enable/disable SEV support */
-static bool sev_enabled = true;
+static bool __ro_after_init sev_enabled = true;
module_param_named(sev, sev_enabled, bool, 0444);
/* enable/disable SEV-ES support */
-static bool sev_es_enabled = true;
+static bool __ro_after_init sev_es_enabled = true;
module_param_named(sev_es, sev_es_enabled, bool, 0444);
/* enable/disable SEV-SNP support */
-static bool sev_snp_enabled = true;
+static bool __ro_after_init sev_snp_enabled = true;
module_param_named(sev_snp, sev_snp_enabled, bool, 0444);
-static unsigned int nr_ciphertext_hiding_asids;
+static unsigned int __ro_after_init nr_ciphertext_hiding_asids;
module_param_named(ciphertext_hiding_asids, nr_ciphertext_hiding_asids, uint, 0444);
#define AP_RESET_HOLD_NONE 0
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index e6477affac9a..d304568588c7 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -110,52 +110,52 @@ static DEFINE_PER_CPU(u64, current_tsc_ratio);
* count only mode.
*/
-static unsigned short pause_filter_thresh = KVM_DEFAULT_PLE_GAP;
+static unsigned short __ro_after_init pause_filter_thresh = KVM_DEFAULT_PLE_GAP;
module_param(pause_filter_thresh, ushort, 0444);
-static unsigned short pause_filter_count = KVM_SVM_DEFAULT_PLE_WINDOW;
+static unsigned short __ro_after_init pause_filter_count = KVM_SVM_DEFAULT_PLE_WINDOW;
module_param(pause_filter_count, ushort, 0444);
/* Default doubles per-vcpu window every exit. */
-static unsigned short pause_filter_count_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
+static unsigned short __ro_after_init pause_filter_count_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
module_param(pause_filter_count_grow, ushort, 0444);
/* Default resets per-vcpu window every exit to pause_filter_count. */
-static unsigned short pause_filter_count_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
+static unsigned short __ro_after_init pause_filter_count_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
module_param(pause_filter_count_shrink, ushort, 0444);
/* Default is to compute the maximum so we can never overflow. */
-static unsigned short pause_filter_count_max = KVM_SVM_DEFAULT_PLE_WINDOW_MAX;
+static unsigned short __ro_after_init pause_filter_count_max = KVM_SVM_DEFAULT_PLE_WINDOW_MAX;
module_param(pause_filter_count_max, ushort, 0444);
/*
* Use nested page tables by default. Note, NPT may get forced off by
* svm_hardware_setup() if it's unsupported by hardware or the host kernel.
*/
-bool npt_enabled = true;
+bool __ro_after_init npt_enabled = true;
module_param_named(npt, npt_enabled, bool, 0444);
/* allow nested virtualization in KVM/SVM */
-static int nested = true;
+static int __ro_after_init nested = true;
module_param(nested, int, 0444);
/* enable/disable Next RIP Save */
-int nrips = true;
+int __ro_after_init nrips = true;
module_param(nrips, int, 0444);
/* enable/disable Virtual VMLOAD VMSAVE */
-static int vls = true;
+static int __ro_after_init vls = true;
module_param(vls, int, 0444);
/* enable/disable Virtual GIF */
-int vgif = true;
+int __ro_after_init vgif = true;
module_param(vgif, int, 0444);
/* enable/disable LBR virtualization */
-int lbrv = true;
+int __ro_after_init lbrv = true;
module_param(lbrv, int, 0444);
-static int tsc_scaling = true;
+static int __ro_after_init tsc_scaling = true;
module_param(tsc_scaling, int, 0444);
module_param(enable_device_posted_irqs, bool, 0444);
@@ -164,19 +164,19 @@ bool __read_mostly dump_invalid_vmcb;
module_param(dump_invalid_vmcb, bool, 0644);
-bool intercept_smi = true;
+bool __ro_after_init intercept_smi = true;
module_param(intercept_smi, bool, 0444);
-bool vnmi = true;
+bool __ro_after_init vnmi = true;
module_param(vnmi, bool, 0444);
module_param(enable_mediated_pmu, bool, 0444);
-static bool svm_gp_erratum_intercept = true;
+static bool __ro_after_init svm_gp_erratum_intercept = true;
static u8 rsm_ins_bytes[] = "\x0f\xaa";
-static unsigned long iopm_base;
+static unsigned long __read_mostly iopm_base;
DEFINE_PER_CPU(struct svm_cpu_data, svm_data);
@@ -5410,14 +5410,10 @@ static __init int svm_hardware_setup(void)
pr_err_ratelimited("NX (Execute Disable) not supported\n");
return -EOPNOTSUPP;
}
- kvm_enable_efer_bits(EFER_NX);
kvm_caps.supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS |
XFEATURE_MASK_BNDCSR);
- if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
- kvm_enable_efer_bits(EFER_FFXSR);
-
if (tsc_scaling) {
if (!boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
tsc_scaling = false;
@@ -5431,9 +5427,6 @@ static __init int svm_hardware_setup(void)
tsc_aux_uret_slot = kvm_add_user_return_msr(MSR_TSC_AUX);
- if (boot_cpu_has(X86_FEATURE_AUTOIBRS))
- kvm_enable_efer_bits(EFER_AUTOIBRS);
-
/* Check for pause filtering support */
if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
pause_filter_count = 0;
diff --git a/arch/x86/kvm/vmx/vmcs_shadow_fields.h b/arch/x86/kvm/vmx/vmcs_shadow_fields.h
index cad128d1657b..67e821c2be6d 100644
--- a/arch/x86/kvm/vmx/vmcs_shadow_fields.h
+++ b/arch/x86/kvm/vmx/vmcs_shadow_fields.h
@@ -1,6 +1,6 @@
#if !defined(SHADOW_FIELD_RO) && !defined(SHADOW_FIELD_RW)
-BUILD_BUG_ON(1)
-#endif
+#error Must #define at least one of SHADOW_FIELD_RO or SHADOW_FIELD_RW
+#else
#ifndef SHADOW_FIELD_RO
#define SHADOW_FIELD_RO(x, y)
@@ -74,6 +74,7 @@ SHADOW_FIELD_RW(HOST_GS_BASE, host_gs_base)
/* 64-bit */
SHADOW_FIELD_RO(GUEST_PHYSICAL_ADDRESS, guest_physical_address)
SHADOW_FIELD_RO(GUEST_PHYSICAL_ADDRESS_HIGH, guest_physical_address)
+#endif
#undef SHADOW_FIELD_RO
#undef SHADOW_FIELD_RW
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 8b24e682535b..d16427a079f6 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -8698,10 +8698,6 @@ __init int vmx_hardware_setup(void)
vmx_setup_user_return_msrs();
-
- if (boot_cpu_has(X86_FEATURE_NX))
- kvm_enable_efer_bits(EFER_NX);
-
if (boot_cpu_has(X86_FEATURE_MPX)) {
rdmsrq(MSR_IA32_BNDCFGS, host_bndcfgs);
WARN_ONCE(host_bndcfgs, "BNDCFGS in host will be lost");
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index fd1c4a36b593..0bbb314c32c0 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9998,6 +9998,18 @@ void kvm_setup_xss_caps(void)
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_setup_xss_caps);
+static void kvm_setup_efer_caps(void)
+{
+ if (kvm_cpu_cap_has(X86_FEATURE_NX))
+ kvm_enable_efer_bits(EFER_NX);
+
+ if (kvm_cpu_cap_has(X86_FEATURE_FXSR_OPT))
+ kvm_enable_efer_bits(EFER_FFXSR);
+
+ if (kvm_cpu_cap_has(X86_FEATURE_AUTOIBRS))
+ kvm_enable_efer_bits(EFER_AUTOIBRS);
+}
+
static inline void kvm_ops_update(struct kvm_x86_init_ops *ops)
{
memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops));
@@ -10134,6 +10146,8 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
if (r != 0)
goto out_mmu_exit;
+ kvm_setup_efer_caps();
+
enable_device_posted_irqs &= enable_apicv &&
irq_remapping_cap(IRQ_POSTING_CAP);
@@ -10736,12 +10750,10 @@ static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu,
__kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) |
X86_EFLAGS_RF);
- if (vcpu->arch.exception.vector == DB_VECTOR) {
- kvm_deliver_exception_payload(vcpu, &vcpu->arch.exception);
- if (vcpu->arch.dr7 & DR7_GD) {
- vcpu->arch.dr7 &= ~DR7_GD;
- kvm_update_dr7(vcpu);
- }
+ if (vcpu->arch.exception.vector == DB_VECTOR &&
+ vcpu->arch.dr7 & DR7_GD) {
+ vcpu->arch.dr7 &= ~DR7_GD;
+ kvm_update_dr7(vcpu);
}
kvm_inject_exception(vcpu);
@@ -12529,7 +12541,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
r = -EBUSY;
- if (kvm_is_exception_pending(vcpu))
+ if (kvm_is_exception_pending(vcpu) || vcpu->arch.exception.injected)
goto out;
if (dbg->control & KVM_GUESTDBG_INJECT_DB)
kvm_queue_exception(vcpu, DB_VECTOR);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 9093251beb39..46d79fdde6f5 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -76,22 +76,22 @@ MODULE_DESCRIPTION("Kernel-based Virtual Machine (KVM) Hypervisor");
MODULE_LICENSE("GPL");
/* Architectures should define their poll value according to the halt latency */
-unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
+unsigned int __read_mostly halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
module_param(halt_poll_ns, uint, 0644);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(halt_poll_ns);
/* Default doubles per-vcpu halt_poll_ns. */
-unsigned int halt_poll_ns_grow = 2;
+unsigned int __read_mostly halt_poll_ns_grow = 2;
module_param(halt_poll_ns_grow, uint, 0644);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(halt_poll_ns_grow);
/* The start value to grow halt_poll_ns from */
-unsigned int halt_poll_ns_grow_start = 10000; /* 10us */
+unsigned int __read_mostly halt_poll_ns_grow_start = 10000; /* 10us */
module_param(halt_poll_ns_grow_start, uint, 0644);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(halt_poll_ns_grow_start);
/* Default halves per-vcpu halt_poll_ns. */
-unsigned int halt_poll_ns_shrink = 2;
+unsigned int __read_mostly halt_poll_ns_shrink = 2;
module_param(halt_poll_ns_shrink, uint, 0644);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(halt_poll_ns_shrink);
@@ -99,7 +99,7 @@ EXPORT_SYMBOL_FOR_KVM_INTERNAL(halt_poll_ns_shrink);
* Allow direct access (from KVM or the CPU) without MMU notifier protection
* to unpinned pages.
*/
-static bool allow_unsafe_mappings;
+static bool __ro_after_init allow_unsafe_mappings;
module_param(allow_unsafe_mappings, bool, 0444);
/*
@@ -5574,7 +5574,7 @@ static struct miscdevice kvm_dev = {
};
#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
-bool enable_virt_at_load = true;
+bool __ro_after_init enable_virt_at_load = true;
module_param(enable_virt_at_load, bool, 0444);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(enable_virt_at_load);