summaryrefslogtreecommitdiff
path: root/arch
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2026-02-20 19:48:31 +0300
committerLinus Torvalds <torvalds@linux-foundation.org>2026-02-20 19:48:31 +0300
commitd31558c077d8be422b65e97974017c030b4bd91a (patch)
tree6b894308c43fb455ce02e515dd6bd91d1412835d /arch
parent8bf22c33e7a172fbc72464f4cc484d23a6b412ba (diff)
parent158ebb578cd5f7881fdc7c4ecebddcf9463f91fd (diff)
downloadlinux-d31558c077d8be422b65e97974017c030b4bd91a.tar.xz
Merge tag 'hyperv-next-signed-20260218' of git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux
Pull Hyper-V updates from Wei Liu: - Debugfs support for MSHV statistics (Nuno Das Neves) - Support for the integrated scheduler (Stanislav Kinsburskii) - Various fixes for MSHV memory management and hypervisor status handling (Stanislav Kinsburskii) - Expose more capabilities and flags for MSHV partition management (Anatol Belski, Muminul Islam, Magnus Kulke) - Miscellaneous fixes to improve code quality and stability (Carlos López, Ethan Nelson-Moore, Li RongQing, Michael Kelley, Mukesh Rathor, Purna Pavan Chandra Aekkaladevi, Stanislav Kinsburskii, Uros Bizjak) - PREEMPT_RT fixes for vmbus interrupts (Jan Kiszka) * tag 'hyperv-next-signed-20260218' of git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux: (34 commits) mshv: Handle insufficient root memory hypervisor statuses mshv: Handle insufficient contiguous memory hypervisor status mshv: Introduce hv_deposit_memory helper functions mshv: Introduce hv_result_needs_memory() helper function mshv: Add SMT_ENABLED_GUEST partition creation flag mshv: Add nested virtualization creation flag Drivers: hv: vmbus: Simplify allocation of vmbus_evt mshv: expose the scrub partition hypercall mshv: Add support for integrated scheduler mshv: Use try_cmpxchg() instead of cmpxchg() x86/hyperv: Fix error pointer dereference x86/hyperv: Reserve 3 interrupt vectors used exclusively by MSHV Drivers: hv: vmbus: Use kthread for vmbus interrupts on PREEMPT_RT x86/hyperv: Remove ASM_CALL_CONSTRAINT with VMMCALL insn x86/hyperv: Use savesegment() instead of inline asm() to save segment registers mshv: fix SRCU protection in irqfd resampler ack handler mshv: make field names descriptive in a header struct x86/hyperv: Update comment in hyperv_cleanup() mshv: clear eventfd counter on irqfd shutdown x86/hyperv: Use memremap()/memunmap() instead of ioremap_cache()/iounmap() ...
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/hyperv/hv_crash.c3
-rw-r--r--arch/x86/hyperv/hv_init.c20
-rw-r--r--arch/x86/hyperv/hv_vtl.c8
-rw-r--r--arch/x86/hyperv/ivm.c11
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c25
5 files changed, 50 insertions(+), 17 deletions(-)
diff --git a/arch/x86/hyperv/hv_crash.c b/arch/x86/hyperv/hv_crash.c
index a78e4fed5720..92da1b4f2e73 100644
--- a/arch/x86/hyperv/hv_crash.c
+++ b/arch/x86/hyperv/hv_crash.c
@@ -279,7 +279,6 @@ static void hv_notify_prepare_hyp(void)
static noinline __noclone void crash_nmi_callback(struct pt_regs *regs)
{
struct hv_input_disable_hyp_ex *input;
- u64 status;
int msecs = 1000, ccpu = smp_processor_id();
if (ccpu == 0) {
@@ -313,7 +312,7 @@ static noinline __noclone void crash_nmi_callback(struct pt_regs *regs)
input->rip = trampoline_pa;
input->arg = devirt_arg;
- status = hv_do_hypercall(HVCALL_DISABLE_HYP_EX, input, NULL);
+ (void)hv_do_hypercall(HVCALL_DISABLE_HYP_EX, input, NULL);
hv_panic_timeout_reboot();
}
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index 14de43f4bc6c..5dbe9bd67891 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -103,9 +103,9 @@ static int hyperv_init_ghcb(void)
*/
rdmsrq(MSR_AMD64_SEV_ES_GHCB, ghcb_gpa);
- /* Mask out vTOM bit. ioremap_cache() maps decrypted */
+ /* Mask out vTOM bit and map as decrypted */
ghcb_gpa &= ~ms_hyperv.shared_gpa_boundary;
- ghcb_va = (void *)ioremap_cache(ghcb_gpa, HV_HYP_PAGE_SIZE);
+ ghcb_va = memremap(ghcb_gpa, HV_HYP_PAGE_SIZE, MEMREMAP_WB | MEMREMAP_DEC);
if (!ghcb_va)
return -ENOMEM;
@@ -277,7 +277,7 @@ static int hv_cpu_die(unsigned int cpu)
if (hv_ghcb_pg) {
ghcb_va = (void **)this_cpu_ptr(hv_ghcb_pg);
if (*ghcb_va)
- iounmap(*ghcb_va);
+ memunmap(*ghcb_va);
*ghcb_va = NULL;
}
@@ -558,7 +558,6 @@ void __init hyperv_init(void)
memunmap(src);
hv_remap_tsc_clocksource();
- hv_root_crash_init();
hv_sleep_notifiers_register();
} else {
hypercall_msr.guest_physical_address = vmalloc_to_pfn(hv_hypercall_pg);
@@ -567,6 +566,9 @@ void __init hyperv_init(void)
hv_set_hypercall_pg(hv_hypercall_pg);
+ if (hv_root_partition()) /* after set hypercall pg */
+ hv_root_crash_init();
+
skip_hypercall_pg_init:
/*
* hyperv_init() is called before LAPIC is initialized: see
@@ -633,9 +635,13 @@ void hyperv_cleanup(void)
hv_ivm_msr_write(HV_X64_MSR_GUEST_OS_ID, 0);
/*
- * Reset hypercall page reference before reset the page,
- * let hypercall operations fail safely rather than
- * panic the kernel for using invalid hypercall page
+ * Reset hv_hypercall_pg before resetting it in the hypervisor.
+ * hv_set_hypercall_pg(NULL) is not used because at this point in the
+ * panic path other CPUs have been stopped, causing static_call_update()
+ * to hang. So resetting hv_hypercall_pg to cause hypercalls to fail
+ * cleanly is only operative on 32-bit builds. But this is OK as it is
+ * just a preventative measure to ease detecting a hypercall being made
+ * after this point, which shouldn't be happening anyway.
*/
hv_hypercall_pg = NULL;
diff --git a/arch/x86/hyperv/hv_vtl.c b/arch/x86/hyperv/hv_vtl.c
index c0edaed0efb3..9b6a9bc4ab76 100644
--- a/arch/x86/hyperv/hv_vtl.c
+++ b/arch/x86/hyperv/hv_vtl.c
@@ -110,7 +110,7 @@ static void hv_vtl_ap_entry(void)
static int hv_vtl_bringup_vcpu(u32 target_vp_index, int cpu, u64 eip_ignored)
{
- u64 status;
+ u64 status, rsp, rip;
int ret = 0;
struct hv_enable_vp_vtl *input;
unsigned long irq_flags;
@@ -123,9 +123,11 @@ static int hv_vtl_bringup_vcpu(u32 target_vp_index, int cpu, u64 eip_ignored)
struct desc_struct *gdt;
struct task_struct *idle = idle_thread_get(cpu);
- u64 rsp = (unsigned long)idle->thread.sp;
+ if (IS_ERR(idle))
+ return PTR_ERR(idle);
- u64 rip = (u64)&hv_vtl_ap_entry;
+ rsp = (unsigned long)idle->thread.sp;
+ rip = (u64)&hv_vtl_ap_entry;
native_store_gdt(&gdt_ptr);
store_idt(&idt_ptr);
diff --git a/arch/x86/hyperv/ivm.c b/arch/x86/hyperv/ivm.c
index 651771534cae..be7fad43a88d 100644
--- a/arch/x86/hyperv/ivm.c
+++ b/arch/x86/hyperv/ivm.c
@@ -25,6 +25,7 @@
#include <asm/e820/api.h>
#include <asm/desc.h>
#include <asm/msr.h>
+#include <asm/segment.h>
#include <uapi/asm/vmx.h>
#ifdef CONFIG_AMD_MEM_ENCRYPT
@@ -315,16 +316,16 @@ int hv_snp_boot_ap(u32 apic_id, unsigned long start_ip, unsigned int cpu)
vmsa->gdtr.base = gdtr.address;
vmsa->gdtr.limit = gdtr.size;
- asm volatile("movl %%es, %%eax;" : "=a" (vmsa->es.selector));
+ savesegment(es, vmsa->es.selector);
hv_populate_vmcb_seg(vmsa->es, vmsa->gdtr.base);
- asm volatile("movl %%cs, %%eax;" : "=a" (vmsa->cs.selector));
+ savesegment(cs, vmsa->cs.selector);
hv_populate_vmcb_seg(vmsa->cs, vmsa->gdtr.base);
- asm volatile("movl %%ss, %%eax;" : "=a" (vmsa->ss.selector));
+ savesegment(ss, vmsa->ss.selector);
hv_populate_vmcb_seg(vmsa->ss, vmsa->gdtr.base);
- asm volatile("movl %%ds, %%eax;" : "=a" (vmsa->ds.selector));
+ savesegment(ds, vmsa->ds.selector);
hv_populate_vmcb_seg(vmsa->ds, vmsa->gdtr.base);
vmsa->efer = native_read_msr(MSR_EFER);
@@ -391,7 +392,7 @@ u64 hv_snp_hypercall(u64 control, u64 param1, u64 param2)
register u64 __r8 asm("r8") = param2;
asm volatile("vmmcall"
- : "=a" (hv_status), ASM_CALL_CONSTRAINT,
+ : "=a" (hv_status),
"+c" (control), "+d" (param1), "+r" (__r8)
: : "cc", "memory", "r9", "r10", "r11");
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 579fb2c64cfd..89a2eb8a0722 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -478,6 +478,28 @@ int hv_get_hypervisor_version(union hv_hypervisor_version_info *info)
}
EXPORT_SYMBOL_GPL(hv_get_hypervisor_version);
+/*
+ * Reserved vectors hard coded in the hypervisor. If used outside, the hypervisor
+ * will either crash or hang or attempt to break into debugger.
+ */
+static void hv_reserve_irq_vectors(void)
+{
+ #define HYPERV_DBG_FASTFAIL_VECTOR 0x29
+ #define HYPERV_DBG_ASSERT_VECTOR 0x2C
+ #define HYPERV_DBG_SERVICE_VECTOR 0x2D
+
+ if (cpu_feature_enabled(X86_FEATURE_FRED))
+ return;
+
+ if (test_and_set_bit(HYPERV_DBG_ASSERT_VECTOR, system_vectors) ||
+ test_and_set_bit(HYPERV_DBG_SERVICE_VECTOR, system_vectors) ||
+ test_and_set_bit(HYPERV_DBG_FASTFAIL_VECTOR, system_vectors))
+ BUG();
+
+ pr_info("Hyper-V: reserve vectors: %d %d %d\n", HYPERV_DBG_ASSERT_VECTOR,
+ HYPERV_DBG_SERVICE_VECTOR, HYPERV_DBG_FASTFAIL_VECTOR);
+}
+
static void __init ms_hyperv_init_platform(void)
{
int hv_max_functions_eax, eax;
@@ -510,6 +532,9 @@ static void __init ms_hyperv_init_platform(void)
hv_identify_partition_type();
+ if (hv_root_partition())
+ hv_reserve_irq_vectors();
+
if (cc_platform_has(CC_ATTR_SNP_SECURE_AVIC))
ms_hyperv.hints |= HV_DEPRECATING_AEOI_RECOMMENDED;