author	Vitaly Kuznetsov <vkuznets@redhat.com>	2018-06-22 20:06:23 +0300
committer	Thomas Gleixner <tglx@linutronix.de>	2018-07-03 10:00:33 +0300
commit	d8e6b232cfdd5d141c03e40a14c1c781480ea05e (patch)
tree	7da4e56bb4512352f4e8594012ec174d5000b43d /arch/x86/hyperv/hv_apic.c
parent	53e52966901a5b14caa2a7c77428a693fe71f734 (diff)
x86/hyper-v: Use 'fast' hypercall for HVCALL_SEND_IPI
The current Hyper-V TLFS (v5.0b) claims that the HvCallSendSyntheticClusterIpi
hypercall can't be 'fast' (i.e. pass its parameters through registers), but
apparently this is not true: Windows always uses the 'fast' version. We can do
the same in Linux too.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Michael Kelley <mikelley@microsoft.com>
Cc: devel@linuxdriverproject.org
Cc: "K. Y. Srinivasan" <kys@microsoft.com>
Cc: Haiyang Zhang <haiyangz@microsoft.com>
Cc: Stephen Hemminger <sthemmin@microsoft.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Tianyu Lan <Tianyu.Lan@microsoft.com>
Cc: "Michael Kelley (EOSG)" <Michael.H.Kelley@microsoft.com>
Link: https://lkml.kernel.org/r/20180622170625.30688-3-vkuznets@redhat.com
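This change builds on hv_do_fast_hypercall16(), added to
arch/x86/include/asm/mshyperv.h by the parent commit (53e52966901a). For
context, a simplified sketch of that helper follows; it is an illustration,
not the exact kernel source (the retpoline/CALL_NOSPEC plumbing and the
32-bit variant are omitted). A 'fast' hypercall sets HV_HYPERCALL_FAST_BIT
in the hypercall control word and carries its 16 bytes of input in RDX and
R8, so it needs neither the per-cpu input page nor the interrupt disabling
that protects that page.

static inline u64 hv_do_fast_hypercall16(u16 code, u64 input1, u64 input2)
{
	/* Bit 16 of the control word selects register-based input. */
	u64 hv_status, control = (u64)code | HV_HYPERCALL_FAST_BIT;

	__asm__ __volatile__("mov %3, %%r8\n\t"
			     "call *%4"
			     : "=a" (hv_status),	/* status returned in RAX */
			       "+c" (control),		/* control word in RCX */
			       "+d" (input1)		/* first input word in RDX */
			     : "r" (input2),		/* moved into R8 above */
			       "r" (hv_hypercall_pg)	/* the hypercall page */
			     : "cc", "r8", "r9", "r10", "r11");

	return hv_status;
}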
Diffstat (limited to 'arch/x86/hyperv/hv_apic.c')
-rw-r--r--	arch/x86/hyperv/hv_apic.c | 22 ++++++----------------
1 file changed, 6 insertions(+), 16 deletions(-)
diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c
index f68855499391..90055f89223b 100644
--- a/arch/x86/hyperv/hv_apic.c
+++ b/arch/x86/hyperv/hv_apic.c
@@ -128,10 +128,8 @@ ipi_mask_ex_done:
 static bool __send_ipi_mask(const struct cpumask *mask, int vector)
 {
 	int cur_cpu, vcpu;
-	struct ipi_arg_non_ex **arg;
-	struct ipi_arg_non_ex *ipi_arg;
+	struct ipi_arg_non_ex ipi_arg;
 	int ret = 1;
-	unsigned long flags;
 
 	if (cpumask_empty(mask))
 		return true;
@@ -145,16 +143,8 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector)
 	if ((ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
 		return __send_ipi_mask_ex(mask, vector);
 
-	local_irq_save(flags);
-	arg = (struct ipi_arg_non_ex **)this_cpu_ptr(hyperv_pcpu_input_arg);
-
-	ipi_arg = *arg;
-	if (unlikely(!ipi_arg))
-		goto ipi_mask_done;
-
-	ipi_arg->vector = vector;
-	ipi_arg->reserved = 0;
-	ipi_arg->cpu_mask = 0;
+	ipi_arg.vector = vector;
+	ipi_arg.cpu_mask = 0;
 
 	for_each_cpu(cur_cpu, mask) {
 		vcpu = hv_cpu_number_to_vp_number(cur_cpu);
@@ -165,13 +155,13 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector)
 		if (vcpu >= 64)
 			goto ipi_mask_done;
 
-		__set_bit(vcpu, (unsigned long *)&ipi_arg->cpu_mask);
+		__set_bit(vcpu, (unsigned long *)&ipi_arg.cpu_mask);
 	}
 
-	ret = hv_do_hypercall(HVCALL_SEND_IPI, ipi_arg, NULL);
+	ret = hv_do_fast_hypercall16(HVCALL_SEND_IPI, ipi_arg.vector,
+				     ipi_arg.cpu_mask);
 
 ipi_mask_done:
-	local_irq_restore(flags);
 	return ((ret == 0) ? true : false);
 }
 
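A 16-byte fast hypercall suffices here because the whole HVCALL_SEND_IPI
input block is only 16 bytes. A sketch of its layout, following the
struct ipi_arg_non_ex declaration in this tree (the field comments are
illustrative, added here rather than taken from the source):

struct ipi_arg_non_ex {
	u32 vector;	/* interrupt vector: low half of input word one */
	u32 reserved;	/* high half of input word one, must be zero */
	u64 cpu_mask;	/* input word two: one bit per VP, hence vcpu < 64 */
};

Note that passing ipi_arg.vector (a u32) as the first u64 argument
zero-extends it, which is why the explicit ipi_arg->reserved = 0; can be
dropped along with the per-cpu input page and the local_irq_save()/
local_irq_restore() pair that guarded it.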