author    | Linus Torvalds <torvalds@linux-foundation.org> | 2016-03-16 19:55:35 +0300
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-03-16 19:55:35 +0300
commit    | 10dc3747661bea9215417b659449bb7b8ed3df2c (patch)
tree      | d943974b4941203a7db2fabe4896852cf0f16bc4 /drivers/clocksource
parent    | 047486d8e7c2a7e8d75b068b69cb67b47364f5d4 (diff)
parent    | f958ee745f70b60d0e41927cab2c073104bc70c2 (diff)
download  | linux-10dc3747661bea9215417b659449bb7b8ed3df2c.tar.xz
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Paolo Bonzini:
"One of the largest releases for KVM... Hardly any generic
changes, but lots of architecture-specific updates.
ARM:
- VHE support so that we can run the kernel at EL2 on ARMv8.1 systems
- PMU support for guests
- 32bit world switch rewritten in C
- various optimizations to the vgic save/restore code.
PPC:
- enabled KVM-VFIO integration ("VFIO device")
- optimizations to speed up IPIs between vcpus
- in-kernel handling of IOMMU hypercalls
- support for dynamic DMA windows (DDW).
s390:
- provide the floating point registers via sync regs
- separated instruction vs. data accesses
- dirty log improvements for huge guests
- bugfixes and documentation improvements.
x86:
- Hyper-V VMBus hypercall userspace exit
- alternative implementation of lowest-priority interrupts using
vector hashing (for better VT-d posted interrupt support)
- fixed guest debugging with nested virtualization
- improved interrupt tracking in the in-kernel IOAPIC
- generic infrastructure for tracking writes to guest
memory - currently its only use is to speed up the legacy shadow
paging (pre-EPT) case, but in the future it will be used for
virtual GPUs as well
- much cleanup (LAPIC, kvmclock, MMU, PIT), including ubsan fixes"
* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (217 commits)
KVM: x86: remove eager_fpu field of struct kvm_vcpu_arch
KVM: x86: disable MPX if host did not enable MPX XSAVE features
arm64: KVM: vgic-v3: Only wipe LRs on vcpu exit
arm64: KVM: vgic-v3: Reset LRs at boot time
arm64: KVM: vgic-v3: Do not save an LR known to be empty
arm64: KVM: vgic-v3: Save maintenance interrupt state only if required
arm64: KVM: vgic-v3: Avoid accessing ICH registers
KVM: arm/arm64: vgic-v2: Make GICD_SGIR quicker to hit
KVM: arm/arm64: vgic-v2: Only wipe LRs on vcpu exit
KVM: arm/arm64: vgic-v2: Reset LRs at boot time
KVM: arm/arm64: vgic-v2: Do not save an LR known to be empty
KVM: arm/arm64: vgic-v2: Move GICH_ELRSR saving to its own function
KVM: arm/arm64: vgic-v2: Save maintenance interrupt state only if required
KVM: arm/arm64: vgic-v2: Avoid accessing GICH registers
KVM: s390: allocate only one DMA page per VM
KVM: s390: enable STFLE interpretation only if enabled for the guest
KVM: s390: wake up when the VCPU cpu timer expires
KVM: s390: step the VCPU timer while in enabled wait
KVM: s390: protect VCPU cpu timer with a seqcount
KVM: s390: step VCPU cpu timer during kvm_run ioctl
...
Diffstat (limited to 'drivers/clocksource')
-rw-r--r-- | drivers/clocksource/arm_arch_timer.c | 96
1 file changed, 59 insertions, 37 deletions
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index f0dd9d42bc7b..5152b3898155 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -75,7 +75,7 @@ static int arch_timer_ppi[MAX_TIMER_PPI];
 static struct clock_event_device __percpu *arch_timer_evt;
-static bool arch_timer_use_virtual = true;
+static enum ppi_nr arch_timer_uses_ppi = VIRT_PPI;
 static bool arch_timer_c3stop;
 static bool arch_timer_mem_use_virtual;
@@ -271,16 +271,22 @@ static void __arch_timer_setup(unsigned type,
 		clk->name = "arch_sys_timer";
 		clk->rating = 450;
 		clk->cpumask = cpumask_of(smp_processor_id());
-		if (arch_timer_use_virtual) {
-			clk->irq = arch_timer_ppi[VIRT_PPI];
+		clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
+		switch (arch_timer_uses_ppi) {
+		case VIRT_PPI:
 			clk->set_state_shutdown = arch_timer_shutdown_virt;
 			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
 			clk->set_next_event = arch_timer_set_next_event_virt;
-		} else {
-			clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
+			break;
+		case PHYS_SECURE_PPI:
+		case PHYS_NONSECURE_PPI:
+		case HYP_PPI:
 			clk->set_state_shutdown = arch_timer_shutdown_phys;
 			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
 			clk->set_next_event = arch_timer_set_next_event_phys;
+			break;
+		default:
+			BUG();
 		}
 	} else {
 		clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
@@ -350,17 +356,20 @@ static void arch_counter_set_user_access(void)
 	arch_timer_set_cntkctl(cntkctl);
 }
+static bool arch_timer_has_nonsecure_ppi(void)
+{
+	return (arch_timer_uses_ppi == PHYS_SECURE_PPI &&
+		arch_timer_ppi[PHYS_NONSECURE_PPI]);
+}
+
 static int arch_timer_setup(struct clock_event_device *clk)
 {
 	__arch_timer_setup(ARCH_CP15_TIMER, clk);
-	if (arch_timer_use_virtual)
-		enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0);
-	else {
-		enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0);
-		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
-			enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
-	}
+	enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], 0);
+
+	if (arch_timer_has_nonsecure_ppi())
+		enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
 	arch_counter_set_user_access();
 	if (IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM))
@@ -402,7 +411,7 @@ static void arch_timer_banner(unsigned type)
 		(unsigned long)arch_timer_rate / 1000000,
 		(unsigned long)(arch_timer_rate / 10000) % 100,
 		type & ARCH_CP15_TIMER ?
-		arch_timer_use_virtual ? "virt" : "phys" :
+		(arch_timer_uses_ppi == VIRT_PPI) ? "virt" : "phys" :
 			"",
 		type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? "/" : "",
 		type & ARCH_MEM_TIMER ?
@@ -472,7 +481,7 @@ static void __init arch_counter_register(unsigned type)
 	/* Register the CP15 based counter if we have one */
 	if (type & ARCH_CP15_TIMER) {
-		if (IS_ENABLED(CONFIG_ARM64) || arch_timer_use_virtual)
+		if (IS_ENABLED(CONFIG_ARM64) || arch_timer_uses_ppi == VIRT_PPI)
 			arch_timer_read_counter = arch_counter_get_cntvct;
 		else
 			arch_timer_read_counter = arch_counter_get_cntpct;
@@ -502,13 +511,9 @@ static void arch_timer_stop(struct clock_event_device *clk)
 	pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
 		 clk->irq, smp_processor_id());
-	if (arch_timer_use_virtual)
-		disable_percpu_irq(arch_timer_ppi[VIRT_PPI]);
-	else {
-		disable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI]);
-		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
-			disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);
-	}
+	disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]);
+	if (arch_timer_has_nonsecure_ppi())
+		disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);
 	clk->set_state_shutdown(clk);
 }
@@ -574,12 +579,14 @@ static int __init arch_timer_register(void)
 		goto out;
 	}
-	if (arch_timer_use_virtual) {
-		ppi = arch_timer_ppi[VIRT_PPI];
+	ppi = arch_timer_ppi[arch_timer_uses_ppi];
+	switch (arch_timer_uses_ppi) {
+	case VIRT_PPI:
 		err = request_percpu_irq(ppi, arch_timer_handler_virt,
 					 "arch_timer", arch_timer_evt);
-	} else {
-		ppi = arch_timer_ppi[PHYS_SECURE_PPI];
+		break;
+	case PHYS_SECURE_PPI:
+	case PHYS_NONSECURE_PPI:
 		err = request_percpu_irq(ppi, arch_timer_handler_phys,
 					 "arch_timer", arch_timer_evt);
 		if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
@@ -590,6 +597,13 @@ static int __init arch_timer_register(void)
 				free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
 						arch_timer_evt);
 		}
+		break;
+	case HYP_PPI:
+		err = request_percpu_irq(ppi, arch_timer_handler_phys,
+					 "arch_timer", arch_timer_evt);
+		break;
+	default:
+		BUG();
 	}
 	if (err) {
@@ -614,15 +628,10 @@ static int __init arch_timer_register(void)
 out_unreg_notify:
 	unregister_cpu_notifier(&arch_timer_cpu_nb);
 out_free_irq:
-	if (arch_timer_use_virtual)
-		free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
-	else {
-		free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
+	free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
+	if (arch_timer_has_nonsecure_ppi())
+		free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
 				arch_timer_evt);
-		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
-			free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
-					arch_timer_evt);
-	}
 out_free:
 	free_percpu(arch_timer_evt);
@@ -709,12 +718,25 @@ static void __init arch_timer_init(void)
 	 *
 	 * If no interrupt provided for virtual timer, we'll have to
 	 * stick to the physical timer. It'd better be accessible...
+	 *
+	 * On ARMv8.1 with VH extensions, the kernel runs in HYP. VHE
+	 * accesses to CNTP_*_EL1 registers are silently redirected to
+	 * their CNTHP_*_EL2 counterparts, and use a different PPI
+	 * number.
 	 */
 	if (is_hyp_mode_available() || !arch_timer_ppi[VIRT_PPI]) {
-		arch_timer_use_virtual = false;
+		bool has_ppi;
+
+		if (is_kernel_in_hyp_mode()) {
+			arch_timer_uses_ppi = HYP_PPI;
+			has_ppi = !!arch_timer_ppi[HYP_PPI];
+		} else {
+			arch_timer_uses_ppi = PHYS_SECURE_PPI;
+			has_ppi = (!!arch_timer_ppi[PHYS_SECURE_PPI] ||
+				   !!arch_timer_ppi[PHYS_NONSECURE_PPI]);
+		}
-		if (!arch_timer_ppi[PHYS_SECURE_PPI] ||
-		    !arch_timer_ppi[PHYS_NONSECURE_PPI]) {
+		if (!has_ppi) {
 			pr_warn("arch_timer: No interrupt available, giving up\n");
 			return;
 		}
@@ -747,7 +769,7 @@ static void __init arch_timer_of_init(struct device_node *np)
 	 */
 	if (IS_ENABLED(CONFIG_ARM) &&
 	    of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
-		arch_timer_use_virtual = false;
+		arch_timer_uses_ppi = PHYS_SECURE_PPI;
 	arch_timer_init();
 }
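To see the shape of the change in one place: the patch replaces the boolean arch_timer_use_virtual with an enum-valued selector, arch_timer_uses_ppi, and routes every enable/disable/request/free of the per-CPU timer interrupt through that single index, with a HYP_PPI case for ARMv8.1 VHE kernels running at EL2. The snippet below is a minimal standalone user-space sketch of that selection logic, not the driver code itself; the exact enum ordering, the stub helpers and the example PPI numbers are assumptions made purely for illustration.

/*
 * Standalone sketch of the PPI selection introduced by this merge.
 * Enum member names mirror the driver; their order, the stub helpers
 * and the example interrupt numbers are illustrative assumptions.
 */
#include <stdbool.h>
#include <stdio.h>

enum ppi_nr {
	PHYS_SECURE_PPI,
	PHYS_NONSECURE_PPI,
	VIRT_PPI,
	HYP_PPI,
	MAX_TIMER_PPI
};

/* PPIs discovered from the device tree; 0 means "not provided" (assumed). */
static int arch_timer_ppi[MAX_TIMER_PPI];

/* Single selector replacing the old arch_timer_use_virtual bool. */
static enum ppi_nr arch_timer_uses_ppi = VIRT_PPI;

/* Stub: true when the kernel itself runs at EL2 (ARMv8.1 VHE). */
static bool is_kernel_in_hyp_mode(void) { return false; }

/* Stub: true when a hypervisor mode is available to the kernel. */
static bool is_hyp_mode_available(void) { return false; }

static bool arch_timer_has_nonsecure_ppi(void)
{
	/* Only the secure-PHYS configuration also drives the non-secure PPI. */
	return arch_timer_uses_ppi == PHYS_SECURE_PPI &&
	       arch_timer_ppi[PHYS_NONSECURE_PPI];
}

/* Mirrors the decision in arch_timer_init(): fall back from VIRT_PPI. */
static bool select_timer_ppi(void)
{
	bool has_ppi = arch_timer_ppi[VIRT_PPI];

	if (is_hyp_mode_available() || !arch_timer_ppi[VIRT_PPI]) {
		if (is_kernel_in_hyp_mode()) {
			/* VHE: CNTP_* accesses reach CNTHP_*_EL2, so use the HYP PPI. */
			arch_timer_uses_ppi = HYP_PPI;
			has_ppi = arch_timer_ppi[HYP_PPI];
		} else {
			arch_timer_uses_ppi = PHYS_SECURE_PPI;
			has_ppi = arch_timer_ppi[PHYS_SECURE_PPI] ||
				  arch_timer_ppi[PHYS_NONSECURE_PPI];
		}
	}
	return has_ppi;
}

int main(void)
{
	arch_timer_ppi[VIRT_PPI] = 27;		/* made-up example PPI numbers */
	arch_timer_ppi[PHYS_SECURE_PPI] = 29;

	if (!select_timer_ppi()) {
		puts("arch_timer: No interrupt available, giving up");
		return 1;
	}
	printf("using PPI index %d (irq %d), extra non-secure PPI: %s\n",
	       arch_timer_uses_ppi, arch_timer_ppi[arch_timer_uses_ppi],
	       arch_timer_has_nonsecure_ppi() ? "yes" : "no");
	return 0;
}

The design point the sketch tries to make visible: every call site in the driver now indexes arch_timer_ppi[] with the one selector instead of branching on a bool, so adding the VHE case only required a new enum value and a new arm in the selection logic rather than touching each enable/disable path.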