Diffstat (limited to 'arch/x86/kvm/svm.c')
-rw-r--r--  arch/x86/kvm/svm.c  141
1 file changed, 98 insertions(+), 43 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 6b0d5fa5bab3..e2ee264740c7 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -27,6 +27,8 @@
#include <asm/desc.h>
+#define __ex(x) __kvm_handle_fault_on_reboot(x)
+
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
@@ -129,17 +131,17 @@ static inline void push_irq(struct kvm_vcpu *vcpu, u8 irq)
static inline void clgi(void)
{
- asm volatile (SVM_CLGI);
+ asm volatile (__ex(SVM_CLGI));
}
static inline void stgi(void)
{
- asm volatile (SVM_STGI);
+ asm volatile (__ex(SVM_STGI));
}
static inline void invlpga(unsigned long addr, u32 asid)
{
- asm volatile (SVM_INVLPGA :: "a"(addr), "c"(asid));
+ asm volatile (__ex(SVM_INVLPGA) :: "a"(addr), "c"(asid));
}
static inline unsigned long kvm_read_cr2(void)
@@ -270,19 +272,11 @@ static int has_svm(void)
static void svm_hardware_disable(void *garbage)
{
- struct svm_cpu_data *svm_data
- = per_cpu(svm_data, raw_smp_processor_id());
-
- if (svm_data) {
- uint64_t efer;
+ uint64_t efer;
- wrmsrl(MSR_VM_HSAVE_PA, 0);
- rdmsrl(MSR_EFER, efer);
- wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK);
- per_cpu(svm_data, raw_smp_processor_id()) = NULL;
- __free_page(svm_data->save_area);
- kfree(svm_data);
- }
+ wrmsrl(MSR_VM_HSAVE_PA, 0);
+ rdmsrl(MSR_EFER, efer);
+ wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK);
}
static void svm_hardware_enable(void *garbage)
@@ -321,6 +315,19 @@ static void svm_hardware_enable(void *garbage)
page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
}
+static void svm_cpu_uninit(int cpu)
+{
+ struct svm_cpu_data *svm_data
+ = per_cpu(svm_data, raw_smp_processor_id());
+
+ if (!svm_data)
+ return;
+
+ per_cpu(svm_data, raw_smp_processor_id()) = NULL;
+ __free_page(svm_data->save_area);
+ kfree(svm_data);
+}
+
static int svm_cpu_init(int cpu)
{
struct svm_cpu_data *svm_data;
@@ -446,7 +453,8 @@ static __init int svm_hardware_setup(void)
if (npt_enabled) {
printk(KERN_INFO "kvm: Nested Paging enabled\n");
kvm_enable_tdp();
- }
+ } else
+ kvm_disable_tdp();
return 0;
@@ -458,6 +466,11 @@ err:
static __exit void svm_hardware_unsetup(void)
{
+ int cpu;
+
+ for_each_online_cpu(cpu)
+ svm_cpu_uninit(cpu);
+
__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
iopm_base = 0;
}
@@ -707,10 +720,6 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
rdtscll(vcpu->arch.host_tsc);
}
-static void svm_vcpu_decache(struct kvm_vcpu *vcpu)
-{
-}
-
static void svm_cache_regs(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -949,7 +958,9 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
{
- return to_svm(vcpu)->db_regs[dr];
+ unsigned long val = to_svm(vcpu)->db_regs[dr];
+ KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
+ return val;
}
static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
@@ -997,13 +1008,28 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
struct kvm *kvm = svm->vcpu.kvm;
u64 fault_address;
u32 error_code;
+ bool event_injection = false;
if (!irqchip_in_kernel(kvm) &&
- is_external_interrupt(exit_int_info))
+ is_external_interrupt(exit_int_info)) {
+ event_injection = true;
push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);
+ }
fault_address = svm->vmcb->control.exit_info_2;
error_code = svm->vmcb->control.exit_info_1;
+
+ if (!npt_enabled)
+ KVMTRACE_3D(PAGE_FAULT, &svm->vcpu, error_code,
+ (u32)fault_address, (u32)(fault_address >> 32),
+ handler);
+ else
+ KVMTRACE_3D(TDP_FAULT, &svm->vcpu, error_code,
+ (u32)fault_address, (u32)(fault_address >> 32),
+ handler);
+
+ if (event_injection)
+ kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
}
@@ -1081,6 +1107,19 @@ static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
return kvm_emulate_pio(&svm->vcpu, kvm_run, in, size, port);
}
+static int nmi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+ KVMTRACE_0D(NMI, &svm->vcpu, handler);
+ return 1;
+}
+
+static int intr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+ ++svm->vcpu.stat.irq_exits;
+ KVMTRACE_0D(INTR, &svm->vcpu, handler);
+ return 1;
+}
+
static int nop_on_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
return 1;
@@ -1219,6 +1258,9 @@ static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
if (svm_get_msr(&svm->vcpu, ecx, &data))
kvm_inject_gp(&svm->vcpu, 0);
else {
+ KVMTRACE_3D(MSR_READ, &svm->vcpu, ecx, (u32)data,
+ (u32)(data >> 32), handler);
+
svm->vmcb->save.rax = data & 0xffffffff;
svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
svm->next_rip = svm->vmcb->save.rip + 2;
@@ -1284,16 +1326,19 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
case MSR_K7_EVNTSEL1:
case MSR_K7_EVNTSEL2:
case MSR_K7_EVNTSEL3:
+ case MSR_K7_PERFCTR0:
+ case MSR_K7_PERFCTR1:
+ case MSR_K7_PERFCTR2:
+ case MSR_K7_PERFCTR3:
/*
- * only support writing 0 to the performance counters for now
- * to make Windows happy. Should be replaced by a real
- * performance counter emulation later.
+ * Just discard all writes to the performance counters; this
+ * should keep both older linux and windows 64-bit guests
+ * happy
*/
- if (data != 0)
- goto unhandled;
+ pr_unimpl(vcpu, "unimplemented perfctr wrmsr: 0x%x data 0x%llx\n", ecx, data);
+
break;
default:
- unhandled:
return kvm_set_msr_common(vcpu, ecx, data);
}
return 0;
@@ -1304,6 +1349,10 @@ static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
u64 data = (svm->vmcb->save.rax & -1u)
| ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
+
+ KVMTRACE_3D(MSR_WRITE, &svm->vcpu, ecx, (u32)data, (u32)(data >> 32),
+ handler);
+
svm->next_rip = svm->vmcb->save.rip + 2;
if (svm_set_msr(&svm->vcpu, ecx, data))
kvm_inject_gp(&svm->vcpu, 0);
@@ -1323,6 +1372,8 @@ static int msr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
static int interrupt_window_interception(struct vcpu_svm *svm,
struct kvm_run *kvm_run)
{
+ KVMTRACE_0D(PEND_INTR, &svm->vcpu, handler);
+
svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
/*
@@ -1364,8 +1415,8 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
[SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
[SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception,
[SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
- [SVM_EXIT_INTR] = nop_on_interception,
- [SVM_EXIT_NMI] = nop_on_interception,
+ [SVM_EXIT_INTR] = intr_interception,
+ [SVM_EXIT_NMI] = nmi_interception,
[SVM_EXIT_SMI] = nop_on_interception,
[SVM_EXIT_INIT] = nop_on_interception,
[SVM_EXIT_VINTR] = interrupt_window_interception,
@@ -1397,6 +1448,9 @@ static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
struct vcpu_svm *svm = to_svm(vcpu);
u32 exit_code = svm->vmcb->control.exit_code;
+ KVMTRACE_3D(VMEXIT, vcpu, exit_code, (u32)svm->vmcb->save.rip,
+ (u32)((u64)svm->vmcb->save.rip >> 32), entryexit);
+
if (npt_enabled) {
int mmu_reload = 0;
if ((vcpu->arch.cr0 ^ svm->vmcb->save.cr0) & X86_CR0_PG) {
@@ -1470,6 +1524,8 @@ static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
{
struct vmcb_control_area *control;
+ KVMTRACE_1D(INJ_VIRQ, &svm->vcpu, (u32)irq, handler);
+
control = &svm->vmcb->control;
control->int_vector = irq;
control->int_ctl &= ~V_INTR_PRIO_MASK;
@@ -1660,9 +1716,9 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
sync_lapic_to_cr8(vcpu);
save_host_msrs(vcpu);
- fs_selector = read_fs();
- gs_selector = read_gs();
- ldt_selector = read_ldt();
+ fs_selector = kvm_read_fs();
+ gs_selector = kvm_read_gs();
+ ldt_selector = kvm_read_ldt();
svm->host_cr2 = kvm_read_cr2();
svm->host_dr6 = read_dr6();
svm->host_dr7 = read_dr7();
@@ -1716,17 +1772,17 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
/* Enter guest mode */
"push %%rax \n\t"
"mov %c[vmcb](%[svm]), %%rax \n\t"
- SVM_VMLOAD "\n\t"
- SVM_VMRUN "\n\t"
- SVM_VMSAVE "\n\t"
+ __ex(SVM_VMLOAD) "\n\t"
+ __ex(SVM_VMRUN) "\n\t"
+ __ex(SVM_VMSAVE) "\n\t"
"pop %%rax \n\t"
#else
/* Enter guest mode */
"push %%eax \n\t"
"mov %c[vmcb](%[svm]), %%eax \n\t"
- SVM_VMLOAD "\n\t"
- SVM_VMRUN "\n\t"
- SVM_VMSAVE "\n\t"
+ __ex(SVM_VMLOAD) "\n\t"
+ __ex(SVM_VMRUN) "\n\t"
+ __ex(SVM_VMSAVE) "\n\t"
"pop %%eax \n\t"
#endif
@@ -1795,9 +1851,9 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
write_dr7(svm->host_dr7);
kvm_write_cr2(svm->host_cr2);
- load_fs(fs_selector);
- load_gs(gs_selector);
- load_ldt(ldt_selector);
+ kvm_load_fs(fs_selector);
+ kvm_load_gs(gs_selector);
+ kvm_load_ldt(ldt_selector);
load_host_msrs(vcpu);
reload_tss(vcpu);
@@ -1889,7 +1945,6 @@ static struct kvm_x86_ops svm_x86_ops = {
.prepare_guest_switch = svm_prepare_guest_switch,
.vcpu_load = svm_vcpu_load,
.vcpu_put = svm_vcpu_put,
- .vcpu_decache = svm_vcpu_decache,
.set_guest_debug = svm_guest_debug,
.get_msr = svm_get_msr,