author     Linus Torvalds <torvalds@linux-foundation.org>  2009-06-28 22:12:52 +0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-06-28 22:12:52 +0400
commit     9a8fb9ee7a80f5280388b98dc7636d537866fa72 (patch)
tree       c52f1af6afdc9d1e56d548c92ff23ee00a010dd8
parent     4142ac678a864972ce1dfad6d9cb22f075fee5de (diff)
parent     bde892232532ed522bb56b04576d07f91e59b3c7 (diff)
download   linux-9a8fb9ee7a80f5280388b98dc7636d537866fa72.tar.xz
Merge branch 'kvm-updates/2.6.31' of git://git.kernel.org/pub/scm/virt/kvm/kvm
* 'kvm-updates/2.6.31' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: shut up uninit compiler warning in paging_tmpl.h
  KVM: Ignore reads to K7 EVNTSEL MSRs
  KVM: VMX: Handle vmx instruction vmexits
  KVM: s390: Allow stfle instruction in the guest
  KVM: kvm/x86_emulate.c toggle_interruptibility() should be static
  KVM: ia64: fix ia64 build due to missing kallsyms_lookup() and double export
  KVM: protect concurrent make_all_cpus_request
  KVM: MMU: Allow 4K ptes with bit 7 (PAT) set
  KVM: Fix dirty bit tracking for slots with large pages
-rw-r--r--  arch/ia64/kvm/kvm_lib.c           |  6
-rw-r--r--  arch/s390/include/asm/kvm_host.h  |  4
-rw-r--r--  arch/s390/kvm/kvm-s390.c          | 23
-rw-r--r--  arch/s390/kvm/priv.c              |  2
-rw-r--r--  arch/x86/kvm/mmu.c                |  6
-rw-r--r--  arch/x86/kvm/paging_tmpl.h        |  2
-rw-r--r--  arch/x86/kvm/vmx.c                | 15
-rw-r--r--  arch/x86/kvm/x86.c                |  1
-rw-r--r--  arch/x86/kvm/x86_emulate.c        |  2
-rw-r--r--  include/linux/kvm_host.h          |  1
-rw-r--r--  virt/kvm/kvm_main.c               |  5
11 files changed, 59 insertions(+), 8 deletions(-)
diff --git a/arch/ia64/kvm/kvm_lib.c b/arch/ia64/kvm/kvm_lib.c
index a85cb611ecd7..f1268b8e6f9e 100644
--- a/arch/ia64/kvm/kvm_lib.c
+++ b/arch/ia64/kvm/kvm_lib.c
@@ -11,5 +11,11 @@
*
*/
#undef CONFIG_MODULES
+#include <linux/module.h>
+#undef CONFIG_KALLSYMS
+#undef EXPORT_SYMBOL
+#undef EXPORT_SYMBOL_GPL
+#define EXPORT_SYMBOL(sym)
+#define EXPORT_SYMBOL_GPL(sym)
#include "../../../lib/vsprintf.c"
#include "../../../lib/ctype.c"
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index a27d0d5a6f86..1cd02f6073a0 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -99,7 +99,9 @@ struct kvm_s390_sie_block {
__u8 reservedd0[48]; /* 0x00d0 */
__u64 gcr[16]; /* 0x0100 */
__u64 gbea; /* 0x0180 */
- __u8 reserved188[120]; /* 0x0188 */
+ __u8 reserved188[24]; /* 0x0188 */
+ __u32 fac; /* 0x01a0 */
+ __u8 reserved1a4[92]; /* 0x01a4 */
} __attribute__((packed));
struct kvm_vcpu_stat {
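The SIE control block layout is defined by hardware, so the patch only carves the new fac word out of the existing reserved area without shifting anything else. A standalone sketch of how one might sanity-check that; the struct name and the checks are illustrative, only the offsets and sizes come from the diff above.

/* sketch: the tail of the control block from offset 0x0188 onward */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct sie_block_tail {
	uint8_t  reserved188[24];	/* 0x0188 */
	uint32_t fac;			/* 0x01a0 */
	uint8_t  reserved1a4[92];	/* 0x01a4 */
} __attribute__((packed));

/* fac must land at 0x01a0, and the tail must stay exactly as long as the
 * old reserved188[120] field it replaces */
static_assert(offsetof(struct sie_block_tail, fac) == 0x01a0 - 0x0188, "fac offset");
static_assert(sizeof(struct sie_block_tail) == 120, "tail size unchanged");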
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index c18b21d6991c..90d9d1ba258b 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -25,6 +25,7 @@
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
+#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"
@@ -69,6 +70,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ NULL }
};
+static unsigned long long *facilities;
/* Section: not file related */
void kvm_arch_hardware_enable(void *garbage)
@@ -288,6 +290,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
vcpu->arch.sie_block->gmsor = vcpu->kvm->arch.guest_origin;
vcpu->arch.sie_block->ecb = 2;
vcpu->arch.sie_block->eca = 0xC1002001U;
+ vcpu->arch.sie_block->fac = (int) (long) facilities;
hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
(unsigned long) vcpu);
@@ -739,11 +742,29 @@ gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
static int __init kvm_s390_init(void)
{
- return kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
+ int ret;
+ ret = kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
+ if (ret)
+ return ret;
+
+ /*
+ * guests can ask for up to 255+1 double words, we need a full page
+ * to hold the maximum amount of facilites. On the other hand, we
+ * only set facilities that are known to work in KVM.
+ */
+ facilities = (unsigned long long *) get_zeroed_page(GFP_DMA);
+ if (!facilities) {
+ kvm_exit();
+ return -ENOMEM;
+ }
+ stfle(facilities, 1);
+ facilities[0] &= 0xff00fff3f0700000ULL;
+ return 0;
}
static void __exit kvm_s390_exit(void)
{
+ free_page((unsigned long) facilities);
kvm_exit();
}
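To make the stfle handling above concrete: the host's stfle stores one bit per installed facility into the zeroed page, and module init then masks facility word 0 down to the bits KVM is prepared to virtualize before the page is handed to each vcpu's SIE block. A standalone sketch of that masking step, using the constant from the patch; the helper and variable names are illustrative.

#include <stdint.h>
#include <stdio.h>

/* whitelist applied in kvm_s390_init() above */
#define KVM_FACILITY_MASK 0xff00fff3f0700000ULL

static uint64_t guest_facilities(uint64_t host_fac_word0)
{
	/* keep only facility bits KVM knows how to handle */
	return host_fac_word0 & KVM_FACILITY_MASK;
}

int main(void)
{
	uint64_t host = ~0ULL;	/* pretend every facility is installed */

	printf("guest sees facility word 0 = %#llx\n",
	       (unsigned long long)guest_facilities(host));
	return 0;
}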
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 93ecd06e1a74..d426aac8095d 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -158,7 +158,7 @@ static int handle_stfl(struct kvm_vcpu *vcpu)
vcpu->stat.instruction_stfl++;
/* only pass the facility bits, which we can handle */
- facility_list &= 0xfe00fff3;
+ facility_list &= 0xff00fff3;
rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
&facility_list, sizeof(facility_list));
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 5c3d6e81a7dc..7030b5f911bf 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2157,7 +2157,7 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
else
/* 32 bits PSE 4MB page */
context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
- context->rsvd_bits_mask[1][0] = ~0ull;
+ context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
break;
case PT32E_ROOT_LEVEL:
context->rsvd_bits_mask[0][2] =
@@ -2170,7 +2170,7 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
rsvd_bits(maxphyaddr, 62) |
rsvd_bits(13, 20); /* large page */
- context->rsvd_bits_mask[1][0] = ~0ull;
+ context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
break;
case PT64_ROOT_LEVEL:
context->rsvd_bits_mask[0][3] = exb_bit_rsvd |
@@ -2186,7 +2186,7 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
rsvd_bits(maxphyaddr, 51) |
rsvd_bits(13, 20); /* large page */
- context->rsvd_bits_mask[1][0] = ~0ull;
+ context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
break;
}
}
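For reference, rsvd_bits_mask is indexed first by pte bit 7 and then by paging level; before this change a 4K pte (level 1) with bit 7 set always matched the ~0ull mask and took a reserved-bit fault, even though bit 7 at that level is PAT rather than a page-size bit, so the fix copies the normal 4K mask into the bit-7 slot. A hedged standalone sketch of how such masks are built and consulted; rsvd_bits() mirrors the kernel helper, the lookup function is illustrative.

#include <stdbool.h>
#include <stdint.h>

/* set bits s..e inclusive, same formula as the kernel's rsvd_bits() */
static uint64_t rsvd_bits(int s, int e)
{
	return ((1ULL << (e - s + 1)) - 1) << s;
}

/* mask[bit7][level-1]: bit 7 of the pte selects the "large page" variant */
static bool pte_hits_reserved_bits(uint64_t pte, uint64_t mask[2][4], int level)
{
	return (pte & mask[(pte >> 7) & 1][level - 1]) != 0;
}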
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 258e4591e1ca..67785f635399 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -281,7 +281,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
{
unsigned access = gw->pt_access;
struct kvm_mmu_page *shadow_page;
- u64 spte, *sptep;
+ u64 spte, *sptep = NULL;
int direct;
gfn_t table_gfn;
int r;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e770bf349ec4..356a0ce85c68 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3012,6 +3012,12 @@ static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
return 1;
}
+static int handle_vmx_insn(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+}
+
static int handle_invlpg(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
@@ -3198,6 +3204,15 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
[EXIT_REASON_HLT] = handle_halt,
[EXIT_REASON_INVLPG] = handle_invlpg,
[EXIT_REASON_VMCALL] = handle_vmcall,
+ [EXIT_REASON_VMCLEAR] = handle_vmx_insn,
+ [EXIT_REASON_VMLAUNCH] = handle_vmx_insn,
+ [EXIT_REASON_VMPTRLD] = handle_vmx_insn,
+ [EXIT_REASON_VMPTRST] = handle_vmx_insn,
+ [EXIT_REASON_VMREAD] = handle_vmx_insn,
+ [EXIT_REASON_VMRESUME] = handle_vmx_insn,
+ [EXIT_REASON_VMWRITE] = handle_vmx_insn,
+ [EXIT_REASON_VMOFF] = handle_vmx_insn,
+ [EXIT_REASON_VMON] = handle_vmx_insn,
[EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold,
[EXIT_REASON_APIC_ACCESS] = handle_apic_access,
[EXIT_REASON_WBINVD] = handle_wbinvd,
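The new table entries route every VMX instruction exit to handle_vmx_insn(), which injects #UD because this tree does not support nested VMX; the table itself is a function-pointer array indexed by exit reason. A standalone sketch of that dispatch pattern; the enum values and handler bodies are placeholders, only the table-of-handlers idea comes from the diff.

#include <stdio.h>

enum { EXIT_HLT, EXIT_VMCLEAR, EXIT_VMLAUNCH, NR_EXIT_REASONS };

typedef int (*exit_handler)(void);

static int handle_halt(void)	 { puts("emulate hlt"); return 1; }
static int handle_vmx_insn(void) { puts("inject #UD");  return 1; }

static exit_handler handlers[NR_EXIT_REASONS] = {
	[EXIT_HLT]	= handle_halt,
	[EXIT_VMCLEAR]	= handle_vmx_insn,	/* all VMX insns share one handler */
	[EXIT_VMLAUNCH]	= handle_vmx_insn,
};

int main(void)
{
	unsigned int reason = EXIT_VMCLEAR;

	if (reason < NR_EXIT_REASONS && handlers[reason])
		return handlers[reason]() ? 0 : 1;
	return 1;	/* unknown exit reason: bail out to userspace */
}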
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 249540f98513..fe5474aec41a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -898,6 +898,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
case MSR_VM_HSAVE_PA:
case MSR_P6_EVNTSEL0:
case MSR_P6_EVNTSEL1:
+ case MSR_K7_EVNTSEL0:
data = 0;
break;
case MSR_MTRRcap:
diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c
index c1b6c232e02b..616de4628d60 100644
--- a/arch/x86/kvm/x86_emulate.c
+++ b/arch/x86/kvm/x86_emulate.c
@@ -1361,7 +1361,7 @@ static inline int writeback(struct x86_emulate_ctxt *ctxt,
return 0;
}
-void toggle_interruptibility(struct x86_emulate_ctxt *ctxt, u32 mask)
+static void toggle_interruptibility(struct x86_emulate_ctxt *ctxt, u32 mask)
{
u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(ctxt->vcpu, mask);
/*
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index aacc5449f586..16713dc672e4 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -125,6 +125,7 @@ struct kvm_kernel_irq_routing_entry {
struct kvm {
struct mutex lock; /* protects the vcpus array and APIC accesses */
spinlock_t mmu_lock;
+ spinlock_t requests_lock;
struct rw_semaphore slots_lock;
struct mm_struct *mm; /* userspace tied to this vm */
int nmemslots;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 764554350ed8..2884baf1d5f9 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -746,6 +746,7 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
cpumask_clear(cpus);
me = get_cpu();
+ spin_lock(&kvm->requests_lock);
for (i = 0; i < KVM_MAX_VCPUS; ++i) {
vcpu = kvm->vcpus[i];
if (!vcpu)
@@ -762,6 +763,7 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
smp_call_function_many(cpus, ack_flush, NULL, 1);
else
called = false;
+ spin_unlock(&kvm->requests_lock);
put_cpu();
free_cpumask_var(cpus);
return called;
@@ -982,6 +984,7 @@ static struct kvm *kvm_create_vm(void)
kvm->mm = current->mm;
atomic_inc(&kvm->mm->mm_count);
spin_lock_init(&kvm->mmu_lock);
+ spin_lock_init(&kvm->requests_lock);
kvm_io_bus_init(&kvm->pio_bus);
mutex_init(&kvm->lock);
kvm_io_bus_init(&kvm->mmio_bus);
@@ -1194,6 +1197,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
if (!new.dirty_bitmap)
goto out_free;
memset(new.dirty_bitmap, 0, dirty_bytes);
+ if (old.npages)
+ kvm_arch_flush_shadow(kvm);
}
#endif /* not defined CONFIG_S390 */
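Two kvm_main.c changes above deserve a note: requests_lock serializes make_all_cpus_request() so that marking vcpus and kicking the running ones happen as one step against concurrent callers, and the memslot path now flushes shadow pages when an existing slot gains a dirty bitmap. A standalone sketch of the locking pattern, with a pthread mutex standing in for the spinlock and the IPI reduced to a flag; all names are illustrative, not the kernel API.

#include <pthread.h>
#include <stdbool.h>

struct vcpu {
	bool request_pending;
	bool running;
};

static pthread_mutex_t requests_lock = PTHREAD_MUTEX_INITIALIZER;

static bool make_all_cpus_request(struct vcpu *vcpus, int n)
{
	bool need_kick = false;
	int i;

	/* mirrors spin_lock(&kvm->requests_lock): the set of vcpus that get
	 * the request and the set that get kicked are decided atomically */
	pthread_mutex_lock(&requests_lock);
	for (i = 0; i < n; i++) {
		vcpus[i].request_pending = true;
		if (vcpus[i].running)
			need_kick = true;	/* real code adds the cpu to an IPI mask */
	}
	/* the real code sends one IPI to the collected cpumask here */
	pthread_mutex_unlock(&requests_lock);

	return need_kick;
}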