From 29ecd66019047768080e8eeab4cd6582b28383a2 Mon Sep 17 00:00:00 2001
From: Paolo Bonzini
Date: Sun, 6 Sep 2015 16:24:50 +0200
Subject: KVM: x86: avoid uninitialized variable warning

This does not show up on all compiler versions, so it sneaked into the
first 4.3 pull request.  The fix is to mimic the logic of the "print
sptes" loop in the "fill array" loop.  Then both leaf and root can be
initialized unconditionally.

Note that "leaf" now points to the first unused element of the array,
not the last filled element.

Reported-by: Linus Torvalds
Signed-off-by: Paolo Bonzini
---
 arch/x86/kvm/mmu.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

(limited to 'arch/x86')

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index fb16a8ea3dee..69088a1ba509 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3309,13 +3309,14 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 
 	walk_shadow_page_lockless_begin(vcpu);
 
-	for (shadow_walk_init(&iterator, vcpu, addr), root = iterator.level;
+	for (shadow_walk_init(&iterator, vcpu, addr),
+	     leaf = root = iterator.level;
 	     shadow_walk_okay(&iterator);
 	     __shadow_walk_next(&iterator, spte)) {
-		leaf = iterator.level;
 		spte = mmu_spte_get_lockless(iterator.sptep);
 
 		sptes[leaf - 1] = spte;
+		leaf--;
 
 		if (!is_shadow_present_pte(spte))
 			break;
@@ -3329,7 +3330,7 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 	if (reserved) {
 		pr_err("%s: detect reserved bits on spte, addr 0x%llx, dump hierarchy:\n",
 		       __func__, addr);
-		while (root >= leaf) {
+		while (root > leaf) {
 			pr_err("------ spte 0x%llx level %d.\n",
 			       sptes[root - 1], root);
 			root--;
--
cgit v1.2.3
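
As a side note on the index convention above, the following is a minimal,
self-contained C sketch of the pattern the commit message describes; it is
not the KVM code itself, and the array size and spte values are invented
for illustration.  It shows how "leaf" ends up pointing at the first unused
slot once the fill loop decrements it after each store, and why the dump
loop must therefore stop at root > leaf rather than root >= leaf.

#include <stdio.h>

#define MAX_LEVEL 4	/* illustrative walk depth, not the real PT64 constant */

int main(void)
{
	unsigned long long sptes[MAX_LEVEL];
	int root, leaf, level;

	/* Both indices start at the root level, so neither can be left
	 * uninitialized even if the walk stops immediately. */
	leaf = root = MAX_LEVEL;

	/* "fill array" loop: store one entry per level, then decrement,
	 * so leaf always names the first unused slot. */
	for (level = MAX_LEVEL; level >= 1; level--) {
		sptes[leaf - 1] = 0x1000ULL * level;	/* stand-in value */
		leaf--;
	}

	/* "print sptes" loop: the filled slots are sptes[leaf]..sptes[root - 1],
	 * so the dump stops as soon as root reaches leaf. */
	while (root > leaf) {
		printf("spte 0x%llx level %d\n", sptes[root - 1], root);
		root--;
	}
	return 0;
}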
From efbb288afc2f3079fa5e9308f4d9d06a390babdc Mon Sep 17 00:00:00 2001
From: Alexander Kuleshov
Date: Sun, 6 Sep 2015 19:35:41 +0600
Subject: kvm: compile process_smi_save_seg_64() only for x86_64
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The process_smi_save_seg_64() function is called from
process_smi_save_state_64() only if CONFIG_X86_64 is set.

This patch adds #ifdef CONFIG_X86_64 around process_smi_save_seg_64()
to prevent the following warning:

arch/x86/kvm/x86.c:5946:13: warning: ‘process_smi_save_seg_64’ defined but not used [-Wunused-function]
 static void process_smi_save_seg_64(struct kvm_vcpu *vcpu, char *buf, int n)
             ^

Signed-off-by: Alexander Kuleshov
Signed-off-by: Paolo Bonzini
---
 arch/x86/kvm/x86.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'arch/x86')

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c7b6aed998e9..13728dbdbb56 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5955,6 +5955,7 @@ static void process_smi_save_seg_32(struct kvm_vcpu *vcpu, char *buf, int n)
 	put_smstate(u32, buf, offset, process_smi_get_segment_flags(&seg));
 }
 
+#ifdef CONFIG_X86_64
 static void process_smi_save_seg_64(struct kvm_vcpu *vcpu, char *buf, int n)
 {
 	struct kvm_segment seg;
@@ -5970,6 +5971,7 @@ static void process_smi_save_seg_64(struct kvm_vcpu *vcpu, char *buf, int n)
 	put_smstate(u32, buf, offset + 4, seg.limit);
 	put_smstate(u64, buf, offset + 8, seg.base);
 }
+#endif
 
 static void process_smi_save_state_32(struct kvm_vcpu *vcpu, char *buf)
 {
--
cgit v1.2.3

From e8dd2d2d641cb2724ee10e76c0ad02e04289c017 Mon Sep 17 00:00:00 2001
From: Valdis Kletnieks
Date: Sat, 29 Aug 2015 17:49:16 -0400
Subject: Silence compiler warning in arch/x86/kvm/emulate.c

Compiler warning:

  CC [M]  arch/x86/kvm/emulate.o
arch/x86/kvm/emulate.c: In function "__do_insn_fetch_bytes":
arch/x86/kvm/emulate.c:814:9: warning: "linear" may be used uninitialized in this function [-Wmaybe-uninitialized]

GCC is smart enough to realize that the inlined __linearize may return
before setting the value of linear, but not smart enough to realize
that the same X86EMUL_CONTINUE check blocks any actual use of the value.
However, 'linear' can only ever be set to one value, so hoisting that
one line of code upwards makes GCC happy with the code.

Reported-by: Aruna Hewapathirane
Tested-by: Aruna Hewapathirane
Signed-off-by: Valdis Kletnieks
Signed-off-by: Paolo Bonzini
---
 arch/x86/kvm/emulate.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/x86')

diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index e7a4fde5d631..b372a7557c16 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -650,6 +650,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
 	u16 sel;
 
 	la = seg_base(ctxt, addr.seg) + addr.ea;
+	*linear = la;
 	*max_size = 0;
 	switch (mode) {
 	case X86EMUL_MODE_PROT64:
@@ -693,7 +694,6 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
 	}
 	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
 		return emulate_gp(ctxt, 0);
-	*linear = la;
 	return X86EMUL_CONTINUE;
 bad:
 	if (addr.seg == VCPU_SREG_SS)
--
cgit v1.2.3
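
For readers unfamiliar with this class of warning, below is a small,
self-contained illustration of the pattern being fixed.  It is not the
emulator code; the function name, parameters, and return codes are made
up.  GCC warns at the call site when it sees a path on which an
out-parameter might not be written before the function returns, even if
the error return value means the caller never reads it; storing the
output unconditionally near the top of the function, as the patch does
with *linear, removes that path without changing behaviour.

#include <stdio.h>

#define EXAMPLE_CONTINUE 0	/* invented stand-in for X86EMUL_CONTINUE */
#define EXAMPLE_FAILED   1	/* invented error return */

static int linearize_example(int in, int *out)
{
	int val = in * 2;

	*out = val;			/* hoisted store: output always initialized */
	if (in < 0)
		return EXAMPLE_FAILED;	/* early exit no longer leaves *out unset */
	return EXAMPLE_CONTINUE;
}

int main(void)
{
	int result;

	/* The caller reads "result" only when the call reports success. */
	if (linearize_example(21, &result) == EXAMPLE_CONTINUE)
		printf("result = %d\n", result);
	return 0;
}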