author | Rodrigo Vivi <rodrigo.vivi@intel.com> | 2020-02-25 20:29:58 +0300
committer | Rodrigo Vivi <rodrigo.vivi@intel.com> | 2020-02-25 20:39:23 +0300
commit | ff36e78fdb251b9fa65028554689806961e011eb (patch)
tree | f5af925d509224e06a10936196be6c06bcbdc6ae /arch/x86/kvm/x86.h
parent | 143d9c3e7b6aa2b785abba04266ed75f2b52e94a (diff)
parent | 1b245ec5b685ebf8e6e5d1e6b5bcc03b6608e8b0 (diff)
download | linux-ff36e78fdb251b9fa65028554689806961e011eb.tar.xz
Merge drm/drm-next into drm-intel-next-queued
Some DSI and VBT pending patches from Hans will apply
cleanly and with less ugly conflicts if they are rebuilt
on top of other patches that recently landed on drm-next.
Reference: https://patchwork.freedesktop.org/series/70952/
Cc: Hans de Goede <hdegoede@redhat.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Diffstat (limited to 'arch/x86/kvm/x86.h')
-rw-r--r-- | arch/x86/kvm/x86.h | 23
1 file changed, 9 insertions, 14 deletions
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 29391af8871d..3624665acee4 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -144,11 +144,6 @@ static inline bool is_pae_paging(struct kvm_vcpu *vcpu)
 	return !is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu);
 }
 
-static inline u32 bit(int bitno)
-{
-	return 1 << (bitno & 31);
-}
-
 static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
 {
 	return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
@@ -166,21 +161,13 @@ static inline u64 get_canonical(u64 la, u8 vaddr_bits)
 
 static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
 {
-#ifdef CONFIG_X86_64
 	return get_canonical(la, vcpu_virt_addr_bits(vcpu)) != la;
-#else
-	return false;
-#endif
 }
 
 static inline bool emul_is_noncanonical_address(u64 la,
						struct x86_emulate_ctxt *ctxt)
 {
-#ifdef CONFIG_X86_64
 	return get_canonical(la, ctxt_virt_addr_bits(ctxt)) != la;
-#else
-	return false;
-#endif
 }
 
 static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
@@ -289,8 +276,9 @@ int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
 bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
					  int page_num);
 bool kvm_vector_hashing_enabled(void);
-int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
+int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			    int emulation_type, void *insn, int insn_len);
+enum exit_fastpath_completion handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);
 
 #define KVM_SUPPORTED_XCR0	(XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
				| XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
@@ -369,7 +357,14 @@ static inline bool kvm_pat_valid(u64 data)
 	return (data | ((data & 0x0202020202020202ull) << 1)) == data;
 }
 
+static inline bool kvm_dr7_valid(u64 data)
+{
+	/* Bits [63:32] are reserved */
+	return !(data >> 32);
+}
+
 void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
 void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
+u64 kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu);
 
 #endif
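Side note on the canonical-address helpers touched above: get_canonical() sign-extends a linear address from the implemented virtual-address width (48 bits, or 57 with LA57), and is_noncanonical_address() just checks whether that sign extension changes the value. Below is a minimal userspace sketch of the same check, assuming an ordinary hosted C compiler; the canonical()/noncanonical() names and the sample addresses are illustrative only and are not kernel code.

/* Standalone illustration of the canonical-address check used by
 * is_noncanonical_address()/emul_is_noncanonical_address() above. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Sign-extend 'la' from 'vaddr_bits' implemented bits, mirroring the
 * shift-up/arithmetic-shift-down shape of the kernel's get_canonical(). */
static uint64_t canonical(uint64_t la, unsigned int vaddr_bits)
{
	int shift = 64 - vaddr_bits;
	return (uint64_t)((int64_t)(la << shift) >> shift);
}

/* Non-canonical means bits [63:vaddr_bits] are not a sign extension
 * of bit (vaddr_bits - 1). */
static bool noncanonical(uint64_t la, unsigned int vaddr_bits)
{
	return canonical(la, vaddr_bits) != la;
}

int main(void)
{
	/* With 48 implemented bits the non-canonical hole spans
	 * 0x0000800000000000 .. 0xffff7fffffffffff. */
	printf("%d\n", noncanonical(0x00007fffffffffffULL, 48)); /* 0: canonical */
	printf("%d\n", noncanonical(0x0000800000000000ULL, 48)); /* 1: non-canonical */
	printf("%d\n", noncanonical(0xffff800000000000ULL, 48)); /* 0: canonical */
	return 0;
}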