Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--	arch/x86/kernel/alternative.c		|   2
-rw-r--r--	arch/x86/kernel/asm-offsets.c		|  12
-rw-r--r--	arch/x86/kernel/asm-offsets_64.c	|   7
-rw-r--r--	arch/x86/kernel/cpu/common.c		|   2
-rw-r--r--	arch/x86/kernel/cpu/vmware.c		|   4
-rw-r--r--	arch/x86/kernel/kvm.c			|  19
-rw-r--r--	arch/x86/kernel/kvmclock.c		|   4
-rw-r--r--	arch/x86/kernel/paravirt-spinlocks.c	|  15
-rw-r--r--	arch/x86/kernel/paravirt.c		| 290
-rw-r--r--	arch/x86/kernel/paravirt_patch_32.c	|  48
-rw-r--r--	arch/x86/kernel/paravirt_patch_64.c	|  56
-rw-r--r--	arch/x86/kernel/tsc.c			|   2
-rw-r--r--	arch/x86/kernel/vsmp_64.c		|  18
13 files changed, 230 insertions, 249 deletions
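Every hunk below performs the same mechanical conversion: the separate pv_init_ops, pv_time_ops, pv_cpu_ops, pv_irq_ops, pv_mmu_ops and pv_lock_ops instances go away, and their fields are reached through a single pv_ops instance of struct paravirt_patch_template. A rough sketch of the resulting layout, reduced to two of the six groups and with the kernel typedefs simplified (the full initializer is in the paravirt.c hunk below):

/* Sketch only -- the real sub-structures carry many more members. */
struct pv_init_ops {
	unsigned int (*patch)(unsigned char type, void *insnbuf,
			      unsigned long addr, unsigned int len);
};

struct pv_time_ops {
	unsigned long long (*sched_clock)(void);
	unsigned long long (*steal_clock)(int cpu);
};

struct paravirt_patch_template {
	struct pv_init_ops init;
	struct pv_time_ops time;
	/* struct pv_cpu_ops cpu; struct pv_irq_ops irq;
	 * struct pv_mmu_ops mmu; struct pv_lock_ops lock; ...
	 */
};

extern struct paravirt_patch_template pv_ops;

/* Call sites change accordingly: pv_time_ops.sched_clock()
 * becomes pv_ops.time.sched_clock(), pv_cpu_ops.iret becomes
 * pv_ops.cpu.iret, and so on.
 */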
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 9e755e1cfe92..22e4da4ae8b9 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -594,7 +594,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
 		BUG_ON(p->len > MAX_PATCH_LEN);
 		/* prep the buffer with the original instructions */
 		memcpy(insnbuf, p->instr, p->len);
-		used = pv_init_ops.patch(p->instrtype, insnbuf,
+		used = pv_ops.init.patch(p->instrtype, insnbuf,
 					 (unsigned long)p->instr, p->len);
 
 		BUG_ON(used > p->len);
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index 01de31db300d..46d14498e214 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -66,13 +66,11 @@ void common(void) {
 
 #ifdef CONFIG_PARAVIRT
 	BLANK();
-	OFFSET(PARAVIRT_PATCH_pv_cpu_ops, paravirt_patch_template, pv_cpu_ops);
-	OFFSET(PARAVIRT_PATCH_pv_irq_ops, paravirt_patch_template, pv_irq_ops);
-	OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable);
-	OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable);
-	OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
-	OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
-	OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
+	OFFSET(PV_IRQ_irq_disable, paravirt_patch_template, irq.irq_disable);
+	OFFSET(PV_IRQ_irq_enable, paravirt_patch_template, irq.irq_enable);
+	OFFSET(PV_CPU_iret, paravirt_patch_template, cpu.iret);
+	OFFSET(PV_CPU_read_cr0, paravirt_patch_template, cpu.read_cr0);
+	OFFSET(PV_MMU_read_cr2, paravirt_patch_template, mmu.read_cr2);
 #endif
 
 #ifdef CONFIG_XEN
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index 3b9405e7ba2b..3384b03e717f 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -21,10 +21,11 @@ static char syscalls_ia32[] = {
 int main(void)
 {
 #ifdef CONFIG_PARAVIRT
-	OFFSET(PV_CPU_usergs_sysret64, pv_cpu_ops, usergs_sysret64);
-	OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
+	OFFSET(PV_CPU_usergs_sysret64, paravirt_patch_template,
+	       cpu.usergs_sysret64);
+	OFFSET(PV_CPU_swapgs, paravirt_patch_template, cpu.swapgs);
 #ifdef CONFIG_DEBUG_ENTRY
-	OFFSET(PV_IRQ_save_fl, pv_irq_ops, save_fl);
+	OFFSET(PV_IRQ_save_fl, paravirt_patch_template, irq.save_fl);
 #endif
 	BLANK();
 #endif
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 44c4ef3d989b..c4da9c23a96c 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1243,7 +1243,7 @@ static void generic_identify(struct cpuinfo_x86 *c)
 # ifdef CONFIG_PARAVIRT
 	do {
 		extern void native_iret(void);
-		if (pv_cpu_ops.iret == native_iret)
+		if (pv_ops.cpu.iret == native_iret)
 			set_cpu_bug(c, X86_BUG_ESPFIX);
 	} while (0);
 # else
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index 8e005329648b..d9ab49bed8af 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -97,14 +97,14 @@ static void __init vmware_sched_clock_setup(void)
 	d->cyc2ns_offset = mul_u64_u32_shr(tsc_now, d->cyc2ns_mul,
 					   d->cyc2ns_shift);
 
-	pv_time_ops.sched_clock = vmware_sched_clock;
+	pv_ops.time.sched_clock = vmware_sched_clock;
 	pr_info("using sched offset of %llu ns\n", d->cyc2ns_offset);
 }
 
 static void __init vmware_paravirt_ops_setup(void)
 {
 	pv_info.name = "VMware hypervisor";
-	pv_cpu_ops.io_delay = paravirt_nop;
+	pv_ops.cpu.io_delay = paravirt_nop;
 
 	if (vmware_tsc_khz && vmw_sched_clock)
 		vmware_sched_clock_setup();
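The vmware.c hunk above is representative of how guest-side setup code changes: instead of assigning into several global ops structures, it fills in slots of the one pv_ops instance. A hypothetical setup hook following the same pattern would look like this (the my_hv_* names are illustrative only, not part of the patch):

#include <linux/init.h>
#include <asm/msr.h>
#include <asm/paravirt.h>

static unsigned long long my_hv_sched_clock(void)	/* illustrative */
{
	return rdtsc();		/* placeholder clock source */
}

static void __init my_hv_paravirt_setup(void)		/* illustrative */
{
	pv_info.name = "example hypervisor";

	/* Same slots VMware/KVM assign above, via the new pv_ops layout. */
	pv_ops.time.sched_clock = my_hv_sched_clock;
	pv_ops.cpu.io_delay = paravirt_nop;
}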
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index d9b71924c23c..ba4bfb7f6a36 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -283,7 +283,7 @@ static void __init paravirt_ops_setup(void)
 	pv_info.name = "KVM";
 
 	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
-		pv_cpu_ops.io_delay = kvm_io_delay;
+		pv_ops.cpu.io_delay = kvm_io_delay;
 
 #ifdef CONFIG_X86_IO_APIC
 	no_timer_check = 1;
@@ -632,14 +632,14 @@ static void __init kvm_guest_init(void)
 
 	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
 		has_steal_clock = 1;
-		pv_time_ops.steal_clock = kvm_steal_clock;
+		pv_ops.time.steal_clock = kvm_steal_clock;
 	}
 
 	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
 	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
 	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
-		pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others;
-		pv_mmu_ops.tlb_remove_table = tlb_remove_table;
+		pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
+		pv_ops.mmu.tlb_remove_table = tlb_remove_table;
 	}
 
 	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
@@ -850,13 +850,14 @@ void __init kvm_spinlock_init(void)
 		return;
 
 	__pv_init_lock_hash();
-	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
-	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
-	pv_lock_ops.wait = kvm_wait;
-	pv_lock_ops.kick = kvm_kick_cpu;
+	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
+	pv_ops.lock.queued_spin_unlock =
+		PV_CALLEE_SAVE(__pv_queued_spin_unlock);
+	pv_ops.lock.wait = kvm_wait;
+	pv_ops.lock.kick = kvm_kick_cpu;
 
 	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
-		pv_lock_ops.vcpu_is_preempted =
+		pv_ops.lock.vcpu_is_preempted =
 			PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
 	}
 }
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 1e6764648af3..a36b93a722a2 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -116,13 +116,13 @@ static u64 kvm_sched_clock_read(void)
 static inline void kvm_sched_clock_init(bool stable)
 {
 	if (!stable) {
-		pv_time_ops.sched_clock = kvm_clock_read;
+		pv_ops.time.sched_clock = kvm_clock_read;
 		clear_sched_clock_stable();
 		return;
 	}
 
 	kvm_sched_clock_offset = kvm_clock_read();
-	pv_time_ops.sched_clock = kvm_sched_clock_read;
+	pv_ops.time.sched_clock = kvm_sched_clock_read;
 
 	pr_info("kvm-clock: using sched offset of %llu cycles",
 		kvm_sched_clock_offset);
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 71f2d1125ec0..4f75d0cf6305 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -17,7 +17,7 @@ PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);
 
 bool pv_is_native_spin_unlock(void)
 {
-	return pv_lock_ops.queued_spin_unlock.func ==
+	return pv_ops.lock.queued_spin_unlock.func ==
 		__raw_callee_save___native_queued_spin_unlock;
 }
 
@@ -29,17 +29,6 @@ PV_CALLEE_SAVE_REGS_THUNK(__native_vcpu_is_preempted);
 
 bool pv_is_native_vcpu_is_preempted(void)
 {
-	return pv_lock_ops.vcpu_is_preempted.func ==
+	return pv_ops.lock.vcpu_is_preempted.func ==
 		__raw_callee_save___native_vcpu_is_preempted;
 }
-
-struct pv_lock_ops pv_lock_ops = {
-#ifdef CONFIG_SMP
-	.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
-	.queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
-	.wait = paravirt_nop,
-	.kick = paravirt_nop,
-	.vcpu_is_preempted = PV_CALLEE_SAVE(__native_vcpu_is_preempted),
-#endif /* SMP */
-};
-EXPORT_SYMBOL(pv_lock_ops);
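The paravirt-spinlocks.c hunk keeps the two "is this op still native?" helpers and only changes where they look: pv_ops.lock instead of pv_lock_ops. The underlying idea is a plain pointer comparison against the native implementation; a stripped-down model (ignoring the PV_CALLEE_SAVE thunk machinery) is:

/* Stripped-down model of the pv_is_native_* checks -- not kernel code. */
struct lock_ops {
	void (*queued_spin_unlock)(void *lock);
};

static void native_unlock(void *lock) { /* ... */ }

static struct lock_ops lock = {
	.queued_spin_unlock = native_unlock,
};

static int is_native_spin_unlock(void)
{
	/* Still true until a guest (e.g. kvm_spinlock_init() above)
	 * installs its own implementation in the slot.
	 */
	return lock.queued_spin_unlock == native_unlock;
}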
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 287d34513f6a..0ca0576c88ff 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -128,29 +128,14 @@ void __init native_pv_lock_init(void)
 		static_branch_disable(&virt_spin_lock_key);
 }
 
-/*
- * Neat trick to map patch type back to the call within the
- * corresponding structure.
- */
-static void *get_call_destination(u8 type)
-{
-	struct paravirt_patch_template tmpl = {
-		.pv_init_ops = pv_init_ops,
-		.pv_time_ops = pv_time_ops,
-		.pv_cpu_ops = pv_cpu_ops,
-		.pv_irq_ops = pv_irq_ops,
-		.pv_mmu_ops = pv_mmu_ops,
-#ifdef CONFIG_PARAVIRT_SPINLOCKS
-		.pv_lock_ops = pv_lock_ops,
-#endif
-	};
-	return *((void **)&tmpl + type);
-}
-
 unsigned paravirt_patch_default(u8 type, void *insnbuf,
 				unsigned long addr, unsigned len)
 {
-	void *opfunc = get_call_destination(type);
+	/*
+	 * Neat trick to map patch type back to the call within the
+	 * corresponding structure.
+	 */
+	void *opfunc = *((void **)&pv_ops + type);
 	unsigned ret;
 
 	if (opfunc == NULL)
@@ -165,8 +150,8 @@ unsigned paravirt_patch_default(u8 type, void *insnbuf,
 	else if (opfunc == _paravirt_ident_64)
 		ret = paravirt_patch_ident_64(insnbuf, len);
 
-	else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
-		 type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret64))
+	else if (type == PARAVIRT_PATCH(cpu.iret) ||
+		 type == PARAVIRT_PATCH(cpu.usergs_sysret64))
 		/* If operation requires a jmp, then jmp */
 		ret = paravirt_patch_jmp(insnbuf, opfunc, addr, len);
 	else
@@ -316,77 +301,6 @@ struct pv_info pv_info = {
 #endif
 };
 
-struct pv_init_ops pv_init_ops = {
-	.patch = native_patch,
-};
-
-struct pv_time_ops pv_time_ops = {
-	.sched_clock = native_sched_clock,
-	.steal_clock = native_steal_clock,
-};
-
-__visible struct pv_irq_ops pv_irq_ops = {
-	.save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
-	.restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
-	.irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
-	.irq_enable = __PV_IS_CALLEE_SAVE(native_irq_enable),
-	.safe_halt = native_safe_halt,
-	.halt = native_halt,
-};
-
-__visible struct pv_cpu_ops pv_cpu_ops = {
-	.cpuid = native_cpuid,
-	.get_debugreg = native_get_debugreg,
-	.set_debugreg = native_set_debugreg,
-	.read_cr0 = native_read_cr0,
-	.write_cr0 = native_write_cr0,
-	.write_cr4 = native_write_cr4,
-#ifdef CONFIG_X86_64
-	.read_cr8 = native_read_cr8,
-	.write_cr8 = native_write_cr8,
-#endif
-	.wbinvd = native_wbinvd,
-	.read_msr = native_read_msr,
-	.write_msr = native_write_msr,
-	.read_msr_safe = native_read_msr_safe,
-	.write_msr_safe = native_write_msr_safe,
-	.read_pmc = native_read_pmc,
-	.load_tr_desc = native_load_tr_desc,
-	.set_ldt = native_set_ldt,
-	.load_gdt = native_load_gdt,
-	.load_idt = native_load_idt,
-	.store_tr = native_store_tr,
-	.load_tls = native_load_tls,
-#ifdef CONFIG_X86_64
-	.load_gs_index = native_load_gs_index,
-#endif
-	.write_ldt_entry = native_write_ldt_entry,
-	.write_gdt_entry = native_write_gdt_entry,
-	.write_idt_entry = native_write_idt_entry,
-
-	.alloc_ldt = paravirt_nop,
-	.free_ldt = paravirt_nop,
-
-	.load_sp0 = native_load_sp0,
-
-#ifdef CONFIG_X86_64
-	.usergs_sysret64 = native_usergs_sysret64,
-#endif
-	.iret = native_iret,
-	.swapgs = native_swapgs,
-
-	.set_iopl_mask = native_set_iopl_mask,
-	.io_delay = native_io_delay,
-
-	.start_context_switch = paravirt_nop,
-	.end_context_switch = paravirt_nop,
-};
-
-/* At this point, native_get/set_debugreg has real function entries */
-NOKPROBE_SYMBOL(native_get_debugreg);
-NOKPROBE_SYMBOL(native_set_debugreg);
-NOKPROBE_SYMBOL(native_load_idt);
-
 #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
 /* 32-bit pagetable entries */
 #define PTE_IDENT	__PV_IS_CALLEE_SAVE(_paravirt_ident_32)
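The hunk above moves the "neat trick" comment next to the lookup it describes: pv_ops now behaves as a flat array of pointer-sized slots, and the patch type is simply an index into it. A self-contained user-space model of that lookup, with PATCH_INDEX standing in for the kernel's PARAVIRT_PATCH() and assuming every slot is pointer-sized (which is what the real code relies on):

#include <stddef.h>
#include <stdio.h>

/* Toy template whose members are all pointer-sized, mirroring pv_ops. */
struct ops_template {
	void (*first)(void);
	void (*second)(void);
	void (*third)(void);
};

static void first(void)  { puts("first");  }
static void second(void) { puts("second"); }
static void third(void)  { puts("third");  }

static struct ops_template ops = { first, second, third };

/* Member index; stands in for the kernel's PARAVIRT_PATCH() calculation. */
#define PATCH_INDEX(member) \
	(offsetof(struct ops_template, member) / sizeof(void *))

int main(void)
{
	unsigned int type = PATCH_INDEX(second);

	/* The lookup paravirt_patch_default() now performs on &pv_ops.
	 * (Relies on function and data pointers being interchangeable,
	 * as the kernel does on x86.)
	 */
	void *opfunc = *((void **)&ops + type);

	((void (*)(void))opfunc)();	/* prints "second" */
	return 0;
}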
@@ -395,85 +309,163 @@ NOKPROBE_SYMBOL(native_load_idt);
 #define PTE_IDENT	__PV_IS_CALLEE_SAVE(_paravirt_ident_64)
 #endif
 
-struct pv_mmu_ops pv_mmu_ops __ro_after_init = {
-
-	.read_cr2 = native_read_cr2,
-	.write_cr2 = native_write_cr2,
-	.read_cr3 = __native_read_cr3,
-	.write_cr3 = native_write_cr3,
-
-	.flush_tlb_user = native_flush_tlb,
-	.flush_tlb_kernel = native_flush_tlb_global,
-	.flush_tlb_one_user = native_flush_tlb_one_user,
-	.flush_tlb_others = native_flush_tlb_others,
-	.tlb_remove_table = (void (*)(struct mmu_gather *, void *))tlb_remove_page,
-
-	.pgd_alloc = __paravirt_pgd_alloc,
-	.pgd_free = paravirt_nop,
+struct paravirt_patch_template pv_ops = {
+	/* Init ops. */
+	.init.patch = native_patch,
+
+	/* Time ops. */
+	.time.sched_clock = native_sched_clock,
+	.time.steal_clock = native_steal_clock,
+
+	/* Cpu ops. */
+	.cpu.cpuid = native_cpuid,
+	.cpu.get_debugreg = native_get_debugreg,
+	.cpu.set_debugreg = native_set_debugreg,
+	.cpu.read_cr0 = native_read_cr0,
+	.cpu.write_cr0 = native_write_cr0,
+	.cpu.write_cr4 = native_write_cr4,
+#ifdef CONFIG_X86_64
+	.cpu.read_cr8 = native_read_cr8,
+	.cpu.write_cr8 = native_write_cr8,
+#endif
+	.cpu.wbinvd = native_wbinvd,
+	.cpu.read_msr = native_read_msr,
+	.cpu.write_msr = native_write_msr,
+	.cpu.read_msr_safe = native_read_msr_safe,
+	.cpu.write_msr_safe = native_write_msr_safe,
+	.cpu.read_pmc = native_read_pmc,
+	.cpu.load_tr_desc = native_load_tr_desc,
+	.cpu.set_ldt = native_set_ldt,
+	.cpu.load_gdt = native_load_gdt,
+	.cpu.load_idt = native_load_idt,
+	.cpu.store_tr = native_store_tr,
+	.cpu.load_tls = native_load_tls,
+#ifdef CONFIG_X86_64
+	.cpu.load_gs_index = native_load_gs_index,
+#endif
+	.cpu.write_ldt_entry = native_write_ldt_entry,
+	.cpu.write_gdt_entry = native_write_gdt_entry,
+	.cpu.write_idt_entry = native_write_idt_entry,
 
-	.alloc_pte = paravirt_nop,
-	.alloc_pmd = paravirt_nop,
-	.alloc_pud = paravirt_nop,
-	.alloc_p4d = paravirt_nop,
-	.release_pte = paravirt_nop,
-	.release_pmd = paravirt_nop,
-	.release_pud = paravirt_nop,
-	.release_p4d = paravirt_nop,
+	.cpu.alloc_ldt = paravirt_nop,
+	.cpu.free_ldt = paravirt_nop,
 
-	.set_pte = native_set_pte,
-	.set_pte_at = native_set_pte_at,
-	.set_pmd = native_set_pmd,
+	.cpu.load_sp0 = native_load_sp0,
 
-	.ptep_modify_prot_start = __ptep_modify_prot_start,
-	.ptep_modify_prot_commit = __ptep_modify_prot_commit,
+#ifdef CONFIG_X86_64
+	.cpu.usergs_sysret64 = native_usergs_sysret64,
+#endif
+	.cpu.iret = native_iret,
+	.cpu.swapgs = native_swapgs,
+
+	.cpu.set_iopl_mask = native_set_iopl_mask,
+	.cpu.io_delay = native_io_delay,
+
+	.cpu.start_context_switch = paravirt_nop,
+	.cpu.end_context_switch = paravirt_nop,
+
+	/* Irq ops. */
+	.irq.save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
+	.irq.restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
+	.irq.irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
+	.irq.irq_enable = __PV_IS_CALLEE_SAVE(native_irq_enable),
+	.irq.safe_halt = native_safe_halt,
+	.irq.halt = native_halt,
+
+	/* Mmu ops. */
+	.mmu.read_cr2 = native_read_cr2,
+	.mmu.write_cr2 = native_write_cr2,
+	.mmu.read_cr3 = __native_read_cr3,
+	.mmu.write_cr3 = native_write_cr3,
+
+	.mmu.flush_tlb_user = native_flush_tlb,
+	.mmu.flush_tlb_kernel = native_flush_tlb_global,
+	.mmu.flush_tlb_one_user = native_flush_tlb_one_user,
+	.mmu.flush_tlb_others = native_flush_tlb_others,
+	.mmu.tlb_remove_table =
+		(void (*)(struct mmu_gather *, void *))tlb_remove_page,
+
+	.mmu.pgd_alloc = __paravirt_pgd_alloc,
+	.mmu.pgd_free = paravirt_nop,
+
+	.mmu.alloc_pte = paravirt_nop,
+	.mmu.alloc_pmd = paravirt_nop,
+	.mmu.alloc_pud = paravirt_nop,
+	.mmu.alloc_p4d = paravirt_nop,
+	.mmu.release_pte = paravirt_nop,
+	.mmu.release_pmd = paravirt_nop,
+	.mmu.release_pud = paravirt_nop,
+	.mmu.release_p4d = paravirt_nop,
+
+	.mmu.set_pte = native_set_pte,
+	.mmu.set_pte_at = native_set_pte_at,
+	.mmu.set_pmd = native_set_pmd,
+
+	.mmu.ptep_modify_prot_start = __ptep_modify_prot_start,
+	.mmu.ptep_modify_prot_commit = __ptep_modify_prot_commit,
 
 #if CONFIG_PGTABLE_LEVELS >= 3
 #ifdef CONFIG_X86_PAE
-	.set_pte_atomic = native_set_pte_atomic,
-	.pte_clear = native_pte_clear,
-	.pmd_clear = native_pmd_clear,
+	.mmu.set_pte_atomic = native_set_pte_atomic,
+	.mmu.pte_clear = native_pte_clear,
+	.mmu.pmd_clear = native_pmd_clear,
 #endif
-	.set_pud = native_set_pud,
+	.mmu.set_pud = native_set_pud,
 
-	.pmd_val = PTE_IDENT,
-	.make_pmd = PTE_IDENT,
+	.mmu.pmd_val = PTE_IDENT,
+	.mmu.make_pmd = PTE_IDENT,
 
 #if CONFIG_PGTABLE_LEVELS >= 4
-	.pud_val = PTE_IDENT,
-	.make_pud = PTE_IDENT,
+	.mmu.pud_val = PTE_IDENT,
+	.mmu.make_pud = PTE_IDENT,
 
-	.set_p4d = native_set_p4d,
+	.mmu.set_p4d = native_set_p4d,
 
 #if CONFIG_PGTABLE_LEVELS >= 5
-	.p4d_val = PTE_IDENT,
-	.make_p4d = PTE_IDENT,
+	.mmu.p4d_val = PTE_IDENT,
+	.mmu.make_p4d = PTE_IDENT,
 
-	.set_pgd = native_set_pgd,
+	.mmu.set_pgd = native_set_pgd,
 
 #endif	/* CONFIG_PGTABLE_LEVELS >= 5 */
 #endif	/* CONFIG_PGTABLE_LEVELS >= 4 */
 #endif	/* CONFIG_PGTABLE_LEVELS >= 3 */
 
-	.pte_val = PTE_IDENT,
-	.pgd_val = PTE_IDENT,
+	.mmu.pte_val = PTE_IDENT,
+	.mmu.pgd_val = PTE_IDENT,
 
-	.make_pte = PTE_IDENT,
-	.make_pgd = PTE_IDENT,
+	.mmu.make_pte = PTE_IDENT,
+	.mmu.make_pgd = PTE_IDENT,
 
-	.dup_mmap = paravirt_nop,
-	.exit_mmap = paravirt_nop,
-	.activate_mm = paravirt_nop,
+	.mmu.dup_mmap = paravirt_nop,
+	.mmu.exit_mmap = paravirt_nop,
+	.mmu.activate_mm = paravirt_nop,
 
-	.lazy_mode = {
-		.enter = paravirt_nop,
-		.leave = paravirt_nop,
-		.flush = paravirt_nop,
+	.mmu.lazy_mode = {
+		.enter = paravirt_nop,
+		.leave = paravirt_nop,
+		.flush = paravirt_nop,
 	},
 
-	.set_fixmap = native_set_fixmap,
+	.mmu.set_fixmap = native_set_fixmap,
+
+#if defined(CONFIG_PARAVIRT_SPINLOCKS)
+	/* Lock ops. */
+#ifdef CONFIG_SMP
+	.lock.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
+	.lock.queued_spin_unlock =
+		PV_CALLEE_SAVE(__native_queued_spin_unlock),
+	.lock.wait = paravirt_nop,
+	.lock.kick = paravirt_nop,
+	.lock.vcpu_is_preempted =
+		PV_CALLEE_SAVE(__native_vcpu_is_preempted),
+#endif /* SMP */
+#endif
 };
-EXPORT_SYMBOL_GPL(pv_time_ops);
-EXPORT_SYMBOL	(pv_cpu_ops);
-EXPORT_SYMBOL	(pv_mmu_ops);
+/* At this point, native_get/set_debugreg has real function entries */
+NOKPROBE_SYMBOL(native_get_debugreg);
+NOKPROBE_SYMBOL(native_set_debugreg);
+NOKPROBE_SYMBOL(native_load_idt);
+
+EXPORT_SYMBOL_GPL(pv_ops);
 EXPORT_SYMBOL_GPL(pv_info);
-EXPORT_SYMBOL	(pv_irq_ops);
diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c
index e5c3a438149e..026fa43e9261 100644
--- a/arch/x86/kernel/paravirt_patch_32.c
+++ b/arch/x86/kernel/paravirt_patch_32.c
@@ -1,18 +1,18 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <asm/paravirt.h>
 
-DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
-DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
-DEF_NATIVE(pv_irq_ops, restore_fl, "push %eax; popf");
-DEF_NATIVE(pv_irq_ops, save_fl, "pushf; pop %eax");
-DEF_NATIVE(pv_cpu_ops, iret, "iret");
-DEF_NATIVE(pv_mmu_ops, read_cr2, "mov %cr2, %eax");
-DEF_NATIVE(pv_mmu_ops, write_cr3, "mov %eax, %cr3");
-DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
+DEF_NATIVE(irq, irq_disable, "cli");
+DEF_NATIVE(irq, irq_enable, "sti");
+DEF_NATIVE(irq, restore_fl, "push %eax; popf");
+DEF_NATIVE(irq, save_fl, "pushf; pop %eax");
+DEF_NATIVE(cpu, iret, "iret");
+DEF_NATIVE(mmu, read_cr2, "mov %cr2, %eax");
+DEF_NATIVE(mmu, write_cr3, "mov %eax, %cr3");
+DEF_NATIVE(mmu, read_cr3, "mov %cr3, %eax");
 
 #if defined(CONFIG_PARAVIRT_SPINLOCKS)
-DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%eax)");
-DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "xor %eax, %eax");
+DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%eax)");
+DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax");
 #endif
 
 unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
@@ -41,27 +41,27 @@ unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len)
 		end = end_##ops##_##x;		\
 		goto patch_site
 	switch (type) {
-		PATCH_SITE(pv_irq_ops, irq_disable);
-		PATCH_SITE(pv_irq_ops, irq_enable);
-		PATCH_SITE(pv_irq_ops, restore_fl);
-		PATCH_SITE(pv_irq_ops, save_fl);
-		PATCH_SITE(pv_cpu_ops, iret);
-		PATCH_SITE(pv_mmu_ops, read_cr2);
-		PATCH_SITE(pv_mmu_ops, read_cr3);
-		PATCH_SITE(pv_mmu_ops, write_cr3);
+		PATCH_SITE(irq, irq_disable);
+		PATCH_SITE(irq, irq_enable);
+		PATCH_SITE(irq, restore_fl);
+		PATCH_SITE(irq, save_fl);
+		PATCH_SITE(cpu, iret);
+		PATCH_SITE(mmu, read_cr2);
+		PATCH_SITE(mmu, read_cr3);
+		PATCH_SITE(mmu, write_cr3);
 #if defined(CONFIG_PARAVIRT_SPINLOCKS)
-	case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
+	case PARAVIRT_PATCH(lock.queued_spin_unlock):
 		if (pv_is_native_spin_unlock()) {
-			start = start_pv_lock_ops_queued_spin_unlock;
-			end   = end_pv_lock_ops_queued_spin_unlock;
+			start = start_lock_queued_spin_unlock;
+			end   = end_lock_queued_spin_unlock;
 			goto patch_site;
 		}
 		goto patch_default;
 
-	case PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted):
+	case PARAVIRT_PATCH(lock.vcpu_is_preempted):
 		if (pv_is_native_vcpu_is_preempted()) {
-			start = start_pv_lock_ops_vcpu_is_preempted;
-			end   = end_pv_lock_ops_vcpu_is_preempted;
+			start = start_lock_vcpu_is_preempted;
+			end   = end_lock_vcpu_is_preempted;
 			goto patch_site;
 		}
 		goto patch_default;
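In paravirt_patch_32.c only the first macro argument changes: DEF_NATIVE(irq, irq_disable, "cli") still emits the raw instruction bytes bracketed by start_irq_irq_disable/end_irq_irq_disable symbols, and PATCH_SITE(irq, irq_disable) picks that byte range in the switch. What native_patch() ultimately does with such a range amounts to a bounded copy into the patch buffer; a simplified model of that step (not the kernel's exact helper) is:

#include <string.h>

/* Simplified model of filling one patch site with native instruction
 * bytes.  start/end stand for the start_xxx/end_xxx symbols that
 * DEF_NATIVE() emits around a sequence such as "cli"; insnbuf/len
 * describe the room available at the call site.
 */
static unsigned int patch_native_bytes(void *insnbuf, unsigned int len,
				       const char *start, const char *end)
{
	unsigned int insn_len = end - start;

	if (insn_len > len)	/* native sequence does not fit here */
		return 0;	/* in this model: patch nothing */

	memcpy(insnbuf, start, insn_len);
	return insn_len;	/* bytes actually patched in */
}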
diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
index 835f1985a115..582e893728e8 100644
--- a/arch/x86/kernel/paravirt_patch_64.c
+++ b/arch/x86/kernel/paravirt_patch_64.c
@@ -3,24 +3,24 @@
 #include <asm/asm-offsets.h>
 #include <linux/stringify.h>
 
-DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
-DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
-DEF_NATIVE(pv_irq_ops, restore_fl, "pushq %rdi; popfq");
-DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
-DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
-DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
-DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
-DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
+DEF_NATIVE(irq, irq_disable, "cli");
+DEF_NATIVE(irq, irq_enable, "sti");
+DEF_NATIVE(irq, restore_fl, "pushq %rdi; popfq");
+DEF_NATIVE(irq, save_fl, "pushfq; popq %rax");
+DEF_NATIVE(mmu, read_cr2, "movq %cr2, %rax");
+DEF_NATIVE(mmu, read_cr3, "movq %cr3, %rax");
+DEF_NATIVE(mmu, write_cr3, "movq %rdi, %cr3");
+DEF_NATIVE(cpu, wbinvd, "wbinvd");
 
-DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq");
-DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs");
+DEF_NATIVE(cpu, usergs_sysret64, "swapgs; sysretq");
+DEF_NATIVE(cpu, swapgs, "swapgs");
 
 DEF_NATIVE(, mov32, "mov %edi, %eax");
 DEF_NATIVE(, mov64, "mov %rdi, %rax");
 
 #if defined(CONFIG_PARAVIRT_SPINLOCKS)
-DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%rdi)");
-DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "xor %eax, %eax");
+DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%rdi)");
+DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax");
 #endif
 
 unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
@@ -49,29 +49,29 @@ unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len)
 		end = end_##ops##_##x;		\
 		goto patch_site
 	switch(type) {
-		PATCH_SITE(pv_irq_ops, restore_fl);
-		PATCH_SITE(pv_irq_ops, save_fl);
-		PATCH_SITE(pv_irq_ops, irq_enable);
-		PATCH_SITE(pv_irq_ops, irq_disable);
-		PATCH_SITE(pv_cpu_ops, usergs_sysret64);
-		PATCH_SITE(pv_cpu_ops, swapgs);
-		PATCH_SITE(pv_mmu_ops, read_cr2);
-		PATCH_SITE(pv_mmu_ops, read_cr3);
-		PATCH_SITE(pv_mmu_ops, write_cr3);
-		PATCH_SITE(pv_cpu_ops, wbinvd);
+		PATCH_SITE(irq, restore_fl);
+		PATCH_SITE(irq, save_fl);
+		PATCH_SITE(irq, irq_enable);
+		PATCH_SITE(irq, irq_disable);
+		PATCH_SITE(cpu, usergs_sysret64);
+		PATCH_SITE(cpu, swapgs);
+		PATCH_SITE(mmu, read_cr2);
+		PATCH_SITE(mmu, read_cr3);
+		PATCH_SITE(mmu, write_cr3);
+		PATCH_SITE(cpu, wbinvd);
 #if defined(CONFIG_PARAVIRT_SPINLOCKS)
-	case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
+	case PARAVIRT_PATCH(lock.queued_spin_unlock):
 		if (pv_is_native_spin_unlock()) {
-			start = start_pv_lock_ops_queued_spin_unlock;
-			end   = end_pv_lock_ops_queued_spin_unlock;
+			start = start_lock_queued_spin_unlock;
+			end   = end_lock_queued_spin_unlock;
 			goto patch_site;
 		}
 		goto patch_default;
 
-	case PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted):
+	case PARAVIRT_PATCH(lock.vcpu_is_preempted):
 		if (pv_is_native_vcpu_is_preempted()) {
-			start = start_pv_lock_ops_vcpu_is_preempted;
-			end   = end_pv_lock_ops_vcpu_is_preempted;
+			start = start_lock_vcpu_is_preempted;
+			end   = end_lock_vcpu_is_preempted;
 			goto patch_site;
 		}
 		goto patch_default;
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 1463468ba9a0..9044aa5e2389 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -246,7 +246,7 @@ unsigned long long sched_clock(void)
 
 bool using_native_sched_clock(void)
 {
-	return pv_time_ops.sched_clock == native_sched_clock;
+	return pv_ops.time.sched_clock == native_sched_clock;
 }
 #else
 unsigned long long
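The mov32/mov64 templates above serve the identity ops that the pv_ops initializer assigns through PTE_IDENT: on bare metal, slots such as mmu.pte_val simply return their argument, and paravirt_patch_default() (see the paravirt.c hunk) recognizes _paravirt_ident_64 and patches the call site down to that single instruction. A sketch of such an identity op, with the kernel's u64 replaced by uint64_t:

#include <stdint.h>

/* Identity op: returns its argument unchanged.  With the x86-64 calling
 * convention the argument arrives in %rdi and the return value leaves in
 * %rax, so the whole indirect call can be patched to "mov %rdi, %rax" --
 * the mov64 template defined above.
 */
uint64_t paravirt_ident_64(uint64_t x)
{
	return x;
}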
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
index f194e5e1e95c..789918d78697 100644
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -73,10 +73,10 @@ static unsigned __init vsmp_patch(u8 type, void *ibuf,
 				  unsigned long addr, unsigned len)
 {
 	switch (type) {
-	case PARAVIRT_PATCH(pv_irq_ops.irq_enable):
-	case PARAVIRT_PATCH(pv_irq_ops.irq_disable):
-	case PARAVIRT_PATCH(pv_irq_ops.save_fl):
-	case PARAVIRT_PATCH(pv_irq_ops.restore_fl):
+	case PARAVIRT_PATCH(irq.irq_enable):
+	case PARAVIRT_PATCH(irq.irq_disable):
+	case PARAVIRT_PATCH(irq.save_fl):
+	case PARAVIRT_PATCH(irq.restore_fl):
 		return paravirt_patch_default(type, ibuf, addr, len);
 	default:
 		return native_patch(type, ibuf, addr, len);
@@ -111,11 +111,11 @@ static void __init set_vsmp_pv_ops(void)
 
 	if (cap & ctl & (1 << 4)) {
 		/* Setup irq ops and turn on vSMP IRQ fastpath handling */
-		pv_irq_ops.irq_disable = PV_CALLEE_SAVE(vsmp_irq_disable);
-		pv_irq_ops.irq_enable = PV_CALLEE_SAVE(vsmp_irq_enable);
-		pv_irq_ops.save_fl = PV_CALLEE_SAVE(vsmp_save_fl);
-		pv_irq_ops.restore_fl = PV_CALLEE_SAVE(vsmp_restore_fl);
-		pv_init_ops.patch = vsmp_patch;
+		pv_ops.irq.irq_disable = PV_CALLEE_SAVE(vsmp_irq_disable);
+		pv_ops.irq.irq_enable = PV_CALLEE_SAVE(vsmp_irq_enable);
+		pv_ops.irq.save_fl = PV_CALLEE_SAVE(vsmp_save_fl);
+		pv_ops.irq.restore_fl = PV_CALLEE_SAVE(vsmp_restore_fl);
+		pv_ops.init.patch = vsmp_patch;
 		ctl &= ~(1 << 4);
 	}
 	writel(ctl, address + 4);
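Tying the vsmp_64.c hunk back to the alternative.c hunk at the top: apply_paravirt() reaches the patcher through pv_ops.init.patch, so assigning pv_ops.init.patch = vsmp_patch is all vSMP needs in order to route its four IRQ ops to paravirt_patch_default() while everything else still goes through native_patch(). A much-simplified model of that dispatch loop (not the kernel's code; the write-back of the patched bytes is omitted):

#include <string.h>

/* Much-simplified model of apply_paravirt() -- not the kernel's code. */
struct patch_site {
	unsigned char *instr;		/* location being patched */
	unsigned char instrtype;	/* index produced by PARAVIRT_PATCH() */
	unsigned char len;		/* room available at the site */
};

/* Stands in for pv_ops.init.patch; vSMP points this at vsmp_patch(). */
static unsigned int (*patch_op)(unsigned char type, void *insnbuf,
				unsigned long addr, unsigned int len);

static void apply_sites(struct patch_site *start, struct patch_site *end)
{
	unsigned char insnbuf[16];
	struct patch_site *p;

	for (p = start; p < end; p++) {
		/* prep the buffer with the original instructions */
		memcpy(insnbuf, p->instr, p->len);

		patch_op(p->instrtype, insnbuf,
			 (unsigned long)p->instr, p->len);
		/* (the kernel then copies the patched bytes back) */
	}
}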