-rw-r--r--  arch/loongarch/include/asm/kvm_vcpu.h |  4 ++--
-rw-r--r--  arch/loongarch/kernel/smp.c           |  1 -
-rw-r--r--  arch/loongarch/kvm/mmu.c              |  4 ++--
-rw-r--r--  arch/loongarch/mm/tlb.c               | 16 ++++++++++------
4 files changed, 14 insertions, 11 deletions
diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h
index e71ceb88f29e..0cb4fdb8a9b5 100644
--- a/arch/loongarch/include/asm/kvm_vcpu.h
+++ b/arch/loongarch/include/asm/kvm_vcpu.h
@@ -60,7 +60,7 @@ int kvm_own_lsx(struct kvm_vcpu *vcpu);
 void kvm_save_lsx(struct loongarch_fpu *fpu);
 void kvm_restore_lsx(struct loongarch_fpu *fpu);
 #else
-static inline int kvm_own_lsx(struct kvm_vcpu *vcpu) { }
+static inline int kvm_own_lsx(struct kvm_vcpu *vcpu) { return -EINVAL; }
 static inline void kvm_save_lsx(struct loongarch_fpu *fpu) { }
 static inline void kvm_restore_lsx(struct loongarch_fpu *fpu) { }
 #endif
@@ -70,7 +70,7 @@ int kvm_own_lasx(struct kvm_vcpu *vcpu);
 void kvm_save_lasx(struct loongarch_fpu *fpu);
 void kvm_restore_lasx(struct loongarch_fpu *fpu);
 #else
-static inline int kvm_own_lasx(struct kvm_vcpu *vcpu) { }
+static inline int kvm_own_lasx(struct kvm_vcpu *vcpu) { return -EINVAL; }
 static inline void kvm_save_lasx(struct loongarch_fpu *fpu) { }
 static inline void kvm_restore_lasx(struct loongarch_fpu *fpu) { }
 #endif
diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
index a16e3dbe9f09..2b49d30eb7c0 100644
--- a/arch/loongarch/kernel/smp.c
+++ b/arch/loongarch/kernel/smp.c
@@ -509,7 +509,6 @@ asmlinkage void start_secondary(void)
 	sync_counter();
 	cpu = raw_smp_processor_id();
 	set_my_cpu_offset(per_cpu_offset(cpu));
-	rcutree_report_cpu_starting(cpu);
 
 	cpu_probe();
 	constant_clockevent_init();
diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c
index 915f17527893..50a6acd7ffe4 100644
--- a/arch/loongarch/kvm/mmu.c
+++ b/arch/loongarch/kvm/mmu.c
@@ -675,7 +675,7 @@ static bool fault_supports_huge_mapping(struct kvm_memory_slot *memslot,
  *
  * There are several ways to safely use this helper:
  *
- * - Check mmu_invalidate_retry_hva() after grabbing the mapping level, before
+ * - Check mmu_invalidate_retry_gfn() after grabbing the mapping level, before
  *   consuming it. In this case, mmu_lock doesn't need to be held during the
  *   lookup, but it does need to be held while checking the MMU notifier.
  *
@@ -855,7 +855,7 @@ retry:
 
 	/* Check if an invalidation has taken place since we got pfn */
 	spin_lock(&kvm->mmu_lock);
-	if (mmu_invalidate_retry_hva(kvm, mmu_seq, hva)) {
+	if (mmu_invalidate_retry_gfn(kvm, mmu_seq, gfn)) {
 		/*
 		 * This can happen when mappings are changed asynchronously, but
 		 * also synchronously if a COW is triggered by
diff --git a/arch/loongarch/mm/tlb.c b/arch/loongarch/mm/tlb.c
index 2c0a411f23aa..0b95d32b30c9 100644
--- a/arch/loongarch/mm/tlb.c
+++ b/arch/loongarch/mm/tlb.c
@@ -284,12 +284,16 @@ static void setup_tlb_handler(int cpu)
 		set_handler(EXCCODE_TLBNR * VECSIZE, handle_tlb_protect, VECSIZE);
 		set_handler(EXCCODE_TLBNX * VECSIZE, handle_tlb_protect, VECSIZE);
 		set_handler(EXCCODE_TLBPE * VECSIZE, handle_tlb_protect, VECSIZE);
-	}
+	} else {
+		int vec_sz __maybe_unused;
+		void *addr __maybe_unused;
+		struct page *page __maybe_unused;
+
+		/* Avoid lockdep warning */
+		rcutree_report_cpu_starting(cpu);
+
 #ifdef CONFIG_NUMA
-	else {
-		void *addr;
-		struct page *page;
-		const int vec_sz = sizeof(exception_handlers);
+		vec_sz = sizeof(exception_handlers);
 
 		if (pcpu_handlers[cpu])
 			return;
@@ -305,8 +309,8 @@ static void setup_tlb_handler(int cpu)
 		csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_EENTRY);
 		csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_MERRENTRY);
 		csr_write64(pcpu_handlers[cpu] + 80*VECSIZE, LOONGARCH_CSR_TLBRENTRY);
-	}
 #endif
+	}
 }
 
 void tlb_init(int cpu)
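
Two notes on the changes above. The kvm_vcpu.h stubs are declared int, so falling off the end without a return value is undefined behavior for any caller that uses the result; returning -EINVAL reports "not supported" when LSX/LASX support is compiled out. The smp.c and tlb.c hunks are one logical move: rcutree_report_cpu_starting() now runs in the non-boot-CPU branch of setup_tlb_handler(), so RCU is already watching the CPU before the per-CPU exception-vector allocation in the NUMA path (the /* Avoid lockdep warning */ comment), and the __maybe_unused locals keep !CONFIG_NUMA builds quiet now that the declarations sit outside the #ifdef.

The mmu.c hunks track a KVM core rename: mmu_invalidate_retry_hva() became mmu_invalidate_retry_gfn(), taking a guest frame number instead of a host virtual address (the rename landed with the guest_memfd work, where guest memory need not have an hva at all). A minimal sketch of the retry pattern this check belongs to, assuming the generic helpers from include/linux/kvm_host.h of this kernel generation; fault_sketch() and its body are illustrative, not the actual LoongArch fault handler:

static int fault_sketch(struct kvm *kvm, gfn_t gfn)
{
	kvm_pfn_t pfn;
	unsigned long mmu_seq;

retry:
	/* Snapshot the invalidation sequence before the sleepable lookup. */
	mmu_seq = kvm->mmu_invalidate_seq;
	smp_rmb();

	pfn = gfn_to_pfn(kvm, gfn);	/* may sleep, may trigger a COW */

	spin_lock(&kvm->mmu_lock);
	/*
	 * If an MMU-notifier invalidation covering this gfn ran after the
	 * snapshot, the pfn may already be stale: drop it and start over.
	 */
	if (mmu_invalidate_retry_gfn(kvm, mmu_seq, gfn)) {
		spin_unlock(&kvm->mmu_lock);
		kvm_release_pfn_clean(pfn);
		goto retry;
	}

	/* ... install the mapping while still holding mmu_lock ... */
	spin_unlock(&kvm->mmu_lock);
	return 0;
}

Matching invalidations by gfn rather than hva checks the address the mapping is actually installed for, which is why the LoongArch caller now passes gfn instead of hva.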