author     Linus Torvalds <torvalds@linux-foundation.org>  2018-08-17 21:32:50 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-08-17 21:32:50 +0300
commit     5e2d059b52e397d9ac42f4c4d9d9a841887b5818
tree       c8cd8fd7187113be33e29fcc75f45a8bbc27e6b2  /arch/powerpc/kvm
parent     d190775206d06397a9309421cac5ba2f2c243521
parent     a2dc009afa9ae8b92305be7728676562a104cb40
download   linux-5e2d059b52e397d9ac42f4c4d9d9a841887b5818.tar.xz
Merge tag 'powerpc-4.19-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc updates from Michael Ellerman:
"Notable changes:
- A fix for a bug in our page table fragment allocator, where a page
table page could be freed and reallocated for something else while
still in use, leading to memory corruption etc. The fix reuses
pt_mm in struct page (x86 only) for a powerpc-only refcount (a
generic sketch of the refcounting idea follows the commit message).
- Fixes to our pkey support. Several are user-visible changes, but
bring us into line with x86 behaviour and/or fix outright bugs.
Thanks to Florian Weimer for reporting many of these.
- A series to improve the hvc driver & related OPAL console code,
which have been seen to cause hardlockups at times. The hvc driver
changes in particular have been in linux-next for about a month.
- Increase our MAX_PHYSMEM_BITS to 128TB when SPARSEMEM_VMEMMAP=y.
- Remove Power8 DD1 and Power9 DD1 support; neither chip should be in
use anywhere other than as a paperweight.
- An optimised memcmp implementation using Power7-or-later VMX
instructions.
- Support for barrier_nospec on some NXP CPUs.
- Support for flushing the count cache on context switch on some IBM
CPUs (controlled by firmware), as a Spectre v2 mitigation.
- A series to enhance the information we print on unhandled signals
to bring it into line with other arches, including showing the
offending VMA and dumping the instructions around the fault.
Thanks to: Aaro Koskinen, Akshay Adiga, Alastair D'Silva, Alexey
Kardashevskiy, Alexey Spirkov, Alistair Popple, Andrew Donnellan,
Aneesh Kumar K.V, Anju T Sudhakar, Arnd Bergmann, Bartosz Golaszewski,
Benjamin Herrenschmidt, Bharat Bhushan, Bjoern Noetel, Boqun Feng,
Breno Leitao, Bryant G. Ly, Camelia Groza, Christophe Leroy, Christoph
Hellwig, Cyril Bur, Dan Carpenter, Daniel Klamt, Darren Stevens, Dave
Young, David Gibson, Diana Craciun, Finn Thain, Florian Weimer,
Frederic Barrat, Gautham R. Shenoy, Geert Uytterhoeven, Geoff Levand,
Guenter Roeck, Gustavo Romero, Haren Myneni, Hari Bathini, Joel
Stanley, Jonathan Neuschäfer, Kees Cook, Madhavan Srinivasan, Mahesh
Salgaonkar, Markus Elfring, Mathieu Malaterre, Mauro S. M. Rodrigues,
Michael Hanselmann, Michael Neuling, Michael Schmitz, Mukesh Ojha,
Murilo Opsfelder Araujo, Nicholas Piggin, Parth Y Shah, Paul
Mackerras, Paul Menzel, Ram Pai, Randy Dunlap, Rashmica Gupta, Reza
Arbab, Rodrigo R. Galvao, Russell Currey, Sam Bobroff, Scott Wood,
Shilpasri G Bhat, Simon Guo, Souptick Joarder, Stan Johnson, Thiago
Jung Bauermann, Tyrel Datwyler, Vaibhav Jain, Vasant Hegde, Venkat
Rao, zhong jiang"
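
The page table fragment fix in the first item above hinges on one rule: the backing page may only be freed once the last page table using it has dropped its reference. Below is a generic, self-contained C sketch of that refcount-before-free discipline; the names (frag, frag_get, frag_put) and layout are illustrative only and are not the kernel's actual fragment allocator.

/*
 * Illustrative only: free the shared page only when the last user is gone.
 * The names below (frag, frag_get, frag_put) are hypothetical, not kernel code.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct frag {
        atomic_int refcount;    /* page tables still using this page */
        void *page;             /* backing page shared by several page tables */
};

static struct frag *frag_get(struct frag *f)
{
        atomic_fetch_add(&f->refcount, 1);
        return f;
}

static void frag_put(struct frag *f)
{
        /* atomic_fetch_sub() returns the old value: 1 means we were the last user. */
        if (atomic_fetch_sub(&f->refcount, 1) == 1) {
                free(f->page);
                free(f);
        }
}

int main(void)
{
        struct frag *f = malloc(sizeof(*f));

        atomic_init(&f->refcount, 1);   /* first page table takes a reference */
        f->page = malloc(4096);

        frag_get(f);    /* a second page table reuses the same page */
        frag_put(f);    /* first user done: the page must NOT be freed yet */
        frag_put(f);    /* last user done: now it is safe to free */

        printf("freed only after the last reference was dropped\n");
        return 0;
}
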
* tag 'powerpc-4.19-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (234 commits)
powerpc/mm/book3s/radix: Add mapping statistics
powerpc/uaccess: Enable get_user(u64, *p) on 32-bit
powerpc/mm/hash: Remove unnecessary do { } while(0) loop
powerpc/64s: move machine check SLB flushing to mm/slb.c
powerpc/powernv/idle: Fix build error
powerpc/mm/tlbflush: update the mmu_gather page size while iterating address range
powerpc/mm: remove warning about ‘type’ being set
powerpc/32: Include setup.h header file to fix warnings
powerpc: Move `path` variable inside DEBUG_PROM
powerpc/powermac: Make some functions static
powerpc/powermac: Remove variable x that's never read
cxl: remove a dead branch
powerpc/powermac: Add missing include of header pmac.h
powerpc/kexec: Use common error handling code in setup_new_fdt()
powerpc/xmon: Add address lookup for percpu symbols
powerpc/mm: remove huge_pte_offset_and_shift() prototype
powerpc/lib: Use patch_site to patch copy_32 functions once cache is enabled
powerpc/pseries: Fix endianness while restoring of r3 in MCE handler.
powerpc/fadump: merge adjacent memory ranges to reduce PT_LOAD segments
powerpc/fadump: handle crash memory ranges array index overflow
...
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--  arch/powerpc/kvm/book3s.c                |  1
-rw-r--r--  arch/powerpc/kvm/book3s_32_mmu.c         |  1
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu.c         |  1
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c      |  1
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_radix.c   | 15
-rw-r--r--  arch/powerpc/kvm/book3s_64_slb.S         |  3
-rw-r--r--  arch/powerpc/kvm/book3s_64_vio.c         | 12
-rw-r--r--  arch/powerpc/kvm/book3s_64_vio_hv.c      | 20
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c             | 11
-rw-r--r--  arch/powerpc/kvm/book3s_hv_interrupts.S  |  2
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c      |  1
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S  | 18
-rw-r--r--  arch/powerpc/kvm/book3s_interrupts.S     |  1
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c             |  1
-rw-r--r--  arch/powerpc/kvm/book3s_rmhandlers.S     |  1
-rw-r--r--  arch/powerpc/kvm/book3s_segment.S        |  3
-rw-r--r--  arch/powerpc/kvm/book3s_xive_template.c  | 39
-rw-r--r--  arch/powerpc/kvm/e500.c                  |  1
-rw-r--r--  arch/powerpc/kvm/e500mc.c                |  1
-rw-r--r--  arch/powerpc/kvm/powerpc.c               |  1
20 files changed, 40 insertions, 94 deletions
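
Several hunks below (book3s_64_vio.c and book3s_64_vio_hv.c) change the cached userspace TCE entries from native-endian unsigned long to __be64, converting with cpu_to_be64()/be64_to_cpu() on every store and load. A minimal userspace sketch of that explicit-endianness pattern, using htobe64()/be64toh() from <endian.h> as stand-ins for the kernel helpers (the variable names here are illustrative only):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t ua = 0x0000123456789000ULL;  /* guest userspace address */
        uint64_t pua_be;                      /* plays the role of *pua (__be64) */

        /* Store in a fixed big-endian layout: roughly *pua = cpu_to_be64(ua) */
        pua_be = htobe64(ua);

        /* Read it back independent of host endianness: roughly be64_to_cpu(*pua) */
        printf("stored raw : 0x%016llx\n", (unsigned long long)pua_be);
        printf("decoded    : 0x%016llx\n", (unsigned long long)be64toh(pua_be));
        return 0;
}
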
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index edaf4720d156..87348e498c89 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -28,7 +28,6 @@
 #include <asm/reg.h>
 #include <asm/cputable.h>
 #include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
 #include <linux/uaccess.h>
 #include <asm/io.h>
 #include <asm/kvm_ppc.h>
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index 45c8ea4a0487..612169988a3d 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -23,7 +23,6 @@
 #include <linux/kvm_host.h>
 #include <linux/highmem.h>
 
-#include <asm/tlbflush.h>
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
 
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index cf9d686e8162..c92dd25bed23 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -23,7 +23,6 @@
 #include <linux/kvm_host.h>
 #include <linux/highmem.h>
 
-#include <asm/tlbflush.h>
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
 #include <asm/book3s/64/mmu-hash.h>
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 7f3a8cf5d66f..3c0e8fb2b773 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -29,7 +29,6 @@
 #include <linux/file.h>
 #include <linux/debugfs.h>
 
-#include <asm/tlbflush.h>
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
 #include <asm/book3s/64/mmu-hash.h>
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 176f911ee983..0af1c0aea1fe 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -66,10 +66,7 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	bits = root & RPDS_MASK;
 	root = root & RPDB_MASK;
 
-	/* P9 DD1 interprets RTS (radix tree size) differently */
 	offset = rts + 31;
-	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-		offset -= 3;
 
 	/* current implementations only support 52-bit space */
 	if (offset != 52)
@@ -160,17 +157,7 @@ static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
 				      unsigned long clr, unsigned long set,
 				      unsigned long addr, unsigned int shift)
 {
-	unsigned long old = 0;
-
-	if (!(clr & _PAGE_PRESENT) && cpu_has_feature(CPU_FTR_POWER9_DD1) &&
-	    pte_present(*ptep)) {
-		/* have to invalidate it first */
-		old = __radix_pte_update(ptep, _PAGE_PRESENT, 0);
-		kvmppc_radix_tlbie_page(kvm, addr, shift);
-		set |= _PAGE_PRESENT;
-		old &= _PAGE_PRESENT;
-	}
-	return __radix_pte_update(ptep, clr, set) | old;
+	return __radix_pte_update(ptep, clr, set);
 }
 
 void kvmppc_radix_set_pte_at(struct kvm *kvm, unsigned long addr,
diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S
index 688722acd692..066c665dc86f 100644
--- a/arch/powerpc/kvm/book3s_64_slb.S
+++ b/arch/powerpc/kvm/book3s_64_slb.S
@@ -17,6 +17,9 @@
  * Authors: Alexander Graf <agraf@suse.de>
  */
 
+#include <asm/asm-compat.h>
+#include <asm/feature-fixups.h>
+
 #define SHADOW_SLB_ENTRY_LEN	0x10
 #define OFFSET_ESID(x)		(SHADOW_SLB_ENTRY_LEN * x)
 #define OFFSET_VSID(x)		((SHADOW_SLB_ENTRY_LEN * x) + 8)
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 8c456fa691a5..e8afdd4f4814 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -31,7 +31,6 @@
 #include <linux/iommu.h>
 #include <linux/file.h>
 
-#include <asm/tlbflush.h>
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
 #include <asm/book3s/64/mmu-hash.h>
@@ -378,19 +377,19 @@ static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
 {
 	struct mm_iommu_table_group_mem_t *mem = NULL;
 	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
-	unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
+	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
 
 	if (!pua)
 		/* it_userspace allocation might be delayed */
 		return H_TOO_HARD;
 
-	mem = mm_iommu_lookup(kvm->mm, *pua, pgsize);
+	mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize);
 	if (!mem)
 		return H_TOO_HARD;
 
 	mm_iommu_mapped_dec(mem);
 
-	*pua = 0;
+	*pua = cpu_to_be64(0);
 
 	return H_SUCCESS;
 }
@@ -437,7 +436,8 @@ long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
 		enum dma_data_direction dir)
 {
 	long ret;
-	unsigned long hpa, *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
+	unsigned long hpa;
+	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
 	struct mm_iommu_table_group_mem_t *mem;
 
 	if (!pua)
@@ -464,7 +464,7 @@ long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
 	if (dir != DMA_NONE)
 		kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
 
-	*pua = ua;
+	*pua = cpu_to_be64(ua);
 
 	return 0;
 }
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index 5b298f5a1a14..506a4d400458 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -26,8 +26,8 @@
 #include <linux/slab.h>
 #include <linux/hugetlb.h>
 #include <linux/list.h>
+#include <linux/stringify.h>
 
-#include <asm/tlbflush.h>
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
 #include <asm/book3s/64/mmu-hash.h>
@@ -200,23 +200,19 @@ static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
 {
 	struct mm_iommu_table_group_mem_t *mem = NULL;
 	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
-	unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
+	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry);
 
 	if (!pua)
 		/* it_userspace allocation might be delayed */
 		return H_TOO_HARD;
 
-	pua = (void *) vmalloc_to_phys(pua);
-	if (WARN_ON_ONCE_RM(!pua))
-		return H_HARDWARE;
-
-	mem = mm_iommu_lookup_rm(kvm->mm, *pua, pgsize);
+	mem = mm_iommu_lookup_rm(kvm->mm, be64_to_cpu(*pua), pgsize);
 	if (!mem)
 		return H_TOO_HARD;
 
 	mm_iommu_mapped_dec(mem);
 
-	*pua = 0;
+	*pua = cpu_to_be64(0);
 
 	return H_SUCCESS;
 }
@@ -268,7 +264,7 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
 {
 	long ret;
 	unsigned long hpa = 0;
-	unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
+	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry);
 	struct mm_iommu_table_group_mem_t *mem;
 
 	if (!pua)
@@ -283,10 +279,6 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
 			&hpa)))
 		return H_HARDWARE;
 
-	pua = (void *) vmalloc_to_phys(pua);
-	if (WARN_ON_ONCE_RM(!pua))
-		return H_HARDWARE;
-
 	if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
 		return H_CLOSED;
 
@@ -303,7 +295,7 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
 	if (dir != DMA_NONE)
 		kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
 
-	*pua = ua;
+	*pua = cpu_to_be64(ua);
 
 	return 0;
 }
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index ee4a8854985e..96f1cc6c97b7 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -53,7 +53,6 @@
 #include <asm/disassemble.h>
 #include <asm/cputable.h>
 #include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
 #include <linux/uaccess.h>
 #include <asm/io.h>
 #include <asm/kvm_ppc.h>
@@ -1693,14 +1692,6 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 		r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
 		break;
 	case KVM_REG_PPC_TB_OFFSET:
-		/*
-		 * POWER9 DD1 has an erratum where writing TBU40 causes
-		 * the timebase to lose ticks. So we don't let the
-		 * timebase offset be changed on P9 DD1. (It is
-		 * initialized to zero.)
-		 */
-		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-			break;
 		/* round up to multiple of 2^24 */
 		vcpu->arch.vcore->tb_offset =
 			ALIGN(set_reg_val(id, *val), 1UL << 24);
@@ -2026,8 +2017,6 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
 	/*
 	 * Set the default HFSCR for the guest from the host value.
 	 * This value is only used on POWER9.
-	 * On POWER9 DD1, TM doesn't work, so we make sure to
-	 * prevent the guest from using it.
 	 * On POWER9, we want to virtualize the doorbell facility, so we
 	 * turn off the HFSCR bit, which causes those instructions to trap.
 	 */
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 82f2ff9410b6..666b91c79eb4 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -27,6 +27,8 @@
 #include <asm/asm-offsets.h>
 #include <asm/exception-64s.h>
 #include <asm/ppc-opcode.h>
+#include <asm/asm-compat.h>
+#include <asm/feature-fixups.h>
 
 /*****************************************************************************
  *                                                                           *
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 1f22d9e977d4..a67cf1cdeda4 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -14,7 +14,6 @@
 #include <linux/module.h>
 #include <linux/log2.h>
 
-#include <asm/tlbflush.h>
 #include <asm/trace.h>
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 153988d878e8..1d14046124a0 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -32,6 +32,8 @@
 #include <asm/opal.h>
 #include <asm/xive-regs.h>
 #include <asm/thread_info.h>
+#include <asm/asm-compat.h>
+#include <asm/feature-fixups.h>
 
 /* Sign-extend HDEC if not on POWER9 */
 #define EXTEND_HDEC(reg)			\
@@ -917,9 +919,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_DAWR)
 	mtspr	SPRN_PID, r7
 	mtspr	SPRN_WORT, r8
 BEGIN_FTR_SECTION
-	PPC_INVALIDATE_ERAT
-END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
-BEGIN_FTR_SECTION
 	/* POWER8-only registers */
 	ld	r5, VCPU_TCSCR(r4)
 	ld	r6, VCPU_ACOP(r4)
@@ -1912,7 +1911,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 	ld	r5, VCPU_KVM(r9)
 	lbz	r0, KVM_RADIX(r5)
 	cmpwi	cr2, r0, 0
-	beq	cr2, 4f
+	beq	cr2, 2f
 
 	/*
 	 * Radix: do eieio; tlbsync; ptesync sequence in case we
@@ -1952,11 +1951,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 	bdnz	1b
 	ptesync
 
-2:	/* Flush the ERAT on radix P9 DD1 guest exit */
-BEGIN_FTR_SECTION
-	PPC_INVALIDATE_ERAT
-END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
-4:
+2:
 #endif /* CONFIG_PPC_RADIX_MMU */
 
 /*
@@ -3367,11 +3362,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
 	mtspr	SPRN_CIABR, r0
 	mtspr	SPRN_DAWRX, r0
 
-	/* Flush the ERAT on radix P9 DD1 guest exit */
-BEGIN_FTR_SECTION
-	PPC_INVALIDATE_ERAT
-END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
-
 BEGIN_MMU_FTR_SECTION
 	b	4f
 END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index c18e845019ec..d71dab16dc6f 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -23,6 +23,7 @@
 #include <asm/page.h>
 #include <asm/asm-offsets.h>
 #include <asm/exception-64s.h>
+#include <asm/asm-compat.h>
 
 #if defined(CONFIG_PPC_BOOK3S_64)
 #ifdef PPC64_ELF_ABI_v2
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index c3b8006f0eac..47ee43bbd696 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -27,7 +27,6 @@
 #include <asm/reg.h>
 #include <asm/cputable.h>
 #include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
 #include <linux/uaccess.h>
 #include <asm/io.h>
 #include <asm/kvm_ppc.h>
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index 34a5adeff084..b0089e04c8c8 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -23,6 +23,7 @@
 #include <asm/mmu.h>
 #include <asm/page.h>
 #include <asm/asm-offsets.h>
+#include <asm/asm-compat.h>
 
 #ifdef CONFIG_PPC_BOOK3S_64
 #include <asm/exception-64s.h>
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 98ccc7ec5d48..e5c542a7c5ac 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -19,6 +19,9 @@
 
 /* Real mode helpers */
 
+#include <asm/asm-compat.h>
+#include <asm/feature-fixups.h>
+
 #if defined(CONFIG_PPC_BOOK3S_64)
 
 #define GET_SHADOW_VCPU(reg)				\
diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c
index 6e41ba7ec8f4..4171ede8722b 100644
--- a/arch/powerpc/kvm/book3s_xive_template.c
+++ b/arch/powerpc/kvm/book3s_xive_template.c
@@ -25,18 +25,6 @@ static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc)
 	 */
 	eieio();
 
-	/*
-	 * DD1 bug workaround: If PIPR is less favored than CPPR
-	 * ignore the interrupt or we might incorrectly lose an IPB
-	 * bit.
-	 */
-	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
-		__be64 qw1 = __x_readq(__x_tima + TM_QW1_OS);
-		u8 pipr = be64_to_cpu(qw1) & 0xff;
-		if (pipr >= xc->hw_cppr)
-			return;
-	}
-
 	/* Perform the acknowledge OS to register cycle. */
 	ack = be16_to_cpu(__x_readw(__x_tima + TM_SPC_ACK_OS_REG));
 
@@ -89,8 +77,15 @@ static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
 	/* If the XIVE supports the new "store EOI facility, use it */
 	if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
 		__x_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI);
-	else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
+	else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW)
 		opal_int_eoi(hw_irq);
+	else if (xd->flags & XIVE_IRQ_FLAG_LSI) {
+		/*
+		 * For LSIs the HW EOI cycle is used rather than PQ bits,
+		 * as they are automatically re-triggred in HW when still
+		 * pending.
+		 */
+		__x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);
 	} else {
 		uint64_t eoi_val;
 
@@ -102,20 +97,12 @@ static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
 		 *
 		 * This allows us to then do a re-trigger if Q was set
 		 * rather than synthetizing an interrupt in software
-		 *
-		 * For LSIs, using the HW EOI cycle works around a problem
-		 * on P9 DD1 PHBs where the other ESB accesses don't work
-		 * properly.
 		 */
-		if (xd->flags & XIVE_IRQ_FLAG_LSI)
-			__x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);
-		else {
-			eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00);
-
-			/* Re-trigger if needed */
-			if ((eoi_val & 1) && __x_trig_page(xd))
-				__x_writeq(0, __x_trig_page(xd));
-		}
+		eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00);
+
+		/* Re-trigger if needed */
+		if ((eoi_val & 1) && __x_trig_page(xd))
+			__x_writeq(0, __x_trig_page(xd));
 	}
 }
 
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index f9f6468f4171..afd3c255a427 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -21,7 +21,6 @@
 
 #include <asm/reg.h>
 #include <asm/cputable.h>
-#include <asm/tlbflush.h>
 #include <asm/kvm_ppc.h>
 
 #include "../mm/mmu_decl.h"
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index d0b6b5788afc..d31645491a93 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -21,7 +21,6 @@
 
 #include <asm/reg.h>
 #include <asm/cputable.h>
-#include <asm/tlbflush.h>
 #include <asm/kvm_ppc.h>
 #include <asm/dbell.h>
 
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 0e8c20c5eaac..3ccc386b380d 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -33,7 +33,6 @@
 #include <asm/cputable.h>
 #include <linux/uaccess.h>
 #include <asm/kvm_ppc.h>
-#include <asm/tlbflush.h>
 #include <asm/cputhreads.h>
 #include <asm/irqflags.h>
 #include <asm/iommu.h>