From 51d42f0f5fd6c74144d19bf6a663521e2ea99765 Mon Sep 17 00:00:00 2001 From: Segher Boessenkool Date: Wed, 28 Feb 2018 17:02:49 -0800 Subject: powerpc: Keep const vars out of writable .sdata Newer gcc will support "-mno-readonly-in-sdata"[1], which makes sure that the optimization on PPC32 for variables getting moved into the .sdata section will not apply to const variables (which must be in .rodata). This was originally noticed in mm/rodata_test.c when rodata_test_data was not static: c0695034 g O .data 00000004 rodata_test_data After this patch with an updated compiler, this is correctly in .rodata. [1] https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82411 Reported-by: Christophe Leroy Signed-off-by: Segher Boessenkool Signed-off-by: Kees Cook Signed-off-by: Michael Ellerman --- arch/powerpc/Makefile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index ccd2556bdb53..c7628e973084 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -141,7 +141,9 @@ AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1) endif CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,$(call cc-option,-mminimal-toc)) CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions) + CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 $(MULTIPLEWORD) +CFLAGS-$(CONFIG_PPC32) += $(call cc-option,-mno-readonly-in-sdata) ifeq ($(CONFIG_PPC_BOOK3S_64),y) CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power7,-mtune=power4) -- cgit v1.2.3 From 326691ad4f179e6edc7eb1271e618dd673e4736d Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Thu, 22 Feb 2018 15:27:20 +0100 Subject: powerpc/mm/slice: Remove intermediate bitmap copy bitmap_or() and bitmap_andnot() can work properly with dst identical to src1 or src2. There is no need of an intermediate result bitmap that is copied back to dst in a second step. 
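To see why the in-place form is safe, consider a minimal stand-alone sketch of the word-wise OR that the bitmap helpers perform (illustrative C, not the kernel's lib/bitmap.c): each destination word is computed only from the corresponding source words, so dst may alias src1 or src2. The same argument applies to the AND-NOT case.

#include <limits.h>

/* Minimal sketch of a word-wise bitmap OR; aliasing dst with src1 or
 * src2 is harmless because dst[k] is written only from src1[k] and
 * src2[k]. */
static void bitmap_or_sketch(unsigned long *dst, const unsigned long *src1,
			     const unsigned long *src2, unsigned int nbits)
{
	const unsigned int bits_per_long = sizeof(long) * CHAR_BIT;
	unsigned int k, nr = (nbits + bits_per_long - 1) / bits_per_long;

	for (k = 0; k < nr; k++)
		dst[k] = src1[k] | src2[k];
}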
Signed-off-by: Christophe Leroy Reviewed-by: Aneesh Kumar K.V Reviewed-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/mm/slice.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index 23ec2c5e3b78..98b53d48968f 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -388,21 +388,17 @@ static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len, static inline void slice_or_mask(struct slice_mask *dst, struct slice_mask *src) { - DECLARE_BITMAP(result, SLICE_NUM_HIGH); - dst->low_slices |= src->low_slices; - bitmap_or(result, dst->high_slices, src->high_slices, SLICE_NUM_HIGH); - bitmap_copy(dst->high_slices, result, SLICE_NUM_HIGH); + bitmap_or(dst->high_slices, dst->high_slices, src->high_slices, + SLICE_NUM_HIGH); } static inline void slice_andnot_mask(struct slice_mask *dst, struct slice_mask *src) { - DECLARE_BITMAP(result, SLICE_NUM_HIGH); - dst->low_slices &= ~src->low_slices; - bitmap_andnot(result, dst->high_slices, src->high_slices, SLICE_NUM_HIGH); - bitmap_copy(dst->high_slices, result, SLICE_NUM_HIGH); + bitmap_andnot(dst->high_slices, dst->high_slices, src->high_slices, + SLICE_NUM_HIGH); } #ifdef CONFIG_PPC_64K_PAGES -- cgit v1.2.3 From a3286f05bc5a5bc7fc73a9783ec89de78fcd07f8 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Thu, 22 Feb 2018 15:27:22 +0100 Subject: powerpc/mm/slice: create header files dedicated to slices In preparation for the following patch which will enhance 'slices' for supporting PPC32 in order to fix an issue on hugepages on 8xx, this patch takes out of page*.h all bits related to 'slices' and puts them into newly created slice.h header files. While common parts go into asm/slice.h, subarch specific parts go into the respective book3s/64/slice.h and nohash/64/slice.h files. Signed-off-by: Christophe Leroy Reviewed-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/book3s/64/slice.h | 27 ++++++++++++++ arch/powerpc/include/asm/nohash/64/slice.h | 12 ++++++ arch/powerpc/include/asm/page.h | 1 + arch/powerpc/include/asm/page_64.h | 59 ------------------------------ arch/powerpc/include/asm/slice.h | 40 ++++++++++++++++++++ 5 files changed, 80 insertions(+), 59 deletions(-) create mode 100644 arch/powerpc/include/asm/book3s/64/slice.h create mode 100644 arch/powerpc/include/asm/nohash/64/slice.h create mode 100644 arch/powerpc/include/asm/slice.h diff --git a/arch/powerpc/include/asm/book3s/64/slice.h b/arch/powerpc/include/asm/book3s/64/slice.h new file mode 100644 index 000000000000..db0dedab65ee --- /dev/null +++ b/arch/powerpc/include/asm/book3s/64/slice.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_BOOK3S_64_SLICE_H +#define _ASM_POWERPC_BOOK3S_64_SLICE_H + +#ifdef CONFIG_PPC_MM_SLICES + +#define SLICE_LOW_SHIFT 28 +#define SLICE_LOW_TOP (0x100000000ul) +#define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT) +#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT) + +#define SLICE_HIGH_SHIFT 40 +#define SLICE_NUM_HIGH (H_PGTABLE_RANGE >> SLICE_HIGH_SHIFT) +#define GET_HIGH_SLICE_INDEX(addr) ((addr) >> SLICE_HIGH_SHIFT) + +#else /* CONFIG_PPC_MM_SLICES */ + +#define get_slice_psize(mm, addr) ((mm)->context.user_psize) +#define slice_set_user_psize(mm, psize) \ +do { \ + (mm)->context.user_psize = (psize); \ + (mm)->context.sllp = SLB_VSID_USER | mmu_psize_defs[(psize)].sllp; \ +} while (0) + +#endif /* CONFIG_PPC_MM_SLICES */ + +#endif /*
_ASM_POWERPC_BOOK3S_64_SLICE_H */ diff --git a/arch/powerpc/include/asm/nohash/64/slice.h b/arch/powerpc/include/asm/nohash/64/slice.h new file mode 100644 index 000000000000..ad0d6e3cc1c5 --- /dev/null +++ b/arch/powerpc/include/asm/nohash/64/slice.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_NOHASH_64_SLICE_H +#define _ASM_POWERPC_NOHASH_64_SLICE_H + +#ifdef CONFIG_PPC_64K_PAGES +#define get_slice_psize(mm, addr) MMU_PAGE_64K +#else /* CONFIG_PPC_64K_PAGES */ +#define get_slice_psize(mm, addr) MMU_PAGE_4K +#endif /* !CONFIG_PPC_64K_PAGES */ +#define slice_set_user_psize(mm, psize) do { BUG(); } while (0) + +#endif /* _ASM_POWERPC_NOHASH_64_SLICE_H */ diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index 8da5d4c1cab2..d5f1c41b7dba 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -344,5 +344,6 @@ typedef struct page *pgtable_t; #include #endif /* __ASSEMBLY__ */ +#include #endif /* _ASM_POWERPC_PAGE_H */ diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h index 56234c6fcd61..af04acdb873f 100644 --- a/arch/powerpc/include/asm/page_64.h +++ b/arch/powerpc/include/asm/page_64.h @@ -86,65 +86,6 @@ extern u64 ppc64_pft_size; #endif /* __ASSEMBLY__ */ -#ifdef CONFIG_PPC_MM_SLICES - -#define SLICE_LOW_SHIFT 28 -#define SLICE_HIGH_SHIFT 40 - -#define SLICE_LOW_TOP (0x100000000ul) -#define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT) -#define SLICE_NUM_HIGH (H_PGTABLE_RANGE >> SLICE_HIGH_SHIFT) - -#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT) -#define GET_HIGH_SLICE_INDEX(addr) ((addr) >> SLICE_HIGH_SHIFT) - -#ifndef __ASSEMBLY__ -struct mm_struct; - -extern unsigned long slice_get_unmapped_area(unsigned long addr, - unsigned long len, - unsigned long flags, - unsigned int psize, - int topdown); - -extern unsigned int get_slice_psize(struct mm_struct *mm, - unsigned long addr); - -extern void slice_set_user_psize(struct mm_struct *mm, unsigned int psize); -extern void slice_set_range_psize(struct mm_struct *mm, unsigned long start, - unsigned long len, unsigned int psize); - -#endif /* __ASSEMBLY__ */ -#else -#define slice_init() -#ifdef CONFIG_PPC_BOOK3S_64 -#define get_slice_psize(mm, addr) ((mm)->context.user_psize) -#define slice_set_user_psize(mm, psize) \ -do { \ - (mm)->context.user_psize = (psize); \ - (mm)->context.sllp = SLB_VSID_USER | mmu_psize_defs[(psize)].sllp; \ -} while (0) -#else /* !CONFIG_PPC_BOOK3S_64 */ -#ifdef CONFIG_PPC_64K_PAGES -#define get_slice_psize(mm, addr) MMU_PAGE_64K -#else /* CONFIG_PPC_64K_PAGES */ -#define get_slice_psize(mm, addr) MMU_PAGE_4K -#endif /* !CONFIG_PPC_64K_PAGES */ -#define slice_set_user_psize(mm, psize) do { BUG(); } while(0) -#endif /* CONFIG_PPC_BOOK3S_64 */ - -#define slice_set_range_psize(mm, start, len, psize) \ - slice_set_user_psize((mm), (psize)) -#endif /* CONFIG_PPC_MM_SLICES */ - -#ifdef CONFIG_HUGETLB_PAGE - -#ifdef CONFIG_PPC_MM_SLICES -#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA -#endif - -#endif /* !CONFIG_HUGETLB_PAGE */ - #define VM_DATA_DEFAULT_FLAGS \ (is_32bit_task() ? 
\ VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64) diff --git a/arch/powerpc/include/asm/slice.h b/arch/powerpc/include/asm/slice.h new file mode 100644 index 000000000000..17c5a5d8c418 --- /dev/null +++ b/arch/powerpc/include/asm/slice.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_SLICE_H +#define _ASM_POWERPC_SLICE_H + +#ifdef CONFIG_PPC_BOOK3S_64 +#include <asm/book3s/64/slice.h> +#else +#include <asm/nohash/64/slice.h> +#endif + +#ifdef CONFIG_PPC_MM_SLICES + +#ifdef CONFIG_HUGETLB_PAGE +#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA +#endif +#define HAVE_ARCH_UNMAPPED_AREA +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN + +#ifndef __ASSEMBLY__ + +struct mm_struct; + +unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, + unsigned long flags, unsigned int psize, + int topdown); + +unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr); + +void slice_set_user_psize(struct mm_struct *mm, unsigned int psize); +void slice_set_range_psize(struct mm_struct *mm, unsigned long start, + unsigned long len, unsigned int psize); +#endif /* __ASSEMBLY__ */ + +#else /* CONFIG_PPC_MM_SLICES */ + +#define slice_set_range_psize(mm, start, len, psize) \ + slice_set_user_psize((mm), (psize)) +#endif /* CONFIG_PPC_MM_SLICES */ + +#endif /* _ASM_POWERPC_SLICE_H */ -- cgit v1.2.3 From db3a528db41caaa6dfd4c64e9f5efb1c81a80467 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Thu, 22 Feb 2018 15:27:24 +0100 Subject: powerpc/mm/slice: Enhance for supporting PPC32 In preparation for the following patch which will fix an issue on the 8xx by re-using the 'slices', this patch enhances the 'slices' implementation to support 32-bit CPUs. On PPC32, the address space is limited to 4Gbytes, hence only the low slices will be used. The high slices use bitmaps. As bitmap functions are not prepared to handle bitmaps of size 0, this patch ensures that bitmap functions are called only when SLICE_NUM_HIGH is not zero.
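Note that the guard works at compile time: SLICE_NUM_HIGH is a constant (0ul on PPC32 after this series), so the compiler folds the test away and the bitmap helpers are never even called with a zero-size bitmap. A self-contained sketch of the idea, with a stub standing in for the real bitmap_zero():

#include <stdio.h>

#define SLICE_NUM_HIGH 0ul	/* PPC32 value; 64-bit builds use a non-zero constant */

static void bitmap_zero_stub(unsigned long *map, unsigned long nbits)
{
	/* not prepared for nbits == 0; never reached when the guard folds away */
	printf("zeroing %lu bits\n", nbits);
}

int main(void)
{
	unsigned long high_slices[1];

	if (SLICE_NUM_HIGH)	/* constant 0 on PPC32: dead code, eliminated */
		bitmap_zero_stub(high_slices, SLICE_NUM_HIGH);
	return 0;
}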
Signed-off-by: Christophe Leroy Reviewed-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/nohash/32/slice.h | 18 +++++++++++++++ arch/powerpc/include/asm/slice.h | 4 +++- arch/powerpc/mm/slice.c | 37 +++++++++++++++++++++++------- 3 files changed, 50 insertions(+), 9 deletions(-) create mode 100644 arch/powerpc/include/asm/nohash/32/slice.h diff --git a/arch/powerpc/include/asm/nohash/32/slice.h b/arch/powerpc/include/asm/nohash/32/slice.h new file mode 100644 index 000000000000..95d532e18092 --- /dev/null +++ b/arch/powerpc/include/asm/nohash/32/slice.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_NOHASH_32_SLICE_H +#define _ASM_POWERPC_NOHASH_32_SLICE_H + +#ifdef CONFIG_PPC_MM_SLICES + +#define SLICE_LOW_SHIFT 28 +#define SLICE_LOW_TOP (0x100000000ull) +#define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT) +#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT) + +#define SLICE_HIGH_SHIFT 0 +#define SLICE_NUM_HIGH 0ul +#define GET_HIGH_SLICE_INDEX(addr) (addr & 0) + +#endif /* CONFIG_PPC_MM_SLICES */ + +#endif /* _ASM_POWERPC_NOHASH_32_SLICE_H */ diff --git a/arch/powerpc/include/asm/slice.h b/arch/powerpc/include/asm/slice.h index 17c5a5d8c418..172711fadb1c 100644 --- a/arch/powerpc/include/asm/slice.h +++ b/arch/powerpc/include/asm/slice.h @@ -4,8 +4,10 @@ #ifdef CONFIG_PPC_BOOK3S_64 #include -#else +#elif defined(CONFIG_PPC64) #include +#elif defined(CONFIG_PPC_MMU_NOHASH) +#include #endif #ifdef CONFIG_PPC_MM_SLICES diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index 98b53d48968f..0beca1ba2282 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -73,10 +73,12 @@ static void slice_range_to_mask(unsigned long start, unsigned long len, unsigned long end = start + len - 1; ret->low_slices = 0; - bitmap_zero(ret->high_slices, SLICE_NUM_HIGH); + if (SLICE_NUM_HIGH) + bitmap_zero(ret->high_slices, SLICE_NUM_HIGH); if (start < SLICE_LOW_TOP) { - unsigned long mend = min(end, (SLICE_LOW_TOP - 1)); + unsigned long mend = min(end, + (unsigned long)(SLICE_LOW_TOP - 1)); ret->low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1)) - (1u << GET_LOW_SLICE_INDEX(start)); @@ -113,11 +115,13 @@ static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice) unsigned long start = slice << SLICE_HIGH_SHIFT; unsigned long end = start + (1ul << SLICE_HIGH_SHIFT); +#ifdef CONFIG_PPC64 /* Hack, so that each addresses is controlled by exactly one * of the high or low area bitmaps, the first high area starts * at 4GB, not 0 */ if (start == 0) start = SLICE_LOW_TOP; +#endif return !slice_area_is_free(mm, start, end - start); } @@ -128,7 +132,8 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret, unsigned long i; ret->low_slices = 0; - bitmap_zero(ret->high_slices, SLICE_NUM_HIGH); + if (SLICE_NUM_HIGH) + bitmap_zero(ret->high_slices, SLICE_NUM_HIGH); for (i = 0; i < SLICE_NUM_LOW; i++) if (!slice_low_has_vma(mm, i)) @@ -151,7 +156,8 @@ static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_ma u64 lpsizes; ret->low_slices = 0; - bitmap_zero(ret->high_slices, SLICE_NUM_HIGH); + if (SLICE_NUM_HIGH) + bitmap_zero(ret->high_slices, SLICE_NUM_HIGH); lpsizes = mm->context.low_slices_psize; for (i = 0; i < SLICE_NUM_LOW; i++) @@ -180,6 +186,10 @@ static int slice_check_fit(struct mm_struct *mm, */ unsigned long slice_count = GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); + if (!SLICE_NUM_HIGH) + return (mask.low_slices & available.low_slices) == + 
mask.low_slices; + bitmap_and(result, mask.high_slices, available.high_slices, slice_count); @@ -189,6 +199,7 @@ static int slice_check_fit(struct mm_struct *mm, static void slice_flush_segments(void *parm) { +#ifdef CONFIG_PPC64 struct mm_struct *mm = parm; unsigned long flags; @@ -200,6 +211,7 @@ static void slice_flush_segments(void *parm) local_irq_save(flags); slb_flush_and_rebolt(); local_irq_restore(flags); +#endif } static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize) @@ -389,6 +401,8 @@ static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len, static inline void slice_or_mask(struct slice_mask *dst, struct slice_mask *src) { dst->low_slices |= src->low_slices; + if (!SLICE_NUM_HIGH) + return; bitmap_or(dst->high_slices, dst->high_slices, src->high_slices, SLICE_NUM_HIGH); } @@ -397,6 +411,8 @@ static inline void slice_andnot_mask(struct slice_mask *dst, struct slice_mask * { dst->low_slices &= ~src->low_slices; + if (!SLICE_NUM_HIGH) + return; bitmap_andnot(dst->high_slices, dst->high_slices, src->high_slices, SLICE_NUM_HIGH); } @@ -446,14 +462,17 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, * init different masks */ mask.low_slices = 0; - bitmap_zero(mask.high_slices, SLICE_NUM_HIGH); /* silence stupid warning */; potential_mask.low_slices = 0; - bitmap_zero(potential_mask.high_slices, SLICE_NUM_HIGH); compat_mask.low_slices = 0; - bitmap_zero(compat_mask.high_slices, SLICE_NUM_HIGH); + + if (SLICE_NUM_HIGH) { + bitmap_zero(mask.high_slices, SLICE_NUM_HIGH); + bitmap_zero(potential_mask.high_slices, SLICE_NUM_HIGH); + bitmap_zero(compat_mask.high_slices, SLICE_NUM_HIGH); + } /* Sanity checks */ BUG_ON(mm->task_size == 0); @@ -591,7 +610,9 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, convert: slice_andnot_mask(&mask, &good_mask); slice_andnot_mask(&mask, &compat_mask); - if (mask.low_slices || !bitmap_empty(mask.high_slices, SLICE_NUM_HIGH)) { + if (mask.low_slices || + (SLICE_NUM_HIGH && + !bitmap_empty(mask.high_slices, SLICE_NUM_HIGH))) { slice_convert(mm, mask, psize); if (psize > MMU_PAGE_BASE) on_each_cpu(slice_flush_segments, mm, 1); -- cgit v1.2.3 From aa0ab02ba992eb956934b21373e0138211486ddd Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Thu, 22 Feb 2018 15:27:26 +0100 Subject: powerpc/mm/slice: Fix hugepage allocation at hint address on 8xx On the 8xx, the page size is set in the PMD entry and applies to all pages of the page table pointed to by that PMD entry. When an app has some regular pages allocated (e.g. see below) and tries to mmap() a huge page at a hint address covered by the same PMD entry, the kernel accepts the hint although the 8xx cannot handle different page sizes in the same PMD entry.
10000000-10001000 r-xp 00000000 00:0f 2597 /root/malloc 10010000-10011000 rwxp 00000000 00:0f 2597 /root/malloc mmap(0x10080000, 524288, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS|0x40000, -1, 0) = 0x10080000 This results in the app remaining forever in do_page_fault()/hugetlb_fault() and when interrupting that app, we get the following warning: [162980.035629] WARNING: CPU: 0 PID: 2777 at arch/powerpc/mm/hugetlbpage.c:354 hugetlb_free_pgd_range+0xc8/0x1e4 [162980.035699] CPU: 0 PID: 2777 Comm: malloc Tainted: G W 4.14.6 #85 [162980.035744] task: c67e2c00 task.stack: c668e000 [162980.035783] NIP: c000fe18 LR: c00e1eec CTR: c00f90c0 [162980.035830] REGS: c668fc20 TRAP: 0700 Tainted: G W (4.14.6) [162980.035854] MSR: 00029032 CR: 24044224 XER: 20000000 [162980.036003] [162980.036003] GPR00: c00e1eec c668fcd0 c67e2c00 00000010 c6869410 10080000 00000000 77fb4000 [162980.036003] GPR08: ffff0001 0683c001 00000000 ffffff80 44028228 10018a34 00004008 418004fc [162980.036003] GPR16: c668e000 00040100 c668e000 c06c0000 c668fe78 c668e000 c6835ba0 c668fd48 [162980.036003] GPR24: 00000000 73ffffff 74000000 00000001 77fb4000 100fffff 10100000 10100000 [162980.036743] NIP [c000fe18] hugetlb_free_pgd_range+0xc8/0x1e4 [162980.036839] LR [c00e1eec] free_pgtables+0x12c/0x150 [162980.036861] Call Trace: [162980.036939] [c668fcd0] [c00f0774] unlink_anon_vmas+0x1c4/0x214 (unreliable) [162980.037040] [c668fd10] [c00e1eec] free_pgtables+0x12c/0x150 [162980.037118] [c668fd40] [c00eabac] exit_mmap+0xe8/0x1b4 [162980.037210] [c668fda0] [c0019710] mmput.part.9+0x20/0xd8 [162980.037301] [c668fdb0] [c001ecb0] do_exit+0x1f0/0x93c [162980.037386] [c668fe00] [c001f478] do_group_exit+0x40/0xcc [162980.037479] [c668fe10] [c002a76c] get_signal+0x47c/0x614 [162980.037570] [c668fe70] [c0007840] do_signal+0x54/0x244 [162980.037654] [c668ff30] [c0007ae8] do_notify_resume+0x34/0x88 [162980.037744] [c668ff40] [c000dae8] do_user_signal+0x74/0xc4 [162980.037781] Instruction dump: [162980.037821] 7fdff378 81370000 54a3463a 80890020 7d24182e 7c841a14 712a0004 4082ff94 [162980.038014] 2f890000 419e0010 712a0ff0 408200e0 <0fe00000> 54a9000a 7f984840 419d0094 [162980.038216] ---[ end trace c0ceeca8e7a5800a ]--- [162980.038754] BUG: non-zero nr_ptes on freeing mm: 1 [162985.363322] BUG: non-zero nr_ptes on freeing mm: -1 In order to fix this, this patch uses the address space "slices" implemented for BOOK3S/64 and enhanced to support PPC32 by the preceding patch. This patch modifies the context.id on the 8xx to be in the range [1:16] instead of [0:15] in order to identify context.id == 0 as an uninitialised context, as done on BOOK3S. This patch activates CONFIG_PPC_MM_SLICES when CONFIG_HUGETLB_PAGE is selected for the 8xx. Although we could in theory have as many slices as PMD entries, the current slices implementation limits the number of low slices to 16. This limitation does not prevent us from fixing the initial issue, although it is suboptimal. It will be cured in a subsequent patch.
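For clarity, the id mapping can be expressed as a small hypothetical helper (not part of the patch): software context ids run from 1 to 16, id 0 marks an uninitialised context, and the value programmed into SPRN_M_CASID is id - 1, so the hardware still sees its usual 0-15 range.

#include <assert.h>

/* Hypothetical illustration of the 8xx context id mapping. */
static unsigned int casid_from_context_id(unsigned int id)
{
	assert(id >= 1 && id <= 16);	/* id 0 means "not initialised" */
	return id - 1;			/* hardware CASID range is 0..15 */
}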
Fixes: 4b91428699477 ("powerpc/8xx: Implement support of hugepages") Signed-off-by: Christophe Leroy Reviewed-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/mmu-8xx.h | 6 ++++++ arch/powerpc/kernel/setup-common.c | 2 ++ arch/powerpc/mm/8xx_mmu.c | 2 +- arch/powerpc/mm/hugetlbpage.c | 2 ++ arch/powerpc/mm/mmu_context_nohash.c | 18 ++++++++++++++++-- arch/powerpc/platforms/Kconfig.cputype | 1 + 6 files changed, 28 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h index 2f806e329648..b324ab46d838 100644 --- a/arch/powerpc/include/asm/mmu-8xx.h +++ b/arch/powerpc/include/asm/mmu-8xx.h @@ -191,6 +191,12 @@ typedef struct { unsigned int id; unsigned int active; unsigned long vdso_base; +#ifdef CONFIG_PPC_MM_SLICES + u16 user_psize; /* page size index */ + u64 low_slices_psize; /* page size encodings */ + unsigned char high_slices_psize[0]; + unsigned long slb_addr_limit; +#endif } mm_context_t; #define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000) diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index d73ec518ef80..a6002f9449b1 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -919,6 +919,8 @@ void __init setup_arch(char **cmdline_p) #ifdef CONFIG_PPC64 if (!radix_enabled()) init_mm.context.slb_addr_limit = DEFAULT_MAP_WINDOW_USER64; +#elif defined(CONFIG_PPC_8xx) + init_mm.context.slb_addr_limit = DEFAULT_MAP_WINDOW; #else #error "context.addr_limit not initialized." #endif diff --git a/arch/powerpc/mm/8xx_mmu.c b/arch/powerpc/mm/8xx_mmu.c index 849f50cd62f2..cf77d755246d 100644 --- a/arch/powerpc/mm/8xx_mmu.c +++ b/arch/powerpc/mm/8xx_mmu.c @@ -192,7 +192,7 @@ void set_context(unsigned long id, pgd_t *pgd) mtspr(SPRN_M_TW, __pa(pgd) - offset); /* Update context */ - mtspr(SPRN_M_CASID, id); + mtspr(SPRN_M_CASID, id - 1); /* sync */ mb(); } diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 876da2bc1796..590be3fa0ce2 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -553,9 +553,11 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, struct hstate *hstate = hstate_file(file); int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate)); +#ifdef CONFIG_PPC_RADIX_MMU if (radix_enabled()) return radix__hugetlb_get_unmapped_area(file, addr, len, pgoff, flags); +#endif return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1); } #endif diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c index 4554d6527682..d98f7e5c141b 100644 --- a/arch/powerpc/mm/mmu_context_nohash.c +++ b/arch/powerpc/mm/mmu_context_nohash.c @@ -331,6 +331,20 @@ int init_new_context(struct task_struct *t, struct mm_struct *mm) { pr_hard("initing context for mm @%p\n", mm); +#ifdef CONFIG_PPC_MM_SLICES + if (!mm->context.slb_addr_limit) + mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW; + + /* + * We have MMU_NO_CONTEXT set to be ~0. Hence check + * explicitly against context.id == 0. This ensures that we properly + * initialize context slice details for newly allocated mm's (which will + * have id == 0) and don't alter context slice inherited via fork (which + * will have id != 0). 
+ */ + if (mm->context.id == 0) + slice_set_user_psize(mm, mmu_virtual_psize); +#endif mm->context.id = MMU_NO_CONTEXT; mm->context.active = 0; return 0; @@ -428,8 +442,8 @@ void __init mmu_context_init(void) * -- BenH */ if (mmu_has_feature(MMU_FTR_TYPE_8xx)) { - first_context = 0; - last_context = 15; + first_context = 1; + last_context = 16; no_selective_tlbil = true; } else if (mmu_has_feature(MMU_FTR_TYPE_47x)) { first_context = 1; diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index a429d859f15d..5a8b1bf1e819 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -326,6 +326,7 @@ config PPC_BOOK3E_MMU config PPC_MM_SLICES bool default y if PPC_BOOK3S_64 + default y if PPC_8xx && HUGETLB_PAGE default n config PPC_HAVE_PMU_SUPPORT -- cgit v1.2.3 From 15472423ce47d6397d08d48daaae8590c9f9f242 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Thu, 22 Feb 2018 15:27:28 +0100 Subject: powerpc/mm/slice: Allow up to 64 low slices While the implementation of the "slices" address space allows a significant amount of high slices, it limits the number of low slices to 16 due to the use of a single u64 low_slices_psize element in struct mm_context_t On the 8xx, the minimum slice size is the size of the area covered by a single PMD entry, ie 4M in 4K pages mode and 64M in 16K pages mode. This means we could have at least 64 slices. In order to override this limitation, this patch switches the handling of low_slices_psize to char array as done already for high_slices_psize. Signed-off-by: Christophe Leroy Reviewed-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/book3s/64/mmu.h | 3 +- arch/powerpc/include/asm/mmu-8xx.h | 7 +++- arch/powerpc/include/asm/paca.h | 2 +- arch/powerpc/kernel/paca.c | 3 +- arch/powerpc/mm/hash_utils_64.c | 13 ++++---- arch/powerpc/mm/slb_low.S | 8 +++-- arch/powerpc/mm/slice.c | 57 +++++++++++++++++--------------- 7 files changed, 52 insertions(+), 41 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h index 0abeb0e2d616..bef6e39ed63a 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu.h +++ b/arch/powerpc/include/asm/book3s/64/mmu.h @@ -91,7 +91,8 @@ typedef struct { struct npu_context *npu_context; #ifdef CONFIG_PPC_MM_SLICES - u64 low_slices_psize; /* SLB page size encodings */ + /* SLB page size encodings*/ + unsigned char low_slices_psize[BITS_PER_LONG / BITS_PER_BYTE]; unsigned char high_slices_psize[SLICE_ARRAY_SIZE]; unsigned long slb_addr_limit; #else diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h index b324ab46d838..d3d7e79140c6 100644 --- a/arch/powerpc/include/asm/mmu-8xx.h +++ b/arch/powerpc/include/asm/mmu-8xx.h @@ -186,6 +186,11 @@ #define M_APG2 0x00000040 #define M_APG3 0x00000060 +#ifdef CONFIG_PPC_MM_SLICES +#include +#define SLICE_ARRAY_SIZE (1 << (32 - SLICE_LOW_SHIFT - 1)) +#endif + #ifndef __ASSEMBLY__ typedef struct { unsigned int id; @@ -193,7 +198,7 @@ typedef struct { unsigned long vdso_base; #ifdef CONFIG_PPC_MM_SLICES u16 user_psize; /* page size index */ - u64 low_slices_psize; /* page size encodings */ + unsigned char low_slices_psize[SLICE_ARRAY_SIZE]; unsigned char high_slices_psize[0]; unsigned long slb_addr_limit; #endif diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index b62c31037cad..d2bf71dddbef 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ 
-141,7 +141,7 @@ struct paca_struct { #ifdef CONFIG_PPC_BOOK3S mm_context_id_t mm_ctx_id; #ifdef CONFIG_PPC_MM_SLICES - u64 mm_ctx_low_slices_psize; + unsigned char mm_ctx_low_slices_psize[BITS_PER_LONG / BITS_PER_BYTE]; unsigned char mm_ctx_high_slices_psize[SLICE_ARRAY_SIZE]; unsigned long mm_ctx_slb_addr_limit; #else diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index 95ffedf14885..2fd563d05831 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -265,7 +265,8 @@ void copy_mm_to_paca(struct mm_struct *mm) #ifdef CONFIG_PPC_MM_SLICES VM_BUG_ON(!mm->context.slb_addr_limit); get_paca()->mm_ctx_slb_addr_limit = mm->context.slb_addr_limit; - get_paca()->mm_ctx_low_slices_psize = context->low_slices_psize; + memcpy(&get_paca()->mm_ctx_low_slices_psize, + &context->low_slices_psize, sizeof(context->low_slices_psize)); memcpy(&get_paca()->mm_ctx_high_slices_psize, &context->high_slices_psize, TASK_SLICE_ARRAY_SZ(mm)); #else /* CONFIG_PPC_MM_SLICES */ diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index cf290d415dcd..b578148d89e6 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -1110,19 +1110,18 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap) #ifdef CONFIG_PPC_MM_SLICES static unsigned int get_paca_psize(unsigned long addr) { - u64 lpsizes; - unsigned char *hpsizes; + unsigned char *psizes; unsigned long index, mask_index; if (addr < SLICE_LOW_TOP) { - lpsizes = get_paca()->mm_ctx_low_slices_psize; + psizes = get_paca()->mm_ctx_low_slices_psize; index = GET_LOW_SLICE_INDEX(addr); - return (lpsizes >> (index * 4)) & 0xF; + } else { + psizes = get_paca()->mm_ctx_high_slices_psize; + index = GET_HIGH_SLICE_INDEX(addr); } - hpsizes = get_paca()->mm_ctx_high_slices_psize; - index = GET_HIGH_SLICE_INDEX(addr); mask_index = index & 0x1; - return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xF; + return (psizes[index >> 1] >> (mask_index * 4)) & 0xF; } #else diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S index 2cf5ef3fc50d..2c7c717fd2ea 100644 --- a/arch/powerpc/mm/slb_low.S +++ b/arch/powerpc/mm/slb_low.S @@ -200,10 +200,12 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) 5: /* * Handle lpsizes - * r9 is get_paca()->context.low_slices_psize, r11 is index + * r9 is get_paca()->context.low_slices_psize[index], r11 is mask_index */ - ld r9,PACALOWSLICESPSIZE(r13) - mr r11,r10 + srdi r11,r10,1 /* index */ + addi r9,r11,PACALOWSLICESPSIZE + lbzx r9,r13,r9 /* r9 is lpsizes[r11] */ + rldicl r11,r10,0,63 /* r11 = r10 & 0x1 */ 6: sldi r11,r11,2 /* index * 4 */ /* Extract the psize and multiply to get an array offset */ diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index 0beca1ba2282..5e9e1e57d580 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -150,19 +150,21 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret, static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_mask *ret, unsigned long high_limit) { - unsigned char *hpsizes; + unsigned char *hpsizes, *lpsizes; int index, mask_index; unsigned long i; - u64 lpsizes; ret->low_slices = 0; if (SLICE_NUM_HIGH) bitmap_zero(ret->high_slices, SLICE_NUM_HIGH); lpsizes = mm->context.low_slices_psize; - for (i = 0; i < SLICE_NUM_LOW; i++) - if (((lpsizes >> (i * 4)) & 0xf) == psize) + for (i = 0; i < SLICE_NUM_LOW; i++) { + mask_index = i & 0x1; + index = i >> 1; + if (((lpsizes[index] >> (mask_index * 4)) & 0xf) == 
psize) ret->low_slices |= 1u << i; + } if (high_limit <= SLICE_LOW_TOP) return; @@ -218,8 +220,7 @@ static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psiz { int index, mask_index; /* Write the new slice psize bits */ - unsigned char *hpsizes; - u64 lpsizes; + unsigned char *hpsizes, *lpsizes; unsigned long i, flags; slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize); @@ -232,12 +233,13 @@ static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psiz lpsizes = mm->context.low_slices_psize; for (i = 0; i < SLICE_NUM_LOW; i++) - if (mask.low_slices & (1u << i)) - lpsizes = (lpsizes & ~(0xful << (i * 4))) | - (((unsigned long)psize) << (i * 4)); - - /* Assign the value back */ - mm->context.low_slices_psize = lpsizes; + if (mask.low_slices & (1u << i)) { + mask_index = i & 0x1; + index = i >> 1; + lpsizes[index] = (lpsizes[index] & + ~(0xf << (mask_index * 4))) | + (((unsigned long)psize) << (mask_index * 4)); + } hpsizes = mm->context.high_slices_psize; for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++) { @@ -644,7 +646,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr) { - unsigned char *hpsizes; + unsigned char *psizes; int index, mask_index; /* @@ -658,15 +660,14 @@ unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr) #endif } if (addr < SLICE_LOW_TOP) { - u64 lpsizes; - lpsizes = mm->context.low_slices_psize; + psizes = mm->context.low_slices_psize; index = GET_LOW_SLICE_INDEX(addr); - return (lpsizes >> (index * 4)) & 0xf; + } else { + psizes = mm->context.high_slices_psize; + index = GET_HIGH_SLICE_INDEX(addr); } - hpsizes = mm->context.high_slices_psize; - index = GET_HIGH_SLICE_INDEX(addr); mask_index = index & 0x1; - return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xf; + return (psizes[index >> 1] >> (mask_index * 4)) & 0xf; } EXPORT_SYMBOL_GPL(get_slice_psize); @@ -687,8 +688,8 @@ EXPORT_SYMBOL_GPL(get_slice_psize); void slice_set_user_psize(struct mm_struct *mm, unsigned int psize) { int index, mask_index; - unsigned char *hpsizes; - unsigned long flags, lpsizes; + unsigned char *hpsizes, *lpsizes; + unsigned long flags; unsigned int old_psize; int i; @@ -706,12 +707,14 @@ void slice_set_user_psize(struct mm_struct *mm, unsigned int psize) wmb(); lpsizes = mm->context.low_slices_psize; - for (i = 0; i < SLICE_NUM_LOW; i++) - if (((lpsizes >> (i * 4)) & 0xf) == old_psize) - lpsizes = (lpsizes & ~(0xful << (i * 4))) | - (((unsigned long)psize) << (i * 4)); - /* Assign the value back */ - mm->context.low_slices_psize = lpsizes; + for (i = 0; i < SLICE_NUM_LOW; i++) { + mask_index = i & 0x1; + index = i >> 1; + if (((lpsizes[index] >> (mask_index * 4)) & 0xf) == old_psize) + lpsizes[index] = (lpsizes[index] & + ~(0xf << (mask_index * 4))) | + (((unsigned long)psize) << (mask_index * 4)); + } hpsizes = mm->context.high_slices_psize; for (i = 0; i < SLICE_NUM_HIGH; i++) { -- cgit v1.2.3 From 4bd13772eeb2b9989789e8dbc183026867168db4 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Thu, 22 Feb 2018 15:27:30 +0100 Subject: powerpc/8xx: Increase number of slices to 64 On the 8xx, the minimum slice size is the size of the area covered by a single PMD entry, ie 4M in 4K pages mode and 64M in 16K pages mode. This patch increases the number of slices from 16 to 64 on the 8xx. 
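The sizing follows from simple address-space arithmetic: with SLICE_LOW_SHIFT set to 26, each low slice covers 2^26 bytes (64M, the area covered by one PMD entry in 16K pages mode), so the 4G 32-bit address space splits into exactly 64 slices. A quick stand-alone check (illustrative only):

#include <stdio.h>

int main(void)
{
	const unsigned long long slice_low_top = 0x100000000ull;	/* 4G */
	const unsigned int slice_low_shift = 26;			/* new value */

	printf("%llu slices of %lluM each\n",
	       slice_low_top >> slice_low_shift,	/* prints 64 */
	       (1ull << slice_low_shift) >> 20);	/* prints 64 */
	return 0;
}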
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/nohash/32/slice.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/nohash/32/slice.h b/arch/powerpc/include/asm/nohash/32/slice.h index 95d532e18092..777d62e40ac0 100644 --- a/arch/powerpc/include/asm/nohash/32/slice.h +++ b/arch/powerpc/include/asm/nohash/32/slice.h @@ -4,7 +4,7 @@ #ifdef CONFIG_PPC_MM_SLICES -#define SLICE_LOW_SHIFT 28 +#define SLICE_LOW_SHIFT 26 /* 64 slices */ #define SLICE_LOW_TOP (0x100000000ull) #define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT) #define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT) -- cgit v1.2.3 From 47d703e1d55f068c737c0c9f0c18f080fb08242b Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Tue, 30 Jan 2018 08:23:51 -0600 Subject: macintosh: Add module license to ans-lcd In kernel 4.15, the modprobe step on my PowerBook G4 started complaining that there was no module license for ans-lcd. Signed-off-by: Larry Finger Signed-off-by: Michael Ellerman --- drivers/macintosh/ans-lcd.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/macintosh/ans-lcd.c b/drivers/macintosh/ans-lcd.c index 1de81d922d8a..c8e078b911c7 100644 --- a/drivers/macintosh/ans-lcd.c +++ b/drivers/macintosh/ans-lcd.c @@ -201,3 +201,4 @@ anslcd_exit(void) module_init(anslcd_init); module_exit(anslcd_exit); +MODULE_LICENSE("GPL v2"); -- cgit v1.2.3 From 9c96c932871efeabe82fcfdc952f35810484b510 Mon Sep 17 00:00:00 2001 From: Laurent Dufour Date: Mon, 22 Aug 2016 18:53:02 +0200 Subject: selftest/powerpc: Add test for sigreturn in transaction Ensure that the kernel throws away the suspended transaction when sigreturn() is called; otherwise it fails to restore the signal frame's TM SPRs. Signed-off-by: Laurent Dufour Reviewed-by: Cyril Bur [mpe: Add have_htm() check, minor formatting, add SPDX tag] Signed-off-by: Michael Ellerman --- tools/testing/selftests/powerpc/tm/Makefile | 2 +- tools/testing/selftests/powerpc/tm/tm-sigreturn.c | 92 +++++++++++++++++++++++ 2 files changed, 93 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/powerpc/tm/tm-sigreturn.c diff --git a/tools/testing/selftests/powerpc/tm/Makefile b/tools/testing/selftests/powerpc/tm/Makefile index a23453943ad2..1c9e5d0a0491 100644 --- a/tools/testing/selftests/powerpc/tm/Makefile +++ b/tools/testing/selftests/powerpc/tm/Makefile @@ -4,7 +4,7 @@ SIGNAL_CONTEXT_CHK_TESTS := tm-signal-context-chk-gpr tm-signal-context-chk-fpu TEST_GEN_PROGS := tm-resched-dscr tm-syscall tm-signal-msr-resv tm-signal-stack \ tm-vmxcopy tm-fork tm-tar tm-tmspr tm-vmx-unavail tm-unavailable tm-trap \ - $(SIGNAL_CONTEXT_CHK_TESTS) + $(SIGNAL_CONTEXT_CHK_TESTS) tm-sigreturn include ../../lib.mk diff --git a/tools/testing/selftests/powerpc/tm/tm-sigreturn.c b/tools/testing/selftests/powerpc/tm/tm-sigreturn.c new file mode 100644 index 000000000000..85d63449243b --- /dev/null +++ b/tools/testing/selftests/powerpc/tm/tm-sigreturn.c @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2015, Laurent Dufour, IBM Corp. + * + * Test the kernel's signal returning code to check reclaim is done if the + * sigreturn() is called while in a transaction (suspended since active is + * already dropped through the system call path). + * + * The kernel must discard the transaction when entering sigreturn, since + * restoring the potential TM SPRs from the signal frame requires not being + * in a transaction.
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include "tm.h" +#include "utils.h" + + +void handler(int sig) +{ + uint64_t ret; + + asm __volatile__( + "li 3,1 ;" + "tbegin. ;" + "beq 1f ;" + "li 3,0 ;" + "tsuspend. ;" + "1: ;" + "std%X[ret] 3, %[ret] ;" + : [ret] "=m"(ret) + : + : "memory", "3", "cr0"); + + if (ret) + exit(1); + + /* + * We return from the signal handle while in a suspended transaction + */ +} + + +int tm_sigreturn(void) +{ + struct sigaction sa; + uint64_t ret = 0; + + SKIP_IF(!have_htm()); + + memset(&sa, 0, sizeof(sa)); + sa.sa_handler = handler; + sigemptyset(&sa.sa_mask); + + if (sigaction(SIGSEGV, &sa, NULL)) + exit(1); + + asm __volatile__( + "tbegin. ;" + "beq 1f ;" + "li 3,0 ;" + "std 3,0(3) ;" /* trigger SEGV */ + "li 3,1 ;" + "std%X[ret] 3,%[ret] ;" + "tend. ;" + "b 2f ;" + "1: ;" + "li 3,2 ;" + "std%X[ret] 3,%[ret] ;" + "2: ;" + : [ret] "=m"(ret) + : + : "memory", "3", "cr0"); + + if (ret != 2) + exit(1); + + exit(0); +} + +int main(void) +{ + return test_harness(tm_sigreturn, "tm_sigreturn"); +} -- cgit v1.2.3 From 2f65272a2a304ec6aa32ad9a45b1506929406321 Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Tue, 16 Jan 2018 18:50:39 -0800 Subject: powerpc/powernv/vas: Remove a stray line in Makefile Remove a bogus line from arch/powerpc/platforms/powernv/Makefile that was added by commit ece4e51 ("powerpc/vas: Export HVWC to debugfs"). Signed-off-by: Sukadev Bhattiprolu Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/powernv/Makefile | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile index 6c9d5199a7e2..703a350a7f4e 100644 --- a/arch/powerpc/platforms/powernv/Makefile +++ b/arch/powerpc/platforms/powernv/Makefile @@ -16,5 +16,4 @@ obj-$(CONFIG_OPAL_PRD) += opal-prd.o obj-$(CONFIG_PERF_EVENTS) += opal-imc.o obj-$(CONFIG_PPC_MEMTRACE) += memtrace.o obj-$(CONFIG_PPC_VAS) += vas.o vas-window.o vas-debug.o -obj-$(CONFIG_PPC_FTW) += nx-ftw.o obj-$(CONFIG_OCXL_BASE) += ocxl.o -- cgit v1.2.3 From 1373cc31074dff09419d616c2ce911eea624cff5 Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Tue, 19 Dec 2017 17:10:29 -0800 Subject: powerpc/powernv/vas: Fix order of cleanup in vas_window_init_dbgdir() Fix the order of cleanup to ensure we free the name buffer in case of an error creating 'hvwc' or 'info' files. 
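The rule being restored here is that error labels must unwind in the reverse order of setup, each label falling through to the ones below it. A generic, self-contained sketch of the pattern (plain C, with malloc/free standing in for the debugfs calls; fail_late models a failure creating the files):

#include <stdlib.h>

struct win { char *name; void *dir; };

/* Each label undoes one setup step and falls through to the next, so a
 * late failure still frees everything set up earlier. */
static int setup_sketch(struct win *w, int fail_late)
{
	w->name = malloc(16);
	if (!w->name)
		return -1;

	w->dir = malloc(64);		/* stands in for debugfs_create_dir() */
	if (!w->dir)
		goto free_name;

	if (fail_late)			/* e.g. creating 'hvwc' or 'info' fails */
		goto remove_dir;
	return 0;

remove_dir:
	free(w->dir);
	w->dir = NULL;
free_name:
	free(w->name);
	w->name = NULL;
	return -1;
}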
Signed-off-by: Sukadev Bhattiprolu Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/powernv/vas-debug.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/platforms/powernv/vas-debug.c b/arch/powerpc/platforms/powernv/vas-debug.c index ca22f1eae050..b4de4c6fd38b 100644 --- a/arch/powerpc/platforms/powernv/vas-debug.c +++ b/arch/powerpc/platforms/powernv/vas-debug.c @@ -166,13 +166,13 @@ void vas_window_init_dbgdir(struct vas_window *window) return; -free_name: - kfree(window->dbgname); - window->dbgname = NULL; - remove_dir: debugfs_remove_recursive(window->dbgdir); window->dbgdir = NULL; + +free_name: + kfree(window->dbgname); + window->dbgname = NULL; } void vas_instance_init_dbgdir(struct vas_instance *vinst) -- cgit v1.2.3 From 58935176ad17976b7a7f6ea25c0ceb2ca4308a30 Mon Sep 17 00:00:00 2001 From: Mathieu Malaterre Date: Wed, 14 Feb 2018 22:15:18 +0100 Subject: powerpc/via-pmu: Fix section mismatch warning Make the struct via_pmu_driver const to avoid the following warning: WARNING: vmlinux.o(.data+0x4739c): Section mismatch in reference from the variable via_pmu_driver to the function .init.text:pmu_init() The variable via_pmu_driver references the function __init pmu_init() If the reference is valid then annotate the variable with __init* or __refdata (see linux/init.h) or name the variable: *_template, *_timer, *_sht, *_ops, *_probe, *_probe_one, *_console Signed-off-by: Mathieu Malaterre Suggested-by: Laurent Vivier Reviewed-by: Laurent Vivier Signed-off-by: Michael Ellerman --- drivers/macintosh/via-pmu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c index 94c0f3f7df69..fc56c7067732 100644 --- a/drivers/macintosh/via-pmu.c +++ b/drivers/macintosh/via-pmu.c @@ -198,7 +198,7 @@ static const struct file_operations pmu_battery_proc_fops; static const struct file_operations pmu_options_proc_fops; #ifdef CONFIG_ADB -struct adb_driver via_pmu_driver = { +const struct adb_driver via_pmu_driver = { "PMU", pmu_probe, pmu_init, -- cgit v1.2.3 From b395e55b49ecd56ea28dc629f4ca4c6239fc07c3 Mon Sep 17 00:00:00 2001 From: Gustavo Romero Date: Mon, 5 Mar 2018 15:48:55 -0500 Subject: selftests/powerpc: Skip tm-unavailable if TM is not enabled Some processor revisions do not support transactional memory, and additionally kernel support can be disabled. In either case the tm-unavailable test should be skipped, otherwise it will fail with a SIGILL. This commit also sets this selftest to be called through the test harness, as is done for other TM selftests. Finally, it avoids using "ping" as a thread name since it's ambiguous and can be confusing when shown, for instance, in a kernel backtrace log.
Fixes: 77fad8bfb1d2 ("selftests/powerpc: Check FP/VEC on exception in TM") Signed-off-by: Gustavo Romero Reviewed-by: Cyril Bur Signed-off-by: Michael Ellerman --- .../testing/selftests/powerpc/tm/tm-unavailable.c | 24 ++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/tools/testing/selftests/powerpc/tm/tm-unavailable.c b/tools/testing/selftests/powerpc/tm/tm-unavailable.c index e6a0fad2bfd0..156c8e750259 100644 --- a/tools/testing/selftests/powerpc/tm/tm-unavailable.c +++ b/tools/testing/selftests/powerpc/tm/tm-unavailable.c @@ -80,7 +80,7 @@ bool is_failure(uint64_t condition_reg) return ((condition_reg >> 28) & 0xa) == 0xa; } -void *ping(void *input) +void *tm_una_ping(void *input) { /* @@ -280,7 +280,7 @@ void *ping(void *input) } /* Thread to force context switch */ -void *pong(void *not_used) +void *tm_una_pong(void *not_used) { /* Wait thread get its name "pong". */ if (DEBUG) @@ -311,11 +311,11 @@ void test_fp_vec(int fp, int vec, pthread_attr_t *attr) do { int rc; - /* Bind 'ping' to CPU 0, as specified in 'attr'. */ - rc = pthread_create(&t0, attr, ping, (void *) &flags); + /* Bind to CPU 0, as specified in 'attr'. */ + rc = pthread_create(&t0, attr, tm_una_ping, (void *) &flags); if (rc) pr_err(rc, "pthread_create()"); - rc = pthread_setname_np(t0, "ping"); + rc = pthread_setname_np(t0, "tm_una_ping"); if (rc) pr_warn(rc, "pthread_setname_np"); rc = pthread_join(t0, &ret_value); @@ -333,13 +333,15 @@ void test_fp_vec(int fp, int vec, pthread_attr_t *attr) } } -int main(int argc, char **argv) +int tm_unavailable_test(void) { int rc, exception; /* FP = 0, VEC = 1, VSX = 2 */ pthread_t t1; pthread_attr_t attr; cpu_set_t cpuset; + SKIP_IF(!have_htm()); + /* Set only CPU 0 in the mask. Both threads will be bound to CPU 0. */ CPU_ZERO(&cpuset); CPU_SET(0, &cpuset); @@ -354,12 +356,12 @@ int main(int argc, char **argv) if (rc) pr_err(rc, "pthread_attr_setaffinity_np()"); - rc = pthread_create(&t1, &attr /* Bind 'pong' to CPU 0 */, pong, NULL); + rc = pthread_create(&t1, &attr /* Bind to CPU 0 */, tm_una_pong, NULL); if (rc) pr_err(rc, "pthread_create()"); /* Name it for systemtap convenience */ - rc = pthread_setname_np(t1, "pong"); + rc = pthread_setname_np(t1, "tm_una_pong"); if (rc) pr_warn(rc, "pthread_create()"); @@ -394,3 +396,9 @@ int main(int argc, char **argv) exit(0); } } + +int main(int argc, char **argv) +{ + test_harness_set_timeout(220); + return test_harness(tm_unavailable_test, "tm_unavailable_test"); +} -- cgit v1.2.3 From e1368d0c9edbc366e45216e7295fd61ae55c2b12 Mon Sep 17 00:00:00 2001 From: Vaibhav Jain Date: Sun, 4 Mar 2018 23:00:25 +0530 Subject: powerpc/xmon: Setup debugger hooks when first break-point is set Presently sysrq key for xmon('x') is registered during kernel init irrespective of the value of kernel param 'xmon'. Thus xmon is enabled even if 'xmon=off' is passed on the kernel command line. However this doesn't enable the kernel debugger hooks needed for instruction or data breakpoints. Thus when a break-point is hit with xmon=off a kernel oops of the form below is reported: Oops: Exception in kernel mode, sig: 5 [#1] < snip > Trace/breakpoint trap To fix this the patch checks and enables debugger hooks when an instruction or data break-point is set via xmon console. 
Signed-off-by: Vaibhav Jain Reviewed-by: Balbir Singh [mpe: Just printf directly, no need for static const char[]] Signed-off-by: Michael Ellerman --- arch/powerpc/xmon/xmon.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 82e1a3ee6e0f..ee4b6071007d 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -1273,6 +1273,16 @@ static long check_bp_loc(unsigned long addr) return 1; } +/* Force enable xmon if not already enabled */ +static inline void force_enable_xmon(void) +{ + /* Enable xmon hooks if needed */ + if (!xmon_on) { + printf("xmon: Enabling debugger hooks\n"); + xmon_on = 1; + } +} + static char *breakpoint_help_string = "Breakpoint command usage:\n" "b show breakpoints\n" @@ -1315,6 +1325,8 @@ bpt_cmds(void) dabr.address &= ~HW_BRK_TYPE_DABR; dabr.enabled = mode | BP_DABR; } + + force_enable_xmon(); break; case 'i': /* bi - hardware instr breakpoint */ @@ -1335,6 +1347,7 @@ bpt_cmds(void) if (bp != NULL) { bp->enabled |= BP_CIABR; iabr = bp; + force_enable_xmon(); } break; #endif @@ -1399,8 +1412,10 @@ bpt_cmds(void) if (!check_bp_loc(a)) break; bp = new_breakpoint(a); - if (bp != NULL) + if (bp != NULL) { bp->enabled |= BP_TRAP; + force_enable_xmon(); + } break; } } -- cgit v1.2.3 From 1ff3b404019adf9d605224e1dce0677a0375d274 Mon Sep 17 00:00:00 2001 From: Vaibhav Jain Date: Sun, 4 Mar 2018 23:01:32 +0530 Subject: powerpc/xmon: Clear all breakpoints when xmon is disabled via debugfs Presently when xmon is disabled by debugfs any existing instruction/data-access breakpoints set are not disabled. This may lead to kernel oops when those breakpoints are hit as the necessary debugger hooks aren't installed. Hence this patch introduces a new function named clear_all_bpt() which is called when xmon is disabled via debugfs. The function will unpatch/clear all the trap and ciabr/dab based breakpoints. Signed-off-by: Vaibhav Jain Reviewed-by: Balbir Singh [mpe: Fix build break when CONFIG_DEBUG_FS=n] Signed-off-by: Michael Ellerman --- arch/powerpc/xmon/xmon.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index ee4b6071007d..3ddf9dd9a55f 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -3664,11 +3664,35 @@ device_initcall(setup_xmon_sysrq); #endif /* CONFIG_MAGIC_SYSRQ */ #ifdef CONFIG_DEBUG_FS +static void clear_all_bpt(void) +{ + int i; + + /* clear/unpatch all breakpoints */ + remove_bpts(); + remove_cpu_bpts(); + + /* Disable all breakpoints */ + for (i = 0; i < NBPTS; ++i) + bpts[i].enabled = 0; + + /* Clear any data or iabr breakpoints */ + if (iabr || dabr.enabled) { + iabr = NULL; + dabr.enabled = 0; + } + + printf("xmon: All breakpoints cleared\n"); +} + static int xmon_dbgfs_set(void *data, u64 val) { xmon_on = !!val; xmon_init(xmon_on); + /* make sure all breakpoints removed when disabling */ + if (!xmon_on) + clear_all_bpt(); return 0; } -- cgit v1.2.3 From 03ebb419b896e0fb2da3f34b57d45e62cafe4009 Mon Sep 17 00:00:00 2001 From: Vaibhav Jain Date: Fri, 9 Feb 2018 09:39:16 +0530 Subject: cxl: Enable NORST bit in PSL_DEBUG register for PSL9 We enable the NORST bit by default for debug afu images to prevent reset of AFU trace-data on a PCI link drop. For production AFU images this bit is always ignored and PSL gets reconfigured anyways thereby resetting the trace data. So setting this bit for non-debug images doesn't have any impact. 
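For reference, the constants decode naturally in the MSB-0 bit numbering used by the kernel's PPC_BIT(); the bit names below are inferred from the values in this patch rather than taken from the PSL documentation, so treat them as assumptions:

/* MSB-0 numbering, as in the kernel's PPC_BIT(); simplified to 64-bit. */
#define PPC_BIT64(bit)		(1ULL << (63 - (bit)))

#define PSL9_DEBUG_NORST	PPC_BIT64(0)	/* 0x8000000000000000 (inferred) */
#define PSL9_DEBUG_DD2		PPC_BIT64(1)	/* 0x4000000000000000 (inferred) */
/* DD1 write: NORST only; DD2 write: NORST | DD2 == 0xC000000000000000 */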
Signed-off-by: Vaibhav Jain Reviewed-by: Christophe Lombard Acked-by: Frederic Barrat Acked-by: Andrew Donnellan Signed-off-by: Michael Ellerman --- drivers/misc/cxl/pci.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index 758842f65a1b..c983f23cc2ed 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -503,8 +503,12 @@ static int init_implementation_adapter_regs_psl9(struct cxl *adapter, if (cxl_is_power9_dd1()) { /* Disabling deadlock counter CAR */ cxl_p1_write(adapter, CXL_PSL9_GP_CT, 0x0020000000000001ULL); - } else - cxl_p1_write(adapter, CXL_PSL9_DEBUG, 0x4000000000000000ULL); + /* Enable NORST */ + cxl_p1_write(adapter, CXL_PSL9_DEBUG, 0x8000000000000000ULL); + } else { + /* Enable NORST and DD2 features */ + cxl_p1_write(adapter, CXL_PSL9_DEBUG, 0xC000000000000000ULL); + } return 0; } -- cgit v1.2.3 From 02b63b420223db3e33e19cc0aaf346371e8efe48 Mon Sep 17 00:00:00 2001 From: Vaibhav Jain Date: Thu, 15 Feb 2018 11:49:36 +0530 Subject: cxl: Remove function write_timebase_ctrl_psl9() for PSL9 The contents of the PSL_TB_CTLSTAT register have changed in PSL9, and the whole register is now read-only. Hence we don't need an sl_ops implementation of 'write_timebase_ctrl' to populate this register for PSL9. This patch therefore removes function write_timebase_ctrl_psl9() and its references from the code. Signed-off-by: Vaibhav Jain Acked-by: Andrew Donnellan Acked-by: Frederic Barrat Signed-off-by: Michael Ellerman --- drivers/misc/cxl/pci.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index c983f23cc2ed..9bc30c20b66b 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -572,12 +572,6 @@ static int init_implementation_adapter_regs_xsl(struct cxl *adapter, struct pci_ /* For the PSL this is a multiple for 0 < n <= 7: */ #define PSL_2048_250MHZ_CYCLES 1 -static void write_timebase_ctrl_psl9(struct cxl *adapter) -{ - cxl_p1_write(adapter, CXL_PSL9_TB_CTLSTAT, - TBSYNC_CNT(2 * PSL_2048_250MHZ_CYCLES)); -} - static void write_timebase_ctrl_psl8(struct cxl *adapter) { cxl_p1_write(adapter, CXL_PSL_TB_CTLSTAT, @@ -639,7 +633,8 @@ static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev) * Setup PSL Timebase Control and Status register * with the recommended Timebase Sync Count value */ - adapter->native->sl_ops->write_timebase_ctrl(adapter); + if (adapter->native->sl_ops->write_timebase_ctrl) + adapter->native->sl_ops->write_timebase_ctrl(adapter); /* Enable PSL Timebase */ cxl_p1_write(adapter, CXL_PSL_Control, 0x0000000000000000); @@ -1805,7 +1800,6 @@ static const struct cxl_service_layer_ops psl9_ops = { .psl_irq_dump_registers = cxl_native_irq_dump_regs_psl9, .err_irq_dump_registers = cxl_native_err_irq_dump_regs_psl9, .debugfs_stop_trace = cxl_stop_trace_psl9, - .write_timebase_ctrl = write_timebase_ctrl_psl9, .timebase_read = timebase_read_psl9, .capi_mode = OPAL_PHB_CAPI_MODE_CAPI, .needs_reset_before_disable = true, -- cgit v1.2.3 From 94322ed8e857e3b2a33cf75118051af9baaa110f Mon Sep 17 00:00:00 2001 From: Vaibhav Jain Date: Thu, 15 Feb 2018 21:19:24 +0530 Subject: cxl: Check if PSL data-cache is available before issuing flush request PSL9D doesn't have a data-cache that needs to be flushed before resetting the card. However, when cxl tries to flush the data-cache on such a card, it times out, as the PSL_Control register never indicates completion of the flush operation due to the missing data-cache.
This is usually indicated in the kernel logs with this message: "WARNING: cache flush timed out" To fix this, the patch checks the PSL_Debug register's CDC field (bit 27), which indicates the absence of a data-cache, and sets a flag 'no_data_cache' in 'struct cxl_native' to record this. When cxl_data_cache_flush() is called, it checks the flag and, if set, bails out early without requesting a data-cache flush operation from the PSL. Signed-off-by: Vaibhav Jain Acked-by: Andrew Donnellan Acked-by: Frederic Barrat Signed-off-by: Michael Ellerman --- drivers/misc/cxl/cxl.h | 4 ++++ drivers/misc/cxl/native.c | 11 ++++++++++- drivers/misc/cxl/pci.c | 19 +++++++++++++------ 3 files changed, 27 insertions(+), 7 deletions(-) diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h index 4f015da78f28..4949b8d5a748 100644 --- a/drivers/misc/cxl/cxl.h +++ b/drivers/misc/cxl/cxl.h @@ -369,6 +369,9 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0}; #define CXL_PSL_TFC_An_AE (1ull << (63-30)) /* Restart PSL with address error */ #define CXL_PSL_TFC_An_R (1ull << (63-31)) /* Restart PSL transaction */ +/****** CXL_PSL_DEBUG *****************************************************/ +#define CXL_PSL_DEBUG_CDC (1ull << (63-27)) /* Coherent Data cache support */ + /****** CXL_XSL9_IERAT_ERAT - CAIA 2 **********************************/ #define CXL_XSL9_IERAT_MLPID (1ull << (63-0)) /* Match LPID */ #define CXL_XSL9_IERAT_MPID (1ull << (63-1)) /* Match PID */ @@ -669,6 +672,7 @@ struct cxl_native { irq_hw_number_t err_hwirq; unsigned int err_virq; u64 ps_off; + bool no_data_cache; /* set if no data cache on the card */ const struct cxl_service_layer_ops *sl_ops; }; diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c index 1b3d7c65ea3f..98f867fcef24 100644 --- a/drivers/misc/cxl/native.c +++ b/drivers/misc/cxl/native.c @@ -353,8 +353,17 @@ int cxl_data_cache_flush(struct cxl *adapter) u64 reg; unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT); - pr_devel("Flushing data cache\n"); + /* + * Do a datacache flush only if the datacache is available. + * On PSL9D the datacache is absent, hence the flush operation + * would timeout. + */ + if (adapter->native->no_data_cache) { + pr_devel("No PSL data cache. Ignoring cache flush req.\n"); + return 0; + } + pr_devel("Flushing data cache\n"); reg = cxl_p1_read(adapter, CXL_PSL_Control); reg |= CXL_PSL_Control_Fr; cxl_p1_write(adapter, CXL_PSL_Control, reg); diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index 9bc30c20b66b..35f486912ddc 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -456,6 +456,7 @@ static int init_implementation_adapter_regs_psl9(struct cxl *adapter, u64 chipid; u32 phb_index; u64 capp_unit_id; + u64 psl_debug; int rc; rc = cxl_calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id); @@ -510,6 +511,16 @@ static int init_implementation_adapter_regs_psl9(struct cxl *adapter, cxl_p1_write(adapter, CXL_PSL9_DEBUG, 0xC000000000000000ULL); } + /* + * Check if the PSL has a data-cache. We need to flush the adapter + * datacache when it is about to be removed. + */ + psl_debug = cxl_p1_read(adapter, CXL_PSL9_DEBUG); + if (psl_debug & CXL_PSL_DEBUG_CDC) { + dev_dbg(&dev->dev, "No data-cache present\n"); + adapter->native->no_data_cache = true; + } + return 0; } @@ -1448,10 +1459,8 @@ int cxl_pci_reset(struct cxl *adapter) /* * The adapter is about to be reset, so ignore errors.
- * Not supported on P9 DD1 */ - if ((cxl_is_power8()) || (!(cxl_is_power9_dd1()))) - cxl_data_cache_flush(adapter); + cxl_data_cache_flush(adapter); /* pcie_warm_reset requests a fundamental pci reset which includes a * PERST assert/deassert. PERST triggers a loading of the image @@ -1934,10 +1943,8 @@ static void cxl_pci_remove_adapter(struct cxl *adapter) /* * Flush adapter datacache as it's about to be removed. - * Not supported on P9 DD1. */ - if ((cxl_is_power8()) || (!(cxl_is_power9_dd1()))) - cxl_data_cache_flush(adapter); + cxl_data_cache_flush(adapter); cxl_deconfigure_adapter(adapter); -- cgit v1.2.3 From 3d4f5f58484c8f86bfd9a97f152db6399d2b88d7 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Sat, 3 Mar 2018 14:54:02 +0530 Subject: powerpc/mm: Drop the function native_register_proc_table() This is left over from the segment table implementation and is no longer called from anywhere. Hence just drop it. Suggested-by: Aneesh Kumar K.V Signed-off-by: Anshuman Khandual Signed-off-by: Michael Ellerman --- arch/powerpc/mm/hash_native_64.c | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c index a0675e91ad7d..993842f1ed60 100644 --- a/arch/powerpc/mm/hash_native_64.c +++ b/arch/powerpc/mm/hash_native_64.c @@ -852,18 +852,6 @@ static void native_flush_hash_range(unsigned long number, int local) local_irq_restore(flags); } -static int native_register_proc_table(unsigned long base, unsigned long page_size, - unsigned long table_size) -{ - unsigned long patb1 = base << 25; /* VSID */ - - patb1 |= (page_size << 5); /* sllp */ - patb1 |= table_size; - - partition_tb->patb1 = cpu_to_be64(patb1); - return 0; -} - void __init hpte_init_native(void) { mmu_hash_ops.hpte_invalidate = native_hpte_invalidate; @@ -875,7 +863,4 @@ void __init hpte_init_native(void) mmu_hash_ops.hpte_clear_all = native_hpte_clear; mmu_hash_ops.flush_hash_range = native_flush_hash_range; mmu_hash_ops.hugepage_invalidate = native_hugepage_invalidate; - - if (cpu_has_feature(CPU_FTR_ARCH_300)) - register_process_table = native_register_proc_table; } -- cgit v1.2.3 From c554ac91ce2213faa91c51c45423770218cccce3 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 27 Feb 2018 12:25:55 +0100 Subject: powerpc/8xx: fix cpm_cascade() dual end of interrupt cpm_cascade() doesn't have to call eoi(), as it is already called by handle_fasteoi_irq(). And cpm_get_irq() will always return an unsigned int, so the test is useless. Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/8xx/m8xx_setup.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/arch/powerpc/platforms/8xx/m8xx_setup.c b/arch/powerpc/platforms/8xx/m8xx_setup.c index e1274db53d48..2188d691a40f 100644 --- a/arch/powerpc/platforms/8xx/m8xx_setup.c +++ b/arch/powerpc/platforms/8xx/m8xx_setup.c @@ -217,13 +217,7 @@ void __noreturn mpc8xx_restart(char *cmd) static void cpm_cascade(struct irq_desc *desc) { - struct irq_chip *chip = irq_desc_get_chip(desc); - int cascade_irq = cpm_get_irq(); - - if (cascade_irq >= 0) - generic_handle_irq(cascade_irq); - - chip->irq_eoi(&desc->irq_data); + generic_handle_irq(cpm_get_irq()); } /* Initialize the internal interrupt controllers.
The number of -- cgit v1.2.3 From 2b74e2a9b39df40a2b489af2d24079617c61ee0e Mon Sep 17 00:00:00 2001 From: Alistair Popple Date: Fri, 2 Mar 2018 16:18:45 +1100 Subject: powerpc/powernv/npu: Fix deadlock in mmio_invalidate() When sending TLB invalidates to the NPU we need to send extra flushes due to a hardware issue. The original implementation would lock all the ATSD MMIO registers sequentially before unlocking and relocking each of them sequentially to do the extra flush. This introduced a deadlock: one thread can hold one ATSD register while waiting for another register to be freed, while a second thread holds that register and waits for the one held by the first thread. For example, if there are two threads and two ATSD registers:

	Thread A	Thread B
	----------------------
	Acquire 1
	Acquire 2
	Release 1
			Acquire 1
	Wait 1		Wait 2

Both threads will be stuck waiting to acquire a register, resulting in an RCU stall warning or soft lockup. This patch solves the deadlock by refactoring the code to ensure registers are not released between flushes and to ensure all registers are either acquired or released together and in order. Fixes: bbd5ff50afff ("powerpc/powernv/npu-dma: Add explicit flush when sending an ATSD") Signed-off-by: Alistair Popple Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/powernv/npu-dma.c | 229 +++++++++++++++++++------------ 1 file changed, 141 insertions(+), 88 deletions(-) diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c index 0a253b64ac5f..77d6061fd0ce 100644 --- a/arch/powerpc/platforms/powernv/npu-dma.c +++ b/arch/powerpc/platforms/powernv/npu-dma.c @@ -410,6 +410,11 @@ struct npu_context { void *priv; }; +struct mmio_atsd_reg { + struct npu *npu; + int reg; +}; + /* * Find a free MMIO ATSD register and mark it in use. Return -ENOSPC * if none are available.
@@ -419,7 +424,7 @@ static int get_mmio_atsd_reg(struct npu *npu) int i; for (i = 0; i < npu->mmio_atsd_count; i++) { - if (!test_and_set_bit(i, &npu->mmio_atsd_usage)) + if (!test_and_set_bit_lock(i, &npu->mmio_atsd_usage)) return i; } @@ -428,86 +433,90 @@ static int get_mmio_atsd_reg(struct npu *npu) static void put_mmio_atsd_reg(struct npu *npu, int reg) { - clear_bit(reg, &npu->mmio_atsd_usage); + clear_bit_unlock(reg, &npu->mmio_atsd_usage); } /* MMIO ATSD register offsets */ #define XTS_ATSD_AVA 1 #define XTS_ATSD_STAT 2 -static int mmio_launch_invalidate(struct npu *npu, unsigned long launch, - unsigned long va) +static void mmio_launch_invalidate(struct mmio_atsd_reg *mmio_atsd_reg, + unsigned long launch, unsigned long va) { - int mmio_atsd_reg; - - do { - mmio_atsd_reg = get_mmio_atsd_reg(npu); - cpu_relax(); - } while (mmio_atsd_reg < 0); + struct npu *npu = mmio_atsd_reg->npu; + int reg = mmio_atsd_reg->reg; __raw_writeq(cpu_to_be64(va), - npu->mmio_atsd_regs[mmio_atsd_reg] + XTS_ATSD_AVA); + npu->mmio_atsd_regs[reg] + XTS_ATSD_AVA); eieio(); - __raw_writeq(cpu_to_be64(launch), npu->mmio_atsd_regs[mmio_atsd_reg]); - - return mmio_atsd_reg; + __raw_writeq(cpu_to_be64(launch), npu->mmio_atsd_regs[reg]); } -static int mmio_invalidate_pid(struct npu *npu, unsigned long pid, bool flush) +static void mmio_invalidate_pid(struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS], + unsigned long pid, bool flush) { + int i; unsigned long launch; - /* IS set to invalidate matching PID */ - launch = PPC_BIT(12); + for (i = 0; i <= max_npu2_index; i++) { + if (mmio_atsd_reg[i].reg < 0) + continue; + + /* IS set to invalidate matching PID */ + launch = PPC_BIT(12); - /* PRS set to process-scoped */ - launch |= PPC_BIT(13); + /* PRS set to process-scoped */ + launch |= PPC_BIT(13); - /* AP */ - launch |= (u64) mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17); + /* AP */ + launch |= (u64) + mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17); - /* PID */ - launch |= pid << PPC_BITLSHIFT(38); + /* PID */ + launch |= pid << PPC_BITLSHIFT(38); - /* No flush */ - launch |= !flush << PPC_BITLSHIFT(39); + /* No flush */ + launch |= !flush << PPC_BITLSHIFT(39); - /* Invalidating the entire process doesn't use a va */ - return mmio_launch_invalidate(npu, launch, 0); + /* Invalidating the entire process doesn't use a va */ + mmio_launch_invalidate(&mmio_atsd_reg[i], launch, 0); + } } -static int mmio_invalidate_va(struct npu *npu, unsigned long va, - unsigned long pid, bool flush) +static void mmio_invalidate_va(struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS], + unsigned long va, unsigned long pid, bool flush) { + int i; unsigned long launch; - /* IS set to invalidate target VA */ - launch = 0; + for (i = 0; i <= max_npu2_index; i++) { + if (mmio_atsd_reg[i].reg < 0) + continue; + + /* IS set to invalidate target VA */ + launch = 0; - /* PRS set to process scoped */ - launch |= PPC_BIT(13); + /* PRS set to process scoped */ + launch |= PPC_BIT(13); - /* AP */ - launch |= (u64) mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17); + /* AP */ + launch |= (u64) + mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17); - /* PID */ - launch |= pid << PPC_BITLSHIFT(38); + /* PID */ + launch |= pid << PPC_BITLSHIFT(38); - /* No flush */ - launch |= !flush << PPC_BITLSHIFT(39); + /* No flush */ + launch |= !flush << PPC_BITLSHIFT(39); - return mmio_launch_invalidate(npu, launch, va); + mmio_launch_invalidate(&mmio_atsd_reg[i], launch, va); + } } #define mn_to_npu_context(x) container_of(x, struct npu_context, mn) -struct 
mmio_atsd_reg { - struct npu *npu; - int reg; -}; - static void mmio_invalidate_wait( - struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS], bool flush) + struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS]) { struct npu *npu; int i, reg; @@ -522,16 +531,67 @@ static void mmio_invalidate_wait( reg = mmio_atsd_reg[i].reg; while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT)) cpu_relax(); + } +} + +/* + * Acquires all the address translation shootdown (ATSD) registers required to + * launch an ATSD on all links this npu_context is active on. + */ +static void acquire_atsd_reg(struct npu_context *npu_context, + struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS]) +{ + int i, j; + struct npu *npu; + struct pci_dev *npdev; + struct pnv_phb *nphb; - put_mmio_atsd_reg(npu, reg); + for (i = 0; i <= max_npu2_index; i++) { + mmio_atsd_reg[i].reg = -1; + for (j = 0; j < NV_MAX_LINKS; j++) { + /* + * There are no ordering requirements with respect to + * the setup of struct npu_context, but to ensure + * consistent behaviour we need to ensure npdev[][] is + * only read once. + */ + npdev = READ_ONCE(npu_context->npdev[i][j]); + if (!npdev) + continue; + nphb = pci_bus_to_host(npdev->bus)->private_data; + npu = &nphb->npu; + mmio_atsd_reg[i].npu = npu; + mmio_atsd_reg[i].reg = get_mmio_atsd_reg(npu); + while (mmio_atsd_reg[i].reg < 0) { + mmio_atsd_reg[i].reg = get_mmio_atsd_reg(npu); + cpu_relax(); + } + break; + } + } +} + +/* + * Release previously acquired ATSD registers. To avoid deadlocks the registers + * must be released in the same order they were acquired above in + * acquire_atsd_reg. + */ +static void release_atsd_reg(struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS]) +{ + int i; + + for (i = 0; i <= max_npu2_index; i++) { /* - * The GPU requires two flush ATSDs to ensure all entries have - * been flushed. We use PID 0 as it will never be used for a - * process on the GPU. + * We can't rely on npu_context->npdev[][] being the same here + * as when acquire_atsd_reg() was called, hence we use the + * values stored in mmio_atsd_reg during the acquire phase + * rather than re-reading npdev[][]. */ - if (flush) - mmio_invalidate_pid(npu, 0, true); + if (mmio_atsd_reg[i].reg < 0) + continue; + + put_mmio_atsd_reg(mmio_atsd_reg[i].npu, mmio_atsd_reg[i].reg); } } @@ -542,10 +602,6 @@ static void mmio_invalidate_wait( static void mmio_invalidate(struct npu_context *npu_context, int va, unsigned long address, bool flush) { - int i, j; - struct npu *npu; - struct pnv_phb *nphb; - struct pci_dev *npdev; struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS]; unsigned long pid = npu_context->mm->context.id; @@ -561,37 +617,25 @@ static void mmio_invalidate(struct npu_context *npu_context, int va, * Loop over all the NPUs this process is active on and launch * an invalidate. */ - for (i = 0; i <= max_npu2_index; i++) { - mmio_atsd_reg[i].reg = -1; - for (j = 0; j < NV_MAX_LINKS; j++) { - npdev = npu_context->npdev[i][j]; - if (!npdev) - continue; - - nphb = pci_bus_to_host(npdev->bus)->private_data; - npu = &nphb->npu; - mmio_atsd_reg[i].npu = npu; - - if (va) - mmio_atsd_reg[i].reg = - mmio_invalidate_va(npu, address, pid, - flush); - else - mmio_atsd_reg[i].reg = - mmio_invalidate_pid(npu, pid, flush); - - /* - * The NPU hardware forwards the shootdown to all GPUs - * so we only have to launch one shootdown per NPU. 
- */ - break; - } + acquire_atsd_reg(npu_context, mmio_atsd_reg); + if (va) + mmio_invalidate_va(mmio_atsd_reg, address, pid, flush); + else + mmio_invalidate_pid(mmio_atsd_reg, pid, flush); + + mmio_invalidate_wait(mmio_atsd_reg); + if (flush) { + /* + * The GPU requires two flush ATSDs to ensure all entries have + * been flushed. We use PID 0 as it will never be used for a + * process on the GPU. + */ + mmio_invalidate_pid(mmio_atsd_reg, 0, true); + mmio_invalidate_wait(mmio_atsd_reg); + mmio_invalidate_pid(mmio_atsd_reg, 0, true); + mmio_invalidate_wait(mmio_atsd_reg); } - - mmio_invalidate_wait(mmio_atsd_reg, flush); - if (flush) - /* Wait for the flush to complete */ - mmio_invalidate_wait(mmio_atsd_reg, false); + release_atsd_reg(mmio_atsd_reg); } static void pnv_npu2_mn_release(struct mmu_notifier *mn, @@ -726,7 +770,16 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev, if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index", &nvlink_index))) return ERR_PTR(-ENODEV); - npu_context->npdev[npu->index][nvlink_index] = npdev; + + /* + * npdev is a pci_dev pointer setup by the PCI code. We assign it to + * npdev[][] to indicate to the mmu notifiers that an invalidation + * should also be sent over this nvlink. The notifiers don't use any + * other fields in npu_context, so we just need to ensure that when they + * dereference npu_context->npdev[][] it is either a valid pointer or + * NULL. + */ + WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], npdev); if (!nphb->npu.nmmu_flush) { /* @@ -778,7 +831,7 @@ void pnv_npu2_destroy_context(struct npu_context *npu_context, if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index", &nvlink_index))) return; - npu_context->npdev[npu->index][nvlink_index] = NULL; + WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], NULL); opal_npu_destroy_context(nphb->opal_id, npu_context->mm->context.id, PCI_DEVID(gpdev->bus->number, gpdev->devfn)); kref_put(&npu_context->kref, pnv_npu2_release_context); -- cgit v1.2.3 From d6a90bb83b5084829558788ea5b8818c9be3da63 Mon Sep 17 00:00:00 2001 From: Philippe Bergheaud Date: Fri, 2 Mar 2018 10:56:11 +0100 Subject: powerpc/powernv: Enable tunneled operations P9 supports PCI tunneled operations (atomics and as_notify). This patch adds support for tunneled operations on powernv, with a new API, to be called by device drivers:

pnv_pci_enable_tunnel()
	Enable tunnel operations and tell the driver the 16-bit ASN indication used by the kernel.

pnv_pci_disable_tunnel()
	Disable tunnel operations.

pnv_pci_set_tunnel_bar()
	Tell the kernel the Tunnel BAR Response address used by the driver. This function uses two new OPAL calls, as the PBCQ Tunnel BAR register is configured by skiboot.

pnv_pci_get_as_notify_info()
	Return the ASN info of the thread to be woken up.
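As an illustration (not part of this patch), a device driver might use this API roughly as follows. The names my_dev, bar_addr and my_* are hypothetical, and error handling is abbreviated:

	/* Hypothetical driver-side sketch of the new calls. */
	static int my_enable_atomics(struct pci_dev *my_dev, u64 bar_addr)
	{
		u64 asnind;
		int rc;

		/* Enable tunnel operations; the kernel reports the ASN indication. */
		rc = pnv_pci_enable_tunnel(my_dev, &asnind);
		if (rc)
			return rc;

		/* Tell the kernel the Tunnel BAR Response address we will use. */
		return pnv_pci_set_tunnel_bar(my_dev, bar_addr, 1);
	}

	static void my_disable_atomics(struct pci_dev *my_dev, u64 bar_addr)
	{
		/* Release: same address with enable == 0, then disable the tunnel. */
		pnv_pci_set_tunnel_bar(my_dev, bar_addr, 0);
		pnv_pci_disable_tunnel(my_dev);
	}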
Signed-off-by: Philippe Bergheaud Reviewed-by: Frederic Barrat Reviewed-by: Christophe Lombard Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/opal-api.h | 4 +- arch/powerpc/include/asm/opal.h | 2 + arch/powerpc/include/asm/pnv-pci.h | 6 ++ arch/powerpc/platforms/powernv/opal-wrappers.S | 2 + arch/powerpc/platforms/powernv/pci-cxl.c | 8 -- arch/powerpc/platforms/powernv/pci.c | 135 +++++++++++++++++++++++++ 6 files changed, 148 insertions(+), 9 deletions(-) diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h index 94bd1bf2c873..d886a5b7ff21 100644 --- a/arch/powerpc/include/asm/opal-api.h +++ b/arch/powerpc/include/asm/opal-api.h @@ -204,7 +204,9 @@ #define OPAL_NPU_SPA_SETUP 159 #define OPAL_NPU_SPA_CLEAR_CACHE 160 #define OPAL_NPU_TL_SET 161 -#define OPAL_LAST 161 +#define OPAL_PCI_GET_PBCQ_TUNNEL_BAR 164 +#define OPAL_PCI_SET_PBCQ_TUNNEL_BAR 165 +#define OPAL_LAST 165 /* Device tree flags */ diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h index 12e70fb58700..dde60089d0d4 100644 --- a/arch/powerpc/include/asm/opal.h +++ b/arch/powerpc/include/asm/opal.h @@ -204,6 +204,8 @@ int64_t opal_unregister_dump_region(uint32_t id); int64_t opal_slw_set_reg(uint64_t cpu_pir, uint64_t sprn, uint64_t val); int64_t opal_config_cpu_idle_state(uint64_t state, uint64_t flag); int64_t opal_pci_set_phb_cxl_mode(uint64_t phb_id, uint64_t mode, uint64_t pe_number); +int64_t opal_pci_get_pbcq_tunnel_bar(uint64_t phb_id, uint64_t *addr); +int64_t opal_pci_set_pbcq_tunnel_bar(uint64_t phb_id, uint64_t addr); int64_t opal_ipmi_send(uint64_t interface, struct opal_ipmi_msg *msg, uint64_t msg_len); int64_t opal_ipmi_recv(uint64_t interface, struct opal_ipmi_msg *msg, diff --git a/arch/powerpc/include/asm/pnv-pci.h b/arch/powerpc/include/asm/pnv-pci.h index 3e5cf251ad9a..d2d8c28db336 100644 --- a/arch/powerpc/include/asm/pnv-pci.h +++ b/arch/powerpc/include/asm/pnv-pci.h @@ -29,6 +29,12 @@ extern int pnv_pci_set_power_state(uint64_t id, uint8_t state, extern int pnv_pci_set_p2p(struct pci_dev *initiator, struct pci_dev *target, u64 desc); +extern int pnv_pci_enable_tunnel(struct pci_dev *dev, uint64_t *asnind); +extern int pnv_pci_disable_tunnel(struct pci_dev *dev); +extern int pnv_pci_set_tunnel_bar(struct pci_dev *dev, uint64_t addr, + int enable); +extern int pnv_pci_get_as_notify_info(struct task_struct *task, u32 *lpid, + u32 *pid, u32 *tid); int pnv_phb_to_cxl_mode(struct pci_dev *dev, uint64_t mode); int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq, unsigned int virq); diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S index 1b2936ba6040..3da30c2f26b4 100644 --- a/arch/powerpc/platforms/powernv/opal-wrappers.S +++ b/arch/powerpc/platforms/powernv/opal-wrappers.S @@ -323,3 +323,5 @@ OPAL_CALL(opal_sensor_group_clear, OPAL_SENSOR_GROUP_CLEAR); OPAL_CALL(opal_npu_spa_setup, OPAL_NPU_SPA_SETUP); OPAL_CALL(opal_npu_spa_clear_cache, OPAL_NPU_SPA_CLEAR_CACHE); OPAL_CALL(opal_npu_tl_set, OPAL_NPU_TL_SET); +OPAL_CALL(opal_pci_get_pbcq_tunnel_bar, OPAL_PCI_GET_PBCQ_TUNNEL_BAR); +OPAL_CALL(opal_pci_set_pbcq_tunnel_bar, OPAL_PCI_SET_PBCQ_TUNNEL_BAR); diff --git a/arch/powerpc/platforms/powernv/pci-cxl.c b/arch/powerpc/platforms/powernv/pci-cxl.c index 94498a04558b..cee003de63af 100644 --- a/arch/powerpc/platforms/powernv/pci-cxl.c +++ b/arch/powerpc/platforms/powernv/pci-cxl.c @@ -16,14 +16,6 @@ #include "pci.h" -struct device_node *pnv_pci_get_phb_node(struct 
pci_dev *dev) -{ - struct pci_controller *hose = pci_bus_to_host(dev->bus); - - return of_node_get(hose->dn); -} -EXPORT_SYMBOL(pnv_pci_get_phb_node); - int pnv_phb_to_cxl_mode(struct pci_dev *dev, uint64_t mode) { struct pci_controller *hose = pci_bus_to_host(dev->bus); diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index 69d102cbf48f..b265ecc0836a 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -38,6 +39,7 @@ #include "pci.h" static DEFINE_MUTEX(p2p_mutex); +static DEFINE_MUTEX(tunnel_mutex); int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id) { @@ -1092,6 +1094,139 @@ out: } EXPORT_SYMBOL_GPL(pnv_pci_set_p2p); +struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev) +{ + struct pci_controller *hose = pci_bus_to_host(dev->bus); + + return of_node_get(hose->dn); +} +EXPORT_SYMBOL(pnv_pci_get_phb_node); + +int pnv_pci_enable_tunnel(struct pci_dev *dev, u64 *asnind) +{ + struct device_node *np; + const __be32 *prop; + struct pnv_ioda_pe *pe; + uint16_t window_id; + int rc; + + if (!radix_enabled()) + return -ENXIO; + + if (!(np = pnv_pci_get_phb_node(dev))) + return -ENXIO; + + prop = of_get_property(np, "ibm,phb-indications", NULL); + of_node_put(np); + + if (!prop || !prop[1]) + return -ENXIO; + + *asnind = (u64)be32_to_cpu(prop[1]); + pe = pnv_ioda_get_pe(dev); + if (!pe) + return -ENODEV; + + /* Increase real window size to accept as_notify messages. */ + window_id = (pe->pe_number << 1 ) + 1; + rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id, pe->pe_number, + window_id, pe->tce_bypass_base, + (uint64_t)1 << 48); + return opal_error_code(rc); +} +EXPORT_SYMBOL_GPL(pnv_pci_enable_tunnel); + +int pnv_pci_disable_tunnel(struct pci_dev *dev) +{ + struct pnv_ioda_pe *pe; + + pe = pnv_ioda_get_pe(dev); + if (!pe) + return -ENODEV; + + /* Restore default real window size. */ + pnv_pci_ioda2_set_bypass(pe, true); + return 0; +} +EXPORT_SYMBOL_GPL(pnv_pci_disable_tunnel); + +int pnv_pci_set_tunnel_bar(struct pci_dev *dev, u64 addr, int enable) +{ + __be64 val; + struct pci_controller *hose; + struct pnv_phb *phb; + u64 tunnel_bar; + int rc; + + if (!opal_check_token(OPAL_PCI_GET_PBCQ_TUNNEL_BAR)) + return -ENXIO; + if (!opal_check_token(OPAL_PCI_SET_PBCQ_TUNNEL_BAR)) + return -ENXIO; + + hose = pci_bus_to_host(dev->bus); + phb = hose->private_data; + + mutex_lock(&tunnel_mutex); + rc = opal_pci_get_pbcq_tunnel_bar(phb->opal_id, &val); + if (rc != OPAL_SUCCESS) { + rc = -EIO; + goto out; + } + tunnel_bar = be64_to_cpu(val); + if (enable) { + /* + * Only one device per PHB can use atomics. + * Our policy is first-come, first-served. + */ + if (tunnel_bar) { + if (tunnel_bar != addr) + rc = -EBUSY; + else + rc = 0; /* Setting same address twice is ok */ + goto out; + } + } else { + /* + * The device that owns atomics and wants to release + * them must pass the same address with enable == 0. 
+ */ + if (tunnel_bar != addr) { + rc = -EPERM; + goto out; + } + addr = 0x0ULL; + } + rc = opal_pci_set_pbcq_tunnel_bar(phb->opal_id, addr); + rc = opal_error_code(rc); +out: + mutex_unlock(&tunnel_mutex); + return rc; +} +EXPORT_SYMBOL_GPL(pnv_pci_set_tunnel_bar); + +#ifdef CONFIG_PPC64 /* for thread.tidr */ +int pnv_pci_get_as_notify_info(struct task_struct *task, u32 *lpid, u32 *pid, + u32 *tid) +{ + struct mm_struct *mm = NULL; + + if (task == NULL) + return -EINVAL; + + mm = get_task_mm(task); + if (mm == NULL) + return -EINVAL; + + *pid = mm->context.id; + mmput(mm); + + *tid = task->thread.tidr; + *lpid = mfspr(SPRN_LPID); + return 0; +} +EXPORT_SYMBOL_GPL(pnv_pci_get_as_notify_info); +#endif + void pnv_pci_shutdown(void) { struct pci_controller *hose; -- cgit v1.2.3 From 9dbcbfa1fe0c3b556e889ea213a73eb80d74307b Mon Sep 17 00:00:00 2001 From: Philippe Bergheaud Date: Fri, 2 Mar 2018 10:56:12 +0100 Subject: cxl: read PHB indications from the device tree Configure the P9 XSL_DSNCTL register with PHB indications found in the device tree, or else use legacy hard-coded values. Signed-off-by: Philippe Bergheaud Reviewed-by: Frederic Barrat Reviewed-by: Christophe Lombard Signed-off-by: Michael Ellerman --- drivers/misc/cxl/cxl.h | 2 +- drivers/misc/cxl/cxllib.c | 2 +- drivers/misc/cxl/pci.c | 48 ++++++++++++++++++++++++++++++++++++++++++----- 3 files changed, 45 insertions(+), 7 deletions(-) diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h index 4949b8d5a748..a4c9c8297a6d 100644 --- a/drivers/misc/cxl/cxl.h +++ b/drivers/misc/cxl/cxl.h @@ -1069,7 +1069,7 @@ int cxl_psl_purge(struct cxl_afu *afu); int cxl_calc_capp_routing(struct pci_dev *dev, u64 *chipid, u32 *phb_index, u64 *capp_unit_id); int cxl_slot_is_switched(struct pci_dev *dev); -int cxl_get_xsl9_dsnctl(u64 capp_unit_id, u64 *reg); +int cxl_get_xsl9_dsnctl(struct pci_dev *dev, u64 capp_unit_id, u64 *reg); u64 cxl_calculate_sr(bool master, bool kernel, bool real_mode, bool p9); void cxl_native_irq_dump_regs_psl9(struct cxl_context *ctx); diff --git a/drivers/misc/cxl/cxllib.c b/drivers/misc/cxl/cxllib.c index 30ccba436b3b..bea1eb004b49 100644 --- a/drivers/misc/cxl/cxllib.c +++ b/drivers/misc/cxl/cxllib.c @@ -99,7 +99,7 @@ int cxllib_get_xsl_config(struct pci_dev *dev, struct cxllib_xsl_config *cfg) if (rc) return rc; - rc = cxl_get_xsl9_dsnctl(capp_unit_id, &cfg->dsnctl); + rc = cxl_get_xsl9_dsnctl(dev, capp_unit_id, &cfg->dsnctl); if (rc) return rc; if (cpu_has_feature(CPU_FTR_POWER9_DD1)) { diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index 35f486912ddc..e7ac78e85494 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -407,21 +407,59 @@ int cxl_calc_capp_routing(struct pci_dev *dev, u64 *chipid, return 0; } -int cxl_get_xsl9_dsnctl(u64 capp_unit_id, u64 *reg) +static DEFINE_MUTEX(indications_mutex); + +static int get_phb_indications(struct pci_dev *dev, u64 *capiind, u64 *asnind, + u64 *nbwind) +{ + static u64 nbw, asn, capi = 0; + struct device_node *np; + const __be32 *prop; + + mutex_lock(&indications_mutex); + if (!capi) { + if (!(np = pnv_pci_get_phb_node(dev))) { + mutex_unlock(&indications_mutex); + return -ENODEV; + } + + prop = of_get_property(np, "ibm,phb-indications", NULL); + if (!prop) { + nbw = 0x0300UL; /* legacy values */ + asn = 0x0400UL; + capi = 0x0200UL; + } else { + nbw = (u64)be32_to_cpu(prop[2]); + asn = (u64)be32_to_cpu(prop[1]); + capi = (u64)be32_to_cpu(prop[0]); + } + of_node_put(np); + } + *capiind = capi; + *asnind = asn; + *nbwind = nbw; + 
mutex_unlock(&indications_mutex); + return 0; +} + +int cxl_get_xsl9_dsnctl(struct pci_dev *dev, u64 capp_unit_id, u64 *reg) { u64 xsl_dsnctl; + u64 capiind, asnind, nbwind; /* * CAPI Identifier bits [0:7] * bit 61:60 MSI bits --> 0 * bit 59 TVT selector --> 0 */ + if (get_phb_indications(dev, &capiind, &asnind, &nbwind)) + return -ENODEV; /* * Tell XSL where to route data to. * The field chipid should match the PHB CAPI_CMPM register */ - xsl_dsnctl = ((u64)0x2 << (63-7)); /* Bit 57 */ + xsl_dsnctl = (capiind << (63-15)); /* Bit 57 */ xsl_dsnctl |= (capp_unit_id << (63-15)); /* nMMU_ID Defaults to: b’000001001’*/ @@ -435,14 +473,14 @@ int cxl_get_xsl9_dsnctl(u64 capp_unit_id, u64 *reg) * nbwind=0x03, bits [57:58], must include capi indicator. * Not supported on P9 DD1. */ - xsl_dsnctl |= ((u64)0x03 << (63-47)); + xsl_dsnctl |= (nbwind << (63-55)); /* * Upper 16b address bits of ASB_Notify messages sent to the * system. Need to match the PHB’s ASN Compare/Mask Register. * Not supported on P9 DD1. */ - xsl_dsnctl |= ((u64)0x04 << (63-55)); + xsl_dsnctl |= asnind; } *reg = xsl_dsnctl; @@ -463,7 +501,7 @@ static int init_implementation_adapter_regs_psl9(struct cxl *adapter, if (rc) return rc; - rc = cxl_get_xsl9_dsnctl(capp_unit_id, &xsl_dsnctl); + rc = cxl_get_xsl9_dsnctl(dev, capp_unit_id, &xsl_dsnctl); if (rc) return rc; -- cgit v1.2.3 From 5ee573e8ef034e687c420cb10911371488d14b10 Mon Sep 17 00:00:00 2001 From: Balbir Singh Date: Thu, 8 Mar 2018 11:36:06 +1100 Subject: powerpc/powernv/mce: Don't silently restart the machine On MCE the current code will restart the machine with ppc_md.restart(). This case was extremely unlikely, since prior to that a skiboot call is made, and that resulted in a checkstop for analysis. With newer skiboots on P9 we don't checkstop the box by default; instead we return back to the kernel to extract useful information at the time of the MCE. Since we still get this information, this patch converts the restart to a panic(), so that a dump can be taken (if configured) and we can track and probably debug the potential issue causing the MCE. Signed-off-by: Balbir Singh Reviewed-by: Nicholas Piggin Reviewed-by: Stewart Smith Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/powernv/opal.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index c15182765ff5..516e23de5a3d 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c @@ -490,9 +490,12 @@ void pnv_platform_error_reboot(struct pt_regs *regs, const char *msg) * opal to trigger checkstop explicitly for error analysis. * The FSP PRD component would have already got notified * about this error through other channels. + * 4. We are running on a newer skiboot that by default does + * not cause a checkstop, but drops us back to the kernel to + * extract context and state at the time of the error. */ - ppc_md.restart(NULL); + panic(msg); } int opal_machine_check(struct pt_regs *regs) -- cgit v1.2.3 From 97c6f25d5828b497e3e802b1f7c70187c88df623 Mon Sep 17 00:00:00 2001 From: Simon Guo Date: Wed, 7 Mar 2018 16:46:04 +0800 Subject: PCI/hotplug: ppc: correct a php_slot usage after free In pnv_php_unregister_one(), pnv_php_put_slot() might kfree the php_slot structure, yet pci_hp_deregister() is called after it with a reference to php_slot. This patch moves pnv_php_put_slot() to the end of the function.
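In sketch form, the ordering change is (a simplification of the diff below, not new code):

	/* Before: the reference may be dropped, and php_slot freed, too early. */
	pnv_php_put_slot(php_slot);		/* may kfree(php_slot) */
	pci_hp_deregister(&php_slot->slot);	/* use after free */

	/* After: drop the reference only once php_slot is no longer used. */
	pci_hp_deregister(&php_slot->slot);
	pnv_php_put_slot(php_slot);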
Signed-off-by: Simon Guo Signed-off-by: Michael Ellerman --- drivers/pci/hotplug/pnv_php.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c index 23da3046f160..d44100687dfe 100644 --- a/drivers/pci/hotplug/pnv_php.c +++ b/drivers/pci/hotplug/pnv_php.c @@ -919,8 +919,8 @@ static void pnv_php_unregister_one(struct device_node *dn) return; php_slot->state = PNV_PHP_STATE_OFFLINE; - pnv_php_put_slot(php_slot); pci_hp_deregister(&php_slot->slot); + pnv_php_put_slot(php_slot); } static void pnv_php_unregister(struct device_node *dn) -- cgit v1.2.3 From 65e13c202d7826dc3497c32961008db03fb3eb4b Mon Sep 17 00:00:00 2001 From: Mathieu Malaterre Date: Wed, 31 Jan 2018 08:54:43 +0100 Subject: powerpc/epapr: Move register keyword at the beginning of declaration MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix warning for all register unsigned long (0,3-12) that appear during W=1 compilation: ./arch/powerpc/include/asm/epapr_hcalls.h:479:2: warning: ‘register’ is not at beginning of declaration [-Wold-style-declaration] unsigned long register r[\d] asm("r[\d]"); Signed-off-by: Mathieu Malaterre Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/epapr_hcalls.h | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/arch/powerpc/include/asm/epapr_hcalls.h b/arch/powerpc/include/asm/epapr_hcalls.h index 90863245df53..d3a7e36f1402 100644 --- a/arch/powerpc/include/asm/epapr_hcalls.h +++ b/arch/powerpc/include/asm/epapr_hcalls.h @@ -466,17 +466,17 @@ static inline unsigned long epapr_hypercall(unsigned long *in, unsigned long *out, unsigned long nr) { - unsigned long register r0 asm("r0"); - unsigned long register r3 asm("r3") = in[0]; - unsigned long register r4 asm("r4") = in[1]; - unsigned long register r5 asm("r5") = in[2]; - unsigned long register r6 asm("r6") = in[3]; - unsigned long register r7 asm("r7") = in[4]; - unsigned long register r8 asm("r8") = in[5]; - unsigned long register r9 asm("r9") = in[6]; - unsigned long register r10 asm("r10") = in[7]; - unsigned long register r11 asm("r11") = nr; - unsigned long register r12 asm("r12"); + register unsigned long r0 asm("r0"); + register unsigned long r3 asm("r3") = in[0]; + register unsigned long r4 asm("r4") = in[1]; + register unsigned long r5 asm("r5") = in[2]; + register unsigned long r6 asm("r6") = in[3]; + register unsigned long r7 asm("r7") = in[4]; + register unsigned long r8 asm("r8") = in[5]; + register unsigned long r9 asm("r9") = in[6]; + register unsigned long r10 asm("r10") = in[7]; + register unsigned long r11 asm("r11") = nr; + register unsigned long r12 asm("r12"); asm volatile("bl epapr_hypercall_start" : "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6), -- cgit v1.2.3 From 174b701d3dcd14514f869e2bc08ac404b16fdb9d Mon Sep 17 00:00:00 2001 From: Mathieu Malaterre Date: Sun, 25 Feb 2018 18:22:17 +0100 Subject: powerpc/32: Move the inline keyword at the beginning of function declaration MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The inline keyword was not at the beginning of the function declaration. 
Fix the following warning (treated as error in W=1): arch/powerpc/lib/sstep.c:283:1: error: ‘inline’ is not at beginning of declaration static int nokprobe_inline copy_mem_in(u8 *dest, unsigned long ea, int nb, arch/powerpc/lib/sstep.c:388:1: error: ‘inline’ is not at beginning of declaration static int nokprobe_inline copy_mem_out(u8 *dest, unsigned long ea, int nb, Signed-off-by: Mathieu Malaterre Signed-off-by: Michael Ellerman --- arch/powerpc/lib/sstep.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index 70274b7b4773..34d68f1b1b40 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c @@ -280,7 +280,7 @@ static nokprobe_inline int read_mem_aligned(unsigned long *dest, * Copy from userspace to a buffer, using the largest possible * aligned accesses, up to sizeof(long). */ -static int nokprobe_inline copy_mem_in(u8 *dest, unsigned long ea, int nb, +static nokprobe_inline int copy_mem_in(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs) { int err = 0; @@ -385,7 +385,7 @@ static nokprobe_inline int write_mem_aligned(unsigned long val, * Copy from a buffer to userspace, using the largest possible * aligned accesses, up to sizeof(long). */ -static int nokprobe_inline copy_mem_out(u8 *dest, unsigned long ea, int nb, +static nokprobe_inline int copy_mem_out(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs) { int err = 0; -- cgit v1.2.3 From 67b464a89c21c9edd45ad15c457bb5f25dadecfd Mon Sep 17 00:00:00 2001 From: Mathieu Malaterre Date: Sun, 25 Feb 2018 18:22:19 +0100 Subject: powerpc/32: Mark both tmp variables as unused MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since the value of `tmp` is never intended to be read, declare both `tmp` variables as unused. 
Fix warning (treated as error in W=1): arch/powerpc/kernel/signal_32.c: In function ‘sys_swapcontext’: arch/powerpc/kernel/signal_32.c:1048:16: error: variable ‘tmp’ set but not used arch/powerpc/kernel/signal_32.c: In function ‘sys_debug_setcontext’: arch/powerpc/kernel/signal_32.c:1234:16: error: variable ‘tmp’ set but not used Signed-off-by: Mathieu Malaterre Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/signal_32.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index a46de0035214..492f03451877 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -1045,7 +1045,7 @@ long sys_swapcontext(struct ucontext __user *old_ctx, struct ucontext __user *new_ctx, int ctx_size, int r6, int r7, int r8, struct pt_regs *regs) { - unsigned char tmp; + unsigned char tmp __maybe_unused; int ctx_has_vsx_region = 0; #ifdef CONFIG_PPC64 @@ -1231,7 +1231,7 @@ int sys_debug_setcontext(struct ucontext __user *ctx, { struct sig_dbg_op op; int i; - unsigned char tmp; + unsigned char tmp __maybe_unused; unsigned long new_msr = regs->msr; #ifdef CONFIG_PPC_ADV_DEBUG_REGS unsigned long new_dbcr0 = current->thread.debug.dbcr0; -- cgit v1.2.3 From 8b51e679a54e808bdf1f2cc6552cf25577f0a3ea Mon Sep 17 00:00:00 2001 From: Mathieu Malaterre Date: Sun, 25 Feb 2018 18:22:22 +0100 Subject: powerpc/embedded6xx: Make functions flipper_pic_init() & ug_udbg_putc() static MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Change signature of two functions, adding static keyword to prevent the following two warnings (treated as errors on W=1): arch/powerpc/platforms/embedded6xx/flipper-pic.c:135:28: error: no previous prototype for ‘flipper_pic_init’ arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c:172:6: error: no previous prototype for ‘ug_udbg_putc’ Signed-off-by: Mathieu Malaterre Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/embedded6xx/flipper-pic.c | 2 +- arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c index ade83829d5e8..7206f3f573d4 100644 --- a/arch/powerpc/platforms/embedded6xx/flipper-pic.c +++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c @@ -132,7 +132,7 @@ static void __flipper_quiesce(void __iomem *io_base) out_be32(io_base + FLIPPER_ICR, 0xffffffff); } -struct irq_domain * __init flipper_pic_init(struct device_node *np) +static struct irq_domain * __init flipper_pic_init(struct device_node *np) { struct device_node *pi; struct irq_domain *irq_domain = NULL; diff --git a/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c b/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c index 7feb325b636b..5c7e7ce6dbab 100644 --- a/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c +++ b/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c @@ -169,7 +169,7 @@ static int ug_getc(void) /* * Transmits a character. */ -void ug_udbg_putc(char ch) +static void ug_udbg_putc(char ch) { ug_putc(ch); } -- cgit v1.2.3 From 1cdf039bf82a39d816ca4b8161b01c0acfca3e62 Mon Sep 17 00:00:00 2001 From: Mathieu Malaterre Date: Sun, 25 Feb 2018 18:22:23 +0100 Subject: powerpc/kernel: Make function __giveup_fpu() static __giveup_fpu() is never called outside process.c, so it can be static. 
That also means we don't need an empty definition in switch_to.h Signed-off-by: Mathieu Malaterre [mpe: Also drop the empty version, rewrite change log] Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/switch_to.h | 1 - arch/powerpc/kernel/process.c | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h index c3ca42cdc9f5..be8c9fa23983 100644 --- a/arch/powerpc/include/asm/switch_to.h +++ b/arch/powerpc/include/asm/switch_to.h @@ -35,7 +35,6 @@ static inline void disable_kernel_fp(void) msr_check_and_clear(MSR_FP); } #else -static inline void __giveup_fpu(struct task_struct *t) { } static inline void save_fpu(struct task_struct *t) { } static inline void flush_fp_to_thread(struct task_struct *t) { } #endif diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 1738c4127b32..ec4f363ebb89 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -173,7 +173,7 @@ void __msr_check_and_clear(unsigned long bits) EXPORT_SYMBOL(__msr_check_and_clear); #ifdef CONFIG_PPC_FPU -void __giveup_fpu(struct task_struct *tsk) +static void __giveup_fpu(struct task_struct *tsk) { unsigned long msr; @@ -556,7 +556,7 @@ void restore_math(struct pt_regs *regs) regs->msr = msr; } -void save_all(struct task_struct *tsk) +static void save_all(struct task_struct *tsk) { unsigned long usermsr; -- cgit v1.2.3 From 45b4d27a3897d6094bcf84bc87743954e038620a Mon Sep 17 00:00:00 2001 From: Mathieu Malaterre Date: Sun, 25 Feb 2018 18:22:25 +0100 Subject: powerpc: Add missing prototype for slb_miss_bad_addr() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In commit f0f558b131db ("powerpc/mm: Preserve CFAR value on SLB miss caused by access to bogus address"), the function slb_miss_bad_addr() was added without a prototype. This commit adds it. Fix a warning (treated as error in W=1): arch/powerpc/kernel/traps.c:1498:6: error: no previous prototype for ‘slb_miss_bad_addr’ Signed-off-by: Mathieu Malaterre Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/asm-prototypes.h | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h index 7330150bfe34..0af1925e30db 100644 --- a/arch/powerpc/include/asm/asm-prototypes.h +++ b/arch/powerpc/include/asm/asm-prototypes.h @@ -62,6 +62,7 @@ void RunModeException(struct pt_regs *regs); void single_step_exception(struct pt_regs *regs); void program_check_exception(struct pt_regs *regs); void alignment_exception(struct pt_regs *regs); +void slb_miss_bad_addr(struct pt_regs *regs); void StackOverflow(struct pt_regs *regs); void nonrecoverable_exception(struct pt_regs *regs); void kernel_fp_unavailable_exception(struct pt_regs *regs); -- cgit v1.2.3 From 8b604faff7d421904ebd1fa65d642f566f4a6648 Mon Sep 17 00:00:00 2001 From: Mathieu Malaterre Date: Sun, 25 Feb 2018 18:22:26 +0100 Subject: powerpc: Add missing prototype for hdec_interrupt MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In commit dabe859ec636 ("powerpc: Give hypervisor decrementer interrupts their own handler") an empty body function was added, but no prototype was declared. 
Fix warning (treated as error in W=1): arch/powerpc/kernel/time.c:629:6: error: no previous prototype for ‘hdec_interrupt’ Signed-off-by: Mathieu Malaterre Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/time.h | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h index b240666b7bc1..a7a8a9ac5991 100644 --- a/arch/powerpc/include/asm/time.h +++ b/arch/powerpc/include/asm/time.h @@ -31,6 +31,7 @@ extern void to_tm(int tim, struct rtc_time * tm); extern void tick_broadcast_ipi_handler(void); extern void generic_calibrate_decr(void); +extern void hdec_interrupt(struct pt_regs *regs); /* Some sane defaults: 125 MHz timebase, 1GHz processor */ extern unsigned long ppc_proc_freq; -- cgit v1.2.3 From 848092faa0c7687a99bf465808f7dae5bb5ca6be Mon Sep 17 00:00:00 2001 From: Mathieu Malaterre Date: Sun, 25 Feb 2018 18:22:27 +0100 Subject: powerpc: Add missing prototype for time_init() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The function time_init did not have a prototype defined in the time.h header. Fix the following warning (treated as error in W=1): arch/powerpc/kernel/time.c:1068:13: error: no previous prototype for ‘time_init’ Signed-off-by: Mathieu Malaterre Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/time.h | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h index a7a8a9ac5991..828ebe7ba7dc 100644 --- a/arch/powerpc/include/asm/time.h +++ b/arch/powerpc/include/asm/time.h @@ -205,6 +205,7 @@ struct cpu_usage { DECLARE_PER_CPU(struct cpu_usage, cpu_usage_array); extern void secondary_cpu_time_init(void); +extern void __init time_init(void); DECLARE_PER_CPU(u64, decrementers_next_tb); -- cgit v1.2.3 From fd70d9f96d8182a67e28b99c999157262f2096e9 Mon Sep 17 00:00:00 2001 From: Mathieu Malaterre Date: Sun, 25 Feb 2018 18:22:28 +0100 Subject: powerpc: Add missing prototype for arch_dup_task_struct() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In commit 55ccf3fe3f9a ("fork: move the real prepare_to_copy() users to arch_dup_task_struct()") a new arch_dup_task_struct() was added without a prototype declared in thread_info.h header. Fix the following warning (treated as error in W=1): arch/powerpc/kernel/process.c:1609:5: error: no previous prototype for ‘arch_dup_task_struct’ Signed-off-by: Mathieu Malaterre Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/thread_info.h | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index 4a12c00f8de3..5964145db03d 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h @@ -70,6 +70,7 @@ static inline struct thread_info *current_thread_info(void) return (struct thread_info *)val; } +extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); #endif /* __ASSEMBLY__ */ /* -- cgit v1.2.3 From f5246862f82f1e16bbf84cda4cddf287672b30fe Mon Sep 17 00:00:00 2001 From: Mathieu Malaterre Date: Sun, 25 Feb 2018 18:22:29 +0100 Subject: powerpc: Add missing prototype for arch_irq_work_raise() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In commit 4f8b50bbbe63 ("irq_work, ppc: Fix up arch hooks") a new function arch_irq_work_raise() was added without a prototype in header irq_work.h. 
Fix the following warning (treated as error in W=1): arch/powerpc/kernel/time.c:523:6: error: no previous prototype for ‘arch_irq_work_raise’ Signed-off-by: Mathieu Malaterre Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/irq_work.h | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/powerpc/include/asm/irq_work.h b/arch/powerpc/include/asm/irq_work.h index c6d3078bd8c3..b8b0be8f1a07 100644 --- a/arch/powerpc/include/asm/irq_work.h +++ b/arch/powerpc/include/asm/irq_work.h @@ -6,5 +6,6 @@ static inline bool arch_irq_work_has_interrupt(void) { return true; } +extern void arch_irq_work_raise(void); #endif /* _ASM_POWERPC_IRQ_WORK_H */ -- cgit v1.2.3 From 23a6d8b9634897add6ebff32372f34348387b1ee Mon Sep 17 00:00:00 2001 From: Mathieu Malaterre Date: Sun, 25 Feb 2018 18:22:31 +0100 Subject: powerpc: Add missing prototype for init_IRQ() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A function init_IRQ() was added without a prototype declared in header irq.h. Fix the following warning (treated as error in W=1): arch/powerpc/kernel/irq.c:662:13: error: no previous prototype for ‘init_IRQ’ Signed-off-by: Mathieu Malaterre Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/irq.h | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h index e8e3a0a04eb0..ee39ce56b2a2 100644 --- a/arch/powerpc/include/asm/irq.h +++ b/arch/powerpc/include/asm/irq.h @@ -66,6 +66,7 @@ extern void irq_ctx_init(void); extern void call_do_softirq(struct thread_info *tp); extern void call_do_irq(struct pt_regs *regs, struct thread_info *tp); extern void do_IRQ(struct pt_regs *regs); +extern void __init init_IRQ(void); extern void __do_irq(struct pt_regs *regs); int irq_choose_cpu(const struct cpumask *mask); -- cgit v1.2.3 From 0d60619e1c0ca20eb26103610349923451827688 Mon Sep 17 00:00:00 2001 From: Mathieu Malaterre Date: Sun, 25 Feb 2018 18:22:32 +0100 Subject: powerpc: Add missing prototype for sys_debug_setcontext() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In commit 81e7009ea46c ("powerpc: merge ppc signal.c and ppc64 signal32.c") the function sys_debug_setcontext was added without a prototype. 
Fix compilation warning (treated as error in W=1): arch/powerpc/kernel/signal_32.c:1227:5: error: no previous prototype for ‘sys_debug_setcontext’ Signed-off-by: Mathieu Malaterre Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/asm-prototypes.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h index 0af1925e30db..7c23d9ead694 100644 --- a/arch/powerpc/include/asm/asm-prototypes.h +++ b/arch/powerpc/include/asm/asm-prototypes.h @@ -89,6 +89,10 @@ int sys_swapcontext(struct ucontext __user *old_ctx, long sys_swapcontext(struct ucontext __user *old_ctx, struct ucontext __user *new_ctx, int ctx_size, int r6, int r7, int r8, struct pt_regs *regs); +int sys_debug_setcontext(struct ucontext __user *ctx, + int ndbg, struct sig_dbg_op __user *dbg, + int r6, int r7, int r8, + struct pt_regs *regs); #endif long sys_switch_endian(void); notrace unsigned int __check_irq_replay(void); -- cgit v1.2.3 From b53875c4b4f212a7a8e505e2d1063500cdc9431e Mon Sep 17 00:00:00 2001 From: Mathieu Malaterre Date: Sun, 25 Feb 2018 18:22:33 +0100 Subject: powerpc: Add missing prototypes for sys_sigreturn() & sys_rt_sigreturn() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Two functions did not have a prototype defined in signal.h header. Fix the following two warnings (treated as errors in W=1): arch/powerpc/kernel/signal_32.c:1135:6: error: no previous prototype for ‘sys_rt_sigreturn’ arch/powerpc/kernel/signal_32.c:1422:6: error: no previous prototype for ‘sys_sigreturn’ Signed-off-by: Mathieu Malaterre Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/signal.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/powerpc/kernel/signal.h b/arch/powerpc/kernel/signal.h index 7c59d88b9d86..a6467f843acf 100644 --- a/arch/powerpc/kernel/signal.h +++ b/arch/powerpc/kernel/signal.h @@ -49,6 +49,11 @@ extern int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, #else /* CONFIG_PPC64 */ +extern long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8, + struct pt_regs *regs); +extern long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8, + struct pt_regs *regs); + static inline int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct task_struct *tsk) { -- cgit v1.2.3 From b0d876da1d1cd33a1c28049512a136e61d0ff371 Mon Sep 17 00:00:00 2001 From: Mathieu Malaterre Date: Sun, 25 Feb 2018 18:22:34 +0100 Subject: powerpc: Add missing prototypes for hw_breakpoint_handler() & arch_unregister_hw_breakpoint() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In commit 5aae8a537080 ("powerpc, hw_breakpoints: Implement hw_breakpoints for 64-bit server processors") function hw_breakpoint_handler() and arch_unregister_hw_breakpoint() were added without function prototypes in hw_breakpoint.h header. 
Fix the following warnings (treated as errors in W=1): arch/powerpc/kernel/hw_breakpoint.c:106:6: error: no previous prototype for ‘arch_unregister_hw_breakpoint’ arch/powerpc/kernel/hw_breakpoint.c:209:5: error: no previous prototype for ‘hw_breakpoint_handler’ Signed-off-by: Mathieu Malaterre Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/hw_breakpoint.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h index ac6432d9be46..90c708e5e7c4 100644 --- a/arch/powerpc/include/asm/hw_breakpoint.h +++ b/arch/powerpc/include/asm/hw_breakpoint.h @@ -66,6 +66,7 @@ extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused, unsigned long val, void *data); int arch_install_hw_breakpoint(struct perf_event *bp); void arch_uninstall_hw_breakpoint(struct perf_event *bp); +void arch_unregister_hw_breakpoint(struct perf_event *bp); void hw_breakpoint_pmu_read(struct perf_event *bp); extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk); @@ -82,6 +83,7 @@ static inline void hw_breakpoint_disable(void) __set_breakpoint(&brk); } extern void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs); +int hw_breakpoint_handler(struct die_args *args); #else /* CONFIG_HAVE_HW_BREAKPOINT */ static inline void hw_breakpoint_disable(void) { } -- cgit v1.2.3 From bf7fb32dd5fc8a6bd25aaab05d3acddd1223ba03 Mon Sep 17 00:00:00 2001 From: Mathieu Malaterre Date: Sun, 25 Feb 2018 18:22:35 +0100 Subject: powerpc: Add missing prototypes for ppc_select() & ppc_fadvise64_64() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add missing prototypes for ppc_select() & ppc_fadvise64_64() to header asm-prototypes.h. Fix the following warnings (treated as errors in W=1): arch/powerpc/kernel/syscalls.c:87:1: error: no previous prototype for ‘ppc_select’ arch/powerpc/kernel/syscalls.c:119:6: error: no previous prototype for ‘ppc_fadvise64_64’ Signed-off-by: Mathieu Malaterre Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/asm-prototypes.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h index 7c23d9ead694..4d8b89b46018 100644 --- a/arch/powerpc/include/asm/asm-prototypes.h +++ b/arch/powerpc/include/asm/asm-prototypes.h @@ -93,7 +93,11 @@ int sys_debug_setcontext(struct ucontext __user *ctx, int ndbg, struct sig_dbg_op __user *dbg, int r6, int r7, int r8, struct pt_regs *regs); +int +ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct timeval __user *tvp); #endif +long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low, + u32 len_high, u32 len_low); long sys_switch_endian(void); notrace unsigned int __check_irq_replay(void); void notrace restore_interrupts(void); -- cgit v1.2.3 From 4f1f40f7b2b4487f582ecafec64076e430c72b22 Mon Sep 17 00:00:00 2001 From: Mathieu Malaterre Date: Fri, 2 Mar 2018 20:49:18 +0100 Subject: powerpc/prom: Remove warning on array size when empty MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When none of CONFIG_ALTIVEC, CONFIG_VSX or CONFIG_PPC64 is defined, the array feature_properties is defined as an empty array, which in turn triggers the following warning (treated as error on W=1): arch/powerpc/kernel/prom.c: In function ‘check_cpu_feature_properties’: arch/powerpc/kernel/prom.c:298:16: error: comparison of unsigned expression < 0 is always
false for (i = 0; i < ARRAY_SIZE(feature_properties); ++i, ++fp) { ^ Suggested-by: Michael Ellerman Signed-off-by: Mathieu Malaterre Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/prom.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 4dffef947b8a..330c65f04820 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -291,11 +291,11 @@ static inline void identical_pvr_fixup(unsigned long node) static void __init check_cpu_feature_properties(unsigned long node) { - unsigned long i; + int i; struct feature_property *fp = feature_properties; const __be32 *prop; - for (i = 0; i < ARRAY_SIZE(feature_properties); ++i, ++fp) { + for (i = 0; i < (int)ARRAY_SIZE(feature_properties); ++i, ++fp) { prop = of_get_flat_dt_prop(node, fp->name, NULL); if (prop && be32_to_cpup(prop) >= fp->min_value) { cur_cpu_spec->cpu_features |= fp->cpu_feature; -- cgit v1.2.3 From 603b892200e653dd7e86a0e4a315561534d97441 Mon Sep 17 00:00:00 2001 From: Mathieu Malaterre Date: Wed, 7 Mar 2018 21:34:35 +0100 Subject: powerpc: Avoid comparison of unsigned long >= 0 in pfn_valid() Rewrite comparison since all values compared are of type `unsigned long`. Instead of using unsigned properties and rewriting the original code as: (originally suggested by Segher Boessenkool ) #define pfn_valid(pfn) \ (((pfn) - ARCH_PFN_OFFSET) < (max_mapnr - ARCH_PFN_OFFSET)) Prefer a static inline function to make code as readable as possible. Fix a warning (treated as error in W=1): arch/powerpc/include/asm/page.h:129:32: error: comparison of unsigned expression >= 0 is always true [-Werror=type-limits] #define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && (pfn) < max_mapnr) ^ Suggested-by: Christophe Leroy Signed-off-by: Mathieu Malaterre Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/page.h | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index d5f1c41b7dba..dec9ce5ba8af 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -126,7 +126,15 @@ extern long long virt_phys_offset; #ifdef CONFIG_FLATMEM #define ARCH_PFN_OFFSET ((unsigned long)(MEMORY_START >> PAGE_SHIFT)) -#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && (pfn) < max_mapnr) +#ifndef __ASSEMBLY__ +extern unsigned long max_mapnr; +static inline bool pfn_valid(unsigned long pfn) +{ + unsigned long min_pfn = ARCH_PFN_OFFSET; + + return pfn >= min_pfn && pfn < max_mapnr; +} +#endif #endif #define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) -- cgit v1.2.3 From ef85dffd4251ff6c23056651f6f83bdce83cd1cf Mon Sep 17 00:00:00 2001 From: Mathieu Malaterre Date: Fri, 2 Mar 2018 20:50:51 +0100 Subject: powerpc: Avoid comparison of unsigned long >= 0 in __access_ok() Rewrite function-like macro into regular static inline function to avoid a warning during macro expansion. 
Fix warning (treated as error in W=1): ./arch/powerpc/include/asm/uaccess.h:52:35: error: comparison of unsigned expression >= 0 is always true (((size) == 0) || (((size) - 1) <= ((segment).seg - (addr))))) ^ Suggested-by: Segher Boessenkool Signed-off-by: Mathieu Malaterre Reviewed-by: Christophe Leroy Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/uaccess.h | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index 51bfeb8777f0..a62ee663b2c8 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -47,9 +47,13 @@ #else -#define __access_ok(addr, size, segment) \ - (((addr) <= (segment).seg) && \ - (((size) == 0) || (((size) - 1) <= ((segment).seg - (addr))))) +static inline int __access_ok(unsigned long addr, unsigned long size, + mm_segment_t seg) +{ + if (addr > seg.seg) + return 0; + return (size == 0 || size - 1 <= seg.seg - addr); +} #endif -- cgit v1.2.3 From d15a261d876da3267c8c706ef21e7fdf10c582be Mon Sep 17 00:00:00 2001 From: Mathieu Malaterre Date: Wed, 7 Mar 2018 21:32:55 +0100 Subject: powerpc/32: Make some functions static These functions can all be static, make it so. Signed-off-by: Mathieu Malaterre [mpe: Combine a patch of Mathieu's with some other static conversions] Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/setup_32.c | 7 +++---- arch/powerpc/mm/init_32.c | 2 +- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 51ebc01fff52..57b668412e9c 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -121,7 +121,7 @@ notrace void __init machine_init(u64 dt_ptr) } /* Checks "l2cr=xxxx" command-line option */ -int __init ppc_setup_l2cr(char *str) +static int __init ppc_setup_l2cr(char *str) { if (cpu_has_feature(CPU_FTR_L2CR)) { unsigned long val = simple_strtoul(str, NULL, 0); @@ -134,7 +134,7 @@ int __init ppc_setup_l2cr(char *str) __setup("l2cr=", ppc_setup_l2cr); /* Checks "l3cr=xxxx" command-line option */ -int __init ppc_setup_l3cr(char *str) +static int __init ppc_setup_l3cr(char *str) { if (cpu_has_feature(CPU_FTR_L3CR)) { unsigned long val = simple_strtoul(str, NULL, 0); @@ -180,7 +180,7 @@ EXPORT_SYMBOL(nvram_sync); #endif /* CONFIG_NVRAM */ -int __init ppc_init(void) +static int __init ppc_init(void) { /* clear the progress line */ if (ppc_md.progress) @@ -192,7 +192,6 @@ int __init ppc_init(void) } return 0; } - arch_initcall(ppc_init); void __init irqstack_early_init(void) diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c index 6419b33ca309..a2bf6965d04f 100644 --- a/arch/powerpc/mm/init_32.c +++ b/arch/powerpc/mm/init_32.c @@ -99,7 +99,7 @@ unsigned long __max_low_memory = MAX_LOW_MEM; /* * Check for command-line options that affect what MMU_init will do. */ -void __init MMU_setup(void) +static void __init MMU_setup(void) { /* Check for nobats option (used in mapin_ram). */ if (strstr(boot_command_line, "nobats")) { -- cgit v1.2.3 From e82d70cf965072a3872ea97b7d8df4b6f29fc09f Mon Sep 17 00:00:00 2001 From: Mathieu Malaterre Date: Thu, 8 Mar 2018 22:31:59 +1100 Subject: powerpc/32: Add missing prototypes for (early|machine)_init() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit early_init() and machine_init() have no prototype, add one in asm-prototypes.h. 
Fixes the following warnings (treated as error in W=1): arch/powerpc/kernel/setup_32.c:68:30: error: no previous prototype for ‘early_init’ arch/powerpc/kernel/setup_32.c:99:21: error: no previous prototype for ‘machine_init’ Signed-off-by: Mathieu Malaterre [mpe: Move them to asm-prototypes.h, drop other functions] Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/asm-prototypes.h | 3 +++ arch/powerpc/kernel/setup_32.c | 1 + 2 files changed, 4 insertions(+) diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h index 4d8b89b46018..0bdeff415a72 100644 --- a/arch/powerpc/include/asm/asm-prototypes.h +++ b/arch/powerpc/include/asm/asm-prototypes.h @@ -95,7 +95,10 @@ int sys_debug_setcontext(struct ucontext __user *ctx, struct pt_regs *regs); int ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct timeval __user *tvp); +unsigned long __init early_init(unsigned long dt_ptr); +void __init machine_init(u64 dt_ptr); #endif + long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low, u32 len_high, u32 len_low); long sys_switch_endian(void); diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 57b668412e9c..74457485574b 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -39,6 +39,7 @@ #include #include #include +#include #define DBG(fmt...) -- cgit v1.2.3 From 16560e88320d276036b393d4ba51cf0184f6caca Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 8 Mar 2018 13:54:39 +1100 Subject: powerpc/pseries: Move smp_query_cpu_stopped() etc. out of plpar_wrappers.h smp_query_cpu_stopped() and related #defines are currently in plpar_wrappers.h. The function actually does an RTAS call, not an hcall, and basically has nothing to do with plpar_wrappers.h Move it into pseries.h, where it can easily be used by the only two callers in pseries/smp.c and pseries/hotplug-cpu.c. 
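As an illustration (not part of this patch), a caller can poll the helper using the QCSS_* values; the function below is a hypothetical sketch, not one of the two in-tree callers:

	/* Hypothetical: wait for a physical CPU to reach the stopped state. */
	static int wait_for_cpu_stopped(unsigned int pcpu)
	{
		int status;

		do {
			status = smp_query_cpu_stopped(pcpu);
			if (status == QCSS_HARDWARE_ERROR ||
			    status == QCSS_HARDWARE_BUSY)
				return -EIO;	/* RTAS reported a problem */
			cpu_relax();
		} while (status != QCSS_STOPPED);

		return 0;
	}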
Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/plpar_wrappers.h | 8 -------- arch/powerpc/platforms/pseries/pseries.h | 8 ++++++++ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h index 55eddf50d149..1776af9e0118 100644 --- a/arch/powerpc/include/asm/plpar_wrappers.h +++ b/arch/powerpc/include/asm/plpar_wrappers.h @@ -9,14 +9,6 @@ #include #include -/* Get state of physical CPU from query_cpu_stopped */ -int smp_query_cpu_stopped(unsigned int pcpu); -#define QCSS_STOPPED 0 -#define QCSS_STOPPING 1 -#define QCSS_NOT_STOPPED 2 -#define QCSS_HARDWARE_ERROR -1 -#define QCSS_HARDWARE_BUSY -2 - static inline long poll_pending(void) { return plpar_hcall_norets(H_POLL_PENDING); diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h index 1ae1d9f4dbe9..c73351cea276 100644 --- a/arch/powerpc/platforms/pseries/pseries.h +++ b/arch/powerpc/platforms/pseries/pseries.h @@ -27,6 +27,14 @@ extern int pSeries_machine_check_exception(struct pt_regs *regs); #ifdef CONFIG_SMP extern void smp_init_pseries(void); + +/* Get state of physical CPU from query_cpu_stopped */ +int smp_query_cpu_stopped(unsigned int pcpu); +#define QCSS_STOPPED 0 +#define QCSS_STOPPING 1 +#define QCSS_NOT_STOPPED 2 +#define QCSS_HARDWARE_ERROR -1 +#define QCSS_HARDWARE_BUSY -2 #else static inline void smp_init_pseries(void) { }; #endif -- cgit v1.2.3 From 5017e875e497c00dbc17558161fec3ff30b2b4a9 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 8 Mar 2018 13:54:40 +1100 Subject: powerpc/pseries: Make plpar_wrappers.h safe to include when PSERIES=n Currently plpar_wrappers.h is not safe to include when CONFIG_PPC_PSERIES=n, or at least it can be depending on other config options and so on. Fix that by wrapping the entire content in an ifdef. Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/plpar_wrappers.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h index 1776af9e0118..09cb26816b2d 100644 --- a/arch/powerpc/include/asm/plpar_wrappers.h +++ b/arch/powerpc/include/asm/plpar_wrappers.h @@ -2,6 +2,8 @@ #ifndef _ASM_POWERPC_PLPAR_WRAPPERS_H #define _ASM_POWERPC_PLPAR_WRAPPERS_H +#ifdef CONFIG_PPC_PSERIES + #include #include @@ -332,4 +334,6 @@ static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p) return rc; } +#endif /* CONFIG_PPC_PSERIES */ + #endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */ -- cgit v1.2.3 From 7c09c1869c9ceb8b356e23161d2ceb0ed0849ac5 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 8 Mar 2018 13:54:41 +1100 Subject: powerpc: Rename plapr routines to plpar Back in 2013 we added some hypercall wrappers which misspelled "plpar" (P-series Logical PARtition) as "plapr". Visually they're hard to distinguish and it almost doesn't matter, but it is confusing when grepping to miss some calls because of the typo. They've also started spreading, so before they take over let's fix them all to be "plpar". 
Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/plpar_wrappers.h | 6 +++--- arch/powerpc/platforms/pseries/setup.c | 2 +- arch/powerpc/platforms/pseries/smp.c | 2 +- arch/powerpc/xmon/xmon.c | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h index 09cb26816b2d..9233b84f489a 100644 --- a/arch/powerpc/include/asm/plpar_wrappers.h +++ b/arch/powerpc/include/asm/plpar_wrappers.h @@ -305,17 +305,17 @@ static inline long enable_little_endian_exceptions(void) return plpar_set_mode(1, H_SET_MODE_RESOURCE_LE, 0, 0); } -static inline long plapr_set_ciabr(unsigned long ciabr) +static inline long plpar_set_ciabr(unsigned long ciabr) { return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_CIABR, ciabr, 0); } -static inline long plapr_set_watchpoint0(unsigned long dawr0, unsigned long dawrx0) +static inline long plpar_set_watchpoint0(unsigned long dawr0, unsigned long dawrx0) { return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_DAWR, dawr0, dawrx0); } -static inline long plapr_signal_sys_reset(long cpu) +static inline long plpar_signal_sys_reset(long cpu) { return plpar_hcall_norets(H_SIGNAL_SYS_RESET, cpu); } diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 1a527625acf7..4642e48d1c2e 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -739,7 +739,7 @@ static int pseries_set_dawr(unsigned long dawr, unsigned long dawrx) /* PAPR says we can't set HYP */ dawrx &= ~DAWRX_HYP; - return plapr_set_watchpoint0(dawr, dawrx); + return plpar_set_watchpoint0(dawr, dawrx); } #define CMO_CHARACTERISTICS_TOKEN 44 diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c index 2e184829e5d4..66b6f119d599 100644 --- a/arch/powerpc/platforms/pseries/smp.c +++ b/arch/powerpc/platforms/pseries/smp.c @@ -215,7 +215,7 @@ static int pseries_cause_nmi_ipi(int cpu) hwcpu = get_hard_smp_processor_id(cpu); } - if (plapr_signal_sys_reset(hwcpu) == H_SUCCESS) + if (plpar_signal_sys_reset(hwcpu) == H_SUCCESS) return 1; return 0; diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 3ddf9dd9a55f..2bf6e2af28c2 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -64,7 +64,7 @@ #if defined(CONFIG_PPC_SPLPAR) #include #else -static inline long plapr_set_ciabr(unsigned long ciabr) {return 0; }; +static inline long plpar_set_ciabr(unsigned long ciabr) {return 0; }; #endif #include "nonstdio.h" @@ -328,7 +328,7 @@ static void write_ciabr(unsigned long ciabr) mtspr(SPRN_CIABR, ciabr); return; } - plapr_set_ciabr(ciabr); + plpar_set_ciabr(ciabr); } /** -- cgit v1.2.3 From ab83dc794c9d8870e4844cca9a2945b782b8ee7e Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 8 Mar 2018 13:54:42 +1100 Subject: powerpc/xmon: Move empty plpar_set_ciabr() into plpar_wrappers.h Now that plpar_wrappers.h has an #ifdef PSERIES we can move the empty version of plpar_set_ciabr() which xmon wants into there. 
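The shape of the resulting header is a common kernel pattern, sketched here with hypothetical names rather than the real plpar routines:

/* feature.h -- when the subsystem is configured in, callers see the
 * real declaration; otherwise they get a no-op static inline, so call
 * sites like xmon need no #ifdef of their own.
 */
#ifdef CONFIG_FEATURE

long feature_set_reg(unsigned long val);	/* real implementation elsewhere */

#else /* !CONFIG_FEATURE */

static inline long feature_set_reg(unsigned long val)
{
	return 0;	/* harmless fallback when the feature is absent */
}

#endif /* CONFIG_FEATURE */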
Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/plpar_wrappers.h | 6 ++++++ arch/powerpc/xmon/xmon.c | 7 +------ 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h index 9233b84f489a..96c1a46acbd0 100644 --- a/arch/powerpc/include/asm/plpar_wrappers.h +++ b/arch/powerpc/include/asm/plpar_wrappers.h @@ -334,6 +334,12 @@ static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p) return rc; } +#else /* !CONFIG_PPC_PSERIES */ + +static inline long plpar_set_ciabr(unsigned long ciabr) +{ + return 0; +} #endif /* CONFIG_PPC_PSERIES */ #endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */ diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 2bf6e2af28c2..a06cf6e38978 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include #include @@ -61,12 +62,6 @@ #include #endif -#if defined(CONFIG_PPC_SPLPAR) -#include -#else -static inline long plpar_set_ciabr(unsigned long ciabr) {return 0; }; -#endif - #include "nonstdio.h" #include "dis-asm.h" -- cgit v1.2.3 From 1753dd1830367709144f68f539554dadd7a7ccba Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 7 Mar 2018 11:37:09 +1000 Subject: powerpc/mm/slice: Simplify and optimise slice context initialisation The slice state of an mm gets zeroed then initialised upon exec. This is the only caller of slice_set_user_psize now, so that can be removed and instead implement a faster and simplified approach that requires no locking or checking existing state. This speeds up vfork+exec+exit performance on POWER8 by 3%. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/slice.h | 8 ++-- arch/powerpc/mm/mmu_context_book3s64.c | 9 +---- arch/powerpc/mm/mmu_context_nohash.c | 5 +-- arch/powerpc/mm/slice.c | 72 +++++++++------------------------- 4 files changed, 23 insertions(+), 71 deletions(-) diff --git a/arch/powerpc/include/asm/slice.h b/arch/powerpc/include/asm/slice.h index 172711fadb1c..e40406cf5628 100644 --- a/arch/powerpc/include/asm/slice.h +++ b/arch/powerpc/include/asm/slice.h @@ -28,15 +28,13 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr); -void slice_set_user_psize(struct mm_struct *mm, unsigned int psize); void slice_set_range_psize(struct mm_struct *mm, unsigned long start, unsigned long len, unsigned int psize); -#endif /* __ASSEMBLY__ */ -#else /* CONFIG_PPC_MM_SLICES */ +void slice_init_new_context_exec(struct mm_struct *mm); + +#endif /* __ASSEMBLY__ */ -#define slice_set_range_psize(mm, start, len, psize) \ - slice_set_user_psize((mm), (psize)) #endif /* CONFIG_PPC_MM_SLICES */ #endif /* _ASM_POWERPC_SLICE_H */ diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c index 929d9ef7083f..80acad52b006 100644 --- a/arch/powerpc/mm/mmu_context_book3s64.c +++ b/arch/powerpc/mm/mmu_context_book3s64.c @@ -93,13 +93,6 @@ static int hash__init_new_context(struct mm_struct *mm) if (index < 0) return index; - /* - * In the case of exec, use the default limit, - * otherwise inherit it from the mm we are duplicating. 
- */ - if (!mm->context.slb_addr_limit) - mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW_USER64; - /* * The old code would re-promote on fork, we don't do that when using * slices as it could cause problem promoting slices that have been @@ -115,7 +108,7 @@ static int hash__init_new_context(struct mm_struct *mm) * check against 0 is OK. */ if (mm->context.id == 0) - slice_set_user_psize(mm, mmu_virtual_psize); + slice_init_new_context_exec(mm); subpage_prot_init_new_context(mm); diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c index d98f7e5c141b..be8f5c9d4d08 100644 --- a/arch/powerpc/mm/mmu_context_nohash.c +++ b/arch/powerpc/mm/mmu_context_nohash.c @@ -332,9 +332,6 @@ int init_new_context(struct task_struct *t, struct mm_struct *mm) pr_hard("initing context for mm @%p\n", mm); #ifdef CONFIG_PPC_MM_SLICES - if (!mm->context.slb_addr_limit) - mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW; - /* * We have MMU_NO_CONTEXT set to be ~0. Hence check * explicitly against context.id == 0. This ensures that we properly @@ -343,7 +340,7 @@ int init_new_context(struct task_struct *t, struct mm_struct *mm) * will have id != 0). */ if (mm->context.id == 0) - slice_set_user_psize(mm, mmu_virtual_psize); + slice_init_new_context_exec(mm); #endif mm->context.id = MMU_NO_CONTEXT; mm->context.active = 0; diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index 5e9e1e57d580..7b51f962ce0c 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -671,70 +671,34 @@ unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr) } EXPORT_SYMBOL_GPL(get_slice_psize); -/* - * This is called by hash_page when it needs to do a lazy conversion of - * an address space from real 64K pages to combo 4K pages (typically - * when hitting a non cacheable mapping on a processor or hypervisor - * that won't allow them for 64K pages). - * - * This is also called in init_new_context() to change back the user - * psize from whatever the parent context had it set to - * N.B. This may be called before mm->context.id has been set. - * - * This function will only change the content of the {low,high)_slice_psize - * masks, it will not flush SLBs as this shall be handled lazily by the - * caller. - */ -void slice_set_user_psize(struct mm_struct *mm, unsigned int psize) +void slice_init_new_context_exec(struct mm_struct *mm) { - int index, mask_index; unsigned char *hpsizes, *lpsizes; - unsigned long flags; - unsigned int old_psize; - int i; + unsigned int psize = mmu_virtual_psize; - slice_dbg("slice_set_user_psize(mm=%p, psize=%d)\n", mm, psize); + slice_dbg("slice_init_new_context_exec(mm=%p)\n", mm); - VM_BUG_ON(radix_enabled()); - spin_lock_irqsave(&slice_convert_lock, flags); - - old_psize = mm->context.user_psize; - slice_dbg(" old_psize=%d\n", old_psize); - if (old_psize == psize) - goto bail; + /* + * In the case of exec, use the default limit. In the + * case of fork it is just inherited from the mm being + * duplicated. + */ +#ifdef CONFIG_PPC64 + mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW_USER64; +#else + mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW; +#endif mm->context.user_psize = psize; - wmb(); + /* + * Set all slice psizes to the default. 
+ */ lpsizes = mm->context.low_slices_psize; - for (i = 0; i < SLICE_NUM_LOW; i++) { - mask_index = i & 0x1; - index = i >> 1; - if (((lpsizes[index] >> (mask_index * 4)) & 0xf) == old_psize) - lpsizes[index] = (lpsizes[index] & - ~(0xf << (mask_index * 4))) | - (((unsigned long)psize) << (mask_index * 4)); - } + memset(lpsizes, (psize << 4) | psize, SLICE_NUM_LOW >> 1); hpsizes = mm->context.high_slices_psize; - for (i = 0; i < SLICE_NUM_HIGH; i++) { - mask_index = i & 0x1; - index = i >> 1; - if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == old_psize) - hpsizes[index] = (hpsizes[index] & - ~(0xf << (mask_index * 4))) | - (((unsigned long)psize) << (mask_index * 4)); - } - - - - - slice_dbg(" lsps=%lx, hsps=%lx\n", - (unsigned long)mm->context.low_slices_psize, - (unsigned long)mm->context.high_slices_psize); - - bail: - spin_unlock_irqrestore(&slice_convert_lock, flags); + memset(hpsizes, (psize << 4) | psize, SLICE_NUM_HIGH >> 1); } void slice_set_range_psize(struct mm_struct *mm, unsigned long start, -- cgit v1.2.3 From 5a807e04bd2a704821f7027b06c7025c96e015f9 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 7 Mar 2018 11:37:10 +1000 Subject: powerpc/mm/slice: tidy lpsizes and hpsizes update loops Make these loops look the same, and change their form so the important part is not wrapped over so many lines. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/mm/slice.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index 7b51f962ce0c..432c328b3e94 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -232,22 +232,24 @@ static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psiz spin_lock_irqsave(&slice_convert_lock, flags); lpsizes = mm->context.low_slices_psize; - for (i = 0; i < SLICE_NUM_LOW; i++) - if (mask.low_slices & (1u << i)) { - mask_index = i & 0x1; - index = i >> 1; - lpsizes[index] = (lpsizes[index] & - ~(0xf << (mask_index * 4))) | + for (i = 0; i < SLICE_NUM_LOW; i++) { + if (!(mask.low_slices & (1u << i))) + continue; + + mask_index = i & 0x1; + index = i >> 1; + lpsizes[index] = (lpsizes[index] & ~(0xf << (mask_index * 4))) | (((unsigned long)psize) << (mask_index * 4)); - } + } hpsizes = mm->context.high_slices_psize; for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++) { + if (!test_bit(i, mask.high_slices)) + continue; + mask_index = i & 0x1; index = i >> 1; - if (test_bit(i, mask.high_slices)) - hpsizes[index] = (hpsizes[index] & - ~(0xf << (mask_index * 4))) | + hpsizes[index] = (hpsizes[index] & ~(0xf << (mask_index * 4))) | (((unsigned long)psize) << (mask_index * 4)); } -- cgit v1.2.3 From 830fd2d45aa11631325404904d02c7b530d40119 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 7 Mar 2018 11:37:11 +1000 Subject: powerpc/mm/slice: pass pointers to struct slice_mask where possible Pass around const pointers to struct slice_mask where possible, rather than copies of slice_mask, to reduce stack and call overhead. 
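Schematically, the convention change looks like the sketch below (stand-in type and function names, illustration only); the checkstack.pl figures that follow quantify the stack it saves:

/* Minimal stand-in type for the illustration. */
struct slice_mask_sketch {
	unsigned long long low_slices;
	unsigned long high_slices[4];	/* bitmap words */
};

/* before: every call copies the whole mask through the stack */
unsigned long find_area_byval(unsigned long len,
			      struct slice_mask_sketch mask);

/* after: one shared, read-only instance; no copy, smaller frames */
unsigned long find_area_byref(unsigned long len,
			      const struct slice_mask_sketch *mask);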
checkstack.pl gives, before: 0x00000d1c slice_get_unmapped_area [slice.o]: 592 0x00001864 is_hugepage_only_range [slice.o]: 448 0x00000754 slice_find_area_topdown [slice.o]: 400 0x00000484 slice_find_area_bottomup.isra.1 [slice.o]: 272 0x000017b4 slice_set_range_psize [slice.o]: 224 0x00000a4c slice_find_area [slice.o]: 128 0x00000160 slice_check_fit [slice.o]: 112 after: 0x00000ad0 slice_get_unmapped_area [slice.o]: 448 0x00001464 is_hugepage_only_range [slice.o]: 288 0x000006c0 slice_find_area [slice.o]: 144 0x0000016c slice_check_fit [slice.o]: 128 0x00000528 slice_find_area_bottomup.isra.2 [slice.o]: 128 0x000013e4 slice_set_range_psize [slice.o]: 128 This increases vfork+exec+exit performance by 1.5%. Reduces time to mmap+munmap a 64kB page by 17%. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/mm/slice.c | 84 ++++++++++++++++++++++++++----------------------- 1 file changed, 45 insertions(+), 39 deletions(-) diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index 432c328b3e94..420d791f0e18 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -50,19 +50,21 @@ struct slice_mask { #ifdef DEBUG int _slice_debug = 1; -static void slice_print_mask(const char *label, struct slice_mask mask) +static void slice_print_mask(const char *label, const struct slice_mask *mask) { if (!_slice_debug) return; - pr_devel("%s low_slice: %*pbl\n", label, (int)SLICE_NUM_LOW, &mask.low_slices); - pr_devel("%s high_slice: %*pbl\n", label, (int)SLICE_NUM_HIGH, mask.high_slices); + pr_devel("%s low_slice: %*pbl\n", label, + (int)SLICE_NUM_LOW, &mask->low_slices); + pr_devel("%s high_slice: %*pbl\n", label, + (int)SLICE_NUM_HIGH, mask->high_slices); } #define slice_dbg(fmt...) do { if (_slice_debug) pr_devel(fmt); } while (0) #else -static void slice_print_mask(const char *label, struct slice_mask mask) {} +static void slice_print_mask(const char *label, const struct slice_mask *mask) {} #define slice_dbg(fmt...) 
#endif @@ -179,7 +181,8 @@ static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_ma } static int slice_check_fit(struct mm_struct *mm, - struct slice_mask mask, struct slice_mask available) + const struct slice_mask *mask, + const struct slice_mask *available) { DECLARE_BITMAP(result, SLICE_NUM_HIGH); /* @@ -189,14 +192,14 @@ static int slice_check_fit(struct mm_struct *mm, unsigned long slice_count = GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); if (!SLICE_NUM_HIGH) - return (mask.low_slices & available.low_slices) == - mask.low_slices; + return (mask->low_slices & available->low_slices) == + mask->low_slices; - bitmap_and(result, mask.high_slices, - available.high_slices, slice_count); + bitmap_and(result, mask->high_slices, + available->high_slices, slice_count); - return (mask.low_slices & available.low_slices) == mask.low_slices && - bitmap_equal(result, mask.high_slices, slice_count); + return (mask->low_slices & available->low_slices) == mask->low_slices && + bitmap_equal(result, mask->high_slices, slice_count); } static void slice_flush_segments(void *parm) @@ -216,7 +219,8 @@ static void slice_flush_segments(void *parm) #endif } -static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize) +static void slice_convert(struct mm_struct *mm, + const struct slice_mask *mask, int psize) { int index, mask_index; /* Write the new slice psize bits */ @@ -233,7 +237,7 @@ static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psiz lpsizes = mm->context.low_slices_psize; for (i = 0; i < SLICE_NUM_LOW; i++) { - if (!(mask.low_slices & (1u << i))) + if (!(mask->low_slices & (1u << i))) continue; mask_index = i & 0x1; @@ -244,7 +248,7 @@ static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psiz hpsizes = mm->context.high_slices_psize; for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++) { - if (!test_bit(i, mask.high_slices)) + if (!test_bit(i, mask->high_slices)) continue; mask_index = i & 0x1; @@ -270,26 +274,25 @@ static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psiz * 'available' slice_mark. */ static bool slice_scan_available(unsigned long addr, - struct slice_mask available, - int end, - unsigned long *boundary_addr) + const struct slice_mask *available, + int end, unsigned long *boundary_addr) { unsigned long slice; if (addr < SLICE_LOW_TOP) { slice = GET_LOW_SLICE_INDEX(addr); *boundary_addr = (slice + end) << SLICE_LOW_SHIFT; - return !!(available.low_slices & (1u << slice)); + return !!(available->low_slices & (1u << slice)); } else { slice = GET_HIGH_SLICE_INDEX(addr); *boundary_addr = (slice + end) ? 
((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP; - return !!test_bit(slice, available.high_slices); + return !!test_bit(slice, available->high_slices); } } static unsigned long slice_find_area_bottomup(struct mm_struct *mm, unsigned long len, - struct slice_mask available, + const struct slice_mask *available, int psize, unsigned long high_limit) { int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT); @@ -335,7 +338,7 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm, static unsigned long slice_find_area_topdown(struct mm_struct *mm, unsigned long len, - struct slice_mask available, + const struct slice_mask *available, int psize, unsigned long high_limit) { int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT); @@ -393,7 +396,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm, static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len, - struct slice_mask mask, int psize, + const struct slice_mask *mask, int psize, int topdown, unsigned long high_limit) { if (topdown) @@ -402,7 +405,8 @@ static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len, return slice_find_area_bottomup(mm, len, mask, psize, high_limit); } -static inline void slice_or_mask(struct slice_mask *dst, struct slice_mask *src) +static inline void slice_or_mask(struct slice_mask *dst, + const struct slice_mask *src) { dst->low_slices |= src->low_slices; if (!SLICE_NUM_HIGH) @@ -411,7 +415,8 @@ static inline void slice_or_mask(struct slice_mask *dst, struct slice_mask *src) SLICE_NUM_HIGH); } -static inline void slice_andnot_mask(struct slice_mask *dst, struct slice_mask *src) +static inline void slice_andnot_mask(struct slice_mask *dst, + const struct slice_mask *src) { dst->low_slices &= ~src->low_slices; @@ -501,7 +506,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, * already */ slice_mask_for_size(mm, psize, &good_mask, high_limit); - slice_print_mask(" good_mask", good_mask); + slice_print_mask(" good_mask", &good_mask); /* * Here "good" means slices that are already the right page size, @@ -535,12 +540,12 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, if (addr != 0 || fixed) { /* Build a mask for the requested range */ slice_range_to_mask(addr, len, &mask); - slice_print_mask(" mask", mask); + slice_print_mask(" mask", &mask); /* Check if we fit in the good mask. 
If we do, we just return, * nothing else to do */ - if (slice_check_fit(mm, mask, good_mask)) { + if (slice_check_fit(mm, &mask, &good_mask)) { slice_dbg(" fits good !\n"); return addr; } @@ -548,7 +553,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, /* Now let's see if we can find something in the existing * slices for that size */ - newaddr = slice_find_area(mm, len, good_mask, + newaddr = slice_find_area(mm, len, &good_mask, psize, topdown, high_limit); if (newaddr != -ENOMEM) { /* Found within the good mask, we don't have to setup, @@ -564,9 +569,10 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, */ slice_mask_for_free(mm, &potential_mask, high_limit); slice_or_mask(&potential_mask, &good_mask); - slice_print_mask(" potential", potential_mask); + slice_print_mask(" potential", &potential_mask); - if ((addr != 0 || fixed) && slice_check_fit(mm, mask, potential_mask)) { + if ((addr != 0 || fixed) && + slice_check_fit(mm, &mask, &potential_mask)) { slice_dbg(" fits potential !\n"); goto convert; } @@ -581,7 +587,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, * anywhere in the good area. */ if (addr) { - addr = slice_find_area(mm, len, good_mask, + addr = slice_find_area(mm, len, &good_mask, psize, topdown, high_limit); if (addr != -ENOMEM) { slice_dbg(" found area at 0x%lx\n", addr); @@ -592,14 +598,14 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, /* Now let's see if we can find something in the existing slices * for that size plus free slices */ - addr = slice_find_area(mm, len, potential_mask, + addr = slice_find_area(mm, len, &potential_mask, psize, topdown, high_limit); #ifdef CONFIG_PPC_64K_PAGES if (addr == -ENOMEM && psize == MMU_PAGE_64K) { /* retry the search with 4k-page slices included */ slice_or_mask(&potential_mask, &compat_mask); - addr = slice_find_area(mm, len, potential_mask, + addr = slice_find_area(mm, len, &potential_mask, psize, topdown, high_limit); } #endif @@ -609,7 +615,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, slice_range_to_mask(addr, len, &mask); slice_dbg(" found potential area at 0x%lx\n", addr); - slice_print_mask(" mask", mask); + slice_print_mask(" mask", &mask); convert: slice_andnot_mask(&mask, &good_mask); @@ -617,7 +623,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, if (mask.low_slices || (SLICE_NUM_HIGH && !bitmap_empty(mask.high_slices, SLICE_NUM_HIGH))) { - slice_convert(mm, mask, psize); + slice_convert(mm, &mask, psize); if (psize > MMU_PAGE_BASE) on_each_cpu(slice_flush_segments, mm, 1); } @@ -711,7 +717,7 @@ void slice_set_range_psize(struct mm_struct *mm, unsigned long start, VM_BUG_ON(radix_enabled()); slice_range_to_mask(start, len, &mask); - slice_convert(mm, mask, psize); + slice_convert(mm, &mask, psize); } #ifdef CONFIG_HUGETLB_PAGE @@ -758,9 +764,9 @@ int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr, #if 0 /* too verbose */ slice_dbg("is_hugepage_only_range(mm=%p, addr=%lx, len=%lx)\n", mm, addr, len); - slice_print_mask(" mask", mask); - slice_print_mask(" available", available); + slice_print_mask(" mask", &mask); + slice_print_mask(" available", &available); #endif - return !slice_check_fit(mm, mask, available); + return !slice_check_fit(mm, &mask, &available); } #endif -- cgit v1.2.3 From 5709f7cfd8305252dc327206bd674ad65ca4d77f Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 7 Mar 2018 11:37:12 
+1000 Subject: powerpc/mm/slice: implement a slice mask cache Calculating the slice mask can become a significant overhead for get_unmapped_area. This patch adds a struct slice_mask for each page size in the mm_context, and keeps these in sync with the slices psize arrays and slb_addr_limit. On Book3S/64 this adds 288 bytes to the mm_context_t for the slice mask caches. On POWER8, this increases vfork+exec+exit performance by 9.9% and reduces time to mmap+munmap a 64kB page by 28%. Reduces time to mmap+munmap by about 10% on 8xx. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/book3s/64/mmu.h | 18 +++++ arch/powerpc/include/asm/mmu-8xx.h | 10 +++ arch/powerpc/mm/slice.c | 112 +++++++++++++++++++------------ 3 files changed, 98 insertions(+), 42 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h index bef6e39ed63a..777778579305 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu.h +++ b/arch/powerpc/include/asm/book3s/64/mmu.h @@ -80,6 +80,16 @@ struct spinlock; /* Maximum possible number of NPUs in a system. */ #define NV_MAX_NPUS 8 +/* + * One bit per slice. We have lower slices which cover 256MB segments + * upto 4G range. That gets us 16 low slices. For the rest we track slices + * in 1TB size. + */ +struct slice_mask { + u64 low_slices; + DECLARE_BITMAP(high_slices, SLICE_NUM_HIGH); +}; + typedef struct { mm_context_id_t id; u16 user_psize; /* page size index */ @@ -95,6 +105,14 @@ typedef struct { unsigned char low_slices_psize[BITS_PER_LONG / BITS_PER_BYTE]; unsigned char high_slices_psize[SLICE_ARRAY_SIZE]; unsigned long slb_addr_limit; +# ifdef CONFIG_PPC_64K_PAGES + struct slice_mask mask_64k; +# endif + struct slice_mask mask_4k; +# ifdef CONFIG_HUGETLB_PAGE + struct slice_mask mask_16m; + struct slice_mask mask_16g; +# endif #else u16 sllp; /* SLB page size encoding */ #endif diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h index d3d7e79140c6..4f547752ae79 100644 --- a/arch/powerpc/include/asm/mmu-8xx.h +++ b/arch/powerpc/include/asm/mmu-8xx.h @@ -192,6 +192,11 @@ #endif #ifndef __ASSEMBLY__ +struct slice_mask { + u64 low_slices; + DECLARE_BITMAP(high_slices, 0); +}; + typedef struct { unsigned int id; unsigned int active; @@ -201,6 +206,11 @@ typedef struct { unsigned char low_slices_psize[SLICE_ARRAY_SIZE]; unsigned char high_slices_psize[0]; unsigned long slb_addr_limit; + struct slice_mask mask_base_psize; /* 4k or 16k */ +# ifdef CONFIG_HUGETLB_PAGE + struct slice_mask mask_512k; + struct slice_mask mask_8m; +# endif #endif } mm_context_t; diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index 420d791f0e18..3e199b9cbbfd 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -37,15 +37,6 @@ #include static DEFINE_SPINLOCK(slice_convert_lock); -/* - * One bit per slice. We have lower slices which cover 256MB segments - * upto 4G range. That gets us 16 low slices. For the rest we track slices - * in 1TB size.
- */ -struct slice_mask { - u64 low_slices; - DECLARE_BITMAP(high_slices, SLICE_NUM_HIGH); -}; #ifdef DEBUG int _slice_debug = 1; @@ -149,36 +140,39 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret, __set_bit(i, ret->high_slices); } -static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_mask *ret, - unsigned long high_limit) +#ifdef CONFIG_PPC_BOOK3S_64 +static struct slice_mask *slice_mask_for_size(struct mm_struct *mm, int psize) { - unsigned char *hpsizes, *lpsizes; - int index, mask_index; - unsigned long i; - - ret->low_slices = 0; - if (SLICE_NUM_HIGH) - bitmap_zero(ret->high_slices, SLICE_NUM_HIGH); - - lpsizes = mm->context.low_slices_psize; - for (i = 0; i < SLICE_NUM_LOW; i++) { - mask_index = i & 0x1; - index = i >> 1; - if (((lpsizes[index] >> (mask_index * 4)) & 0xf) == psize) - ret->low_slices |= 1u << i; - } - - if (high_limit <= SLICE_LOW_TOP) - return; - - hpsizes = mm->context.high_slices_psize; - for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++) { - mask_index = i & 0x1; - index = i >> 1; - if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == psize) - __set_bit(i, ret->high_slices); - } +#ifdef CONFIG_PPC_64K_PAGES + if (psize == MMU_PAGE_64K) + return &mm->context.mask_64k; +#endif + if (psize == MMU_PAGE_4K) + return &mm->context.mask_4k; +#ifdef CONFIG_HUGETLB_PAGE + if (psize == MMU_PAGE_16M) + return &mm->context.mask_16m; + if (psize == MMU_PAGE_16G) + return &mm->context.mask_16g; +#endif + BUG(); } +#elif defined(CONFIG_PPC_8xx) +static struct slice_mask *slice_mask_for_size(struct mm_struct *mm, int psize) +{ + if (psize == mmu_virtual_psize) + return &mm->context.mask_base_psize; +#ifdef CONFIG_HUGETLB_PAGE + if (psize == MMU_PAGE_512K) + return &mm->context.mask_512k; + if (psize == MMU_PAGE_8M) + return &mm->context.mask_8m; +#endif + BUG(); +} +#else +#error "Must define the slice masks for page sizes supported by the platform" +#endif static int slice_check_fit(struct mm_struct *mm, const struct slice_mask *mask, @@ -225,11 +219,15 @@ static void slice_convert(struct mm_struct *mm, int index, mask_index; /* Write the new slice psize bits */ unsigned char *hpsizes, *lpsizes; + struct slice_mask *psize_mask, *old_mask; unsigned long i, flags; + int old_psize; slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize); slice_print_mask(" mask", mask); + psize_mask = slice_mask_for_size(mm, psize); + /* We need to use a spinlock here to protect against * concurrent 64k -> 4k demotion ... 
*/ @@ -242,6 +240,14 @@ static void slice_convert(struct mm_struct *mm, mask_index = i & 0x1; index = i >> 1; + + /* Update the slice_mask */ + old_psize = (lpsizes[index] >> (mask_index * 4)) & 0xf; + old_mask = slice_mask_for_size(mm, old_psize); + old_mask->low_slices &= ~(1u << i); + psize_mask->low_slices |= 1u << i; + + /* Update the sizes array */ lpsizes[index] = (lpsizes[index] & ~(0xf << (mask_index * 4))) | (((unsigned long)psize) << (mask_index * 4)); } @@ -253,6 +259,14 @@ static void slice_convert(struct mm_struct *mm, mask_index = i & 0x1; index = i >> 1; + + /* Update the slice_mask */ + old_psize = (hpsizes[index] >> (mask_index * 4)) & 0xf; + old_mask = slice_mask_for_size(mm, old_psize); + __clear_bit(i, old_mask->high_slices); + __set_bit(i, psize_mask->high_slices); + + /* Update the sizes array */ hpsizes[index] = (hpsizes[index] & ~(0xf << (mask_index * 4))) | (((unsigned long)psize) << (mask_index * 4)); } @@ -463,7 +477,13 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, } if (high_limit > mm->context.slb_addr_limit) { + /* + * Increasing the slb_addr_limit does not require + * slice mask cache to be recalculated because it should + * be already initialised beyond the old address limit. + */ mm->context.slb_addr_limit = high_limit; + on_each_cpu(slice_flush_segments, mm, 1); } @@ -505,7 +525,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, /* First make up a "good" mask of slices that have the right size * already */ - slice_mask_for_size(mm, psize, &good_mask, high_limit); + good_mask = *slice_mask_for_size(mm, psize); slice_print_mask(" good_mask", &good_mask); /* @@ -530,7 +550,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, #ifdef CONFIG_PPC_64K_PAGES /* If we support combo pages, we can allow 64k pages in 4k slices */ if (psize == MMU_PAGE_64K) { - slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask, high_limit); + compat_mask = *slice_mask_for_size(mm, MMU_PAGE_4K); if (fixed) slice_or_mask(&good_mask, &compat_mask); } @@ -682,6 +702,7 @@ EXPORT_SYMBOL_GPL(get_slice_psize); void slice_init_new_context_exec(struct mm_struct *mm) { unsigned char *hpsizes, *lpsizes; + struct slice_mask *mask; unsigned int psize = mmu_virtual_psize; slice_dbg("slice_init_new_context_exec(mm=%p)\n", mm); @@ -707,6 +728,14 @@ void slice_init_new_context_exec(struct mm_struct *mm) hpsizes = mm->context.high_slices_psize; memset(hpsizes, (psize << 4) | psize, SLICE_NUM_HIGH >> 1); + + /* + * Slice mask cache starts zeroed, fill the default size cache. 
+ */ + mask = slice_mask_for_size(mm, psize); + mask->low_slices = ~0UL; + if (SLICE_NUM_HIGH) + bitmap_fill(mask->high_slices, SLICE_NUM_HIGH); } void slice_set_range_psize(struct mm_struct *mm, unsigned long start, @@ -745,18 +774,17 @@ int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr, { struct slice_mask mask, available; unsigned int psize = mm->context.user_psize; - unsigned long high_limit = mm->context.slb_addr_limit; if (radix_enabled()) return 0; slice_range_to_mask(addr, len, &mask); - slice_mask_for_size(mm, psize, &available, high_limit); + available = *slice_mask_for_size(mm, psize); #ifdef CONFIG_PPC_64K_PAGES /* We need to account for 4k slices too */ if (psize == MMU_PAGE_64K) { struct slice_mask compat_mask; - slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask, high_limit); + compat_mask = *slice_mask_for_size(mm, MMU_PAGE_4K); slice_or_mask(&available, &compat_mask); } #endif -- cgit v1.2.3 From ae3066bd1cbe58e596c791f72a36e576df5d9ed1 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 7 Mar 2018 11:37:13 +1000 Subject: powerpc/mm/slice: implement slice_check_range_fits Rather than build slice masks from a range then use that to check for fit in a candidate mask, implement slice_check_range_fits that checks if a range fits in a mask directly. This allows several structures to be removed from stacks, and also we don't expect a huge range in a lot of these cases, so building and comparing a full mask is going to be more expensive than testing just one or two bits of the range. On POWER8, this increases vfork+exec+exit performance by 0.3% and reduces time to mmap+munmap a 64kB page by 5%. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/mm/slice.c | 62 +++++++++++++++++++++++++++---------------------- 1 file changed, 34 insertions(+), 28 deletions(-) diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index 3e199b9cbbfd..0a5efa40e739 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -174,26 +174,36 @@ static struct slice_mask *slice_mask_for_size(struct mm_struct *mm, int psize) #error "Must define the slice masks for page sizes supported by the platform" #endif -static int slice_check_fit(struct mm_struct *mm, - const struct slice_mask *mask, - const struct slice_mask *available) +static bool slice_check_range_fits(struct mm_struct *mm, + const struct slice_mask *available, + unsigned long start, unsigned long len) { - DECLARE_BITMAP(result, SLICE_NUM_HIGH); - /* - * Make sure we just do bit compare only to the max - * addr limit and not the full bit map size. 
- */ - unsigned long slice_count = GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); + unsigned long end = start + len - 1; + u64 low_slices = 0; - if (!SLICE_NUM_HIGH) - return (mask->low_slices & available->low_slices) == - mask->low_slices; + if (start < SLICE_LOW_TOP) { + unsigned long mend = min(end, + (unsigned long)(SLICE_LOW_TOP - 1)); - bitmap_and(result, mask->high_slices, - available->high_slices, slice_count); + low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1)) + - (1u << GET_LOW_SLICE_INDEX(start)); + } + if ((low_slices & available->low_slices) != low_slices) + return false; - return (mask->low_slices & available->low_slices) == mask->low_slices && - bitmap_equal(result, mask->high_slices, slice_count); + if (SLICE_NUM_HIGH && ((start + len) > SLICE_LOW_TOP)) { + unsigned long start_index = GET_HIGH_SLICE_INDEX(start); + unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT)); + unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index; + unsigned long i; + + for (i = start_index; i < start_index + count; i++) { + if (!test_bit(i, available->high_slices)) + return false; + } + } + + return true; } static void slice_flush_segments(void *parm) @@ -558,14 +568,10 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, /* First check hint if it's valid or if we have MAP_FIXED */ if (addr != 0 || fixed) { - /* Build a mask for the requested range */ - slice_range_to_mask(addr, len, &mask); - slice_print_mask(" mask", &mask); - /* Check if we fit in the good mask. If we do, we just return, * nothing else to do */ - if (slice_check_fit(mm, &mask, &good_mask)) { + if (slice_check_range_fits(mm, &good_mask, addr, len)) { slice_dbg(" fits good !\n"); return addr; } @@ -591,10 +597,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, slice_or_mask(&potential_mask, &good_mask); slice_print_mask(" potential", &potential_mask); - if ((addr != 0 || fixed) && - slice_check_fit(mm, &mask, &potential_mask)) { - slice_dbg(" fits potential !\n"); - goto convert; + if (addr != 0 || fixed) { + if (slice_check_range_fits(mm, &potential_mask, addr, len)) { + slice_dbg(" fits potential !\n"); + goto convert; + } } /* If we have MAP_FIXED and failed the above steps, then error out */ @@ -772,13 +779,12 @@ void slice_set_range_psize(struct mm_struct *mm, unsigned long start, int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr, unsigned long len) { - struct slice_mask mask, available; + struct slice_mask available; unsigned int psize = mm->context.user_psize; if (radix_enabled()) return 0; - slice_range_to_mask(addr, len, &mask); available = *slice_mask_for_size(mm, psize); #ifdef CONFIG_PPC_64K_PAGES /* We need to account for 4k slices too */ @@ -795,6 +801,6 @@ int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr, slice_print_mask(" mask", &mask); slice_print_mask(" available", &available); #endif - return !slice_check_fit(mm, &mask, &available); + return !slice_check_range_fits(mm, &available, addr, len); } #endif -- cgit v1.2.3 From b8c93549142077da10a02329378a8cfa46ce511c Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 7 Mar 2018 11:37:14 +1000 Subject: powerpc/mm/slice: Switch to 3-operand slice bitops helpers This converts the slice_mask bit operation helpers to be the usual 3-operand kind, which allows 2 inputs to set a different output without an extra copy, which is used in the next patch. Adds slice_copy_mask, which will be used in the next patch. 
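As a usage sketch (a hypothetical wrapper, not code from the patch), the 3-operand form covers both the old in-place callers and the new combine-into-a-third-mask case:

/* dst may alias an input, so existing call sites simply name the
 * destination twice; two distinct inputs can now fill a third mask
 * without an intermediate copy.
 */
static void slice_or_examples(struct slice_mask *a,
			      const struct slice_mask *b,
			      struct slice_mask *combined,
			      const struct slice_mask *good,
			      const struct slice_mask *compat)
{
	slice_or_mask(a, a, b);			/* in place: a |= b */
	slice_or_mask(combined, good, compat);	/* combined = good | compat */
}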
Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/mm/slice.c | 38 +++++++++++++++++++++++--------------- 1 file changed, 23 insertions(+), 15 deletions(-) diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index 0a5efa40e739..4b2fd37b727a 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -429,25 +429,33 @@ static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len, return slice_find_area_bottomup(mm, len, mask, psize, high_limit); } -static inline void slice_or_mask(struct slice_mask *dst, +static inline void slice_copy_mask(struct slice_mask *dst, const struct slice_mask *src) { - dst->low_slices |= src->low_slices; + dst->low_slices = src->low_slices; if (!SLICE_NUM_HIGH) return; - bitmap_or(dst->high_slices, dst->high_slices, src->high_slices, - SLICE_NUM_HIGH); + bitmap_copy(dst->high_slices, src->high_slices, SLICE_NUM_HIGH); } -static inline void slice_andnot_mask(struct slice_mask *dst, - const struct slice_mask *src) +static inline void slice_or_mask(struct slice_mask *dst, + const struct slice_mask *src1, + const struct slice_mask *src2) { - dst->low_slices &= ~src->low_slices; + dst->low_slices = src1->low_slices | src2->low_slices; + if (!SLICE_NUM_HIGH) + return; + bitmap_or(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH); +} +static inline void slice_andnot_mask(struct slice_mask *dst, + const struct slice_mask *src1, + const struct slice_mask *src2) +{ + dst->low_slices = src1->low_slices & ~src2->low_slices; if (!SLICE_NUM_HIGH) return; - bitmap_andnot(dst->high_slices, dst->high_slices, src->high_slices, - SLICE_NUM_HIGH); + bitmap_andnot(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH); } #ifdef CONFIG_PPC_64K_PAGES @@ -562,7 +570,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, if (psize == MMU_PAGE_64K) { compat_mask = *slice_mask_for_size(mm, MMU_PAGE_4K); if (fixed) - slice_or_mask(&good_mask, &compat_mask); + slice_or_mask(&good_mask, &good_mask, &compat_mask); } #endif @@ -594,7 +602,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, * empty and thus can be converted */ slice_mask_for_free(mm, &potential_mask, high_limit); - slice_or_mask(&potential_mask, &good_mask); + slice_or_mask(&potential_mask, &potential_mask, &good_mask); slice_print_mask(" potential", &potential_mask); if (addr != 0 || fixed) { @@ -631,7 +639,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, #ifdef CONFIG_PPC_64K_PAGES if (addr == -ENOMEM && psize == MMU_PAGE_64K) { /* retry the search with 4k-page slices included */ - slice_or_mask(&potential_mask, &compat_mask); + slice_or_mask(&potential_mask, &potential_mask, &compat_mask); addr = slice_find_area(mm, len, &potential_mask, psize, topdown, high_limit); } @@ -645,8 +653,8 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, slice_print_mask(" mask", &mask); convert: - slice_andnot_mask(&mask, &good_mask); - slice_andnot_mask(&mask, &compat_mask); + slice_andnot_mask(&mask, &mask, &good_mask); + slice_andnot_mask(&mask, &mask, &compat_mask); if (mask.low_slices || (SLICE_NUM_HIGH && !bitmap_empty(mask.high_slices, SLICE_NUM_HIGH))) { @@ -791,7 +799,7 @@ int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr, if (psize == MMU_PAGE_64K) { struct slice_mask compat_mask; compat_mask = *slice_mask_for_size(mm, MMU_PAGE_4K); - slice_or_mask(&available, &compat_mask); + 
slice_or_mask(&available, &available, &compat_mask); } #endif -- cgit v1.2.3 From 74907558301f6540422ed1f4012af241b2c91733 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 7 Mar 2018 11:37:15 +1000 Subject: powerpc/mm/slice: remove dead code This code is never compiled in, and it gets broken by the next patch, so remove it. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/mm/slice.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index 4b2fd37b727a..c4cb4de1fab5 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -803,12 +803,6 @@ int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr, } #endif -#if 0 /* too verbose */ - slice_dbg("is_hugepage_only_range(mm=%p, addr=%lx, len=%lx)\n", - mm, addr, len); - slice_print_mask(" mask", &mask); - slice_print_mask(" available", &available); -#endif return !slice_check_range_fits(mm, &available, addr, len); } #endif -- cgit v1.2.3 From d262bd5a73998252d1cdf632bedaf1ca540839d8 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 7 Mar 2018 11:37:16 +1000 Subject: powerpc/mm/slice: Use const pointers to cached slice masks where possible The slice_mask cache was a basic conversion which copied the slice mask into caller's structures, because that's how the original code worked. In most cases the pointer can be used directly instead, saving a copy and an on-stack structure. On POWER8, this increases vfork+exec+exit performance by 0.3% and reduces time to mmap+munmap a 64kB page by 2%. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/mm/slice.c | 79 ++++++++++++++++++++++++------------------------- 1 file changed, 38 insertions(+), 41 deletions(-) diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index c4cb4de1fab5..b3b465c37224 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -468,10 +468,10 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, unsigned long flags, unsigned int psize, int topdown) { - struct slice_mask mask; struct slice_mask good_mask; struct slice_mask potential_mask; - struct slice_mask compat_mask; + const struct slice_mask *maskp; + const struct slice_mask *compat_maskp = NULL; int fixed = (flags & MAP_FIXED); int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT); unsigned long page_size = 1UL << pshift; @@ -505,22 +505,6 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, on_each_cpu(slice_flush_segments, mm, 1); } - /* - * init different masks - */ - mask.low_slices = 0; - - /* silence stupid warning */; - potential_mask.low_slices = 0; - - compat_mask.low_slices = 0; - - if (SLICE_NUM_HIGH) { - bitmap_zero(mask.high_slices, SLICE_NUM_HIGH); - bitmap_zero(potential_mask.high_slices, SLICE_NUM_HIGH); - bitmap_zero(compat_mask.high_slices, SLICE_NUM_HIGH); - } - /* Sanity checks */ BUG_ON(mm->task_size == 0); BUG_ON(mm->context.slb_addr_limit == 0); @@ -543,8 +527,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, /* First make up a "good" mask of slices that have the right size * already */ - good_mask = *slice_mask_for_size(mm, psize); - slice_print_mask(" good_mask", &good_mask); + maskp = slice_mask_for_size(mm, psize); /* * Here "good" means slices that are already the right page size, @@ -565,14 +548,24 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, * search in good | compat | free, found => convert free. 
*/ -#ifdef CONFIG_PPC_64K_PAGES - /* If we support combo pages, we can allow 64k pages in 4k slices */ - if (psize == MMU_PAGE_64K) { - compat_mask = *slice_mask_for_size(mm, MMU_PAGE_4K); + /* + * If we support combo pages, we can allow 64k pages in 4k slices + * The mask copies could be avoided in most cases here if we had + * a pointer to good mask for the next code to use. + */ + if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) { + compat_maskp = slice_mask_for_size(mm, MMU_PAGE_4K); if (fixed) - slice_or_mask(&good_mask, &good_mask, &compat_mask); + slice_or_mask(&good_mask, maskp, compat_maskp); + else + slice_copy_mask(&good_mask, maskp); + } else { + slice_copy_mask(&good_mask, maskp); } -#endif + + slice_print_mask(" good_mask", &good_mask); + if (compat_maskp) + slice_print_mask(" compat_mask", compat_maskp); /* First check hint if it's valid or if we have MAP_FIXED */ if (addr != 0 || fixed) { @@ -639,7 +632,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, #ifdef CONFIG_PPC_64K_PAGES if (addr == -ENOMEM && psize == MMU_PAGE_64K) { /* retry the search with 4k-page slices included */ - slice_or_mask(&potential_mask, &potential_mask, &compat_mask); + slice_or_mask(&potential_mask, &potential_mask, compat_maskp); addr = slice_find_area(mm, len, &potential_mask, psize, topdown, high_limit); } @@ -648,17 +641,18 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, if (addr == -ENOMEM) return -ENOMEM; - slice_range_to_mask(addr, len, &mask); + slice_range_to_mask(addr, len, &potential_mask); slice_dbg(" found potential area at 0x%lx\n", addr); - slice_print_mask(" mask", &mask); + slice_print_mask(" mask", &potential_mask); convert: - slice_andnot_mask(&mask, &mask, &good_mask); - slice_andnot_mask(&mask, &mask, &compat_mask); - if (mask.low_slices || - (SLICE_NUM_HIGH && - !bitmap_empty(mask.high_slices, SLICE_NUM_HIGH))) { - slice_convert(mm, &mask, psize); + slice_andnot_mask(&potential_mask, &potential_mask, &good_mask); + if (compat_maskp && !fixed) + slice_andnot_mask(&potential_mask, &potential_mask, compat_maskp); + if (potential_mask.low_slices || + (SLICE_NUM_HIGH && + !bitmap_empty(potential_mask.high_slices, SLICE_NUM_HIGH))) { + slice_convert(mm, &potential_mask, psize); if (psize > MMU_PAGE_BASE) on_each_cpu(slice_flush_segments, mm, 1); } @@ -787,22 +781,25 @@ void slice_set_range_psize(struct mm_struct *mm, unsigned long start, int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr, unsigned long len) { - struct slice_mask available; + const struct slice_mask *maskp; unsigned int psize = mm->context.user_psize; if (radix_enabled()) return 0; - available = *slice_mask_for_size(mm, psize); + maskp = slice_mask_for_size(mm, psize); #ifdef CONFIG_PPC_64K_PAGES /* We need to account for 4k slices too */ if (psize == MMU_PAGE_64K) { - struct slice_mask compat_mask; - compat_mask = *slice_mask_for_size(mm, MMU_PAGE_4K); - slice_or_mask(&available, &available, &compat_mask); + const struct slice_mask *compat_maskp; + struct slice_mask available; + + compat_maskp = slice_mask_for_size(mm, MMU_PAGE_4K); + slice_or_mask(&available, maskp, compat_maskp); + return !slice_check_range_fits(mm, &available, addr, len); } #endif - return !slice_check_range_fits(mm, &available, addr, len); + return !slice_check_range_fits(mm, maskp, addr, len); } #endif -- cgit v1.2.3 From 014a32b30e9d81b47ef82b9995b52c3a0c8b4082 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 7 Mar 2018 11:37:17 +1000 
Subject: powerpc/mm/slice: remove radix calls to the slice code This is a tidy up which removes radix MMU calls into the slice code. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/hugetlb.h | 8 ++++---- arch/powerpc/mm/hugetlbpage.c | 6 ++++-- arch/powerpc/mm/slice.c | 17 ++++------------- 3 files changed, 12 insertions(+), 19 deletions(-) diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h index 1a4847f67ea8..48f2ed2a71ae 100644 --- a/arch/powerpc/include/asm/hugetlb.h +++ b/arch/powerpc/include/asm/hugetlb.h @@ -89,17 +89,17 @@ pte_t *huge_pte_offset_and_shift(struct mm_struct *mm, void flush_dcache_icache_hugepage(struct page *page); -#if defined(CONFIG_PPC_MM_SLICES) -int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr, +int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr, unsigned long len); -#else + static inline int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr, unsigned long len) { + if (IS_ENABLED(CONFIG_PPC_MM_SLICES) && !radix_enabled()) + return slice_is_hugepage_only_range(mm, addr, len); return 0; } -#endif void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, pte_t pte); diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 590be3fa0ce2..f4153f21d214 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -565,10 +565,12 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long vma_mmu_pagesize(struct vm_area_struct *vma) { #ifdef CONFIG_PPC_MM_SLICES - unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start); /* With radix we don't use slice, so derive it from vma*/ - if (!radix_enabled()) + if (!radix_enabled()) { + unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start); + return 1UL << mmu_psize_to_shift(psize); + } #endif if (!is_vm_hugetlb_page(vma)) return PAGE_SIZE; diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index b3b465c37224..1297b3ad7dd2 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -686,16 +686,8 @@ unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr) unsigned char *psizes; int index, mask_index; - /* - * Radix doesn't use slice, but can get enabled along with MMU_SLICE - */ - if (radix_enabled()) { -#ifdef CONFIG_PPC_64K_PAGES - return MMU_PAGE_64K; -#else - return MMU_PAGE_4K; -#endif - } + VM_BUG_ON(radix_enabled()); + if (addr < SLICE_LOW_TOP) { psizes = mm->context.low_slices_psize; index = GET_LOW_SLICE_INDEX(addr); @@ -778,14 +770,13 @@ void slice_set_range_psize(struct mm_struct *mm, unsigned long start, * for now as we only use slices with hugetlbfs enabled. This should * be fixed as the generic code gets fixed. */ -int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr, +int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr, unsigned long len) { const struct slice_mask *maskp; unsigned int psize = mm->context.user_psize; - if (radix_enabled()) - return 0; + VM_BUG_ON(radix_enabled()); maskp = slice_mask_for_size(mm, psize); #ifdef CONFIG_PPC_64K_PAGES -- cgit v1.2.3 From c2be663d5307fb9751a562ac664fa78cd7a00e2b Mon Sep 17 00:00:00 2001 From: Christophe Lombard Date: Tue, 20 Feb 2018 14:48:56 +0100 Subject: cxl: Fix timebase synchronization status on P9 The PSL Timebase register is updated by the PSL to maintain the timebase. 
On P9, the Timebase value is only provided by the CAPP, as received the last time a timebase request was performed. The timebase requests are initiated through the adapter configuration or application registers. The specific sysfs entry "/sys/class/cxl/cardxx/psl_timebase_synced" is now dynamically updated according to the content of the PSL Timebase register. Fixes: f24be42aab37 ("cxl: Add psl9 specific code") Signed-off-by: Christophe Lombard Reviewed-by: Vaibhav Jain Acked-by: Andrew Donnellan Acked-by: Frederic Barrat Signed-off-by: Michael Ellerman --- drivers/misc/cxl/pci.c | 17 ----------------- drivers/misc/cxl/sysfs.c | 12 ++++++++++++ 2 files changed, 12 insertions(+), 17 deletions(-) diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index e7ac78e85494..83f1d08058fc 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -659,9 +659,6 @@ static u64 timebase_read_xsl(struct cxl *adapter) static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev) { - u64 psl_tb; - int delta; - unsigned int retry = 0; struct device_node *np; adapter->psl_timebase_synced = false; @@ -689,20 +686,6 @@ static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev) cxl_p1_write(adapter, CXL_PSL_Control, 0x0000000000000000); cxl_p1_write(adapter, CXL_PSL_Control, CXL_PSL_Control_tb); - /* Wait until CORE TB and PSL TB difference <= 16usecs */ - do { - msleep(1); - if (retry++ > 5) { - dev_info(&dev->dev, "PSL timebase can't synchronize\n"); - return; - } - psl_tb = adapter->native->sl_ops->timebase_read(adapter); - delta = mftb() - psl_tb; - if (delta < 0) - delta = -delta; - } while (tb_to_ns(delta) > 16000); - - adapter->psl_timebase_synced = true; return; } diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c index a8b6d6a635e9..95285b7f636f 100644 --- a/drivers/misc/cxl/sysfs.c +++ b/drivers/misc/cxl/sysfs.c @@ -62,7 +62,19 @@ static ssize_t psl_timebase_synced_show(struct device *device, char *buf) { struct cxl *adapter = to_cxl_adapter(device); + u64 psl_tb, delta; + /* Recompute the status only in native mode */ + if (cpu_has_feature(CPU_FTR_HVMODE)) { + psl_tb = adapter->native->sl_ops->timebase_read(adapter); + delta = abs(mftb() - psl_tb); + + /* CORE TB and PSL TB difference <= 16usecs ? */ + adapter->psl_timebase_synced = (tb_to_ns(delta) < 16000) ? true : false; + pr_devel("PSL timebase %s - delta: 0x%016llx\n", + (tb_to_ns(delta) < 16000) ? "synchronized" : + "not synchronized", tb_to_ns(delta)); + } return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_timebase_synced); } -- cgit v1.2.3 From 720c84046c26444fe825f8614ddceb5c46539e67 Mon Sep 17 00:00:00 2001 From: Mark Hairgrove Date: Fri, 9 Feb 2018 19:20:06 -0800 Subject: powerpc/npu-dma.c: Fix crash after __mmu_notifier_register failure pnv_npu2_init_context wasn't checking the return code from __mmu_notifier_register. If __mmu_notifier_register failed, the npu_context was still assigned to the mm and the caller wasn't given any indication that things went wrong. Later on pnv_npu2_destroy_context would be called, which in turn called mmu_notifier_unregister and dropped mm->mm_count without having incremented it in the first place. This led to various forms of corruption like mm use-after-free and mm double-free. __mmu_notifier_register can fail with EINTR if a signal is pending, so this case can be frequent. This patch calls opal_npu_destroy_context on the failure paths, and makes sure not to assign mm->context.npu_context until past the failure points.
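In outline, the corrected flow is the sketch below; the function name and the opal_id/devid parameters are stand-ins for values the real code derives from the PCI device, so treat this as a condensed illustration rather than the patch itself:

/* Nothing is published to mm->context until every fallible step has
 * succeeded, and the failure path undoes the earlier OPAL setup.
 */
static struct npu_context *init_context_sketch(struct mm_struct *mm,
					       u64 opal_id, u32 devid)
{
	struct npu_context *ctx;
	int rc = -ENOMEM;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx) {
		kref_init(&ctx->kref);
		ctx->mm = mm;
		ctx->mn.ops = &nv_nmmu_notifier_ops;
		rc = __mmu_notifier_register(&ctx->mn, mm); /* may fail, e.g. -EINTR */
	}

	if (rc) {
		kfree(ctx);	/* kfree(NULL) is a no-op */
		opal_npu_destroy_context(opal_id, mm->context.id, devid);
		return ERR_PTR(rc);
	}

	mm->context.npu_context = ctx;	/* publish only after full success */
	return ctx;
}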
Signed-off-by: Mark Hairgrove Acked-By: Alistair Popple Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/powernv/npu-dma.c | 32 +++++++++++++++++++++----------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c index 77d6061fd0ce..69a4f9e8bd55 100644 --- a/arch/powerpc/platforms/powernv/npu-dma.c +++ b/arch/powerpc/platforms/powernv/npu-dma.c @@ -724,6 +724,11 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev, /* No nvlink associated with this GPU device */ return ERR_PTR(-ENODEV); + nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0); + if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index", + &nvlink_index))) + return ERR_PTR(-ENODEV); + if (!mm || mm->context.id == 0) { /* * Kernel thread contexts are not supported and context id 0 is @@ -751,25 +756,30 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev, */ npu_context = mm->context.npu_context; if (!npu_context) { + rc = -ENOMEM; npu_context = kzalloc(sizeof(struct npu_context), GFP_KERNEL); - if (!npu_context) - return ERR_PTR(-ENOMEM); + if (npu_context) { + kref_init(&npu_context->kref); + npu_context->mm = mm; + npu_context->mn.ops = &nv_nmmu_notifier_ops; + rc = __mmu_notifier_register(&npu_context->mn, mm); + } + + if (rc) { + kfree(npu_context); + opal_npu_destroy_context(nphb->opal_id, mm->context.id, + PCI_DEVID(gpdev->bus->number, + gpdev->devfn)); + return ERR_PTR(rc); + } mm->context.npu_context = npu_context; - npu_context->mm = mm; - npu_context->mn.ops = &nv_nmmu_notifier_ops; - __mmu_notifier_register(&npu_context->mn, mm); - kref_init(&npu_context->kref); } else { - kref_get(&npu_context->kref); + WARN_ON(!kref_get_unless_zero(&npu_context->kref)); } npu_context->release_cb = cb; npu_context->priv = priv; - nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0); - if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index", - &nvlink_index))) - return ERR_PTR(-ENODEV); /* * npdev is a pci_dev pointer setup by the PCI code. We assign it to -- cgit v1.2.3 From 45ddea8a73a25461387eb8e87f3e0ecca084799b Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Fri, 9 Feb 2018 11:49:06 -0600 Subject: powerpc/vas: Fix cleanup when VAS is not configured When VAS is not configured, unregister the platform driver. Also simplify cleanup by delaying vas debugfs init until we know VAS is configured. Signed-off-by: Sukadev Bhattiprolu Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/powernv/vas-debug.c | 11 +++++++++++ arch/powerpc/platforms/powernv/vas.c | 6 +++--- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/platforms/powernv/vas-debug.c b/arch/powerpc/platforms/powernv/vas-debug.c index b4de4c6fd38b..4f7276ebdf9c 100644 --- a/arch/powerpc/platforms/powernv/vas-debug.c +++ b/arch/powerpc/platforms/powernv/vas-debug.c @@ -179,6 +179,7 @@ void vas_instance_init_dbgdir(struct vas_instance *vinst) { struct dentry *d; + vas_init_dbgdir(); if (!vas_debugfs) return; @@ -201,8 +202,18 @@ free_name: vinst->dbgdir = NULL; } +/* + * Set up the "root" VAS debugfs dir. Return if we already set it up + * (or failed to) in an earlier instance of VAS. 
+ */ void vas_init_dbgdir(void) { + static bool first_time = true; + + if (!first_time) + return; + + first_time = false; vas_debugfs = debugfs_create_dir("vas", NULL); if (IS_ERR(vas_debugfs)) vas_debugfs = NULL; diff --git a/arch/powerpc/platforms/powernv/vas.c b/arch/powerpc/platforms/powernv/vas.c index aebbe95c9230..5a2b24cbbc88 100644 --- a/arch/powerpc/platforms/powernv/vas.c +++ b/arch/powerpc/platforms/powernv/vas.c @@ -160,8 +160,6 @@ static int __init vas_init(void) int found = 0; struct device_node *dn; - vas_init_dbgdir(); - platform_driver_register(&vas_driver); for_each_compatible_node(dn, NULL, "ibm,vas") { @@ -169,8 +167,10 @@ static int __init vas_init(void) found++; } - if (!found) + if (!found) { + platform_driver_unregister(&vas_driver); return -ENODEV; + } pr_devel("Found %d instances\n", found); -- cgit v1.2.3 From 007bb7d6c77ef2243dabf9c4132afa68bec82817 Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Fri, 9 Feb 2018 19:49:27 -0800 Subject: powerpc/vas: Add a couple of trace points Add a couple of trace points in the VAS driver Signed-off-by: Sukadev Bhattiprolu [mpe: Add SPDX tag to new header] Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/powernv/vas-trace.h | 113 ++++++++++++++++++++++++++++ arch/powerpc/platforms/powernv/vas-window.c | 9 +++ 2 files changed, 122 insertions(+) create mode 100644 arch/powerpc/platforms/powernv/vas-trace.h diff --git a/arch/powerpc/platforms/powernv/vas-trace.h b/arch/powerpc/platforms/powernv/vas-trace.h new file mode 100644 index 000000000000..a449b9f0c12e --- /dev/null +++ b/arch/powerpc/platforms/powernv/vas-trace.h @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM vas + +#if !defined(_VAS_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) + +#define _VAS_TRACE_H +#include +#include +#include + +TRACE_EVENT( vas_rx_win_open, + + TP_PROTO(struct task_struct *tsk, + int vasid, + int cop, + struct vas_rx_win_attr *rxattr), + + TP_ARGS(tsk, vasid, cop, rxattr), + + TP_STRUCT__entry( + __field(struct task_struct *, tsk) + __field(int, pid) + __field(int, cop) + __field(int, vasid) + __field(struct vas_rx_win_attr *, rxattr) + __field(int, lnotify_lpid) + __field(int, lnotify_pid) + __field(int, lnotify_tid) + ), + + TP_fast_assign( + __entry->pid = tsk->pid; + __entry->vasid = vasid; + __entry->cop = cop; + __entry->lnotify_lpid = rxattr->lnotify_lpid; + __entry->lnotify_pid = rxattr->lnotify_pid; + __entry->lnotify_tid = rxattr->lnotify_tid; + ), + + TP_printk("pid=%d, vasid=%d, cop=%d, lpid=%d, pid=%d, tid=%d", + __entry->pid, __entry->vasid, __entry->cop, + __entry->lnotify_lpid, __entry->lnotify_pid, + __entry->lnotify_tid) +); + +TRACE_EVENT( vas_tx_win_open, + + TP_PROTO(struct task_struct *tsk, + int vasid, + int cop, + struct vas_tx_win_attr *txattr), + + TP_ARGS(tsk, vasid, cop, txattr), + + TP_STRUCT__entry( + __field(struct task_struct *, tsk) + __field(int, pid) + __field(int, cop) + __field(int, vasid) + __field(struct vas_tx_win_attr *, txattr) + __field(int, lpid) + __field(int, pidr) + ), + + TP_fast_assign( + __entry->pid = tsk->pid; + __entry->vasid = vasid; + __entry->cop = cop; + __entry->lpid = txattr->lpid; + __entry->pidr = txattr->pidr; + ), + + TP_printk("pid=%d, vasid=%d, cop=%d, lpid=%d, pidr=%d", + __entry->pid, __entry->vasid, __entry->cop, + __entry->lpid, __entry->pidr) +); + +TRACE_EVENT( vas_paste_crb, + + TP_PROTO(struct task_struct *tsk, + struct vas_window *win), + + TP_ARGS(tsk, win), + + TP_STRUCT__entry( + __field(struct 
task_struct *, tsk) + __field(struct vas_window *, win) + __field(int, pid) + __field(int, vasid) + __field(int, winid) + __field(unsigned long, paste_kaddr) + ), + + TP_fast_assign( + __entry->pid = tsk->pid; + __entry->vasid = win->vinst->vas_id; + __entry->winid = win->winid; + __entry->paste_kaddr = (unsigned long)win->paste_kaddr + ), + + TP_printk("pid=%d, vasid=%d, winid=%d, paste_kaddr=0x%016lx\n", + __entry->pid, __entry->vasid, __entry->winid, + __entry->paste_kaddr) +); + +#endif /* _VAS_TRACE_H */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH ../../arch/powerpc/platforms/powernv +#define TRACE_INCLUDE_FILE vas-trace +#include diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c index b7c53a51c31b..ff9f48812331 100644 --- a/arch/powerpc/platforms/powernv/vas-window.c +++ b/arch/powerpc/platforms/powernv/vas-window.c @@ -21,6 +21,9 @@ #include "vas.h" #include "copy-paste.h" +#define CREATE_TRACE_POINTS +#include "vas-trace.h" + /* * Compute the paste address region for the window @window using the * ->paste_base_addr and ->paste_win_id_shift we got from device tree. @@ -880,6 +883,8 @@ struct vas_window *vas_rx_win_open(int vasid, enum vas_cop_type cop, struct vas_winctx winctx; struct vas_instance *vinst; + trace_vas_rx_win_open(current, vasid, cop, rxattr); + if (!rx_win_args_valid(cop, rxattr)) return ERR_PTR(-EINVAL); @@ -1008,6 +1013,8 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop, struct vas_winctx winctx; struct vas_instance *vinst; + trace_vas_tx_win_open(current, vasid, cop, attr); + if (!tx_win_args_valid(cop, attr)) return ERR_PTR(-EINVAL); @@ -1100,6 +1107,8 @@ int vas_paste_crb(struct vas_window *txwin, int offset, bool re) void *addr; uint64_t val; + trace_vas_paste_crb(current, txwin); + /* * Only NX windows are supported for now and hardware assumes * report-enable flag is set for NX windows. Ensure software -- cgit v1.2.3 From 890ae7979758568734881ad0f382c4064e2386c3 Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Wed, 21 Feb 2018 22:46:33 +0100 Subject: powerpc/time: stop validating rtc_time in .read_time The RTC core is always calling rtc_valid_tm after the read_time callback. It is not necessary to call it just before returning from the callback. Signed-off-by: Alexandre Belloni Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/time.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index a32823dcd9a4..f7d96a68ecaa 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -1234,7 +1234,7 @@ void calibrate_delay(void) static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm) { ppc_md.get_rtc_time(tm); - return rtc_valid_tm(tm); + return 0; } static int rtc_generic_set_time(struct device *dev, struct rtc_time *tm) -- cgit v1.2.3 From 7004263bd4f4c79da9ca2a1d04d38d4d6ed609ab Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Sat, 17 Feb 2018 00:43:23 +0100 Subject: powerpc/5200: dts: digsy_mtc.dts: fix rv3029 compatible The proper compatible for rv3029 is microcrystal,rv3029. 
Acked-by: Anatolij Gustschin Signed-off-by: Alexandre Belloni Signed-off-by: Michael Ellerman --- arch/powerpc/boot/dts/digsy_mtc.dts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/boot/dts/digsy_mtc.dts b/arch/powerpc/boot/dts/digsy_mtc.dts index c280e75c86bf..c3922fc03e0b 100644 --- a/arch/powerpc/boot/dts/digsy_mtc.dts +++ b/arch/powerpc/boot/dts/digsy_mtc.dts @@ -78,7 +78,7 @@ }; rtc@56 { - compatible = "mc,rv3029c2"; + compatible = "microcrystal,rv3029"; reg = <0x56>; }; -- cgit v1.2.3 From 751ba79cc552c146595cd439b21c4ff8998c3b69 Mon Sep 17 00:00:00 2001 From: Matt Brown Date: Fri, 4 Aug 2017 13:42:32 +1000 Subject: lib/raid6/altivec: Add vpermxor implementation for raid6 Q syndrome This patch uses the vpermxor instruction to optimise the raid6 Q syndrome. This instruction was made available with POWER8, ISA version 2.07. It allows for both vperm and vxor instructions to be done in a single instruction. This has been tested for correctness on a ppc64le vm with a basic RAID6 setup containing 5 drives. The performance benchmarks are from the raid6test in the /lib/raid6/test directory. These results are from an IBM Firestone machine with ppc64le architecture. The benchmark results show a 35% speed increase over the best existing algorithm for powerpc (altivec). The raid6test has also been run on a big-endian ppc64 vm to ensure it also works for big-endian architectures. Performance benchmarks: raid6: altivecx4 gen() 18773 MB/s raid6: altivecx8 gen() 19438 MB/s raid6: vpermxor4 gen() 25112 MB/s raid6: vpermxor8 gen() 26279 MB/s Signed-off-by: Matt Brown Reviewed-by: Daniel Axtens [mpe: Add VPERMXOR macro so we can build with old binutils] Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/ppc-opcode.h | 6 ++ include/linux/raid/pq.h | 4 ++ lib/raid6/.gitignore | 1 + lib/raid6/Makefile | 27 ++++++++- lib/raid6/algos.c | 4 ++ lib/raid6/test/Makefile | 17 +++++- lib/raid6/vpermxor.uc | 105 ++++++++++++++++++++++++++++++++++ 7 files changed, 161 insertions(+), 3 deletions(-) create mode 100644 lib/raid6/vpermxor.uc diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index f1083bcf449c..7370da18035e 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -271,6 +271,7 @@ #define PPC_INST_TLBSRX_DOT 0x7c0006a5 #define PPC_INST_VPMSUMW 0x10000488 #define PPC_INST_VPMSUMD 0x100004c8 +#define PPC_INST_VPERMXOR 0x1000002d #define PPC_INST_XXLOR 0xf0000490 #define PPC_INST_XXSWAPD 0xf0000250 #define PPC_INST_XVCPSGNDP 0xf0000780 @@ -517,6 +518,11 @@ #define XVCPSGNDP(t, a, b) stringify_in_c(.long (PPC_INST_XVCPSGNDP | \ VSX_XX3((t), (a), (b)))) +#define VPERMXOR(vrt, vra, vrb, vrc) \ + stringify_in_c(.long (PPC_INST_VPERMXOR | \ + ___PPC_RT(vrt) | ___PPC_RA(vra) | \ + ___PPC_RB(vrb) | (((vrc) & 0x1f) << 6))) + #define PPC_NAP stringify_in_c(.long PPC_INST_NAP) #define PPC_SLEEP stringify_in_c(.long PPC_INST_SLEEP) #define PPC_WINKLE stringify_in_c(.long PPC_INST_WINKLE) diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h index 583cdd3d49ca..fd2e02461e41 100644 --- a/include/linux/raid/pq.h +++ b/include/linux/raid/pq.h @@ -107,6 +107,10 @@ extern const struct raid6_calls raid6_avx512x2; extern const struct raid6_calls raid6_avx512x4; extern const struct raid6_calls raid6_tilegx8; extern const struct raid6_calls raid6_s390vx8; +extern const struct raid6_calls raid6_vpermxor1; +extern const struct raid6_calls raid6_vpermxor2; +extern const struct raid6_calls 
raid6_vpermxor4; +extern const struct raid6_calls raid6_vpermxor8; struct raid6_recov_calls { void (*data2)(int, size_t, int, int, void **); diff --git a/lib/raid6/.gitignore b/lib/raid6/.gitignore index f01b1cb04f91..3de0d8921286 100644 --- a/lib/raid6/.gitignore +++ b/lib/raid6/.gitignore @@ -4,3 +4,4 @@ int*.c tables.c neon?.c s390vx?.c +vpermxor*.c diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile index 4add700ddfe3..21f59443e99e 100644 --- a/lib/raid6/Makefile +++ b/lib/raid6/Makefile @@ -5,7 +5,8 @@ raid6_pq-y += algos.o recov.o tables.o int1.o int2.o int4.o \ int8.o int16.o int32.o raid6_pq-$(CONFIG_X86) += recov_ssse3.o recov_avx2.o mmx.o sse1.o sse2.o avx2.o avx512.o recov_avx512.o -raid6_pq-$(CONFIG_ALTIVEC) += altivec1.o altivec2.o altivec4.o altivec8.o +raid6_pq-$(CONFIG_ALTIVEC) += altivec1.o altivec2.o altivec4.o altivec8.o \ + vpermxor1.o vpermxor2.o vpermxor4.o vpermxor8.o raid6_pq-$(CONFIG_KERNEL_MODE_NEON) += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o raid6_pq-$(CONFIG_TILEGX) += tilegx8.o raid6_pq-$(CONFIG_S390) += s390vx8.o recov_s390xc.o @@ -91,6 +92,30 @@ $(obj)/altivec8.c: UNROLL := 8 $(obj)/altivec8.c: $(src)/altivec.uc $(src)/unroll.awk FORCE $(call if_changed,unroll) +CFLAGS_vpermxor1.o += $(altivec_flags) +targets += vpermxor1.c +$(obj)/vpermxor1.c: UNROLL := 1 +$(obj)/vpermxor1.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE + $(call if_changed,unroll) + +CFLAGS_vpermxor2.o += $(altivec_flags) +targets += vpermxor2.c +$(obj)/vpermxor2.c: UNROLL := 2 +$(obj)/vpermxor2.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE + $(call if_changed,unroll) + +CFLAGS_vpermxor4.o += $(altivec_flags) +targets += vpermxor4.c +$(obj)/vpermxor4.c: UNROLL := 4 +$(obj)/vpermxor4.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE + $(call if_changed,unroll) + +CFLAGS_vpermxor8.o += $(altivec_flags) +targets += vpermxor8.c +$(obj)/vpermxor8.c: UNROLL := 8 +$(obj)/vpermxor8.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE + $(call if_changed,unroll) + CFLAGS_neon1.o += $(NEON_FLAGS) targets += neon1.c $(obj)/neon1.c: UNROLL := 1 diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c index 476994723258..b2e681018145 100644 --- a/lib/raid6/algos.c +++ b/lib/raid6/algos.c @@ -74,6 +74,10 @@ const struct raid6_calls * const raid6_algos[] = { &raid6_altivec2, &raid6_altivec4, &raid6_altivec8, + &raid6_vpermxor1, + &raid6_vpermxor2, + &raid6_vpermxor4, + &raid6_vpermxor8, #endif #if defined(CONFIG_TILEGX) &raid6_tilegx8, diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile index be1010bdc435..ef6d0e00f189 100644 --- a/lib/raid6/test/Makefile +++ b/lib/raid6/test/Makefile @@ -48,7 +48,8 @@ else gcc -c -x c - >&/dev/null && \ rm ./-.o && echo yes) ifeq ($(HAS_ALTIVEC),yes) - OBJS += altivec1.o altivec2.o altivec4.o altivec8.o + OBJS += altivec1.o altivec2.o altivec4.o altivec8.o \ + vpermxor1.o vpermxor2.o vpermxor4.o vpermxor8.o endif endif ifeq ($(ARCH),tilegx) @@ -98,6 +99,18 @@ altivec4.c: altivec.uc ../unroll.awk altivec8.c: altivec.uc ../unroll.awk $(AWK) ../unroll.awk -vN=8 < altivec.uc > $@ +vpermxor1.c: vpermxor.uc ../unroll.awk + $(AWK) ../unroll.awk -vN=1 < vpermxor.uc > $@ + +vpermxor2.c: vpermxor.uc ../unroll.awk + $(AWK) ../unroll.awk -vN=2 < vpermxor.uc > $@ + +vpermxor4.c: vpermxor.uc ../unroll.awk + $(AWK) ../unroll.awk -vN=4 < vpermxor.uc > $@ + +vpermxor8.c: vpermxor.uc ../unroll.awk + $(AWK) ../unroll.awk -vN=8 < vpermxor.uc > $@ + int1.c: int.uc ../unroll.awk $(AWK) ../unroll.awk -vN=1 < int.uc > $@ @@ -123,7 +136,7 @@ tables.c: mktables ./mktables > 
tables.c clean: - rm -f *.o *.a mktables mktables.c *.uc int*.c altivec*.c neon*.c tables.c raid6test + rm -f *.o *.a mktables mktables.c *.uc int*.c altivec*.c vpermxor*.c neon*.c tables.c raid6test rm -f tilegx*.c spotless: clean diff --git a/lib/raid6/vpermxor.uc b/lib/raid6/vpermxor.uc new file mode 100644 index 000000000000..10475dc423c1 --- /dev/null +++ b/lib/raid6/vpermxor.uc @@ -0,0 +1,105 @@ +/* + * Copyright 2017, Matt Brown, IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * vpermxor$#.c + * + * Based on H. Peter Anvin's paper - The mathematics of RAID-6 + * + * $#-way unrolled portable integer math RAID-6 instruction set + * This file is postprocessed using unroll.awk + * + * vpermxor$#.c makes use of the vpermxor instruction to optimise the RAID6 Q + * syndrome calculations. + * This can be run on systems which have both Altivec and vpermxor instruction. + * + * This instruction was introduced in POWER8 - ISA v2.07. + */ + +#include +#ifdef CONFIG_ALTIVEC + +#include +#ifdef __KERNEL__ +#include +#include +#include +#endif + +typedef vector unsigned char unative_t; +#define NSIZE sizeof(unative_t) + +static const vector unsigned char gf_low = {0x1e, 0x1c, 0x1a, 0x18, 0x16, 0x14, + 0x12, 0x10, 0x0e, 0x0c, 0x0a, 0x08, + 0x06, 0x04, 0x02,0x00}; +static const vector unsigned char gf_high = {0xfd, 0xdd, 0xbd, 0x9d, 0x7d, 0x5d, + 0x3d, 0x1d, 0xe0, 0xc0, 0xa0, 0x80, + 0x60, 0x40, 0x20, 0x00}; + +static void noinline raid6_vpermxor$#_gen_syndrome_real(int disks, size_t bytes, + void **ptrs) +{ + u8 **dptr = (u8 **)ptrs; + u8 *p, *q; + int d, z, z0; + unative_t wp$$, wq$$, wd$$; + + z0 = disks - 3; /* Highest data disk */ + p = dptr[z0+1]; /* XOR parity */ + q = dptr[z0+2]; /* RS syndrome */ + + for (d = 0; d < bytes; d += NSIZE*$#) { + wp$$ = wq$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; + + for (z = z0-1; z>=0; z--) { + wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE]; + /* P syndrome */ + wp$$ = vec_xor(wp$$, wd$$); + + /* Q syndrome */ + asm(VPERMXOR(%0,%1,%2,%3):"=v"(wq$$):"v"(gf_high), "v"(gf_low), "v"(wq$$)); + wq$$ = vec_xor(wq$$, wd$$); + } + *(unative_t *)&p[d+NSIZE*$$] = wp$$; + *(unative_t *)&q[d+NSIZE*$$] = wq$$; + } +} + +static void raid6_vpermxor$#_gen_syndrome(int disks, size_t bytes, void **ptrs) +{ + preempt_disable(); + enable_kernel_altivec(); + + raid6_vpermxor$#_gen_syndrome_real(disks, bytes, ptrs); + + disable_kernel_altivec(); + preempt_enable(); +} + +int raid6_have_altivec_vpermxor(void); +#if $# == 1 +int raid6_have_altivec_vpermxor(void) +{ + /* Check if arch has both altivec and the vpermxor instructions */ +# ifdef __KERNEL__ + return (cpu_has_feature(CPU_FTR_ALTIVEC_COMP) && + cpu_has_feature(CPU_FTR_ARCH_207S)); +# else + return 1; +#endif + +} +#endif + +const struct raid6_calls raid6_vpermxor$# = { + raid6_vpermxor$#_gen_syndrome, + NULL, + raid6_have_altivec_vpermxor, + "vpermxor$#", + 0 +}; +#endif -- cgit v1.2.3 From aa9532d4899ff14e50de10d261b761d157cd2ae3 Mon Sep 17 00:00:00 2001 From: Matt Brown Date: Fri, 4 Aug 2017 13:42:33 +1000 Subject: lib/raid6: Build proper raid6test files on powerpc Previously the raid6 test Makefile did not build the POWER specific files (altivec and vpermxor). This patch fixes the bug, so that all appropriate files for powerpc are built. 
This patch also fixes the missing and mismatched ifdef statements to allow the altivec.uc file to be built correctly. Signed-off-by: Matt Brown Signed-off-by: Michael Ellerman --- lib/raid6/altivec.uc | 3 +++ lib/raid6/test/Makefile | 5 +++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/lib/raid6/altivec.uc b/lib/raid6/altivec.uc index 682aae8a1fef..d20ed0d11411 100644 --- a/lib/raid6/altivec.uc +++ b/lib/raid6/altivec.uc @@ -24,10 +24,13 @@ #include +#ifdef CONFIG_ALTIVEC + #include #ifdef __KERNEL__ # include # include +#endif /* __KERNEL__ */ /* * This is the C data type to use. We use a vector of diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile index ef6d0e00f189..5050e270c06b 100644 --- a/lib/raid6/test/Makefile +++ b/lib/raid6/test/Makefile @@ -45,9 +45,10 @@ else ifeq ($(HAS_NEON),yes) CFLAGS += -DCONFIG_KERNEL_MODE_NEON=1 else HAS_ALTIVEC := $(shell printf '\#include \nvector int a;\n' |\ - gcc -c -x c - >&/dev/null && \ - rm ./-.o && echo yes) + gcc -c -x c - >/dev/null && rm ./-.o && echo yes) ifeq ($(HAS_ALTIVEC),yes) + CFLAGS += -I../../../arch/powerpc/include + CFLAGS += -DCONFIG_ALTIVEC OBJS += altivec1.o altivec2.o altivec4.o altivec8.o \ vpermxor1.o vpermxor2.o vpermxor4.o vpermxor8.o endif -- cgit v1.2.3 From 31513207ce72fef5978e8b284e53f294c034ae51 Mon Sep 17 00:00:00 2001 From: Matt Brown Date: Thu, 20 Jul 2017 16:25:14 +1000 Subject: powerpc: Remove unused flush_dcache_phys_range() The flush_dcache_phys_range() function is no longer used in the kernel. The last usage was removed in c40785ad305b ("powerpc/dart: Use a cachable DART"). This patch removes the function and declaration. Signed-off-by: Matt Brown [mpe: Munge change log, include commit that removed last user] Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/cacheflush.h | 1 - arch/powerpc/kernel/misc_64.S | 38 ----------------------------------- 2 files changed, 39 deletions(-) diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h index b77f0364df94..11843e37d9cf 100644 --- a/arch/powerpc/include/asm/cacheflush.h +++ b/arch/powerpc/include/asm/cacheflush.h @@ -99,7 +99,6 @@ static inline void invalidate_dcache_range(unsigned long start, #ifdef CONFIG_PPC64 extern void flush_dcache_range(unsigned long start, unsigned long stop); extern void flush_inval_dcache_range(unsigned long start, unsigned long stop); -extern void flush_dcache_phys_range(unsigned long start, unsigned long stop); #endif #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index 3280953a82cf..fa267e94090a 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -144,44 +144,6 @@ _GLOBAL_TOC(flush_dcache_range) blr EXPORT_SYMBOL(flush_dcache_range) -/* - * Like above, but works on non-mapped physical addresses. - * Use only for non-LPAR setups ! It also assumes real mode - * is cacheable. Used for flushing out the DART before using - * it as uncacheable memory - * - * flush_dcache_phys_range(unsigned long start, unsigned long stop) - * - * flush all bytes from start to stop-1 inclusive - */ -_GLOBAL(flush_dcache_phys_range) - ld r10,PPC64_CACHES@toc(r2) - lwz r7,DCACHEL1BLOCKSIZE(r10) /* Get dcache block size */ - addi r5,r7,-1 - andc r6,r3,r5 /* round low to line bdy */ - subf r8,r6,r4 /* compute length */ - add r8,r8,r5 /* ensure we get enough */ - lwz r9,DCACHEL1LOGBLOCKSIZE(r10) /* Get log-2 of dcache block size */ - srw. 
r8,r8,r9 /* compute line count */ - beqlr /* nothing to do? */ - mfmsr r5 /* Disable MMU Data Relocation */ - ori r0,r5,MSR_DR - xori r0,r0,MSR_DR - sync - mtmsr r0 - sync - isync - mtctr r8 -0: dcbst 0,r6 - add r6,r6,r7 - bdnz 0b - sync - isync - mtmsr r5 /* Re-enable MMU Data Relocation */ - sync - isync - blr - _GLOBAL(flush_inval_dcache_range) ld r10,PPC64_CACHES@toc(r2) lwz r7,DCACHEL1BLOCKSIZE(r10) /* Get dcache block size */ -- cgit v1.2.3 From a0828cf57acce9bf941539e1f633e9a91f9df57d Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Thu, 19 Jan 2017 17:15:30 +0100 Subject: powerpc: Use sizeof(*foo) rather than sizeof(struct foo) It's slightly less error prone to use sizeof(*foo) rather than specifying the type. Signed-off-by: Markus Elfring [mpe: Consolidate into one patch, rewrite change log] Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/nvram_64.c | 9 +++------ arch/powerpc/oprofile/cell/spu_task_sync.c | 2 +- arch/powerpc/oprofile/cell/vma_map.c | 4 ++-- arch/powerpc/platforms/4xx/msi.c | 2 +- arch/powerpc/platforms/4xx/ocm.c | 2 +- arch/powerpc/platforms/cell/axon_msi.c | 2 +- arch/powerpc/platforms/cell/spider-pci.c | 2 +- arch/powerpc/platforms/cell/spufs/lscsa_alloc.c | 2 +- arch/powerpc/platforms/powermac/low_i2c.c | 2 +- arch/powerpc/platforms/powermac/pfunc_core.c | 4 ++-- arch/powerpc/platforms/powernv/opal-flash.c | 4 ++-- arch/powerpc/platforms/powernv/opal-hmi.c | 2 +- arch/powerpc/platforms/powernv/opal-imc.c | 10 +++++----- arch/powerpc/platforms/powernv/opal-memory-errors.c | 2 +- arch/powerpc/platforms/powernv/opal-psr.c | 2 +- arch/powerpc/platforms/powernv/opal-sensor-groups.c | 4 ++-- arch/powerpc/platforms/powernv/opal-xscom.c | 2 +- arch/powerpc/platforms/powernv/pci-ioda.c | 2 +- arch/powerpc/platforms/ps3/mm.c | 6 ++---- drivers/macintosh/rack-meter.c | 2 +- 20 files changed, 31 insertions(+), 36 deletions(-) diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c index 496d6393bd41..ba681dac7b46 100644 --- a/arch/powerpc/kernel/nvram_64.c +++ b/arch/powerpc/kernel/nvram_64.c @@ -207,8 +207,7 @@ int nvram_write_os_partition(struct nvram_os_partition *part, tmp_index = part->index; - rc = ppc_md.nvram_write((char *)&info, sizeof(struct err_log_info), - &tmp_index); + rc = ppc_md.nvram_write((char *)&info, sizeof(info), &tmp_index); if (rc <= 0) { pr_err("%s: Failed nvram_write (%d)\n", __func__, rc); return rc; @@ -244,9 +243,7 @@ int nvram_read_partition(struct nvram_os_partition *part, char *buff, tmp_index = part->index; if (part->os_partition) { - rc = ppc_md.nvram_read((char *)&info, - sizeof(struct err_log_info), - &tmp_index); + rc = ppc_md.nvram_read((char *)&info, sizeof(info), &tmp_index); if (rc <= 0) { pr_err("%s: Failed nvram_read (%d)\n", __func__, rc); return rc; @@ -1173,7 +1170,7 @@ int __init nvram_scan_partitions(void) "detected: 0-length partition\n"); goto out; } - tmp_part = kmalloc(sizeof(struct nvram_partition), GFP_KERNEL); + tmp_part = kmalloc(sizeof(*tmp_part), GFP_KERNEL); err = -ENOMEM; if (!tmp_part) { printk(KERN_ERR "nvram_scan_partitions: kmalloc failed\n"); diff --git a/arch/powerpc/oprofile/cell/spu_task_sync.c b/arch/powerpc/oprofile/cell/spu_task_sync.c index 44d67b167e0b..2668cc414e4e 100644 --- a/arch/powerpc/oprofile/cell/spu_task_sync.c +++ b/arch/powerpc/oprofile/cell/spu_task_sync.c @@ -208,7 +208,7 @@ prepare_cached_spu_info(struct spu *spu, unsigned long objectId) /* Create cached_info and set spu_info[spu->number] to point to it. 
* spu->number is a system-wide value, not a per-node value. */ - info = kzalloc(sizeof(struct cached_info), GFP_KERNEL); + info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) { printk(KERN_ERR "SPU_PROF: " "%s, line %d: create vma_map failed\n", diff --git a/arch/powerpc/oprofile/cell/vma_map.c b/arch/powerpc/oprofile/cell/vma_map.c index c579b16845da..f40e37316dd6 100644 --- a/arch/powerpc/oprofile/cell/vma_map.c +++ b/arch/powerpc/oprofile/cell/vma_map.c @@ -69,8 +69,8 @@ vma_map_add(struct vma_to_fileoffset_map *map, unsigned int vma, unsigned int size, unsigned int offset, unsigned int guard_ptr, unsigned int guard_val) { - struct vma_to_fileoffset_map *new = - kzalloc(sizeof(struct vma_to_fileoffset_map), GFP_KERNEL); + struct vma_to_fileoffset_map *new = kzalloc(sizeof(*new), GFP_KERNEL); + if (!new) { printk(KERN_ERR "SPU_PROF: %s, line %d: malloc failed\n", __func__, __LINE__); diff --git a/arch/powerpc/platforms/4xx/msi.c b/arch/powerpc/platforms/4xx/msi.c index d50417e23add..4b859c840ea9 100644 --- a/arch/powerpc/platforms/4xx/msi.c +++ b/arch/powerpc/platforms/4xx/msi.c @@ -223,7 +223,7 @@ static int ppc4xx_msi_probe(struct platform_device *dev) dev_dbg(&dev->dev, "PCIE-MSI: Setting up MSI support...\n"); - msi = kzalloc(sizeof(struct ppc4xx_msi), GFP_KERNEL); + msi = kzalloc(sizeof(*msi), GFP_KERNEL); if (!msi) { dev_err(&dev->dev, "No memory for MSI structure\n"); return -ENOMEM; diff --git a/arch/powerpc/platforms/4xx/ocm.c b/arch/powerpc/platforms/4xx/ocm.c index 85d9e37f5ccb..69d9f60d9fe5 100644 --- a/arch/powerpc/platforms/4xx/ocm.c +++ b/arch/powerpc/platforms/4xx/ocm.c @@ -339,7 +339,7 @@ void *ppc4xx_ocm_alloc(phys_addr_t *phys, int size, int align, if (IS_ERR_VALUE(offset)) continue; - ocm_blk = kzalloc(sizeof(struct ocm_block), GFP_KERNEL); + ocm_blk = kzalloc(sizeof(*ocm_blk), GFP_KERNEL); if (!ocm_blk) { printk(KERN_ERR "PPC4XX OCM: could not allocate ocm block"); rh_free(ocm_reg->rh, offset); diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c index 6ea3f248b155..326d34e2aa02 100644 --- a/arch/powerpc/platforms/cell/axon_msi.c +++ b/arch/powerpc/platforms/cell/axon_msi.c @@ -342,7 +342,7 @@ static int axon_msi_probe(struct platform_device *device) pr_devel("axon_msi: setting up dn %pOF\n", dn); - msic = kzalloc(sizeof(struct axon_msic), GFP_KERNEL); + msic = kzalloc(sizeof(*msic), GFP_KERNEL); if (!msic) { printk(KERN_ERR "axon_msi: couldn't allocate msic for %pOF\n", dn); diff --git a/arch/powerpc/platforms/cell/spider-pci.c b/arch/powerpc/platforms/cell/spider-pci.c index d1e61e273e64..1200d0dea512 100644 --- a/arch/powerpc/platforms/cell/spider-pci.c +++ b/arch/powerpc/platforms/cell/spider-pci.c @@ -133,7 +133,7 @@ int __init spiderpci_iowa_init(struct iowa_bus *bus, void *data) pr_debug("SPIDERPCI-IOWA:Bus initialize for spider(%pOF)\n", np); - priv = kzalloc(sizeof(struct spiderpci_iowa_private), GFP_KERNEL); + priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) { pr_err("SPIDERPCI-IOWA:" "Can't allocate struct spiderpci_iowa_private"); diff --git a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c index b847e9403566..d9de848dae47 100644 --- a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c +++ b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c @@ -36,7 +36,7 @@ int spu_alloc_lscsa(struct spu_state *csa) struct spu_lscsa *lscsa; unsigned char *p; - lscsa = vzalloc(sizeof(struct spu_lscsa)); + lscsa = vzalloc(sizeof(*lscsa)); if (!lscsa) return -ENOMEM; csa->lscsa = 
lscsa; diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c index 3408f315ef48..fa89f30e7f27 100644 --- a/arch/powerpc/platforms/powermac/low_i2c.c +++ b/arch/powerpc/platforms/powermac/low_i2c.c @@ -492,7 +492,7 @@ static struct pmac_i2c_host_kw *__init kw_i2c_host_init(struct device_node *np) const u32 *psteps, *prate, *addrp; u32 steps; - host = kzalloc(sizeof(struct pmac_i2c_host_kw), GFP_KERNEL); + host = kzalloc(sizeof(*host), GFP_KERNEL); if (host == NULL) { printk(KERN_ERR "low_i2c: Can't allocate host for %pOF\n", np); diff --git a/arch/powerpc/platforms/powermac/pfunc_core.c b/arch/powerpc/platforms/powermac/pfunc_core.c index df3c93bef228..e0462fedcdb8 100644 --- a/arch/powerpc/platforms/powermac/pfunc_core.c +++ b/arch/powerpc/platforms/powermac/pfunc_core.c @@ -643,7 +643,7 @@ static int pmf_add_function_prop(struct pmf_device *dev, void *driverdata, while (length >= 12) { /* Allocate a structure */ - func = kzalloc(sizeof(struct pmf_function), GFP_KERNEL); + func = kzalloc(sizeof(*func), GFP_KERNEL); if (func == NULL) goto bail; kref_init(&func->ref); @@ -719,7 +719,7 @@ int pmf_register_driver(struct device_node *np, return -EBUSY; } - dev = kzalloc(sizeof(struct pmf_device), GFP_KERNEL); + dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (dev == NULL) { DBG("pmf: no memory !\n"); return -ENOMEM; diff --git a/arch/powerpc/platforms/powernv/opal-flash.c b/arch/powerpc/platforms/powernv/opal-flash.c index 2fa3ac80cb4e..1cb0b895a236 100644 --- a/arch/powerpc/platforms/powernv/opal-flash.c +++ b/arch/powerpc/platforms/powernv/opal-flash.c @@ -418,12 +418,12 @@ static int alloc_image_buf(char *buffer, size_t count) void *addr; int size; - if (count < sizeof(struct image_header_t)) { + if (count < sizeof(image_header)) { pr_warn("FLASH: Invalid candidate image\n"); return -EINVAL; } - memcpy(&image_header, (void *)buffer, sizeof(struct image_header_t)); + memcpy(&image_header, (void *)buffer, sizeof(image_header)); image_data.size = be32_to_cpu(image_header.size); pr_debug("FLASH: Candidate image size = %u\n", image_data.size); diff --git a/arch/powerpc/platforms/powernv/opal-hmi.c b/arch/powerpc/platforms/powernv/opal-hmi.c index c9e1a4ff295c..4efc95b4c7d4 100644 --- a/arch/powerpc/platforms/powernv/opal-hmi.c +++ b/arch/powerpc/platforms/powernv/opal-hmi.c @@ -314,7 +314,7 @@ static int opal_handle_hmi_event(struct notifier_block *nb, pr_err("HMI: out of memory, Opal message event not handled\n"); return -ENOMEM; } - memcpy(&msg_node->hmi_evt, hmi_evt, sizeof(struct OpalHMIEvent)); + memcpy(&msg_node->hmi_evt, hmi_evt, sizeof(*hmi_evt)); spin_lock_irqsave(&opal_hmi_evt_lock, flags); list_add(&msg_node->list, &opal_hmi_evt_list); diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c index f6f55ab4980e..2a14fda5ea26 100644 --- a/arch/powerpc/platforms/powernv/opal-imc.c +++ b/arch/powerpc/platforms/powernv/opal-imc.c @@ -110,11 +110,11 @@ static int imc_get_mem_addr_nest(struct device_node *node, if (nr_chips <= 0) return -ENODEV; - base_addr_arr = kcalloc(nr_chips, sizeof(u64), GFP_KERNEL); + base_addr_arr = kcalloc(nr_chips, sizeof(*base_addr_arr), GFP_KERNEL); if (!base_addr_arr) return -ENOMEM; - chipid_arr = kcalloc(nr_chips, sizeof(u32), GFP_KERNEL); + chipid_arr = kcalloc(nr_chips, sizeof(*chipid_arr), GFP_KERNEL); if (!chipid_arr) return -ENOMEM; @@ -125,8 +125,8 @@ static int imc_get_mem_addr_nest(struct device_node *node, nr_chips)) goto error; - pmu_ptr->mem_info = kcalloc(nr_chips, 
sizeof(struct imc_mem_info), - GFP_KERNEL); + pmu_ptr->mem_info = kcalloc(nr_chips, sizeof(*pmu_ptr->mem_info), + GFP_KERNEL); if (!pmu_ptr->mem_info) goto error; @@ -161,7 +161,7 @@ static int imc_pmu_create(struct device_node *parent, int pmu_index, int domain) u32 offset; /* memory for pmu */ - pmu_ptr = kzalloc(sizeof(struct imc_pmu), GFP_KERNEL); + pmu_ptr = kzalloc(sizeof(*pmu_ptr), GFP_KERNEL); if (!pmu_ptr) return -ENOMEM; diff --git a/arch/powerpc/platforms/powernv/opal-memory-errors.c b/arch/powerpc/platforms/powernv/opal-memory-errors.c index 8ddc1accf199..dcb42bcb5efa 100644 --- a/arch/powerpc/platforms/powernv/opal-memory-errors.c +++ b/arch/powerpc/platforms/powernv/opal-memory-errors.c @@ -112,7 +112,7 @@ static int opal_memory_err_event(struct notifier_block *nb, "handled\n"); return -ENOMEM; } - memcpy(&msg_node->msg, msg, sizeof(struct opal_msg)); + memcpy(&msg_node->msg, msg, sizeof(msg_node->msg)); spin_lock_irqsave(&opal_mem_err_lock, flags); list_add(&msg_node->list, &opal_memory_err_list); diff --git a/arch/powerpc/platforms/powernv/opal-psr.c b/arch/powerpc/platforms/powernv/opal-psr.c index 7313b7fc9071..74986b35cf77 100644 --- a/arch/powerpc/platforms/powernv/opal-psr.c +++ b/arch/powerpc/platforms/powernv/opal-psr.c @@ -136,7 +136,7 @@ void __init opal_psr_init(void) return; } - psr_attrs = kcalloc(of_get_child_count(psr), sizeof(struct psr_attr), + psr_attrs = kcalloc(of_get_child_count(psr), sizeof(*psr_attrs), GFP_KERNEL); if (!psr_attrs) return; diff --git a/arch/powerpc/platforms/powernv/opal-sensor-groups.c b/arch/powerpc/platforms/powernv/opal-sensor-groups.c index 7e5a235ebf76..541c9ea04a32 100644 --- a/arch/powerpc/platforms/powernv/opal-sensor-groups.c +++ b/arch/powerpc/platforms/powernv/opal-sensor-groups.c @@ -166,13 +166,13 @@ void __init opal_sensor_groups_init(void) if (!nr_attrs) continue; - sgs[i].sgattrs = kcalloc(nr_attrs, sizeof(struct sg_attr), + sgs[i].sgattrs = kcalloc(nr_attrs, sizeof(*sgs[i].sgattrs), GFP_KERNEL); if (!sgs[i].sgattrs) goto out_sgs_sgattrs; sgs[i].sg.attrs = kcalloc(nr_attrs + 1, - sizeof(struct attribute *), + sizeof(*sgs[i].sg.attrs), GFP_KERNEL); if (!sgs[i].sg.attrs) { diff --git a/arch/powerpc/platforms/powernv/opal-xscom.c b/arch/powerpc/platforms/powernv/opal-xscom.c index 81c0a943dea9..22d5e1110dbb 100644 --- a/arch/powerpc/platforms/powernv/opal-xscom.c +++ b/arch/powerpc/platforms/powernv/opal-xscom.c @@ -46,7 +46,7 @@ static scom_map_t opal_scom_map(struct device_node *dev, u64 reg, u64 count) __func__, dev); return SCOM_MAP_INVALID; } - m = kmalloc(sizeof(struct opal_scom_map), GFP_KERNEL); + m = kmalloc(sizeof(*m), GFP_KERNEL); if (!m) return NULL; m->chip = be32_to_cpup(gcid); diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index a6c92c78c9b2..6c307f0650bb 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -3843,7 +3843,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, phb_id = be64_to_cpup(prop64); pr_debug(" PHB-ID : 0x%016llx\n", phb_id); - phb = memblock_virt_alloc(sizeof(struct pnv_phb), 0); + phb = memblock_virt_alloc(sizeof(*phb), 0); /* Allocate PCI controller */ phb->hose = hose = pcibios_alloc_controller(np); diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c index 7f870ec29daf..8c7009d001d9 100644 --- a/arch/powerpc/platforms/ps3/mm.c +++ b/arch/powerpc/platforms/ps3/mm.c @@ -524,8 +524,7 @@ static int dma_sb_map_pages(struct ps3_dma_region *r, 
unsigned long phys_addr, int result; struct dma_chunk *c; - c = kzalloc(sizeof(struct dma_chunk), GFP_ATOMIC); - + c = kzalloc(sizeof(*c), GFP_ATOMIC); if (!c) { result = -ENOMEM; goto fail_alloc; @@ -570,8 +569,7 @@ static int dma_ioc0_map_pages(struct ps3_dma_region *r, unsigned long phys_addr, DBG(KERN_ERR "%s: phy=%#lx, lpar%#lx, len=%#lx\n", __func__, phys_addr, ps3_mm_phys_to_lpar(phys_addr), len); - c = kzalloc(sizeof(struct dma_chunk), GFP_ATOMIC); - + c = kzalloc(sizeof(*c), GFP_ATOMIC); if (!c) { result = -ENOMEM; goto fail_alloc; diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c index 910b5b6f96b1..190c9efeace5 100644 --- a/drivers/macintosh/rack-meter.c +++ b/drivers/macintosh/rack-meter.c @@ -397,7 +397,7 @@ static int rackmeter_probe(struct macio_dev* mdev, } /* Create and initialize our instance data */ - rm = kzalloc(sizeof(struct rackmeter), GFP_KERNEL); + rm = kzalloc(sizeof(*rm), GFP_KERNEL); if (rm == NULL) { printk(KERN_ERR "rackmeter: failed to allocate memory !\n"); rc = -ENOMEM; -- cgit v1.2.3 From dd40c5b4c90d84d30cdb452c2d193d6fb42247df Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Tue, 6 Mar 2018 23:24:58 +1000 Subject: selftests/powerpc: Add process creation benchmark Signed-off-by: Nicholas Piggin [mpe: Add SPDX, and fixup formatting] Signed-off-by: Michael Ellerman --- .../selftests/powerpc/benchmarks/.gitignore | 2 + .../testing/selftests/powerpc/benchmarks/Makefile | 7 +- .../selftests/powerpc/benchmarks/exec_target.c | 13 + tools/testing/selftests/powerpc/benchmarks/fork.c | 325 +++++++++++++++++++++ 4 files changed, 346 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/powerpc/benchmarks/exec_target.c create mode 100644 tools/testing/selftests/powerpc/benchmarks/fork.c diff --git a/tools/testing/selftests/powerpc/benchmarks/.gitignore b/tools/testing/selftests/powerpc/benchmarks/.gitignore index 04dc1e6ef2ce..9161679b1e1a 100644 --- a/tools/testing/selftests/powerpc/benchmarks/.gitignore +++ b/tools/testing/selftests/powerpc/benchmarks/.gitignore @@ -1,5 +1,7 @@ gettimeofday context_switch +fork +exec_target mmap_bench futex_bench null_syscall diff --git a/tools/testing/selftests/powerpc/benchmarks/Makefile b/tools/testing/selftests/powerpc/benchmarks/Makefile index a35058e3766c..b4d7432a0ecd 100644 --- a/tools/testing/selftests/powerpc/benchmarks/Makefile +++ b/tools/testing/selftests/powerpc/benchmarks/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 -TEST_GEN_PROGS := gettimeofday context_switch mmap_bench futex_bench null_syscall +TEST_GEN_PROGS := gettimeofday context_switch fork mmap_bench futex_bench null_syscall +TEST_GEN_FILES := exec_target CFLAGS += -O2 @@ -10,3 +11,7 @@ $(TEST_GEN_PROGS): ../harness.c $(OUTPUT)/context_switch: ../utils.c $(OUTPUT)/context_switch: CFLAGS += -maltivec -mvsx -mabi=altivec $(OUTPUT)/context_switch: LDLIBS += -lpthread + +$(OUTPUT)/fork: LDLIBS += -lpthread + +$(OUTPUT)/exec_target: CFLAGS += -static -nostartfiles diff --git a/tools/testing/selftests/powerpc/benchmarks/exec_target.c b/tools/testing/selftests/powerpc/benchmarks/exec_target.c new file mode 100644 index 000000000000..3c9c144192be --- /dev/null +++ b/tools/testing/selftests/powerpc/benchmarks/exec_target.c @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/* + * Part of fork context switch microbenchmark. + * + * Copyright 2018, Anton Blanchard, IBM Corp. 
+ */ + +void _exit(int); +void _start(void) +{ + _exit(0); +} diff --git a/tools/testing/selftests/powerpc/benchmarks/fork.c b/tools/testing/selftests/powerpc/benchmarks/fork.c new file mode 100644 index 000000000000..d312e638cb37 --- /dev/null +++ b/tools/testing/selftests/powerpc/benchmarks/fork.c @@ -0,0 +1,325 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/* + * Context switch microbenchmark. + * + * Copyright 2018, Anton Blanchard, IBM Corp. + */ + +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static unsigned int timeout = 30; + +static void set_cpu(int cpu) +{ + cpu_set_t cpuset; + + if (cpu == -1) + return; + + CPU_ZERO(&cpuset); + CPU_SET(cpu, &cpuset); + + if (sched_setaffinity(0, sizeof(cpuset), &cpuset)) { + perror("sched_setaffinity"); + exit(1); + } +} + +static void start_process_on(void *(*fn)(void *), void *arg, int cpu) +{ + int pid; + + pid = fork(); + if (pid == -1) { + perror("fork"); + exit(1); + } + + if (pid) + return; + + set_cpu(cpu); + + fn(arg); + + exit(0); +} + +static int cpu; +static int do_fork = 0; +static int do_vfork = 0; +static int do_exec = 0; +static char *exec_file; +static int exec_target = 0; +static unsigned long iterations; +static unsigned long iterations_prev; + +static void run_exec(void) +{ + char *const argv[] = { "./exec_target", NULL }; + + if (execve("./exec_target", argv, NULL) == -1) { + perror("execve"); + exit(1); + } +} + +static void bench_fork(void) +{ + while (1) { + pid_t pid = fork(); + if (pid == -1) { + perror("fork"); + exit(1); + } + if (pid == 0) { + if (do_exec) + run_exec(); + _exit(0); + } + pid = waitpid(pid, NULL, 0); + if (pid == -1) { + perror("waitpid"); + exit(1); + } + iterations++; + } +} + +static void bench_vfork(void) +{ + while (1) { + pid_t pid = vfork(); + if (pid == -1) { + perror("fork"); + exit(1); + } + if (pid == 0) { + if (do_exec) + run_exec(); + _exit(0); + } + pid = waitpid(pid, NULL, 0); + if (pid == -1) { + perror("waitpid"); + exit(1); + } + iterations++; + } +} + +static void *null_fn(void *arg) +{ + pthread_exit(NULL); +} + +static void bench_thread(void) +{ + pthread_t tid; + cpu_set_t cpuset; + pthread_attr_t attr; + int rc; + + rc = pthread_attr_init(&attr); + if (rc) { + errno = rc; + perror("pthread_attr_init"); + exit(1); + } + + if (cpu != -1) { + CPU_ZERO(&cpuset); + CPU_SET(cpu, &cpuset); + + rc = pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpuset); + if (rc) { + errno = rc; + perror("pthread_attr_setaffinity_np"); + exit(1); + } + } + + while (1) { + rc = pthread_create(&tid, &attr, null_fn, NULL); + if (rc) { + errno = rc; + perror("pthread_create"); + exit(1); + } + rc = pthread_join(tid, NULL); + if (rc) { + errno = rc; + perror("pthread_join"); + exit(1); + } + iterations++; + } +} + +static void sigalrm_handler(int junk) +{ + unsigned long i = iterations; + + printf("%ld\n", i - iterations_prev); + iterations_prev = i; + + if (--timeout == 0) + kill(0, SIGUSR1); + + alarm(1); +} + +static void sigusr1_handler(int junk) +{ + exit(0); +} + +static void *bench_proc(void *arg) +{ + signal(SIGALRM, sigalrm_handler); + alarm(1); + + if (do_fork) + bench_fork(); + else if (do_vfork) + bench_vfork(); + else + bench_thread(); + + return NULL; +} + +static struct option options[] = { + { "fork", no_argument, &do_fork, 1 }, + { "vfork", no_argument, &do_vfork, 1 }, + { "exec", no_argument, &do_exec, 1 }, + { "timeout", 
required_argument, 0, 's' }, + { "exec-target", no_argument, &exec_target, 1 }, + { NULL }, +}; + +static void usage(void) +{ + fprintf(stderr, "Usage: fork CPU\n\n"); + fprintf(stderr, "\t\t--fork\tUse fork() (default threads)\n"); + fprintf(stderr, "\t\t--vfork\tUse vfork() (default threads)\n"); + fprintf(stderr, "\t\t--exec\tAlso exec() (default no exec)\n"); + fprintf(stderr, "\t\t--timeout=X\tDuration in seconds to run (default 30)\n"); + fprintf(stderr, "\t\t--exec-target\tInternal option for exec workload\n"); +} + +int main(int argc, char *argv[]) +{ + signed char c; + + while (1) { + int option_index = 0; + + c = getopt_long(argc, argv, "", options, &option_index); + + if (c == -1) + break; + + switch (c) { + case 0: + if (options[option_index].flag != 0) + break; + + usage(); + exit(1); + break; + + case 's': + timeout = atoi(optarg); + break; + + default: + usage(); + exit(1); + } + } + + if (do_fork && do_vfork) { + usage(); + exit(1); + } + if (do_exec && !do_fork && !do_vfork) { + usage(); + exit(1); + } + + if (do_exec) { + char *dirname = strdup(argv[0]); + int i; + i = strlen(dirname) - 1; + while (i) { + if (dirname[i] == '/') { + dirname[i] = '\0'; + if (chdir(dirname) == -1) { + perror("chdir"); + exit(1); + } + break; + } + i--; + } + } + + if (exec_target) { + exit(0); + } + + if (((argc - optind) != 1)) { + cpu = -1; + } else { + cpu = atoi(argv[optind++]); + } + + if (do_exec) + exec_file = argv[0]; + + set_cpu(cpu); + + printf("Using "); + if (do_fork) + printf("fork"); + else if (do_vfork) + printf("vfork"); + else + printf("clone"); + + if (do_exec) + printf(" + exec"); + + printf(" on cpu %d\n", cpu); + + /* Create a new process group so we can signal everyone for exit */ + setpgid(getpid(), getpid()); + + signal(SIGUSR1, sigusr1_handler); + + start_process_on(bench_proc, NULL, cpu); + + while (1) + sleep(3600); + + return 0; +} -- cgit v1.2.3 From 78e5dfea84dc15d69940831b3981b3014d17222e Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Wed, 28 Feb 2018 16:44:06 -0600 Subject: powerpc: dts: replace 'linux,stdout-path' with 'stdout-path' 'linux,stdout-path' has been deprecated for some time in favor of 'stdout-path'. Now dtc will warn on occurrences of 'linux,stdout-path'. Search and replace all of the occurrences with 'stdout-path'.
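(For reference, 'stdout-path' also accepts the alias-plus-options form from the devicetree specification; a hypothetical board could write:

	chosen {
		stdout-path = "serial0:115200n8";
	};

This patch, however, only renames the property and leaves every existing value untouched.)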
Signed-off-by: Rob Herring Cc: Mark Rutland Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: Michael Ellerman Cc: linuxppc-dev@lists.ozlabs.org Signed-off-by: Michael Ellerman --- arch/powerpc/boot/dts/acadia.dts | 2 +- arch/powerpc/boot/dts/adder875-redboot.dts | 2 +- arch/powerpc/boot/dts/adder875-uboot.dts | 2 +- arch/powerpc/boot/dts/akebono.dts | 2 +- arch/powerpc/boot/dts/amigaone.dts | 2 +- arch/powerpc/boot/dts/asp834x-redboot.dts | 2 +- arch/powerpc/boot/dts/bamboo.dts | 2 +- arch/powerpc/boot/dts/c2k.dts | 2 +- arch/powerpc/boot/dts/currituck.dts | 2 +- arch/powerpc/boot/dts/ebony.dts | 2 +- arch/powerpc/boot/dts/eiger.dts | 2 +- arch/powerpc/boot/dts/ep405.dts | 2 +- arch/powerpc/boot/dts/fsl/mvme7100.dts | 2 +- arch/powerpc/boot/dts/fsp2.dts | 2 +- arch/powerpc/boot/dts/holly.dts | 2 +- arch/powerpc/boot/dts/hotfoot.dts | 2 +- arch/powerpc/boot/dts/icon.dts | 2 +- arch/powerpc/boot/dts/iss4xx-mpic.dts | 2 +- arch/powerpc/boot/dts/iss4xx.dts | 2 +- arch/powerpc/boot/dts/katmai.dts | 2 +- arch/powerpc/boot/dts/klondike.dts | 2 +- arch/powerpc/boot/dts/ksi8560.dts | 2 +- arch/powerpc/boot/dts/media5200.dts | 2 +- arch/powerpc/boot/dts/mpc8272ads.dts | 2 +- arch/powerpc/boot/dts/mpc866ads.dts | 2 +- arch/powerpc/boot/dts/mpc885ads.dts | 2 +- arch/powerpc/boot/dts/mvme5100.dts | 2 +- arch/powerpc/boot/dts/obs600.dts | 2 +- arch/powerpc/boot/dts/pq2fads.dts | 2 +- arch/powerpc/boot/dts/rainier.dts | 2 +- arch/powerpc/boot/dts/redwood.dts | 2 +- arch/powerpc/boot/dts/sam440ep.dts | 2 +- arch/powerpc/boot/dts/sequoia.dts | 2 +- arch/powerpc/boot/dts/storcenter.dts | 2 +- arch/powerpc/boot/dts/taishan.dts | 2 +- arch/powerpc/boot/dts/virtex440-ml507.dts | 2 +- arch/powerpc/boot/dts/virtex440-ml510.dts | 2 +- arch/powerpc/boot/dts/walnut.dts | 2 +- arch/powerpc/boot/dts/warp.dts | 2 +- arch/powerpc/boot/dts/xpedite5200_xmon.dts | 2 +- arch/powerpc/boot/dts/yosemite.dts | 2 +- 41 files changed, 41 insertions(+), 41 deletions(-) diff --git a/arch/powerpc/boot/dts/acadia.dts b/arch/powerpc/boot/dts/acadia.dts index 86266159521e..deb52e41ab84 100644 --- a/arch/powerpc/boot/dts/acadia.dts +++ b/arch/powerpc/boot/dts/acadia.dts @@ -219,6 +219,6 @@ }; chosen { - linux,stdout-path = "/plb/opb/serial@ef600300"; + stdout-path = "/plb/opb/serial@ef600300"; }; }; diff --git a/arch/powerpc/boot/dts/adder875-redboot.dts b/arch/powerpc/boot/dts/adder875-redboot.dts index 083984720b2f..7f5ff4168482 100644 --- a/arch/powerpc/boot/dts/adder875-redboot.dts +++ b/arch/powerpc/boot/dts/adder875-redboot.dts @@ -178,6 +178,6 @@ }; chosen { - linux,stdout-path = &console; + stdout-path = &console; }; }; diff --git a/arch/powerpc/boot/dts/adder875-uboot.dts b/arch/powerpc/boot/dts/adder875-uboot.dts index e4554caf8f8d..bd9f33c57737 100644 --- a/arch/powerpc/boot/dts/adder875-uboot.dts +++ b/arch/powerpc/boot/dts/adder875-uboot.dts @@ -177,6 +177,6 @@ }; chosen { - linux,stdout-path = &console; + stdout-path = &console; }; }; diff --git a/arch/powerpc/boot/dts/akebono.dts b/arch/powerpc/boot/dts/akebono.dts index 746779202a12..8a7a10139bc9 100644 --- a/arch/powerpc/boot/dts/akebono.dts +++ b/arch/powerpc/boot/dts/akebono.dts @@ -410,6 +410,6 @@ }; chosen { - linux,stdout-path = &UART0; + stdout-path = &UART0; }; }; diff --git a/arch/powerpc/boot/dts/amigaone.dts b/arch/powerpc/boot/dts/amigaone.dts index 49ac36b16dd7..712430155b99 100644 --- a/arch/powerpc/boot/dts/amigaone.dts +++ b/arch/powerpc/boot/dts/amigaone.dts @@ -168,6 +168,6 @@ }; chosen { - linux,stdout-path = "/pci@80000000/isa@7/serial@3f8"; + 
stdout-path = "/pci@80000000/isa@7/serial@3f8"; }; }; diff --git a/arch/powerpc/boot/dts/asp834x-redboot.dts b/arch/powerpc/boot/dts/asp834x-redboot.dts index 9198745f45fb..e987b5af9326 100644 --- a/arch/powerpc/boot/dts/asp834x-redboot.dts +++ b/arch/powerpc/boot/dts/asp834x-redboot.dts @@ -304,7 +304,7 @@ chosen { bootargs = "console=ttyS0,38400 root=/dev/mtdblock3 rootfstype=jffs2"; - linux,stdout-path = &serial0; + stdout-path = &serial0; }; }; diff --git a/arch/powerpc/boot/dts/bamboo.dts b/arch/powerpc/boot/dts/bamboo.dts index aa68911f6560..538e42b1120d 100644 --- a/arch/powerpc/boot/dts/bamboo.dts +++ b/arch/powerpc/boot/dts/bamboo.dts @@ -295,6 +295,6 @@ }; chosen { - linux,stdout-path = "/plb/opb/serial@ef600300"; + stdout-path = "/plb/opb/serial@ef600300"; }; }; diff --git a/arch/powerpc/boot/dts/c2k.dts b/arch/powerpc/boot/dts/c2k.dts index 27f169e3ade9..c5beb72d18b7 100644 --- a/arch/powerpc/boot/dts/c2k.dts +++ b/arch/powerpc/boot/dts/c2k.dts @@ -361,6 +361,6 @@ }; }; chosen { - linux,stdout-path = &MPSC0; + stdout-path = &MPSC0; }; }; diff --git a/arch/powerpc/boot/dts/currituck.dts b/arch/powerpc/boot/dts/currituck.dts index f2ad5815f08d..a04a4fcfde63 100644 --- a/arch/powerpc/boot/dts/currituck.dts +++ b/arch/powerpc/boot/dts/currituck.dts @@ -237,6 +237,6 @@ }; chosen { - linux,stdout-path = &UART0; + stdout-path = &UART0; }; }; diff --git a/arch/powerpc/boot/dts/ebony.dts b/arch/powerpc/boot/dts/ebony.dts index ec2d142291b4..5d11e6ea7405 100644 --- a/arch/powerpc/boot/dts/ebony.dts +++ b/arch/powerpc/boot/dts/ebony.dts @@ -332,6 +332,6 @@ }; chosen { - linux,stdout-path = "/plb/opb/serial@40000200"; + stdout-path = "/plb/opb/serial@40000200"; }; }; diff --git a/arch/powerpc/boot/dts/eiger.dts b/arch/powerpc/boot/dts/eiger.dts index 48bcf7187924..7a1231d9d6f0 100644 --- a/arch/powerpc/boot/dts/eiger.dts +++ b/arch/powerpc/boot/dts/eiger.dts @@ -421,7 +421,7 @@ }; chosen { - linux,stdout-path = "/plb/opb/serial@ef600200"; + stdout-path = "/plb/opb/serial@ef600200"; }; }; diff --git a/arch/powerpc/boot/dts/ep405.dts b/arch/powerpc/boot/dts/ep405.dts index 53ef06cc2134..4ac9c5ab6e6b 100644 --- a/arch/powerpc/boot/dts/ep405.dts +++ b/arch/powerpc/boot/dts/ep405.dts @@ -225,6 +225,6 @@ }; chosen { - linux,stdout-path = "/plb/opb/serial@ef600300"; + stdout-path = "/plb/opb/serial@ef600300"; }; }; diff --git a/arch/powerpc/boot/dts/fsl/mvme7100.dts b/arch/powerpc/boot/dts/fsl/mvme7100.dts index e2d306ad37a6..721cb53758ae 100644 --- a/arch/powerpc/boot/dts/fsl/mvme7100.dts +++ b/arch/powerpc/boot/dts/fsl/mvme7100.dts @@ -146,7 +146,7 @@ }; chosen { - linux,stdout-path = &serial0; + stdout-path = &serial0; }; }; diff --git a/arch/powerpc/boot/dts/fsp2.dts b/arch/powerpc/boot/dts/fsp2.dts index 6560283c5aec..9311b86b1bd9 100644 --- a/arch/powerpc/boot/dts/fsp2.dts +++ b/arch/powerpc/boot/dts/fsp2.dts @@ -607,7 +607,7 @@ }; chosen { - linux,stdout-path = "/plb/opb/serial@b0020000"; + stdout-path = "/plb/opb/serial@b0020000"; bootargs = "console=ttyS0,115200 rw log_buf_len=32768 debug"; }; }; diff --git a/arch/powerpc/boot/dts/holly.dts b/arch/powerpc/boot/dts/holly.dts index 43e6f0c8e449..02bd304c7d38 100644 --- a/arch/powerpc/boot/dts/holly.dts +++ b/arch/powerpc/boot/dts/holly.dts @@ -191,6 +191,6 @@ }; chosen { - linux,stdout-path = "/tsi109@c0000000/serial@7808"; + stdout-path = "/tsi109@c0000000/serial@7808"; }; }; diff --git a/arch/powerpc/boot/dts/hotfoot.dts b/arch/powerpc/boot/dts/hotfoot.dts index 71d3bb4931dc..b93bf2d9dd5b 100644 --- a/arch/powerpc/boot/dts/hotfoot.dts 
+++ b/arch/powerpc/boot/dts/hotfoot.dts @@ -291,6 +291,6 @@ }; chosen { - linux,stdout-path = &UART0; + stdout-path = &UART0; }; }; diff --git a/arch/powerpc/boot/dts/icon.dts b/arch/powerpc/boot/dts/icon.dts index 9c94fd737f7c..2e6e3a7b2604 100644 --- a/arch/powerpc/boot/dts/icon.dts +++ b/arch/powerpc/boot/dts/icon.dts @@ -442,6 +442,6 @@ }; chosen { - linux,stdout-path = "/plb/opb/serial@f0000200"; + stdout-path = "/plb/opb/serial@f0000200"; }; }; diff --git a/arch/powerpc/boot/dts/iss4xx-mpic.dts b/arch/powerpc/boot/dts/iss4xx-mpic.dts index 23e9d9b7e400..f7063198b2dc 100644 --- a/arch/powerpc/boot/dts/iss4xx-mpic.dts +++ b/arch/powerpc/boot/dts/iss4xx-mpic.dts @@ -150,6 +150,6 @@ }; chosen { - linux,stdout-path = "/plb/opb/serial@40000200"; + stdout-path = "/plb/opb/serial@40000200"; }; }; diff --git a/arch/powerpc/boot/dts/iss4xx.dts b/arch/powerpc/boot/dts/iss4xx.dts index 4ff6555c866d..5533aff25e41 100644 --- a/arch/powerpc/boot/dts/iss4xx.dts +++ b/arch/powerpc/boot/dts/iss4xx.dts @@ -111,6 +111,6 @@ }; chosen { - linux,stdout-path = "/plb/opb/serial@40000200"; + stdout-path = "/plb/opb/serial@40000200"; }; }; diff --git a/arch/powerpc/boot/dts/katmai.dts b/arch/powerpc/boot/dts/katmai.dts index f913dbe25d35..02629e119b87 100644 --- a/arch/powerpc/boot/dts/katmai.dts +++ b/arch/powerpc/boot/dts/katmai.dts @@ -505,6 +505,6 @@ }; chosen { - linux,stdout-path = "/plb/opb/serial@f0000200"; + stdout-path = "/plb/opb/serial@f0000200"; }; }; diff --git a/arch/powerpc/boot/dts/klondike.dts b/arch/powerpc/boot/dts/klondike.dts index 8c9429033618..d9613b7b945f 100644 --- a/arch/powerpc/boot/dts/klondike.dts +++ b/arch/powerpc/boot/dts/klondike.dts @@ -222,6 +222,6 @@ }; chosen { - linux,stdout-path = "/plb/opb/serial@50001000"; + stdout-path = "/plb/opb/serial@50001000"; }; }; diff --git a/arch/powerpc/boot/dts/ksi8560.dts b/arch/powerpc/boot/dts/ksi8560.dts index 5d68236e7c3c..fe6c17c8812a 100644 --- a/arch/powerpc/boot/dts/ksi8560.dts +++ b/arch/powerpc/boot/dts/ksi8560.dts @@ -339,6 +339,6 @@ chosen { - linux,stdout-path = "/soc/cpm/serial@91a00"; + stdout-path = "/soc/cpm/serial@91a00"; }; }; diff --git a/arch/powerpc/boot/dts/media5200.dts b/arch/powerpc/boot/dts/media5200.dts index b5413cb85f13..843f156a49c4 100644 --- a/arch/powerpc/boot/dts/media5200.dts +++ b/arch/powerpc/boot/dts/media5200.dts @@ -25,7 +25,7 @@ }; chosen { - linux,stdout-path = &console; + stdout-path = &console; }; cpus { diff --git a/arch/powerpc/boot/dts/mpc8272ads.dts b/arch/powerpc/boot/dts/mpc8272ads.dts index 6d2cddf64cfd..98282c18d989 100644 --- a/arch/powerpc/boot/dts/mpc8272ads.dts +++ b/arch/powerpc/boot/dts/mpc8272ads.dts @@ -262,6 +262,6 @@ }; chosen { - linux,stdout-path = "/soc/cpm/serial@11a00"; + stdout-path = "/soc/cpm/serial@11a00"; }; }; diff --git a/arch/powerpc/boot/dts/mpc866ads.dts b/arch/powerpc/boot/dts/mpc866ads.dts index 34c1f48b1a09..4443fac3f576 100644 --- a/arch/powerpc/boot/dts/mpc866ads.dts +++ b/arch/powerpc/boot/dts/mpc866ads.dts @@ -185,6 +185,6 @@ }; chosen { - linux,stdout-path = "/soc/cpm/serial@a80"; + stdout-path = "/soc/cpm/serial@a80"; }; }; diff --git a/arch/powerpc/boot/dts/mpc885ads.dts b/arch/powerpc/boot/dts/mpc885ads.dts index 4e93bd961e0f..5b037f51741d 100644 --- a/arch/powerpc/boot/dts/mpc885ads.dts +++ b/arch/powerpc/boot/dts/mpc885ads.dts @@ -227,6 +227,6 @@ }; chosen { - linux,stdout-path = "/soc/cpm/serial@a80"; + stdout-path = "/soc/cpm/serial@a80"; }; }; diff --git a/arch/powerpc/boot/dts/mvme5100.dts b/arch/powerpc/boot/dts/mvme5100.dts index 
1ecb341a232a..a7eb6d25903d 100644 --- a/arch/powerpc/boot/dts/mvme5100.dts +++ b/arch/powerpc/boot/dts/mvme5100.dts @@ -179,7 +179,7 @@ }; chosen { - linux,stdout-path = &serial0; + stdout-path = &serial0; }; }; diff --git a/arch/powerpc/boot/dts/obs600.dts b/arch/powerpc/boot/dts/obs600.dts index 18e7d79ee4c3..d10b0411809b 100644 --- a/arch/powerpc/boot/dts/obs600.dts +++ b/arch/powerpc/boot/dts/obs600.dts @@ -309,6 +309,6 @@ }; }; chosen { - linux,stdout-path = "/plb/opb/serial@ef600200"; + stdout-path = "/plb/opb/serial@ef600200"; }; }; diff --git a/arch/powerpc/boot/dts/pq2fads.dts b/arch/powerpc/boot/dts/pq2fads.dts index 0c525ff0c257..a477615e3468 100644 --- a/arch/powerpc/boot/dts/pq2fads.dts +++ b/arch/powerpc/boot/dts/pq2fads.dts @@ -242,6 +242,6 @@ }; chosen { - linux,stdout-path = "/soc/cpm/serial@11a00"; + stdout-path = "/soc/cpm/serial@11a00"; }; }; diff --git a/arch/powerpc/boot/dts/rainier.dts b/arch/powerpc/boot/dts/rainier.dts index 9684c80e4093..e59829cff556 100644 --- a/arch/powerpc/boot/dts/rainier.dts +++ b/arch/powerpc/boot/dts/rainier.dts @@ -344,7 +344,7 @@ }; chosen { - linux,stdout-path = "/plb/opb/serial@ef600300"; + stdout-path = "/plb/opb/serial@ef600300"; bootargs = "console=ttyS0,115200"; }; }; diff --git a/arch/powerpc/boot/dts/redwood.dts b/arch/powerpc/boot/dts/redwood.dts index d86a3a498118..f3e046fb49e2 100644 --- a/arch/powerpc/boot/dts/redwood.dts +++ b/arch/powerpc/boot/dts/redwood.dts @@ -381,7 +381,7 @@ chosen { - linux,stdout-path = "/plb/opb/serial@ef600200"; + stdout-path = "/plb/opb/serial@ef600200"; }; }; diff --git a/arch/powerpc/boot/dts/sam440ep.dts b/arch/powerpc/boot/dts/sam440ep.dts index 088361cf4636..7d15f18e1180 100644 --- a/arch/powerpc/boot/dts/sam440ep.dts +++ b/arch/powerpc/boot/dts/sam440ep.dts @@ -288,6 +288,6 @@ }; chosen { - linux,stdout-path = "/plb/opb/serial@ef600300"; + stdout-path = "/plb/opb/serial@ef600300"; }; }; diff --git a/arch/powerpc/boot/dts/sequoia.dts b/arch/powerpc/boot/dts/sequoia.dts index e41b88a5eaee..60d211da9593 100644 --- a/arch/powerpc/boot/dts/sequoia.dts +++ b/arch/powerpc/boot/dts/sequoia.dts @@ -406,7 +406,7 @@ }; chosen { - linux,stdout-path = "/plb/opb/serial@ef600300"; + stdout-path = "/plb/opb/serial@ef600300"; bootargs = "console=ttyS0,115200"; }; }; diff --git a/arch/powerpc/boot/dts/storcenter.dts b/arch/powerpc/boot/dts/storcenter.dts index 2a555738517e..99f6f544dc5f 100644 --- a/arch/powerpc/boot/dts/storcenter.dts +++ b/arch/powerpc/boot/dts/storcenter.dts @@ -137,6 +137,6 @@ }; chosen { - linux,stdout-path = &serial0; + stdout-path = &serial0; }; }; diff --git a/arch/powerpc/boot/dts/taishan.dts b/arch/powerpc/boot/dts/taishan.dts index 1657ad0bf8a6..803f1bff7fa8 100644 --- a/arch/powerpc/boot/dts/taishan.dts +++ b/arch/powerpc/boot/dts/taishan.dts @@ -422,6 +422,6 @@ }; chosen { - linux,stdout-path = "/plb/opb/serial@40000300"; + stdout-path = "/plb/opb/serial@40000300"; }; }; diff --git a/arch/powerpc/boot/dts/virtex440-ml507.dts b/arch/powerpc/boot/dts/virtex440-ml507.dts index 391a4e299783..66f1c6312de6 100644 --- a/arch/powerpc/boot/dts/virtex440-ml507.dts +++ b/arch/powerpc/boot/dts/virtex440-ml507.dts @@ -32,7 +32,7 @@ } ; chosen { bootargs = "console=ttyS0 root=/dev/ram"; - linux,stdout-path = &RS232_Uart_1; + stdout-path = &RS232_Uart_1; } ; cpus { #address-cells = <1>; diff --git a/arch/powerpc/boot/dts/virtex440-ml510.dts b/arch/powerpc/boot/dts/virtex440-ml510.dts index 81201d3907e2..3b736ca26ddc 100644 --- a/arch/powerpc/boot/dts/virtex440-ml510.dts +++ 
b/arch/powerpc/boot/dts/virtex440-ml510.dts @@ -26,7 +26,7 @@ } ; chosen { bootargs = "console=ttyS0 root=/dev/ram"; - linux,stdout-path = "/plb@0/serial@83e00000"; + stdout-path = "/plb@0/serial@83e00000"; } ; cpus { #address-cells = <1>; diff --git a/arch/powerpc/boot/dts/walnut.dts b/arch/powerpc/boot/dts/walnut.dts index 4a9f726ada13..0872862c9363 100644 --- a/arch/powerpc/boot/dts/walnut.dts +++ b/arch/powerpc/boot/dts/walnut.dts @@ -241,6 +241,6 @@ }; chosen { - linux,stdout-path = "/plb/opb/serial@ef600300"; + stdout-path = "/plb/opb/serial@ef600300"; }; }; diff --git a/arch/powerpc/boot/dts/warp.dts b/arch/powerpc/boot/dts/warp.dts index ea9053ef4819..b4f32740870e 100644 --- a/arch/powerpc/boot/dts/warp.dts +++ b/arch/powerpc/boot/dts/warp.dts @@ -304,6 +304,6 @@ }; chosen { - linux,stdout-path = "/plb/opb/serial@ef600300"; + stdout-path = "/plb/opb/serial@ef600300"; }; }; diff --git a/arch/powerpc/boot/dts/xpedite5200_xmon.dts b/arch/powerpc/boot/dts/xpedite5200_xmon.dts index 646acfbef0dd..d5e14421c39a 100644 --- a/arch/powerpc/boot/dts/xpedite5200_xmon.dts +++ b/arch/powerpc/boot/dts/xpedite5200_xmon.dts @@ -503,6 +503,6 @@ /* Needed for dtbImage boot wrapper compatibility */ chosen { - linux,stdout-path = &serial0; + stdout-path = &serial0; }; }; diff --git a/arch/powerpc/boot/dts/yosemite.dts b/arch/powerpc/boot/dts/yosemite.dts index 30bb4753577a..56508785ce13 100644 --- a/arch/powerpc/boot/dts/yosemite.dts +++ b/arch/powerpc/boot/dts/yosemite.dts @@ -327,6 +327,6 @@ }; chosen { - linux,stdout-path = "/plb/opb/serial@ef600300"; + stdout-path = "/plb/opb/serial@ef600300"; }; }; -- cgit v1.2.3 From c0d64cf9fefd58831ce2cc81b2683bfff3760f7a Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Tue, 20 Mar 2018 08:46:11 +1100 Subject: powerpc: Use feature bit for RTC presence rather than timebase presence All PowerPC CPUs other than the original PPC601 have a timebase register rather than the "real-time clock" (RTC) register that the PPC601 (and the original POWER and POWER2 CPUs) had. Currently we have a CPU feature bit to indicate the presence of the timebase, but it makes more sense to use a bit to indicate the unusual situation rather than the common situation. This therefore defines a CPU_FTR_USE_RTC bit in place of the CPU_FTR_USE_TB bit, and arranges for it to be set on PPC601 systems. 
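A minimal sketch of the inverted test, using the __USE_RTC()/get_rtc()/get_tb() helpers that already exist in asm/time.h (illustrative only, not code from this patch): the rare 601-family case is now the one that carries a flag, and everything else falls through to the timebase.

	/* Illustrative: read whichever counter this CPU has. */
	static inline u64 read_time_counter(void)
	{
		if (__USE_RTC())	/* i.e. cpu_has_feature(CPU_FTR_USE_RTC) */
			return get_rtc();
		return get_tb();
	}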
Signed-off-by: Paul Mackerras Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/cputable.h | 85 +++++++++++++++++-------------------- arch/powerpc/include/asm/time.h | 2 +- arch/powerpc/kernel/dt_cpu_ftrs.c | 3 +- arch/powerpc/kernel/vdso.c | 12 +++--- 4 files changed, 47 insertions(+), 55 deletions(-) diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index a2c5c95882cf..052db187805e 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -138,7 +138,7 @@ static inline void cpu_feature_keys_init(void) { } #define CPU_FTR_ALTIVEC ASM_CONST(0x00000008) #define CPU_FTR_TAU ASM_CONST(0x00000010) #define CPU_FTR_CAN_DOZE ASM_CONST(0x00000020) -#define CPU_FTR_USE_TB ASM_CONST(0x00000040) +#define CPU_FTR_USE_RTC ASM_CONST(0x00000040) #define CPU_FTR_L2CSR ASM_CONST(0x00000080) #define CPU_FTR_601 ASM_CONST(0x00000100) #define CPU_FTR_DBELL ASM_CONST(0x00000200) @@ -285,21 +285,19 @@ static inline void cpu_feature_keys_init(void) { } #endif #define CPU_FTRS_PPC601 (CPU_FTR_COMMON | CPU_FTR_601 | \ - CPU_FTR_COHERENT_ICACHE | CPU_FTR_UNIFIED_ID_CACHE) -#define CPU_FTRS_603 (CPU_FTR_COMMON | \ - CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \ + CPU_FTR_COHERENT_ICACHE | CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_USE_RTC) +#define CPU_FTRS_603 (CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE | \ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE) -#define CPU_FTRS_604 (CPU_FTR_COMMON | \ - CPU_FTR_USE_TB | CPU_FTR_PPC_LE) +#define CPU_FTRS_604 (CPU_FTR_COMMON | CPU_FTR_PPC_LE) #define CPU_FTRS_740_NOTAU (CPU_FTR_COMMON | \ - CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ + CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_L2CR | \ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE) #define CPU_FTRS_740 (CPU_FTR_COMMON | \ - CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ + CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_L2CR | \ CPU_FTR_TAU | CPU_FTR_MAYBE_CAN_NAP | \ CPU_FTR_PPC_LE) #define CPU_FTRS_750 (CPU_FTR_COMMON | \ - CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ + CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_L2CR | \ CPU_FTR_TAU | CPU_FTR_MAYBE_CAN_NAP | \ CPU_FTR_PPC_LE) #define CPU_FTRS_750CL (CPU_FTRS_750) @@ -308,103 +306,96 @@ static inline void cpu_feature_keys_init(void) { } #define CPU_FTRS_750FX (CPU_FTRS_750 | CPU_FTR_DUAL_PLL_750FX) #define CPU_FTRS_750GX (CPU_FTRS_750FX) #define CPU_FTRS_7400_NOTAU (CPU_FTR_COMMON | \ - CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ + CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_L2CR | \ CPU_FTR_ALTIVEC_COMP | \ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE) #define CPU_FTRS_7400 (CPU_FTR_COMMON | \ - CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ + CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_L2CR | \ CPU_FTR_TAU | CPU_FTR_ALTIVEC_COMP | \ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE) #define CPU_FTRS_7450_20 (CPU_FTR_COMMON | \ - CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ + CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \ CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX) #define CPU_FTRS_7450_21 (CPU_FTR_COMMON | \ - CPU_FTR_USE_TB | \ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \ CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP | \ CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX) #define CPU_FTRS_7450_23 (CPU_FTR_COMMON | \ - CPU_FTR_USE_TB | CPU_FTR_NEED_PAIRED_STWCX | \ + CPU_FTR_NEED_PAIRED_STWCX | \ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ CPU_FTR_L3CR | 
CPU_FTR_SPEC7450 | \ CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE) #define CPU_FTRS_7455_1 (CPU_FTR_COMMON | \ - CPU_FTR_USE_TB | CPU_FTR_NEED_PAIRED_STWCX | \ + CPU_FTR_NEED_PAIRED_STWCX | \ CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR | \ CPU_FTR_SPEC7450 | CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE) #define CPU_FTRS_7455_20 (CPU_FTR_COMMON | \ - CPU_FTR_USE_TB | CPU_FTR_NEED_PAIRED_STWCX | \ + CPU_FTR_NEED_PAIRED_STWCX | \ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \ CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP | \ CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE) #define CPU_FTRS_7455 (CPU_FTR_COMMON | \ - CPU_FTR_USE_TB | \ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ CPU_FTR_L3CR | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \ CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX) #define CPU_FTRS_7447_10 (CPU_FTR_COMMON | \ - CPU_FTR_USE_TB | \ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ CPU_FTR_L3CR | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \ CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC | CPU_FTR_PPC_LE | \ CPU_FTR_NEED_PAIRED_STWCX) #define CPU_FTRS_7447 (CPU_FTR_COMMON | \ - CPU_FTR_USE_TB | \ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ CPU_FTR_L3CR | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \ CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX) #define CPU_FTRS_7447A (CPU_FTR_COMMON | \ - CPU_FTR_USE_TB | \ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \ CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX) #define CPU_FTRS_7448 (CPU_FTR_COMMON | \ - CPU_FTR_USE_TB | \ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \ CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX) -#define CPU_FTRS_82XX (CPU_FTR_COMMON | \ - CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB) +#define CPU_FTRS_82XX (CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE) #define CPU_FTRS_G2_LE (CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE | \ - CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP) + CPU_FTR_MAYBE_CAN_NAP) #define CPU_FTRS_E300 (CPU_FTR_MAYBE_CAN_DOZE | \ - CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | \ + CPU_FTR_MAYBE_CAN_NAP | \ CPU_FTR_COMMON) #define CPU_FTRS_E300C2 (CPU_FTR_MAYBE_CAN_DOZE | \ - CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | \ + CPU_FTR_MAYBE_CAN_NAP | \ CPU_FTR_COMMON | CPU_FTR_FPU_UNAVAILABLE) -#define CPU_FTRS_CLASSIC32 (CPU_FTR_COMMON | CPU_FTR_USE_TB) -#define CPU_FTRS_8XX (CPU_FTR_USE_TB | CPU_FTR_NOEXECUTE) -#define CPU_FTRS_40X (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE) -#define CPU_FTRS_44X (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE) -#define CPU_FTRS_440x6 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE | \ +#define CPU_FTRS_CLASSIC32 (CPU_FTR_COMMON) +#define CPU_FTRS_8XX (CPU_FTR_NOEXECUTE) +#define CPU_FTRS_40X (CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE) +#define CPU_FTRS_44X (CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE) +#define CPU_FTRS_440x6 (CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE | \ CPU_FTR_INDEXED_DCR) #define CPU_FTRS_47X (CPU_FTRS_440x6) -#define CPU_FTRS_E200 (CPU_FTR_USE_TB | CPU_FTR_SPE_COMP | \ +#define CPU_FTRS_E200 (CPU_FTR_SPE_COMP | \ CPU_FTR_NODSISRALIGN | CPU_FTR_COHERENT_ICACHE | \ CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_NOEXECUTE | \ CPU_FTR_DEBUG_LVL_EXC) -#define CPU_FTRS_E500 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \ +#define CPU_FTRS_E500 
(CPU_FTR_MAYBE_CAN_DOZE | \ CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \ CPU_FTR_NOEXECUTE) -#define CPU_FTRS_E500_2 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \ +#define CPU_FTRS_E500_2 (CPU_FTR_MAYBE_CAN_DOZE | \ CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | \ CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE) -#define CPU_FTRS_E500MC (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \ +#define CPU_FTRS_E500MC (CPU_FTR_NODSISRALIGN | \ CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \ CPU_FTR_DBELL | CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV) /* * e5500/e6500 erratum A-006958 is a timebase bug that can use the * same workaround as CPU_FTR_CELL_TB_BUG. */ -#define CPU_FTRS_E5500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \ +#define CPU_FTRS_E5500 (CPU_FTR_NODSISRALIGN | \ CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \ CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV | CPU_FTR_CELL_TB_BUG) -#define CPU_FTRS_E6500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \ +#define CPU_FTRS_E6500 (CPU_FTR_NODSISRALIGN | \ CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \ CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV | CPU_FTR_ALTIVEC_COMP | \ @@ -412,21 +403,21 @@ static inline void cpu_feature_keys_init(void) { } #define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN) /* 64-bit CPUs */ -#define CPU_FTRS_POWER4 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ +#define CPU_FTRS_POWER4 (CPU_FTR_LWSYNC | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_MMCRA | CPU_FTR_CP_USE_DCBTZ | \ CPU_FTR_STCX_CHECKS_ADDRESS) -#define CPU_FTRS_PPC970 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ +#define CPU_FTRS_PPC970 (CPU_FTR_LWSYNC | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_201 | \ CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \ CPU_FTR_CP_USE_DCBTZ | CPU_FTR_STCX_CHECKS_ADDRESS | \ CPU_FTR_HVMODE | CPU_FTR_DABRX) -#define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ +#define CPU_FTRS_POWER5 (CPU_FTR_LWSYNC | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_COHERENT_ICACHE | CPU_FTR_PURR | \ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_DABRX) -#define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ +#define CPU_FTRS_POWER6 (CPU_FTR_LWSYNC | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_COHERENT_ICACHE | \ @@ -434,7 +425,7 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR | \ CPU_FTR_DABRX) -#define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ +#define CPU_FTRS_POWER7 (CPU_FTR_LWSYNC | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\ CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_COHERENT_ICACHE | \ @@ -443,7 +434,7 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ CPU_FTR_CFAR | CPU_FTR_HVMODE | \ CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR | CPU_FTR_DABRX | CPU_FTR_PKEY) -#define CPU_FTRS_POWER8 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ +#define CPU_FTRS_POWER8 (CPU_FTR_LWSYNC | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\ CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_COHERENT_ICACHE | \ @@ -455,7 +446,7 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP | CPU_FTR_PKEY) #define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG) #define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & 
~CPU_FTR_DBELL) -#define CPU_FTRS_POWER9 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ +#define CPU_FTRS_POWER9 (CPU_FTR_LWSYNC | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\ CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_COHERENT_ICACHE | \ @@ -470,15 +461,15 @@ static inline void cpu_feature_keys_init(void) { } (~CPU_FTR_SAO)) #define CPU_FTRS_POWER9_DD2_0 CPU_FTRS_POWER9 #define CPU_FTRS_POWER9_DD2_1 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1) -#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ +#define CPU_FTRS_CELL (CPU_FTR_LWSYNC | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_PAUSE_ZERO | CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ | \ CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_DABRX) -#define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ +#define CPU_FTRS_PA6T (CPU_FTR_LWSYNC | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP | \ CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_DABRX) -#define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2) +#define CPU_FTRS_COMPATIBLE (CPU_FTR_PPCAS_ARCH_V2) #ifdef __powerpc64__ #ifdef CONFIG_PPC_BOOK3E diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h index b240666b7bc1..c9ad438cc0a9 100644 --- a/arch/powerpc/include/asm/time.h +++ b/arch/powerpc/include/asm/time.h @@ -46,7 +46,7 @@ struct div_result { /* Accessor functions for the timebase (RTC on 601) registers. */ /* If one day CONFIG_POWER is added just define __USE_RTC as 1 */ #ifdef CONFIG_6xx -#define __USE_RTC() (!cpu_has_feature(CPU_FTR_USE_TB)) +#define __USE_RTC() (cpu_has_feature(CPU_FTR_USE_RTC)) #else #define __USE_RTC() 0 #endif diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index 945e2c29ad2d..ee562ffb00c0 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -54,8 +54,7 @@ struct dt_cpu_feature { }; #define CPU_FTRS_BASE \ - (CPU_FTR_USE_TB | \ - CPU_FTR_LWSYNC | \ + (CPU_FTR_LWSYNC | \ CPU_FTR_FPU_UNAVAILABLE |\ CPU_FTR_NODSISRALIGN |\ CPU_FTR_NOEXECUTE |\ diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index 22b01a3962f0..b44ec104a5a1 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -99,26 +99,28 @@ static struct vdso_patch_def vdso_patches[] = { CPU_FTR_COHERENT_ICACHE, CPU_FTR_COHERENT_ICACHE, "__kernel_sync_dicache", "__kernel_sync_dicache_p5" }, +#ifdef CONFIG_PPC32 { - CPU_FTR_USE_TB, 0, + CPU_FTR_USE_RTC, CPU_FTR_USE_RTC, "__kernel_gettimeofday", NULL }, { - CPU_FTR_USE_TB, 0, + CPU_FTR_USE_RTC, CPU_FTR_USE_RTC, "__kernel_clock_gettime", NULL }, { - CPU_FTR_USE_TB, 0, + CPU_FTR_USE_RTC, CPU_FTR_USE_RTC, "__kernel_clock_getres", NULL }, { - CPU_FTR_USE_TB, 0, + CPU_FTR_USE_RTC, CPU_FTR_USE_RTC, "__kernel_get_tbfreq", NULL }, { - CPU_FTR_USE_TB, 0, + CPU_FTR_USE_RTC, CPU_FTR_USE_RTC, "__kernel_time", NULL }, +#endif }; /* -- cgit v1.2.3 From dd0efb3f11cc0adcb4caa192ba09ad802d1fa6c0 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Tue, 20 Mar 2018 08:46:12 +1100 Subject: powerpc: Book E: Remove unused CPU_FTR_L2CSR bit The CPU_FTR_L2CSR bit is never tested anywhere, so let's reclaim the bit. The last usage was removed in 86d63363defc ("powerpc/e500mc: Remove dead L2 flushing code in idle_e500.S") (Jun 2015). 
Signed-off-by: Paul Mackerras Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/cputable.h | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index 052db187805e..761b99c3dfad 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -139,7 +139,6 @@ static inline void cpu_feature_keys_init(void) { } #define CPU_FTR_TAU ASM_CONST(0x00000010) #define CPU_FTR_CAN_DOZE ASM_CONST(0x00000020) #define CPU_FTR_USE_RTC ASM_CONST(0x00000040) -#define CPU_FTR_L2CSR ASM_CONST(0x00000080) #define CPU_FTR_601 ASM_CONST(0x00000100) #define CPU_FTR_DBELL ASM_CONST(0x00000200) #define CPU_FTR_CAN_NAP ASM_CONST(0x00000400) @@ -385,18 +384,18 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | \ CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE) #define CPU_FTRS_E500MC (CPU_FTR_NODSISRALIGN | \ - CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \ + CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \ CPU_FTR_DBELL | CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV) /* * e5500/e6500 erratum A-006958 is a timebase bug that can use the * same workaround as CPU_FTR_CELL_TB_BUG. */ #define CPU_FTRS_E5500 (CPU_FTR_NODSISRALIGN | \ - CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \ + CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \ CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV | CPU_FTR_CELL_TB_BUG) #define CPU_FTRS_E6500 (CPU_FTR_NODSISRALIGN | \ - CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \ + CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \ CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV | CPU_FTR_ALTIVEC_COMP | \ CPU_FTR_CELL_TB_BUG | CPU_FTR_SMT) -- cgit v1.2.3 From 9bbf0b576d3294b6a2fda54d1af3b88290e8b65c Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Tue, 20 Mar 2018 08:46:13 +1100 Subject: powerpc: Free up CPU feature bits on 64-bit machines This moves all the CPU feature bits that are only used on 32-bit machines to the top 20 bits of the CPU feature word and arranges for them to be defined only in 32-bit builds. The features that are common to 32-bit and 64-bit machines are moved to bits 0-11 of the CPU feature word. This means that for 64-bit platforms, bits 44-63 can now be used for new features that only exist on 64-bit machines. (These bit numbers are counting from the right, i.e. the LSB is bit 0.) Because CPU_FTR_L3_DISABLE_NAP moved from the low 16 bits to the high 16 bits, we have to adjust some assembly code. Also, CPU_FTR_EMB_HV moved from the high 16 bits to the low 16 bits. Note that CPU_FTR_REAL_LE only applies to 64-bit chips, because only 64-bit chips (POWER6, 7, 8, 9) have a true little-endian mode that is a CPU execution mode as opposed to being a page attribute. With this we now have 20 free CPU feature bits on 64-bit machines. 
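The new partitioning can be summarized as masks; the values below are derived from the bit assignments in this patch, while the asserts themselves are only an illustrative sketch:

	/* bits 0-11: features common to 32-bit and 64-bit chips */
	#define FTR_COMMON_MASK	0x0000000000000fffULL
	/* bits 12-31: 32-bit-only features (under CONFIG_PPC32) */
	#define FTR_PPC32_MASK	0x00000000fffff000ULL
	/* bits 12-43: 64-bit-only features (under __powerpc64__) */
	#define FTR_PPC64_MASK	0x00000ffffffff000ULL

	_Static_assert((FTR_COMMON_MASK & FTR_PPC64_MASK) == 0,
		       "common and 64-bit-only feature bits are disjoint");
	_Static_assert((FTR_PPC64_MASK >> 44) == 0,
		       "bits 44-63 stay free for new 64-bit features");

The two CONFIG-specific ranges can share bits 12 and up because no single build defines both: a 64-bit build leaves the 32-bit-only bits undefined (or 0, for the two tested in common code), and a 32-bit build gets 0 for every LONG_ASM_CONST() value.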
Signed-off-by: Paul Mackerras Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/cputable.h | 133 ++++++++++++++++-------------- arch/powerpc/kernel/cpu_setup_6xx.S | 2 +- arch/powerpc/kernel/cpu_setup_fsl_booke.S | 2 +- 3 files changed, 73 insertions(+), 64 deletions(-) diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index 761b99c3dfad..49fd0676b6e5 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -131,40 +131,48 @@ static inline void cpu_feature_keys_init(void) { } /* CPU kernel features */ -/* Retain the 32b definitions all use bottom half of word */ +/* Definitions for features that we have on both 32-bit and 64-bit chips */ #define CPU_FTR_COHERENT_ICACHE ASM_CONST(0x00000001) -#define CPU_FTR_L2CR ASM_CONST(0x00000002) -#define CPU_FTR_SPEC7450 ASM_CONST(0x00000004) -#define CPU_FTR_ALTIVEC ASM_CONST(0x00000008) -#define CPU_FTR_TAU ASM_CONST(0x00000010) -#define CPU_FTR_CAN_DOZE ASM_CONST(0x00000020) -#define CPU_FTR_USE_RTC ASM_CONST(0x00000040) -#define CPU_FTR_601 ASM_CONST(0x00000100) -#define CPU_FTR_DBELL ASM_CONST(0x00000200) -#define CPU_FTR_CAN_NAP ASM_CONST(0x00000400) -#define CPU_FTR_L3CR ASM_CONST(0x00000800) -#define CPU_FTR_L3_DISABLE_NAP ASM_CONST(0x00001000) -#define CPU_FTR_NAP_DISABLE_L2_PR ASM_CONST(0x00002000) -#define CPU_FTR_DUAL_PLL_750FX ASM_CONST(0x00004000) -#define CPU_FTR_NO_DPM ASM_CONST(0x00008000) -#define CPU_FTR_476_DD2 ASM_CONST(0x00010000) -#define CPU_FTR_NEED_COHERENT ASM_CONST(0x00020000) -#define CPU_FTR_NO_BTIC ASM_CONST(0x00040000) -#define CPU_FTR_DEBUG_LVL_EXC ASM_CONST(0x00080000) -#define CPU_FTR_NODSISRALIGN ASM_CONST(0x00100000) -#define CPU_FTR_PPC_LE ASM_CONST(0x00200000) -#define CPU_FTR_REAL_LE ASM_CONST(0x00400000) -#define CPU_FTR_FPU_UNAVAILABLE ASM_CONST(0x00800000) -#define CPU_FTR_UNIFIED_ID_CACHE ASM_CONST(0x01000000) -#define CPU_FTR_SPE ASM_CONST(0x02000000) -#define CPU_FTR_NEED_PAIRED_STWCX ASM_CONST(0x04000000) -#define CPU_FTR_LWSYNC ASM_CONST(0x08000000) -#define CPU_FTR_NOEXECUTE ASM_CONST(0x10000000) -#define CPU_FTR_INDEXED_DCR ASM_CONST(0x20000000) -#define CPU_FTR_EMB_HV ASM_CONST(0x40000000) +#define CPU_FTR_ALTIVEC ASM_CONST(0x00000002) +#define CPU_FTR_DBELL ASM_CONST(0x00000004) +#define CPU_FTR_CAN_NAP ASM_CONST(0x00000008) +#define CPU_FTR_DEBUG_LVL_EXC ASM_CONST(0x00000010) +#define CPU_FTR_NODSISRALIGN ASM_CONST(0x00000020) +#define CPU_FTR_FPU_UNAVAILABLE ASM_CONST(0x00000040) +#define CPU_FTR_LWSYNC ASM_CONST(0x00000080) +#define CPU_FTR_NOEXECUTE ASM_CONST(0x00000100) +#define CPU_FTR_EMB_HV ASM_CONST(0x00000200) + +/* Definitions for features that only exist on 32-bit chips */ +#ifdef CONFIG_PPC32 +#define CPU_FTR_601 ASM_CONST(0x00001000) +#define CPU_FTR_L2CR ASM_CONST(0x00002000) +#define CPU_FTR_SPEC7450 ASM_CONST(0x00004000) +#define CPU_FTR_TAU ASM_CONST(0x00008000) +#define CPU_FTR_CAN_DOZE ASM_CONST(0x00010000) +#define CPU_FTR_USE_RTC ASM_CONST(0x00020000) +#define CPU_FTR_L3CR ASM_CONST(0x00040000) +#define CPU_FTR_L3_DISABLE_NAP ASM_CONST(0x00080000) +#define CPU_FTR_NAP_DISABLE_L2_PR ASM_CONST(0x00100000) +#define CPU_FTR_DUAL_PLL_750FX ASM_CONST(0x00200000) +#define CPU_FTR_NO_DPM ASM_CONST(0x00400000) +#define CPU_FTR_476_DD2 ASM_CONST(0x00800000) +#define CPU_FTR_NEED_COHERENT ASM_CONST(0x01000000) +#define CPU_FTR_NO_BTIC ASM_CONST(0x02000000) +#define CPU_FTR_PPC_LE ASM_CONST(0x04000000) +#define CPU_FTR_UNIFIED_ID_CACHE ASM_CONST(0x08000000) +#define CPU_FTR_SPE ASM_CONST(0x10000000) 
+#define CPU_FTR_NEED_PAIRED_STWCX ASM_CONST(0x20000000) +#define CPU_FTR_INDEXED_DCR ASM_CONST(0x40000000) + +#else /* CONFIG_PPC32 */ +/* Define these to 0 for the sake of tests in common code */ +#define CPU_FTR_601 (0) +#define CPU_FTR_PPC_LE (0) +#endif /* - * Add the 64-bit processor unique features in the top half of the word; + * Definitions for the 64-bit processor unique features; * on 32-bit, make the names available but defined to be 0. */ #ifdef __powerpc64__ @@ -173,37 +181,38 @@ static inline void cpu_feature_keys_init(void) { } #define LONG_ASM_CONST(x) 0 #endif -#define CPU_FTR_HVMODE LONG_ASM_CONST(0x0000000100000000) -#define CPU_FTR_ARCH_201 LONG_ASM_CONST(0x0000000200000000) -#define CPU_FTR_ARCH_206 LONG_ASM_CONST(0x0000000400000000) -#define CPU_FTR_ARCH_207S LONG_ASM_CONST(0x0000000800000000) -#define CPU_FTR_ARCH_300 LONG_ASM_CONST(0x0000001000000000) -#define CPU_FTR_MMCRA LONG_ASM_CONST(0x0000002000000000) -#define CPU_FTR_CTRL LONG_ASM_CONST(0x0000004000000000) -#define CPU_FTR_SMT LONG_ASM_CONST(0x0000008000000000) -#define CPU_FTR_PAUSE_ZERO LONG_ASM_CONST(0x0000010000000000) -#define CPU_FTR_PURR LONG_ASM_CONST(0x0000020000000000) -#define CPU_FTR_CELL_TB_BUG LONG_ASM_CONST(0x0000040000000000) -#define CPU_FTR_SPURR LONG_ASM_CONST(0x0000080000000000) -#define CPU_FTR_DSCR LONG_ASM_CONST(0x0000100000000000) -#define CPU_FTR_VSX LONG_ASM_CONST(0x0000200000000000) -#define CPU_FTR_SAO LONG_ASM_CONST(0x0000400000000000) -#define CPU_FTR_CP_USE_DCBTZ LONG_ASM_CONST(0x0000800000000000) -#define CPU_FTR_UNALIGNED_LD_STD LONG_ASM_CONST(0x0001000000000000) -#define CPU_FTR_ASYM_SMT LONG_ASM_CONST(0x0002000000000000) -#define CPU_FTR_STCX_CHECKS_ADDRESS LONG_ASM_CONST(0x0004000000000000) -#define CPU_FTR_POPCNTB LONG_ASM_CONST(0x0008000000000000) -#define CPU_FTR_POPCNTD LONG_ASM_CONST(0x0010000000000000) -#define CPU_FTR_PKEY LONG_ASM_CONST(0x0020000000000000) -#define CPU_FTR_VMX_COPY LONG_ASM_CONST(0x0040000000000000) -#define CPU_FTR_TM LONG_ASM_CONST(0x0080000000000000) -#define CPU_FTR_CFAR LONG_ASM_CONST(0x0100000000000000) -#define CPU_FTR_HAS_PPR LONG_ASM_CONST(0x0200000000000000) -#define CPU_FTR_DAWR LONG_ASM_CONST(0x0400000000000000) -#define CPU_FTR_DABRX LONG_ASM_CONST(0x0800000000000000) -#define CPU_FTR_PMAO_BUG LONG_ASM_CONST(0x1000000000000000) -#define CPU_FTR_POWER9_DD1 LONG_ASM_CONST(0x4000000000000000) -#define CPU_FTR_POWER9_DD2_1 LONG_ASM_CONST(0x8000000000000000) +#define CPU_FTR_REAL_LE LONG_ASM_CONST(0x0000000000001000) +#define CPU_FTR_HVMODE LONG_ASM_CONST(0x0000000000002000) +#define CPU_FTR_ARCH_201 LONG_ASM_CONST(0x0000000000004000) +#define CPU_FTR_ARCH_206 LONG_ASM_CONST(0x0000000000008000) +#define CPU_FTR_ARCH_207S LONG_ASM_CONST(0x0000000000010000) +#define CPU_FTR_ARCH_300 LONG_ASM_CONST(0x0000000000020000) +#define CPU_FTR_MMCRA LONG_ASM_CONST(0x0000000000040000) +#define CPU_FTR_CTRL LONG_ASM_CONST(0x0000000000080000) +#define CPU_FTR_SMT LONG_ASM_CONST(0x0000000000100000) +#define CPU_FTR_PAUSE_ZERO LONG_ASM_CONST(0x0000000000200000) +#define CPU_FTR_PURR LONG_ASM_CONST(0x0000000000400000) +#define CPU_FTR_CELL_TB_BUG LONG_ASM_CONST(0x0000000000800000) +#define CPU_FTR_SPURR LONG_ASM_CONST(0x0000000001000000) +#define CPU_FTR_DSCR LONG_ASM_CONST(0x0000000002000000) +#define CPU_FTR_VSX LONG_ASM_CONST(0x0000000004000000) +#define CPU_FTR_SAO LONG_ASM_CONST(0x0000000008000000) +#define CPU_FTR_CP_USE_DCBTZ LONG_ASM_CONST(0x0000000010000000) +#define CPU_FTR_UNALIGNED_LD_STD LONG_ASM_CONST(0x0000000020000000) +#define 
CPU_FTR_ASYM_SMT LONG_ASM_CONST(0x0000000040000000) +#define CPU_FTR_STCX_CHECKS_ADDRESS LONG_ASM_CONST(0x0000000080000000) +#define CPU_FTR_POPCNTB LONG_ASM_CONST(0x0000000100000000) +#define CPU_FTR_POPCNTD LONG_ASM_CONST(0x0000000200000000) +#define CPU_FTR_PKEY LONG_ASM_CONST(0x0000000400000000) +#define CPU_FTR_VMX_COPY LONG_ASM_CONST(0x0000000800000000) +#define CPU_FTR_TM LONG_ASM_CONST(0x0000001000000000) +#define CPU_FTR_CFAR LONG_ASM_CONST(0x0000002000000000) +#define CPU_FTR_HAS_PPR LONG_ASM_CONST(0x0000004000000000) +#define CPU_FTR_DAWR LONG_ASM_CONST(0x0000008000000000) +#define CPU_FTR_DABRX LONG_ASM_CONST(0x0000010000000000) +#define CPU_FTR_PMAO_BUG LONG_ASM_CONST(0x0000020000000000) +#define CPU_FTR_POWER9_DD1 LONG_ASM_CONST(0x0000040000000000) +#define CPU_FTR_POWER9_DD2_1 LONG_ASM_CONST(0x0000080000000000) #ifndef __ASSEMBLY__ diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S index c5e5a94d9892..a9f3970693e1 100644 --- a/arch/powerpc/kernel/cpu_setup_6xx.S +++ b/arch/powerpc/kernel/cpu_setup_6xx.S @@ -226,7 +226,7 @@ BEGIN_FTR_SECTION beq 1f END_FTR_SECTION_IFSET(CPU_FTR_L3CR) lwz r6,CPU_SPEC_FEATURES(r4) - andi. r0,r6,CPU_FTR_L3_DISABLE_NAP + andis. r0,r6,CPU_FTR_L3_DISABLE_NAP@h beq 1f li r7,CPU_FTR_CAN_NAP andc r6,r6,r7 diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S index 462aed9bcf51..8d142e5d84cd 100644 --- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S +++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S @@ -162,7 +162,7 @@ _GLOBAL(__setup_cpu_e5500) * the feature on the primary core, avoid doing it on the * secondary core. */ - andis. r6, r3, CPU_FTR_EMB_HV@h + andi. r6, r3, CPU_FTR_EMB_HV beq 2f rlwinm r3, r3, 0, ~CPU_FTR_EMB_HV stw r3, CPU_SPEC_FEATURES(r4) -- cgit v1.2.3 From b5af4f2793233cf37596e2c1f7b23385dc3aaa58 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Wed, 21 Mar 2018 21:31:59 +1100 Subject: powerpc: Add CPU feature bits for TM bug workarounds on POWER9 v2.2 This adds a CPU feature bit which is set for POWER9 "Nimbus" DD2.2 processors which will be used to enable the hypervisor to assist hardware with the handling of checkpointed register values while the CPU is in suspend state, in order to work around hardware bugs. The hardware assistance for these workarounds introduced a new hardware bug relating to the XER[SO] bit. We add a separate feature bit for this bug in case future chips fix it while still requiring the hypervisor assistance with suspend state. When the dt_cpu_ftrs subsystem is in use, the software assistance can be enabled using a "tm-suspend-hypervisor-assist" node in the device tree, and a "tm-suspend-xer-so-bug" node enables the workarounds for the XER[SO] bug. In the absence of such nodes, a quirk enables both for POWER9 "Nimbus" DD2.2 processors. 
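A hedged sketch of how subsequent code keys off the two new bits, using the kernel's existing cpu_has_feature() test (the branch bodies are placeholders, not code from this patch):

	if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) {
		/* hypervisor emulates TM state transitions taken in
		 * suspend state, via the softpatch interrupt */
	}
	if (cpu_has_feature(CPU_FTR_P9_TM_XER_SO_BUG)) {
		/* additionally work around the XER[SO] checkpointing
		 * bug, e.g. by forcing the core into SMT4 */
	}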
Signed-off-by: Paul Mackerras Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/cputable.h | 7 ++++++- arch/powerpc/kernel/cputable.c | 24 ++++++++++++++++++++++-- arch/powerpc/kernel/dt_cpu_ftrs.c | 5 +++++ 3 files changed, 33 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index 49fd0676b6e5..ecee84dea7e7 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -213,6 +213,8 @@ static inline void cpu_feature_keys_init(void) { } #define CPU_FTR_PMAO_BUG LONG_ASM_CONST(0x0000020000000000) #define CPU_FTR_POWER9_DD1 LONG_ASM_CONST(0x0000040000000000) #define CPU_FTR_POWER9_DD2_1 LONG_ASM_CONST(0x0000080000000000) +#define CPU_FTR_P9_TM_HV_ASSIST LONG_ASM_CONST(0x0000100000000000) +#define CPU_FTR_P9_TM_XER_SO_BUG LONG_ASM_CONST(0x0000200000000000) #ifndef __ASSEMBLY__ @@ -469,6 +471,8 @@ static inline void cpu_feature_keys_init(void) { } (~CPU_FTR_SAO)) #define CPU_FTRS_POWER9_DD2_0 CPU_FTRS_POWER9 #define CPU_FTRS_POWER9_DD2_1 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1) +#define CPU_FTRS_POWER9_DD2_2 (CPU_FTRS_POWER9 | CPU_FTR_P9_TM_HV_ASSIST | \ + CPU_FTR_P9_TM_XER_SO_BUG) #define CPU_FTRS_CELL (CPU_FTR_LWSYNC | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ @@ -488,7 +492,8 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTRS_POWER6 | CPU_FTRS_POWER7 | CPU_FTRS_POWER8E | \ CPU_FTRS_POWER8 | CPU_FTRS_POWER8_DD1 | CPU_FTRS_CELL | \ CPU_FTRS_PA6T | CPU_FTR_VSX | CPU_FTRS_POWER9 | \ - CPU_FTRS_POWER9_DD1 | CPU_FTRS_POWER9_DD2_1) + CPU_FTRS_POWER9_DD1 | CPU_FTRS_POWER9_DD2_1 | \ + CPU_FTRS_POWER9_DD2_2) #endif #else enum { diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index c40a9fc1e5d1..68052eacb827 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -553,11 +553,31 @@ static struct cpu_spec __initdata cpu_specs[] = { .machine_check_early = __machine_check_early_realmode_p9, .platform = "power9", }, - { /* Power9 DD 2.1 or later (see DD2.0 above) */ + { /* Power9 DD 2.1 */ + .pvr_mask = 0xffffefff, + .pvr_value = 0x004e0201, + .cpu_name = "POWER9 (raw)", + .cpu_features = CPU_FTRS_POWER9_DD2_1, + .cpu_user_features = COMMON_USER_POWER9, + .cpu_user_features2 = COMMON_USER2_POWER9, + .mmu_features = MMU_FTRS_POWER9, + .icache_bsize = 128, + .dcache_bsize = 128, + .num_pmcs = 6, + .pmc_type = PPC_PMC_IBM, + .oprofile_cpu_type = "ppc64/power9", + .oprofile_type = PPC_OPROFILE_INVALID, + .cpu_setup = __setup_cpu_power9, + .cpu_restore = __restore_cpu_power9, + .flush_tlb = __flush_tlb_power9, + .machine_check_early = __machine_check_early_realmode_p9, + .platform = "power9", + }, + { /* Power9 DD2.2 or later */ .pvr_mask = 0xffff0000, .pvr_value = 0x004e0000, .cpu_name = "POWER9 (raw)", - .cpu_features = CPU_FTRS_POWER9_DD2_1, + .cpu_features = CPU_FTRS_POWER9_DD2_2, .cpu_user_features = COMMON_USER_POWER9, .cpu_user_features2 = COMMON_USER2_POWER9, .mmu_features = MMU_FTRS_POWER9, diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index ee562ffb00c0..0a0c601c6ade 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -589,6 +589,8 @@ static struct dt_cpu_feature_match __initdata {"virtual-page-class-key-protection", feat_enable, 0}, {"transactional-memory", feat_enable_tm, CPU_FTR_TM}, {"transactional-memory-v3", feat_enable_tm, 0}, + {"tm-suspend-hypervisor-assist", feat_enable, 
CPU_FTR_P9_TM_HV_ASSIST}, +{"tm-suspend-xer-so-bug", feat_enable, CPU_FTR_P9_TM_XER_SO_BUG}, {"idle-nap", feat_enable_idle_nap, 0}, {"alignment-interrupt-dsisr", feat_enable_align_dsisr, 0}, {"idle-stop", feat_enable_idle_stop, 0}, @@ -708,6 +710,9 @@ static __init void cpufeatures_cpu_quirks(void) cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD1; else if ((version & 0xffffefff) == 0x004e0201) cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1; + else if ((version & 0xffffefff) == 0x004e0202) + cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_HV_ASSIST | + CPU_FTR_P9_TM_XER_SO_BUG; } static void __init cpufeatures_setup_finished(void) -- cgit v1.2.3 From 7672691a08c886e53ccbf8cdca406f8c92ec7a20 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Wed, 21 Mar 2018 21:32:00 +1100 Subject: powerpc/powernv: Provide a way to force a core into SMT4 mode POWER9 processors up to and including "Nimbus" v2.2 have hardware bugs relating to transactional memory and thread reconfiguration. One of these bugs has a workaround which is to get the core into SMT4 state temporarily. This workaround is only needed when running bare-metal. This patch provides a function which gets the core into SMT4 mode by preventing threads from going to a stop state, and waking up those which are already in a stop state. Once at least 3 threads are not in a stop state, the core will be in SMT4 and we can continue. To do this, we add a "dont_stop" flag to the paca to tell the thread not to go into a stop state. If this flag is set, power9_idle_stop() just returns immediately with a return value of 0. The pnv_power9_force_smt4_catch() function does the following: 1. Set the dont_stop flag for each thread in the core, except ourselves (in fact we use an atomic_inc() in case more than one thread is calling this function concurrently). 2. See how many threads are awake, indicated by their requested_psscr field in the paca being 0. If this is at least 3, the core is already in SMT4 and we can skip the remaining steps. 3. Send a doorbell interrupt to each thread that was seen as being in a stop state in step 2. 4. Until at least 3 threads are awake, scan the threads to which we sent a doorbell interrupt and check if they are awake now. This relies on the following properties: - Once dont_stop is non-zero, requested_psscr can't go from zero to non-zero, except transiently (and without the thread doing stop). - requested_psscr being zero guarantees that the thread isn't in a state-losing stop state where thread reconfiguration could occur. - Doing stop with a PSSCR value of 0 won't be a state-losing stop and thus won't allow thread reconfiguration. - Once threads_per_core/2 + 1 (i.e. 3) threads are awake, the core must be in SMT4 mode, since SMT modes are powers of 2. This does add a sync to power9_idle_stop(), which is necessary to provide the correct ordering between setting requested_psscr and checking dont_stop. The overhead of the sync should be unnoticeable compared to the latency of going into and out of a stop state. Because some objected to incurring this extra latency on systems where the XER[SO] bug is not relevant, I have put the test in power9_idle_stop inside a feature section. This means that pnv_power9_force_smt4_catch() WILL NOT WORK correctly on systems without the CPU_FTR_P9_TM_XER_SO_BUG feature bit set, and will probably hang the system.
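A hedged sketch of the intended calling pattern; the release function that ends the SMT4 window is described in the next paragraph, and the real call sites arrive in the KVM patches that follow:

	preempt_disable();
	if (cpu_has_feature(CPU_FTR_P9_TM_XER_SO_BUG)) {
		pnv_power9_force_smt4_catch();
		/* ... work that must run while the core is in SMT4 ... */
		pnv_power9_force_smt4_release();
	}
	preempt_enable();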
In order to cater for uses where the caller has an operation that has to be done while the core is in SMT4, the core continues to be kept in SMT4 after pnv_power9_force_smt4_catch() function returns, until the pnv_power9_force_smt4_release() function is called. It undoes the effect of step 1 above and allows the other threads to go into a stop state. Signed-off-by: Paul Mackerras Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/asm-prototypes.h | 3 ++ arch/powerpc/include/asm/paca.h | 3 ++ arch/powerpc/include/asm/powernv.h | 1 + arch/powerpc/kernel/asm-offsets.c | 1 + arch/powerpc/kernel/idle_book3s.S | 21 ++++++++ arch/powerpc/platforms/powernv/idle.c | 81 +++++++++++++++++++++++++++++++ 6 files changed, 110 insertions(+) diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h index 7330150bfe34..4e14d2304d5f 100644 --- a/arch/powerpc/include/asm/asm-prototypes.h +++ b/arch/powerpc/include/asm/asm-prototypes.h @@ -126,4 +126,7 @@ extern int __ucmpdi2(u64, u64); void _mcount(void); unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip); +void pnv_power9_force_smt4_catch(void); +void pnv_power9_force_smt4_release(void); + #endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */ diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index b62c31037cad..4803cc1b011b 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -32,6 +32,7 @@ #include #include #include +#include register struct paca_struct *local_paca asm("r13"); @@ -177,6 +178,8 @@ struct paca_struct { u8 thread_mask; /* Mask to denote subcore sibling threads */ u8 subcore_sibling_mask; + /* Flag to request this thread not to stop */ + atomic_t dont_stop; /* * Pointer to an array which contains pointer * to the sibling threads' paca. diff --git a/arch/powerpc/include/asm/powernv.h b/arch/powerpc/include/asm/powernv.h index dc5f6a5d4575..d1c2d2e658cf 100644 --- a/arch/powerpc/include/asm/powernv.h +++ b/arch/powerpc/include/asm/powernv.h @@ -40,6 +40,7 @@ static inline int pnv_npu2_handle_fault(struct npu_context *context, } static inline void pnv_tm_init(void) { } +static inline void pnv_power9_force_smt4(void) { } #endif #endif /* _ASM_POWERNV_H */ diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index ea5eb91b836e..dbefe30d4daa 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -759,6 +759,7 @@ int main(void) OFFSET(PACA_SUBCORE_SIBLING_MASK, paca_struct, subcore_sibling_mask); OFFSET(PACA_SIBLING_PACA_PTRS, paca_struct, thread_sibling_pacas); OFFSET(PACA_REQ_PSSCR, paca_struct, requested_psscr); + OFFSET(PACA_DONT_STOP, paca_struct, dont_stop); #define STOP_SPR(x, f) OFFSET(x, paca_struct, stop_sprs.f) STOP_SPR(STOP_PID, pid); STOP_SPR(STOP_LDBAR, ldbar); diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S index 01e1c1997893..89157cf452e3 100644 --- a/arch/powerpc/kernel/idle_book3s.S +++ b/arch/powerpc/kernel/idle_book3s.S @@ -339,6 +339,7 @@ power_enter_stop: bne .Lhandle_esl_ec_set PPC_STOP li r3,0 /* Since we didn't lose state, return 0 */ + std r3, PACA_REQ_PSSCR(r13) /* * pnv_wakeup_noloss() expects r12 to contain the SRR1 value so @@ -429,11 +430,29 @@ ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66); \ * r3 contains desired PSSCR register value. 
*/ _GLOBAL(power9_idle_stop) +BEGIN_FTR_SECTION + lwz r5, PACA_DONT_STOP(r13) + cmpwi r5, 0 + bne 1f std r3, PACA_REQ_PSSCR(r13) + sync + lwz r5, PACA_DONT_STOP(r13) + cmpwi r5, 0 + bne 1f +END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG) mtspr SPRN_PSSCR,r3 LOAD_REG_ADDR(r4,power_enter_stop) b pnv_powersave_common /* No return */ +1: + /* + * We get here when TM / thread reconfiguration bug workaround + * code wants to get the CPU into SMT4 mode, and therefore + * we are being asked not to stop. + */ + li r3, 0 + std r3, PACA_REQ_PSSCR(r13) + blr /* return 0 for wakeup cause / SRR1 value */ /* * On waking up from stop 0,1,2 with ESL=1 on POWER9 DD1, @@ -584,6 +603,8 @@ FTR_SECTION_ELSE_NESTED(71) mfspr r5, SPRN_PSSCR rldicl r5,r5,4,60 ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_POWER9_DD1, 71) + li r0, 0 /* clear requested_psscr to say we're awake */ + std r0, PACA_REQ_PSSCR(r13) cmpd cr4,r5,r4 bge cr4,pnv_wakeup_tb_loss /* returns to caller */ diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c index 443d5ca71995..99a760eae964 100644 --- a/arch/powerpc/platforms/powernv/idle.c +++ b/arch/powerpc/platforms/powernv/idle.c @@ -24,6 +24,7 @@ #include #include #include +#include #include "powernv.h" #include "subcore.h" @@ -387,6 +388,86 @@ void power9_idle(void) power9_idle_type(pnv_default_stop_val, pnv_default_stop_mask); } +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE +/* + * This is used in working around bugs in thread reconfiguration + * on POWER9 (at least up to Nimbus DD2.2) relating to transactional + * memory and the way that XER[SO] is checkpointed. + * This function forces the core into SMT4 in order by asking + * all other threads not to stop, and sending a message to any + * that are in a stop state. + * Must be called with preemption disabled. + * + * DO NOT call this unless cpu_has_feature(CPU_FTR_P9_TM_XER_SO_BUG) is + * true; otherwise this function will hang the system, due to the + * optimization in power9_idle_stop. 
+ */ +void pnv_power9_force_smt4_catch(void) +{ + int cpu, cpu0, thr; + struct paca_struct *tpaca; + int awake_threads = 1; /* this thread is awake */ + int poke_threads = 0; + int need_awake = threads_per_core; + + cpu = smp_processor_id(); + cpu0 = cpu & ~(threads_per_core - 1); + tpaca = &paca[cpu0]; + for (thr = 0; thr < threads_per_core; ++thr) { + if (cpu != cpu0 + thr) + atomic_inc(&tpaca[thr].dont_stop); + } + /* order setting dont_stop vs testing requested_psscr */ + mb(); + for (thr = 0; thr < threads_per_core; ++thr) { + if (!tpaca[thr].requested_psscr) + ++awake_threads; + else + poke_threads |= (1 << thr); + } + + /* If at least 3 threads are awake, the core is in SMT4 already */ + if (awake_threads < need_awake) { + /* We have to wake some threads; we'll use msgsnd */ + for (thr = 0; thr < threads_per_core; ++thr) { + if (poke_threads & (1 << thr)) { + ppc_msgsnd_sync(); + ppc_msgsnd(PPC_DBELL_MSGTYPE, 0, + tpaca[thr].hw_cpu_id); + } + } + /* now spin until at least 3 threads are awake */ + do { + for (thr = 0; thr < threads_per_core; ++thr) { + if ((poke_threads & (1 << thr)) && + !tpaca[thr].requested_psscr) { + ++awake_threads; + poke_threads &= ~(1 << thr); + } + } + } while (awake_threads < need_awake); + } +} +EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_catch); + +void pnv_power9_force_smt4_release(void) +{ + int cpu, cpu0, thr; + struct paca_struct *tpaca; + + cpu = smp_processor_id(); + cpu0 = cpu & ~(threads_per_core - 1); + tpaca = &paca[cpu0]; + + /* clear all the dont_stop flags */ + for (thr = 0; thr < threads_per_core; ++thr) { + if (cpu != cpu0 + thr) + atomic_dec(&tpaca[thr].dont_stop); + } +} +EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_release); +#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ + #ifdef CONFIG_HOTPLUG_CPU static void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val) { -- cgit v1.2.3 From 4bb3c7a0208fc13ca70598efd109901a7cd45ae7 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Wed, 21 Mar 2018 21:32:01 +1100 Subject: KVM: PPC: Book3S HV: Work around transactional memory bugs in POWER9 POWER9 has hardware bugs relating to transactional memory and thread reconfiguration (changes to hardware SMT mode). Specifically, the core does not have enough storage to store a complete checkpoint of all the architected state for all four threads. The DD2.2 version of POWER9 includes hardware modifications designed to allow hypervisor software to implement workarounds for these problems. This patch implements those workarounds in KVM code so that KVM guests see a full, working transactional memory implementation. The problems center around the use of TM suspended state, where the CPU has a checkpointed state but execution is not transactional. The workaround is to implement a "fake suspend" state, which looks to the guest like suspended state but the CPU does not store a checkpoint. In this state, any instruction that would cause a transition to transactional state (rfid, rfebb, mtmsrd, tresume) or would use the checkpointed state (treclaim) causes a "soft patch" interrupt (vector 0x1500) to the hypervisor so that it can be emulated. The trechkpt instruction also causes a soft patch interrupt. On POWER9 DD2.2, we avoid returning to the guest in any state which would require a checkpoint to be present. The trechkpt in the guest entry path which would normally create that checkpoint is replaced by either a transition to fake suspend state, if the guest is in suspend state, or a rollback to the pre-transactional state if the guest is in transactional state. 
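The trigger set described above can be gathered into a single table; the table itself is purely illustrative, but the PPC_INST_* opcode constants are real ones from ppc-opcode.h, several of them added by this patch:

	static const unsigned int softpatch_insns[] = {
		PPC_INST_RFID,		/* may transition to transactional */
		PPC_INST_RFEBB,
		PPC_INST_MTMSRD,
		PPC_INST_TSR,		/* tresume (and tsuspend) */
		PPC_INST_TRECLAIM,	/* uses the checkpointed state */
		PPC_INST_TRECHKPT,	/* also softpatches, per the above */
	};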
Fake suspend state is indicated by a flag in the PACA plus a new bit in the PSSCR. The new PSSCR bit is write-only and reads back as 0. On exit from the guest, if the guest is in fake suspend state, we still do the treclaim instruction as we would in real suspend state, in order to get into non-transactional state, but we do not save the resulting register state since there was no checkpoint. Emulation of the instructions that cause a softpatch interrupt is handled in two paths. If the guest is in real suspend mode, we call kvmhv_p9_tm_emulation_early() to handle the cases where the guest is transitioning to transactional state. This is called before we do the treclaim in the guest exit path; because we haven't done treclaim, we can get back to the guest with the transaction still active. If the instruction is a case that kvmhv_p9_tm_emulation_early() doesn't handle, or if the guest is in fake suspend state, then we proceed to do the complete guest exit path and subsequently call kvmhv_p9_tm_emulation() in host context with the MMU on. This handles all the cases including the cases that generate program interrupts (illegal instruction or TM Bad Thing) and facility unavailable interrupts. The emulation is reasonably straightforward and is mostly concerned with checking for exception conditions and updating the state of registers such as MSR and CR0. The treclaim emulation takes care to ensure that the TEXASR register gets updated as if it were the guest treclaim instruction that had done failure recording, not the treclaim done in hypervisor state in the guest exit path. With this, the KVM_CAP_PPC_HTM capability returns true (1) even if transactional memory is not available to host userspace. Signed-off-by: Paul Mackerras Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/kvm_asm.h | 2 + arch/powerpc/include/asm/kvm_book3s.h | 4 + arch/powerpc/include/asm/kvm_book3s_64.h | 43 ++++++ arch/powerpc/include/asm/kvm_book3s_asm.h | 1 + arch/powerpc/include/asm/kvm_host.h | 1 + arch/powerpc/include/asm/ppc-opcode.h | 4 + arch/powerpc/include/asm/reg.h | 7 + arch/powerpc/kernel/asm-offsets.c | 2 + arch/powerpc/kernel/cputable.c | 1 - arch/powerpc/kernel/exceptions-64s.S | 4 +- arch/powerpc/kvm/Makefile | 7 + arch/powerpc/kvm/book3s_hv.c | 18 ++- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 143 +++++++++++++++++++- arch/powerpc/kvm/book3s_hv_tm.c | 216 ++++++++++++++++++++++++++++++ arch/powerpc/kvm/book3s_hv_tm_builtin.c | 109 +++++++++++++++ arch/powerpc/kvm/powerpc.c | 5 +- 16 files changed, 557 insertions(+), 10 deletions(-) create mode 100644 arch/powerpc/kvm/book3s_hv_tm.c create mode 100644 arch/powerpc/kvm/book3s_hv_tm_builtin.c diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h index 09a802bb702f..a790d5cf6ea3 100644 --- a/arch/powerpc/include/asm/kvm_asm.h +++ b/arch/powerpc/include/asm/kvm_asm.h @@ -108,6 +108,8 @@ /* book3s_hv */ +#define BOOK3S_INTERRUPT_HV_SOFTPATCH 0x1500 + /* * Special trap used to indicate to host that this is a * passthrough interrupt that could not be handled diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index 376ae803b69c..4c02a7378d06 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -241,6 +241,10 @@ extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask); extern void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr); +extern int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu); +extern int 
kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu); +extern void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu); + extern void kvmppc_entry_trampoline(void); extern void kvmppc_hv_entry_trampoline(void); extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst); diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h index 998f7b7aaa9e..c424e44f4c00 100644 --- a/arch/powerpc/include/asm/kvm_book3s_64.h +++ b/arch/powerpc/include/asm/kvm_book3s_64.h @@ -472,6 +472,49 @@ static inline void set_dirty_bits_atomic(unsigned long *map, unsigned long i, set_bit_le(i, map); } +static inline u64 sanitize_msr(u64 msr) +{ + msr &= ~MSR_HV; + msr |= MSR_ME; + return msr; +} + +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM +static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu) +{ + vcpu->arch.cr = vcpu->arch.cr_tm; + vcpu->arch.xer = vcpu->arch.xer_tm; + vcpu->arch.lr = vcpu->arch.lr_tm; + vcpu->arch.ctr = vcpu->arch.ctr_tm; + vcpu->arch.amr = vcpu->arch.amr_tm; + vcpu->arch.ppr = vcpu->arch.ppr_tm; + vcpu->arch.dscr = vcpu->arch.dscr_tm; + vcpu->arch.tar = vcpu->arch.tar_tm; + memcpy(vcpu->arch.gpr, vcpu->arch.gpr_tm, + sizeof(vcpu->arch.gpr)); + vcpu->arch.fp = vcpu->arch.fp_tm; + vcpu->arch.vr = vcpu->arch.vr_tm; + vcpu->arch.vrsave = vcpu->arch.vrsave_tm; +} + +static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu) +{ + vcpu->arch.cr_tm = vcpu->arch.cr; + vcpu->arch.xer_tm = vcpu->arch.xer; + vcpu->arch.lr_tm = vcpu->arch.lr; + vcpu->arch.ctr_tm = vcpu->arch.ctr; + vcpu->arch.amr_tm = vcpu->arch.amr; + vcpu->arch.ppr_tm = vcpu->arch.ppr; + vcpu->arch.dscr_tm = vcpu->arch.dscr; + vcpu->arch.tar_tm = vcpu->arch.tar; + memcpy(vcpu->arch.gpr_tm, vcpu->arch.gpr, + sizeof(vcpu->arch.gpr)); + vcpu->arch.fp_tm = vcpu->arch.fp; + vcpu->arch.vr_tm = vcpu->arch.vr; + vcpu->arch.vrsave_tm = vcpu->arch.vrsave; +} +#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ + #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ #endif /* __ASM_KVM_BOOK3S_64_H__ */ diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h index ab386af2904f..d978fdf698af 100644 --- a/arch/powerpc/include/asm/kvm_book3s_asm.h +++ b/arch/powerpc/include/asm/kvm_book3s_asm.h @@ -119,6 +119,7 @@ struct kvmppc_host_state { u8 host_ipi; u8 ptid; /* thread number within subcore when split */ u8 tid; /* thread number within whole core */ + u8 fake_suspend; struct kvm_vcpu *kvm_vcpu; struct kvmppc_vcore *kvm_vcore; void __iomem *xics_phys; diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 1f53b562726f..deb54293398c 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -610,6 +610,7 @@ struct kvm_vcpu_arch { u64 tfhar; u64 texasr; u64 tfiar; + u64 orig_texasr; u32 cr_tm; u64 xer_tm; diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index f1083bcf449c..772eff7fd446 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -232,6 +232,7 @@ #define PPC_INST_MSGSYNC 0x7c0006ec #define PPC_INST_MSGSNDP 0x7c00011c #define PPC_INST_MSGCLRP 0x7c00015c +#define PPC_INST_MTMSRD 0x7c000164 #define PPC_INST_MTTMR 0x7c0003dc #define PPC_INST_NOP 0x60000000 #define PPC_INST_PASTE 0x7c20070d @@ -239,8 +240,10 @@ #define PPC_INST_POPCNTB_MASK 0xfc0007fe #define PPC_INST_POPCNTD 0x7c0003f4 #define PPC_INST_POPCNTW 0x7c0002f4 +#define PPC_INST_RFEBB 0x4c000124 #define PPC_INST_RFCI 0x4c000066 #define PPC_INST_RFDI 0x4c00004e 
+#define PPC_INST_RFID 0x4c000024 #define PPC_INST_RFMCI 0x4c00004c #define PPC_INST_MFSPR 0x7c0002a6 #define PPC_INST_MFSPR_DSCR 0x7c1102a6 @@ -277,6 +280,7 @@ #define PPC_INST_TRECHKPT 0x7c0007dd #define PPC_INST_TRECLAIM 0x7c00075d #define PPC_INST_TABORT 0x7c00071d +#define PPC_INST_TSR 0x7c0005dd #define PPC_INST_NAP 0x4c000364 #define PPC_INST_SLEEP 0x4c0003a4 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index e6c7eadf6bce..cb0f272ce123 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -156,6 +156,8 @@ #define PSSCR_SD 0x00400000 /* Status Disable */ #define PSSCR_PLS 0xf000000000000000 /* Power-saving Level Status */ #define PSSCR_GUEST_VIS 0xf0000000000003ff /* Guest-visible PSSCR fields */ +#define PSSCR_FAKE_SUSPEND 0x00000400 /* Fake-suspend bit (P9 DD2.2) */ +#define PSSCR_FAKE_SUSPEND_LG 10 /* Fake-suspend bit position */ /* Floating Point Status and Control Register (FPSCR) Fields */ #define FPSCR_FX 0x80000000 /* FPU exception summary */ @@ -237,7 +239,12 @@ #define SPRN_TFIAR 0x81 /* Transaction Failure Inst Addr */ #define SPRN_TEXASR 0x82 /* Transaction EXception & Summary */ #define SPRN_TEXASRU 0x83 /* '' '' '' Upper 32 */ +#define TEXASR_ABORT __MASK(63-31) /* terminated by tabort or treclaim */ +#define TEXASR_SUSP __MASK(63-32) /* tx failed in suspended state */ +#define TEXASR_HV __MASK(63-34) /* MSR[HV] when failure occurred */ +#define TEXASR_PR __MASK(63-35) /* MSR[PR] when failure occurred */ #define TEXASR_FS __MASK(63-36) /* TEXASR Failure Summary */ +#define TEXASR_EXACT __MASK(63-37) /* TFIAR value is exact */ #define SPRN_TFHAR 0x80 /* Transaction Failure Handler Addr */ #define SPRN_TIDR 144 /* Thread ID register */ #define SPRN_CTRLF 0x088 diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index dbefe30d4daa..daf809a9b88e 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -568,6 +568,7 @@ int main(void) OFFSET(VCPU_TFHAR, kvm_vcpu, arch.tfhar); OFFSET(VCPU_TFIAR, kvm_vcpu, arch.tfiar); OFFSET(VCPU_TEXASR, kvm_vcpu, arch.texasr); + OFFSET(VCPU_ORIG_TEXASR, kvm_vcpu, arch.orig_texasr); OFFSET(VCPU_GPR_TM, kvm_vcpu, arch.gpr_tm); OFFSET(VCPU_FPRS_TM, kvm_vcpu, arch.fp_tm.fpr); OFFSET(VCPU_VRS_TM, kvm_vcpu, arch.vr_tm.vr); @@ -650,6 +651,7 @@ int main(void) HSTATE_FIELD(HSTATE_HOST_IPI, host_ipi); HSTATE_FIELD(HSTATE_PTID, ptid); HSTATE_FIELD(HSTATE_TID, tid); + HSTATE_FIELD(HSTATE_FAKE_SUSPEND, fake_suspend); HSTATE_FIELD(HSTATE_MMCR0, host_mmcr[0]); HSTATE_FIELD(HSTATE_MMCR1, host_mmcr[1]); HSTATE_FIELD(HSTATE_MMCRA, host_mmcr[2]); diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 68052eacb827..b3de017bcd71 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -569,7 +569,6 @@ static struct cpu_spec __initdata cpu_specs[] = { .oprofile_type = PPC_OPROFILE_INVALID, .cpu_setup = __setup_cpu_power9, .cpu_restore = __restore_cpu_power9, - .flush_tlb = __flush_tlb_power9, .machine_check_early = __machine_check_early_realmode_p9, .platform = "power9", }, diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 243d072a225a..9df9e0a40250 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -1273,7 +1273,7 @@ EXC_REAL_BEGIN(denorm_exception_hv, 0x1500, 0x100) bne+ denorm_assist #endif - KVMTEST_PR(0x1500) + KVMTEST_HV(0x1500) EXCEPTION_PROLOG_PSERIES_1(denorm_common, EXC_HV) 
EXC_REAL_END(denorm_exception_hv, 0x1500, 0x100) @@ -1285,7 +1285,7 @@ EXC_VIRT_END(denorm_exception, 0x5500, 0x100) EXC_VIRT_NONE(0x5500, 0x100) #endif -TRAMP_KVM_SKIP(PACA_EXGEN, 0x1500) +TRAMP_KVM_HV(PACA_EXGEN, 0x1500) #ifdef CONFIG_PPC_DENORMALISATION TRAMP_REAL_BEGIN(denorm_assist) diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile index 85ba80de7133..4b19da8c87ae 100644 --- a/arch/powerpc/kvm/Makefile +++ b/arch/powerpc/kvm/Makefile @@ -74,9 +74,15 @@ kvm-hv-y += \ book3s_64_mmu_hv.o \ book3s_64_mmu_radix.o +kvm-hv-$(CONFIG_PPC_TRANSACTIONAL_MEM) += \ + book3s_hv_tm.o + kvm-book3s_64-builtin-xics-objs-$(CONFIG_KVM_XICS) := \ book3s_hv_rm_xics.o book3s_hv_rm_xive.o +kvm-book3s_64-builtin-tm-objs-$(CONFIG_PPC_TRANSACTIONAL_MEM) += \ + book3s_hv_tm_builtin.o + ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \ book3s_hv_hmi.o \ @@ -84,6 +90,7 @@ kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \ book3s_hv_rm_mmu.o \ book3s_hv_ras.o \ book3s_hv_builtin.o \ + $(kvm-book3s_64-builtin-tm-objs-y) \ $(kvm-book3s_64-builtin-xics-objs-y) endif diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 89707354c2ef..a043bde4952c 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -1206,6 +1206,19 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, r = RESUME_GUEST; } break; + +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + case BOOK3S_INTERRUPT_HV_SOFTPATCH: + /* + * This occurs for various TM-related instructions that + * we need to emulate on POWER9 DD2.2. We have already + * handled the cases where the guest was in real-suspend + * mode and was transitioning to transactional state. + */ + r = kvmhv_p9_tm_emulation(vcpu); + break; +#endif + case BOOK3S_INTERRUPT_HV_RM_HARD: r = RESUME_PASSTHROUGH; break; @@ -1978,7 +1991,9 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm, * turn off the HFSCR bit, which causes those instructions to trap. */ vcpu->arch.hfscr = mfspr(SPRN_HFSCR); - if (!cpu_has_feature(CPU_FTR_TM)) + if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) + vcpu->arch.hfscr |= HFSCR_TM; + else if (!cpu_has_feature(CPU_FTR_TM_COMP)) vcpu->arch.hfscr &= ~HFSCR_TM; if (cpu_has_feature(CPU_FTR_ARCH_300)) vcpu->arch.hfscr &= ~HFSCR_MSGP; @@ -2242,6 +2257,7 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc) tpaca = &paca[cpu]; tpaca->kvm_hstate.kvm_vcpu = vcpu; tpaca->kvm_hstate.ptid = cpu - vc->pcpu; + tpaca->kvm_hstate.fake_suspend = 0; /* Order stores to hstate.kvm_vcpu etc. before store to kvm_vcore */ smp_wmb(); tpaca->kvm_hstate.kvm_vcore = vc; diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index f31f357b8c5a..5af617459244 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -787,12 +787,18 @@ BEGIN_FTR_SECTION END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) #ifdef CONFIG_PPC_TRANSACTIONAL_MEM +/* + * Branch around the call if both CPU_FTR_TM and + * CPU_FTR_P9_TM_HV_ASSIST are off. 
+ */ BEGIN_FTR_SECTION + b 91f +END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) /* * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR */ bl kvmppc_restore_tm -END_FTR_SECTION_IFSET(CPU_FTR_TM) +91: #endif /* Load guest PMU registers */ @@ -915,11 +921,14 @@ BEGIN_FTR_SECTION mtspr SPRN_ACOP, r6 mtspr SPRN_CSIGR, r7 mtspr SPRN_TACR, r8 + nop FTR_SECTION_ELSE /* POWER9-only registers */ ld r5, VCPU_TID(r4) ld r6, VCPU_PSSCR(r4) + lbz r8, HSTATE_FAKE_SUSPEND(r13) oris r6, r6, PSSCR_EC@h /* This makes stop trap to HV */ + rldimi r6, r8, PSSCR_FAKE_SUSPEND_LG, 63 - PSSCR_FAKE_SUSPEND_LG ld r7, VCPU_HFSCR(r4) mtspr SPRN_TIDR, r5 mtspr SPRN_PSSCR, r6 @@ -1370,6 +1379,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) std r3, VCPU_CTR(r9) std r4, VCPU_XER(r9) +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + /* For softpatch interrupt, go off and do TM instruction emulation */ + cmpwi r12, BOOK3S_INTERRUPT_HV_SOFTPATCH + beq kvmppc_tm_emul +#endif + /* If this is a page table miss then see if it's theirs or ours */ cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE beq kvmppc_hdsi @@ -1729,12 +1744,18 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) bl kvmppc_save_fp #ifdef CONFIG_PPC_TRANSACTIONAL_MEM +/* + * Branch around the call if both CPU_FTR_TM and + * CPU_FTR_P9_TM_HV_ASSIST are off. + */ BEGIN_FTR_SECTION + b 91f +END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) /* * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR */ bl kvmppc_save_tm -END_FTR_SECTION_IFSET(CPU_FTR_TM) +91: #endif /* Increment yield count if they have a VPA */ @@ -2054,6 +2075,42 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX) mtlr r0 blr +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM +/* + * Softpatch interrupt for transactional memory emulation cases + * on POWER9 DD2.2. This is early in the guest exit path - we + * haven't saved registers or done a treclaim yet. + */ +kvmppc_tm_emul: + /* Save instruction image in HEIR */ + mfspr r3, SPRN_HEIR + stw r3, VCPU_HEIR(r9) + + /* + * The cases we want to handle here are those where the guest + * is in real suspend mode and is trying to transition to + * transactional mode. + */ + lbz r0, HSTATE_FAKE_SUSPEND(r13) + cmpwi r0, 0 /* keep exiting guest if in fake suspend */ + bne guest_exit_cont + rldicl r3, r11, 64 - MSR_TS_S_LG, 62 + cmpwi r3, 1 /* or if not in suspend state */ + bne guest_exit_cont + + /* Call C code to do the emulation */ + mr r3, r9 + bl kvmhv_p9_tm_emulation_early + nop + ld r9, HSTATE_KVM_VCPU(r13) + li r12, BOOK3S_INTERRUPT_HV_SOFTPATCH + cmpwi r3, 0 + beq guest_exit_cont /* continue exiting if not handled */ + ld r10, VCPU_PC(r9) + ld r11, VCPU_MSR(r9) + b fast_interrupt_c_return /* go back to guest if handled */ +#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ + /* * Check whether an HDSI is an HPTE not found fault or something else. * If it is an HPTE not found fault that is due to the guest accessing @@ -2587,13 +2644,19 @@ _GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */ bl kvmppc_save_fp #ifdef CONFIG_PPC_TRANSACTIONAL_MEM +/* + * Branch around the call if both CPU_FTR_TM and + * CPU_FTR_P9_TM_HV_ASSIST are off. 
+ */ BEGIN_FTR_SECTION + b 91f +END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) /* * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR */ ld r9, HSTATE_KVM_VCPU(r13) bl kvmppc_save_tm -END_FTR_SECTION_IFSET(CPU_FTR_TM) +91: #endif /* @@ -2700,12 +2763,18 @@ kvm_end_cede: #endif #ifdef CONFIG_PPC_TRANSACTIONAL_MEM +/* + * Branch around the call if both CPU_FTR_TM and + * CPU_FTR_P9_TM_HV_ASSIST are off. + */ BEGIN_FTR_SECTION + b 91f +END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) /* * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR */ bl kvmppc_restore_tm -END_FTR_SECTION_IFSET(CPU_FTR_TM) +91: #endif /* load up FP state */ @@ -3046,6 +3115,15 @@ kvmppc_save_tm: std r1, HSTATE_HOST_R1(r13) li r3, TM_CAUSE_KVM_RESCHED +BEGIN_FTR_SECTION + /* Emulation of the treclaim instruction needs TEXASR before treclaim */ + mfspr r6, SPRN_TEXASR + std r6, VCPU_ORIG_TEXASR(r9) + + rldicl. r8, r8, 64 - MSR_TS_S_LG, 62 + beq 3f +END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST) + /* Clear the MSR RI since r1, r13 are all going to be foobar. */ li r5, 0 mtmsrd r5, 1 @@ -3057,6 +3135,38 @@ kvmppc_save_tm: SET_SCRATCH0(r13) GET_PACA(r13) std r9, PACATMSCRATCH(r13) + + /* If doing TM emulation on POWER9 DD2.2, check for fake suspend mode */ +BEGIN_FTR_SECTION +3: + lbz r9, HSTATE_FAKE_SUSPEND(r13) + cmpwi r9, 0 + beq 2f + /* + * We were in fake suspend, so we are not going to save the + * register state as the guest checkpointed state (since + * we already have it), therefore we can now use any volatile GPR. + */ + /* Reload stack pointer and TOC. */ + ld r1, HSTATE_HOST_R1(r13) + ld r2, PACATOC(r13) + li r5, MSR_RI + mtmsrd r5, 1 + HMT_MEDIUM + ld r6, HSTATE_DSCR(r13) + mtspr SPRN_DSCR, r6 + li r0, 0 + stb r0, HSTATE_FAKE_SUSPEND(r13) + mfspr r3, SPRN_PSSCR + /* PSSCR_FAKE_SUSPEND is a write-only bit, but clear it anyway */ + li r0, PSSCR_FAKE_SUSPEND + andc r3, r3, r0 + mtspr SPRN_PSSCR, r3 + ld r9, HSTATE_KVM_VCPU(r13) + b 1f +2: +END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST) + ld r9, HSTATE_KVM_VCPU(r13) /* Get a few more GPRs free. */ @@ -3181,6 +3291,15 @@ kvmppc_restore_tm: oris r7, r7, (TEXASR_FS)@h mtspr SPRN_TEXASR, r7 + /* + * If we are doing TM emulation for the guest on a POWER9 DD2, + * then we don't actually do a trechkpt -- we either set up + * fake-suspend mode, or emulate a TM rollback. + */ +BEGIN_FTR_SECTION + b .Ldo_tm_fake_load +END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST) + /* * We need to load up the checkpointed state for the guest. * We need to do this early as it will blow away any GPRs, VSRs and @@ -3253,10 +3372,24 @@ kvmppc_restore_tm: /* Set the MSR RI since we have our registers back. */ li r5, MSR_RI mtmsrd r5, 1 - +9: ld r0, PPC_LR_STKOFF(r1) mtlr r0 blr + +.Ldo_tm_fake_load: + cmpwi r5, 1 /* check for suspended state */ + bgt 10f + stb r5, HSTATE_FAKE_SUSPEND(r13) + b 9b /* and return */ +10: stdu r1, -PPC_MIN_STKFRM(r1) + /* guest is in transactional state, so simulate rollback */ + mr r3, r4 + bl kvmhv_emulate_tm_rollback + nop + ld r4, HSTATE_KVM_VCPU(r13) /* our vcpu pointer has been trashed */ + addi r1, r1, PPC_MIN_STKFRM + b 9b #endif /* diff --git a/arch/powerpc/kvm/book3s_hv_tm.c b/arch/powerpc/kvm/book3s_hv_tm.c new file mode 100644 index 000000000000..bf710ad3a6d7 --- /dev/null +++ b/arch/powerpc/kvm/book3s_hv_tm.c @@ -0,0 +1,216 @@ +/* + * Copyright 2017 Paul Mackerras, IBM Corp. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + */ + +#include + +#include +#include +#include +#include +#include + +static void emulate_tx_failure(struct kvm_vcpu *vcpu, u64 failure_cause) +{ + u64 texasr, tfiar; + u64 msr = vcpu->arch.shregs.msr; + + tfiar = vcpu->arch.pc & ~0x3ull; + texasr = (failure_cause << 56) | TEXASR_ABORT | TEXASR_FS | TEXASR_EXACT; + if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr)) + texasr |= TEXASR_SUSP; + if (msr & MSR_PR) { + texasr |= TEXASR_PR; + tfiar |= 1; + } + vcpu->arch.tfiar = tfiar; + /* Preserve ROT and TL fields of existing TEXASR */ + vcpu->arch.texasr = (vcpu->arch.texasr & 0x3ffffff) | texasr; +} + +/* + * This gets called on a softpatch interrupt on POWER9 DD2.2 processors. + * We expect to find a TM-related instruction to be emulated. The + * instruction image is in vcpu->arch.emul_inst. If the guest was in + * TM suspended or transactional state, the checkpointed state has been + * reclaimed and is in the vcpu struct. The CPU is in virtual mode in + * host context. + */ +int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu) +{ + u32 instr = vcpu->arch.emul_inst; + u64 msr = vcpu->arch.shregs.msr; + u64 newmsr, bescr; + int ra, rs; + + switch (instr & 0xfc0007ff) { + case PPC_INST_RFID: + /* XXX do we need to check for PR=0 here? */ + newmsr = vcpu->arch.shregs.srr1; + /* should only get here for Sx -> T1 transition */ + WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) && + MSR_TM_TRANSACTIONAL(newmsr) && + (newmsr & MSR_TM))); + newmsr = sanitize_msr(newmsr); + vcpu->arch.shregs.msr = newmsr; + vcpu->arch.cfar = vcpu->arch.pc - 4; + vcpu->arch.pc = vcpu->arch.shregs.srr0; + return RESUME_GUEST; + + case PPC_INST_RFEBB: + if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) { + /* generate an illegal instruction interrupt */ + kvmppc_core_queue_program(vcpu, SRR1_PROGILL); + return RESUME_GUEST; + } + /* check EBB facility is available */ + if (!(vcpu->arch.hfscr & HFSCR_EBB)) { + /* generate an illegal instruction interrupt */ + kvmppc_core_queue_program(vcpu, SRR1_PROGILL); + return RESUME_GUEST; + } + if ((msr & MSR_PR) && !(vcpu->arch.fscr & FSCR_EBB)) { + /* generate a facility unavailable interrupt */ + vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) | + ((u64)FSCR_EBB_LG << 56); + kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL); + return RESUME_GUEST; + } + bescr = vcpu->arch.bescr; + /* expect to see a S->T transition requested */ + WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) && + ((bescr >> 30) & 3) == 2)); + bescr &= ~BESCR_GE; + if (instr & (1 << 11)) + bescr |= BESCR_GE; + vcpu->arch.bescr = bescr; + msr = (msr & ~MSR_TS_MASK) | MSR_TS_T; + vcpu->arch.shregs.msr = msr; + vcpu->arch.cfar = vcpu->arch.pc - 4; + vcpu->arch.pc = vcpu->arch.ebbrr; + return RESUME_GUEST; + + case PPC_INST_MTMSRD: + /* XXX do we need to check for PR=0 here? 
*/ + rs = (instr >> 21) & 0x1f; + newmsr = kvmppc_get_gpr(vcpu, rs); + /* check this is a Sx -> T1 transition */ + WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) && + MSR_TM_TRANSACTIONAL(newmsr) && + (newmsr & MSR_TM))); + /* mtmsrd doesn't change LE */ + newmsr = (newmsr & ~MSR_LE) | (msr & MSR_LE); + newmsr = sanitize_msr(newmsr); + vcpu->arch.shregs.msr = newmsr; + return RESUME_GUEST; + + case PPC_INST_TSR: + /* check for PR=1 and arch 2.06 bit set in PCR */ + if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) { + /* generate an illegal instruction interrupt */ + kvmppc_core_queue_program(vcpu, SRR1_PROGILL); + return RESUME_GUEST; + } + /* check for TM disabled in the HFSCR or MSR */ + if (!(vcpu->arch.hfscr & HFSCR_TM)) { + /* generate an illegal instruction interrupt */ + kvmppc_core_queue_program(vcpu, SRR1_PROGILL); + return RESUME_GUEST; + } + if (!(msr & MSR_TM)) { + /* generate a facility unavailable interrupt */ + vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) | + ((u64)FSCR_TM_LG << 56); + kvmppc_book3s_queue_irqprio(vcpu, + BOOK3S_INTERRUPT_FAC_UNAVAIL); + return RESUME_GUEST; + } + /* Set CR0 to indicate previous transactional state */ + vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | + (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28); + /* L=1 => tresume, L=0 => tsuspend */ + if (instr & (1 << 21)) { + if (MSR_TM_SUSPENDED(msr)) + msr = (msr & ~MSR_TS_MASK) | MSR_TS_T; + } else { + if (MSR_TM_TRANSACTIONAL(msr)) + msr = (msr & ~MSR_TS_MASK) | MSR_TS_S; + } + vcpu->arch.shregs.msr = msr; + return RESUME_GUEST; + + case PPC_INST_TRECLAIM: + /* check for TM disabled in the HFSCR or MSR */ + if (!(vcpu->arch.hfscr & HFSCR_TM)) { + /* generate an illegal instruction interrupt */ + kvmppc_core_queue_program(vcpu, SRR1_PROGILL); + return RESUME_GUEST; + } + if (!(msr & MSR_TM)) { + /* generate a facility unavailable interrupt */ + vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) | + ((u64)FSCR_TM_LG << 56); + kvmppc_book3s_queue_irqprio(vcpu, + BOOK3S_INTERRUPT_FAC_UNAVAIL); + return RESUME_GUEST; + } + /* If no transaction active, generate TM bad thing */ + if (!MSR_TM_ACTIVE(msr)) { + kvmppc_core_queue_program(vcpu, SRR1_PROGTM); + return RESUME_GUEST; + } + /* If failure was not previously recorded, recompute TEXASR */ + if (!(vcpu->arch.orig_texasr & TEXASR_FS)) { + ra = (instr >> 16) & 0x1f; + if (ra) + ra = kvmppc_get_gpr(vcpu, ra) & 0xff; + emulate_tx_failure(vcpu, ra); + } + + copy_from_checkpoint(vcpu); + + /* Set CR0 to indicate previous transactional state */ + vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | + (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28); + vcpu->arch.shregs.msr &= ~MSR_TS_MASK; + return RESUME_GUEST; + + case PPC_INST_TRECHKPT: + /* XXX do we need to check for PR=0 here? 
*/ + /* check for TM disabled in the HFSCR or MSR */ + if (!(vcpu->arch.hfscr & HFSCR_TM)) { + /* generate an illegal instruction interrupt */ + kvmppc_core_queue_program(vcpu, SRR1_PROGILL); + return RESUME_GUEST; + } + if (!(msr & MSR_TM)) { + /* generate a facility unavailable interrupt */ + vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) | + ((u64)FSCR_TM_LG << 56); + kvmppc_book3s_queue_irqprio(vcpu, + BOOK3S_INTERRUPT_FAC_UNAVAIL); + return RESUME_GUEST; + } + /* If transaction active or TEXASR[FS] = 0, bad thing */ + if (MSR_TM_ACTIVE(msr) || !(vcpu->arch.texasr & TEXASR_FS)) { + kvmppc_core_queue_program(vcpu, SRR1_PROGTM); + return RESUME_GUEST; + } + + copy_to_checkpoint(vcpu); + + /* Set CR0 to indicate previous transactional state */ + vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | + (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28); + vcpu->arch.shregs.msr = msr | MSR_TS_S; + return RESUME_GUEST; + } + + /* What should we do here? We didn't recognize the instruction */ + WARN_ON_ONCE(1); + return RESUME_GUEST; +} diff --git a/arch/powerpc/kvm/book3s_hv_tm_builtin.c b/arch/powerpc/kvm/book3s_hv_tm_builtin.c new file mode 100644 index 000000000000..d98ccfd2b88c --- /dev/null +++ b/arch/powerpc/kvm/book3s_hv_tm_builtin.c @@ -0,0 +1,109 @@ +/* + * Copyright 2017 Paul Mackerras, IBM Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + */ + +#include + +#include +#include +#include +#include +#include + +/* + * This handles the cases where the guest is in real suspend mode + * and we want to get back to the guest without dooming the transaction. + * The caller has checked that the guest is in real-suspend mode + * (MSR[TS] = S and the fake-suspend flag is not set). + */ +int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu) +{ + u32 instr = vcpu->arch.emul_inst; + u64 newmsr, msr, bescr; + int rs; + + switch (instr & 0xfc0007ff) { + case PPC_INST_RFID: + /* XXX do we need to check for PR=0 here? */ + newmsr = vcpu->arch.shregs.srr1; + /* should only get here for Sx -> T1 transition */ + if (!(MSR_TM_TRANSACTIONAL(newmsr) && (newmsr & MSR_TM))) + return 0; + newmsr = sanitize_msr(newmsr); + vcpu->arch.shregs.msr = newmsr; + vcpu->arch.cfar = vcpu->arch.pc - 4; + vcpu->arch.pc = vcpu->arch.shregs.srr0; + return 1; + + case PPC_INST_RFEBB: + /* check for PR=1 and arch 2.06 bit set in PCR */ + msr = vcpu->arch.shregs.msr; + if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) + return 0; + /* check EBB facility is available */ + if (!(vcpu->arch.hfscr & HFSCR_EBB) || + ((msr & MSR_PR) && !(mfspr(SPRN_FSCR) & FSCR_EBB))) + return 0; + bescr = mfspr(SPRN_BESCR); + /* expect to see a S->T transition requested */ + if (((bescr >> 30) & 3) != 2) + return 0; + bescr &= ~BESCR_GE; + if (instr & (1 << 11)) + bescr |= BESCR_GE; + mtspr(SPRN_BESCR, bescr); + msr = (msr & ~MSR_TS_MASK) | MSR_TS_T; + vcpu->arch.shregs.msr = msr; + vcpu->arch.cfar = vcpu->arch.pc - 4; + vcpu->arch.pc = mfspr(SPRN_EBBRR); + return 1; + + case PPC_INST_MTMSRD: + /* XXX do we need to check for PR=0 here? 
*/ + rs = (instr >> 21) & 0x1f; + newmsr = kvmppc_get_gpr(vcpu, rs); + msr = vcpu->arch.shregs.msr; + /* check this is a Sx -> T1 transition */ + if (!(MSR_TM_TRANSACTIONAL(newmsr) && (newmsr & MSR_TM))) + return 0; + /* mtmsrd doesn't change LE */ + newmsr = (newmsr & ~MSR_LE) | (msr & MSR_LE); + newmsr = sanitize_msr(newmsr); + vcpu->arch.shregs.msr = newmsr; + return 1; + + case PPC_INST_TSR: + /* we know the MSR has the TS field = S (0b01) here */ + msr = vcpu->arch.shregs.msr; + /* check for PR=1 and arch 2.06 bit set in PCR */ + if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) + return 0; + /* check for TM disabled in the HFSCR or MSR */ + if (!(vcpu->arch.hfscr & HFSCR_TM) || !(msr & MSR_TM)) + return 0; + /* L=1 => tresume => set TS to T (0b10) */ + if (instr & (1 << 21)) + vcpu->arch.shregs.msr = (msr & ~MSR_TS_MASK) | MSR_TS_T; + /* Set CR0 to 0b0010 */ + vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 0x20000000; + return 1; + } + + return 0; +} + +/* + * This is called when we are returning to a guest in TM transactional + * state. We roll the guest state back to the checkpointed state. + */ +void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu) +{ + vcpu->arch.shregs.msr &= ~MSR_TS_MASK; /* go to N state */ + vcpu->arch.pc = vcpu->arch.tfhar; + copy_from_checkpoint(vcpu); + vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 0xa0000000; +} diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 403e642c78f5..677b98e6650f 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -646,10 +646,13 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = hv_enabled; break; #endif +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM case KVM_CAP_PPC_HTM: r = hv_enabled && - (cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM_COMP); + (!!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) || + cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)); break; +#endif default: r = 0; break; -- cgit v1.2.3 From 87a11bb6a7f7d131bd2112f210eff780aa25b04c Mon Sep 17 00:00:00 2001 From: Suraj Jitindar Singh Date: Wed, 21 Mar 2018 21:32:02 +1100 Subject: KVM: PPC: Book3S HV: Work around XER[SO] bug in fake suspend mode This works around a hardware bug in "Nimbus" POWER9 DD2.2 processors, where a treclaim performed in fake suspend mode can cause subsequent reads from the XER register to return inconsistent values for the SO (summary overflow) bit. The inconsistent SO bit state can potentially be observed on any thread in the core. We have to do the treclaim because that is the only way to get the thread out of suspend state (fake or real) and into non-transactional state. The workaround for the bug is to force the core into SMT4 mode before doing the treclaim. This patch adds the code to do that, conditional on the CPU_FTR_P9_TM_XER_SO_BUG feature bit. Signed-off-by: Suraj Jitindar Singh Signed-off-by: Paul Mackerras Signed-off-by: Michael Ellerman --- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 5af617459244..11396c0fee96 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -3101,6 +3101,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) kvmppc_save_tm: mflr r0 std r0, PPC_LR_STKOFF(r1) + stdu r1, -PPC_MIN_STKFRM(r1) /* Turn on TM. */ mfmsr r8 @@ -3120,8 +3121,16 @@ BEGIN_FTR_SECTION mfspr r6, SPRN_TEXASR std r6, VCPU_ORIG_TEXASR(r9) - rldicl. 
r8, r8, 64 - MSR_TS_S_LG, 62 + lbz r0, HSTATE_FAKE_SUSPEND(r13) /* Were we fake suspended? */ + cmpwi r0, 0 beq 3f + rldicl. r8, r8, 64 - MSR_TS_S_LG, 62 /* Did we actually hrfid? */ + beq 4f +BEGIN_FTR_SECTION_NESTED(96) + bl pnv_power9_force_smt4_catch +END_FTR_SECTION_NESTED(CPU_FTR_P9_TM_XER_SO_BUG, CPU_FTR_P9_TM_XER_SO_BUG, 96) + nop +3: END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST) /* Clear the MSR RI since r1, r13 are all going to be foobar. */ @@ -3138,7 +3147,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST) /* If doing TM emulation on POWER9 DD2.2, check for fake suspend mode */ BEGIN_FTR_SECTION -3: lbz r9, HSTATE_FAKE_SUSPEND(r13) cmpwi r9, 0 beq 2f @@ -3150,13 +3158,18 @@ BEGIN_FTR_SECTION /* Reload stack pointer and TOC. */ ld r1, HSTATE_HOST_R1(r13) ld r2, PACATOC(r13) + /* Set MSR RI now we have r1 and r13 back. */ li r5, MSR_RI mtmsrd r5, 1 HMT_MEDIUM ld r6, HSTATE_DSCR(r13) mtspr SPRN_DSCR, r6 - li r0, 0 - stb r0, HSTATE_FAKE_SUSPEND(r13) +BEGIN_FTR_SECTION_NESTED(96) + bl pnv_power9_force_smt4_release +END_FTR_SECTION_NESTED(CPU_FTR_P9_TM_XER_SO_BUG, CPU_FTR_P9_TM_XER_SO_BUG, 96) + nop + +4: mfspr r3, SPRN_PSSCR /* PSSCR_FAKE_SUSPEND is a write-only bit, but clear it anyway */ li r0, PSSCR_FAKE_SUSPEND @@ -3244,6 +3257,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST) std r6, VCPU_TFIAR(r9) std r7, VCPU_TEXASR(r9) + addi r1, r1, PPC_MIN_STKFRM ld r0, PPC_LR_STKOFF(r1) mtlr r0 blr @@ -3278,6 +3292,8 @@ kvmppc_restore_tm: mtspr SPRN_TFIAR, r6 mtspr SPRN_TEXASR, r7 + li r0, 0 + stb r0, HSTATE_FAKE_SUSPEND(r13) ld r5, VCPU_MSR(r4) rldicl. r5, r5, 64 - MSR_TS_S_LG, 62 beqlr /* TM not active in guest */ -- cgit v1.2.3 From 681c617b7c42fce0798c2b0b472f270f28c82d56 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Wed, 21 Mar 2018 21:32:03 +1100 Subject: KVM: PPC: Book3S HV: Work around TEXASR bug in fake suspend state This works around a hardware bug in "Nimbus" POWER9 DD2.2 processors, where the contents of the TEXASR can get corrupted while a thread is in fake suspend state. The workaround is for the instruction emulation code to use the value saved at the most recent guest exit in real suspend mode. We achieve this by simply not saving the TEXASR into the vcpu struct on an exit in fake suspend state. We also have to take care to set the orig_texasr field only on guest exit in real suspend state. This also means that on guest entry in fake suspend state, TEXASR will be restored to the value it had on the last exit in real suspend state, effectively counteracting any hardware-caused corruption. This works because TEXASR may not be written in suspend state. With this, the guest might see the wrong values in TEXASR if it reads it while in suspend state, but will see the correct value in non-transactional state (e.g. after a treclaim), and treclaim will work correctly. With this workaround, the code will actually run slightly faster, and will operate correctly on systems without the TEXASR bug (since TEXASR may not be written in suspend state, and is only changed by failure recording, which will have already been done before we get into fake suspend state). Therefore these changes are not made subject to a CPU feature bit. 
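In C-like terms, the TEXASR handling described above reduces to the sketch below. This is an illustration only: the real logic lives in the kvmppc_save_tm assembly, and the structure and helper names here are invented stand-ins.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

struct vcpu_tm {
	uint64_t texasr;	/* value restored to the guest on entry */
	uint64_t orig_texasr;	/* snapshot used by treclaim emulation */
};

/* Called on guest exit with the TEXASR value read from hardware. */
static void save_tm_texasr(struct vcpu_tm *v, uint64_t hw_texasr,
			   bool fake_suspend)
{
	if (fake_suspend)
		return;	/* hw value may be corrupt; keep the last good one */

	v->orig_texasr = hw_texasr;	/* exit was in real suspend state */
	v->texasr = hw_texasr;
}

int main(void)
{
	struct vcpu_tm v = { .texasr = 0x111, .orig_texasr = 0x111 };

	save_tm_texasr(&v, 0xbad, true);	/* fake-suspend exit: ignored */
	printf("texasr=%#llx\n", (unsigned long long)v.texasr);	/* still 0x111 */
	return 0;
}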
Signed-off-by: Paul Mackerras Signed-off-by: Michael Ellerman --- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 11396c0fee96..736809fba912 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -3117,10 +3117,6 @@ kvmppc_save_tm: li r3, TM_CAUSE_KVM_RESCHED BEGIN_FTR_SECTION - /* Emulation of the treclaim instruction needs TEXASR before treclaim */ - mfspr r6, SPRN_TEXASR - std r6, VCPU_ORIG_TEXASR(r9) - lbz r0, HSTATE_FAKE_SUSPEND(r13) /* Were we fake suspended? */ cmpwi r0, 0 beq 3f @@ -3130,7 +3126,12 @@ BEGIN_FTR_SECTION_NESTED(96) bl pnv_power9_force_smt4_catch END_FTR_SECTION_NESTED(CPU_FTR_P9_TM_XER_SO_BUG, CPU_FTR_P9_TM_XER_SO_BUG, 96) nop + b 6f 3: + /* Emulation of the treclaim instruction needs TEXASR before treclaim */ + mfspr r6, SPRN_TEXASR + std r6, VCPU_ORIG_TEXASR(r9) +6: END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST) /* Clear the MSR RI since r1, r13 are all going to be foobar. */ @@ -3176,7 +3177,8 @@ END_FTR_SECTION_NESTED(CPU_FTR_P9_TM_XER_SO_BUG, CPU_FTR_P9_TM_XER_SO_BUG, 96) andc r3, r3, r0 mtspr SPRN_PSSCR, r3 ld r9, HSTATE_KVM_VCPU(r13) - b 1f + /* Don't save TEXASR, use value from last exit in real suspend state */ + b 11f 2: END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST) @@ -3250,12 +3252,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST) * change these outside of a transaction, so they must always be * context switched. */ + mfspr r7, SPRN_TEXASR + std r7, VCPU_TEXASR(r9) +11: mfspr r5, SPRN_TFHAR mfspr r6, SPRN_TFIAR - mfspr r7, SPRN_TEXASR std r5, VCPU_TFHAR(r9) std r6, VCPU_TFIAR(r9) - std r7, VCPU_TEXASR(r9) addi r1, r1, PPC_MIN_STKFRM ld r0, PPC_LR_STKOFF(r1) -- cgit v1.2.3 From dbfcf3cb9c681aa0c5d0bb46068f98d5b1823dd3 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Thu, 16 Feb 2017 16:03:39 +1100 Subject: powerpc/64: Call H_REGISTER_PROC_TBL when running as a HPT guest on POWER9 On POWER9, since commit cc3d2940133d ("powerpc/64: Enable use of radix MMU under hypervisor on POWER9", 2017-01-30), we set both the radix and HPT bits in the client-architecture-support (CAS) vector, which tells the hypervisor that we can do either radix or HPT. According to PAPR, if we use this combination we are promising to do a H_REGISTER_PROC_TBL hcall later on to let the hypervisor know whether we are doing radix or HPT. We currently do this call if we are doing radix but not if we are doing HPT. If the hypervisor is able to support both radix and HPT guests, it would be entitled to defer allocation of the HPT until the H_REGISTER_PROC_TBL call, and to fail any attempts to create HPTEs until the H_REGISTER_PROC_TBL call. Thus we need to do a H_REGISTER_PROC_TBL call when we are doing HPT; otherwise we may crash at boot time. This adds the code to call H_REGISTER_PROC_TBL in this case, before we attempt to create any HPT entries using H_ENTER. 
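For illustration, the flag selection this change introduces reduces to the standalone sketch below; the TBL_* values are invented stand-ins for the real PROC_TABLE_* constants in asm/hvcall.h.

#include <stdio.h>
#include <stdbool.h>

enum {	/* stand-ins, not the real PROC_TABLE_* values */
	TBL_NEW     = 1 << 0,	/* registering a newly allocated table */
	TBL_RADIX   = 1 << 1,
	TBL_GTSE    = 1 << 2,	/* guest translation shootdown enable */
	TBL_HPT_SLB = 1 << 3,	/* HPT with software-managed SLB */
};

static unsigned int proc_tbl_flags(bool radix, unsigned long table_size)
{
	unsigned int flags = 0;

	if (table_size)
		flags |= TBL_NEW;
	if (radix)
		flags |= TBL_RADIX | TBL_GTSE;
	else
		flags |= TBL_HPT_SLB;
	return flags;
}

int main(void)
{
	/* The HPT boot path passes (0, 0, 0), so no TBL_NEW is set: the
	 * hypervisor keeps the HPT it has already allocated. */
	printf("hpt flags:   %#x\n", proc_tbl_flags(false, 0));
	printf("radix flags: %#x\n", proc_tbl_flags(true, 1UL << 12));
	return 0;
}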
Fixes: cc3d2940133d ("powerpc/64: Enable use of radix MMU under hypervisor on POWER9") Cc: stable@vger.kernel.org # v4.11+ Signed-off-by: Paul Mackerras Reviewed-by: Suraj Jitindar Singh Signed-off-by: Michael Ellerman --- arch/powerpc/mm/hash_utils_64.c | 6 ++++++ arch/powerpc/platforms/pseries/lpar.c | 8 ++++++-- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index b578148d89e6..17fc13cab8dc 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -875,6 +875,12 @@ static void __init htab_initialize(void) /* Using a hypervisor which owns the htab */ htab_address = NULL; _SDR1 = 0; + /* + * On POWER9, we need to do a H_REGISTER_PROC_TBL hcall + * to inform the hypervisor that we wish to use the HPT. + */ + if (cpu_has_feature(CPU_FTR_ARCH_300)) + register_process_table(0, 0, 0); #ifdef CONFIG_FA_DUMP /* * If firmware assisted dump is active firmware preserves diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 0ee4a469a4ae..d11f3c14c21e 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -726,15 +726,18 @@ static int pseries_lpar_resize_hpt(unsigned long shift) return 0; } -/* Actually only used for radix, so far */ static int pseries_lpar_register_process_table(unsigned long base, unsigned long page_size, unsigned long table_size) { long rc; - unsigned long flags = PROC_TABLE_NEW; + unsigned long flags = 0; + if (table_size) + flags |= PROC_TABLE_NEW; if (radix_enabled()) flags |= PROC_TABLE_RADIX | PROC_TABLE_GTSE; + else + flags |= PROC_TABLE_HPT_SLB; for (;;) { rc = plpar_hcall_norets(H_REGISTER_PROC_TBL, flags, base, page_size, table_size); @@ -760,6 +763,7 @@ void __init hpte_init_pseries(void) mmu_hash_ops.flush_hash_range = pSeries_lpar_flush_hash_range; mmu_hash_ops.hpte_clear_all = pseries_hpte_clear_all; mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate; + register_process_table = pseries_lpar_register_process_table; if (firmware_has_feature(FW_FEATURE_HPT_RESIZE)) mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt; -- cgit v1.2.3 From e1ebd0e5b9d0a10ba65e63a3514b6da8c6a5a819 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 21 Mar 2018 17:10:24 +0530 Subject: powerpc/perf: Fix kernel address leak via sampling registers Current code in power_pmu_disable() does not clear the sampling registers like Sampling Instruction Address Register (SIAR) and Sampling Data Address Register (SDAR) after disabling the PMU. Since these are userspace readable and could contain kernel addresses, add code to explicitly clear the content of these registers. Also add a "context synchronizing instruction" to enforce no further updates to these registers as suggested by Power ISA v3.0B. From section 9.4, on page 1108: "If an mtspr instruction is executed that changes the value of a Performance Monitor register other than SIAR, SDAR, and SIER, the change is not guaranteed to have taken effect until after a subsequent context synchronizing instruction has been executed (see Chapter 11. "Synchronization Requirements for Context Alterations" on page 1133)." 
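The scrub itself is tiny; the standalone mock below shows its effect, with mtspr()/mfspr() emulated by an array, and the SPR numbers and flag value treated as assumptions for illustration.

#include <stdio.h>
#include <stdint.h>

static uint64_t sprs[1024];	/* mock SPR file */
static void mtspr(int spr, uint64_t v) { sprs[spr] = v; }
static uint64_t mfspr(int spr)         { return sprs[spr]; }

#define SPRN_SIAR 780		/* assumed SPR numbers */
#define SPRN_SDAR 781
#define PPMU_ARCH_207S 0x1	/* stand-in for the real flag value */

static void power_pmu_disable_tail(unsigned int ppmu_flags)
{
	/* After the mtspr/mb()/isync() sequence has frozen the counters,
	 * clear the user-readable sampling registers. */
	if (ppmu_flags & PPMU_ARCH_207S) {
		mtspr(SPRN_SDAR, 0);
		mtspr(SPRN_SIAR, 0);
	}
}

int main(void)
{
	mtspr(SPRN_SIAR, 0xc000000000001234ULL);	/* stale kernel address */
	power_pmu_disable_tail(PPMU_ARCH_207S);
	printf("SIAR=%#llx\n", (unsigned long long)mfspr(SPRN_SIAR));	/* 0 */
	return 0;
}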
Signed-off-by: Madhavan Srinivasan [mpe: Massage change log and add ISA reference] Signed-off-by: Michael Ellerman --- arch/powerpc/perf/core-book3s.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index f89bbd54ecec..39846226c702 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -1226,6 +1226,7 @@ static void power_pmu_disable(struct pmu *pmu) */ write_mmcr0(cpuhw, val); mb(); + isync(); /* * Disable instruction sampling if it was enabled @@ -1234,12 +1235,26 @@ static void power_pmu_disable(struct pmu *pmu) mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); mb(); + isync(); } cpuhw->disabled = 1; cpuhw->n_added = 0; ebb_switch_out(mmcr0); + +#ifdef CONFIG_PPC64 + /* + * These are readable by userspace, may contain kernel + * addresses and are not switched by context switch, so clear + * them now to avoid leaking anything to userspace in general + * including to another process. + */ + if (ppmu->flags & PPMU_ARCH_207S) { + mtspr(SPRN_SDAR, 0); + mtspr(SPRN_SIAR, 0); + } +#endif } local_irq_restore(flags); -- cgit v1.2.3
From bb19af816025d495376bd76bf6fbcf4244f9a06d Mon Sep 17 00:00:00 2001 From: Madhavan Srinivasan Date: Wed, 21 Mar 2018 17:10:25 +0530 Subject: powerpc/perf: Prevent kernel address leak to userspace via BHRB buffer The current Branch History Rolling Buffer (BHRB) code does not check for any privilege levels before updating the data from BHRB. This could leak kernel addresses to userspace even when profiling only with userspace privileges. Add proper checks to prevent it. Acked-by: Balbir Singh Signed-off-by: Madhavan Srinivasan Signed-off-by: Michael Ellerman --- arch/powerpc/perf/core-book3s.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 39846226c702..1e55ae2f2afd 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -457,6 +457,16 @@ static void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) /* invalid entry */ continue; + /* + * BHRB rolling buffer could very much contain the kernel + * addresses at this point. Check the privileges before + * exporting it to userspace (avoid exposure of regions + * where we could have speculative execution) + */ + if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN) && + is_kernel_addr(addr)) + continue; + /* Branches are read most recent first (ie. mfbhrb 0 is * the most recent branch). * There are two types of valid entries: -- cgit v1.2.3
From cd1231d7035fea894118d5155ff984cdaf1ac1a2 Mon Sep 17 00:00:00 2001 From: Madhavan Srinivasan Date: Wed, 21 Mar 2018 17:10:26 +0530 Subject: powerpc/perf: Prevent kernel address leak via perf_get_data_addr() Sampled Data Address Register (SDAR) is a 64-bit register that contains the effective address of the storage operand of an instruction that was being executed, possibly out-of-order, at or around the time that the Performance Monitor alert occurred. In certain scenarios the SDAR can contain a kernel address even for userspace-only sampling. Add checks to prevent it.
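The guard reduces to a few lines; below is a standalone sketch with the privilege test mocked and PAGE_OFFSET assumed to be the base of the ppc64 kernel linear map.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define PAGE_OFFSET 0xc000000000000000ULL	/* assumed ppc64 linear map base */

static bool is_kernel_addr(uint64_t addr) { return addr >= PAGE_OFFSET; }

/* Mirrors the check added to perf_get_data_addr(): paranoid setting on,
 * caller unprivileged, sampled address in kernel space => report 0. */
static uint64_t sanitized_sdar(uint64_t sdar, bool paranoid, bool admin)
{
	if (paranoid && !admin && is_kernel_addr(sdar))
		return 0;
	return sdar;
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)
	       sanitized_sdar(0xc000000000abcdefULL, true, false));	/* 0 */
	printf("%#llx\n", (unsigned long long)
	       sanitized_sdar(0x00007fffdeadbeefULL, true, false));	/* kept */
	return 0;
}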
Signed-off-by: Madhavan Srinivasan Signed-off-by: Michael Ellerman --- arch/powerpc/perf/core-book3s.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 1e55ae2f2afd..a00b364fb9d7 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -198,6 +198,10 @@ static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) if (!(mmcra & MMCRA_SAMPLE_ENABLE) || sdar_valid) *addrp = mfspr(SPRN_SDAR); + + if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN) && + is_kernel_addr(mfspr(SPRN_SDAR))) + *addrp = 0; } static bool regs_sihv(struct pt_regs *regs) -- cgit v1.2.3 From b58064da046243f0c988afd939997e9317dc6d48 Mon Sep 17 00:00:00 2001 From: Madhavan Srinivasan Date: Sun, 4 Mar 2018 17:26:26 +0530 Subject: powerpc/perf: Infrastructure to support addition of blacklisted events Introduce code to support addition of blacklisted events for a processor version. Blacklisted events are events that are known to not count correctly on that CPU revision, and so should be prevented from being counted so as to avoid user confusion. A 'pointer' and 'int' variable to hold the number of events are added to 'struct power_pmu', along with a generic function to loop through the list to validate the given event. Generic function 'is_event_blacklisted' is called in power_pmu_event_init() to detect and reject early. Signed-off-by: Madhavan Srinivasan Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/perf_event_server.h | 2 ++ arch/powerpc/perf/core-book3s.c | 21 +++++++++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h index 723bf48e7494..67a8a9585d50 100644 --- a/arch/powerpc/include/asm/perf_event_server.h +++ b/arch/powerpc/include/asm/perf_event_server.h @@ -53,6 +53,8 @@ struct power_pmu { [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX]; + int n_blacklist_ev; + int *blacklist_ev; /* BHRB entries in the PMU */ int bhrb_nr; }; diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index a00b364fb9d7..e032aeff3d6b 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -1839,6 +1839,18 @@ static int hw_perf_cache_event(u64 config, u64 *eventp) return 0; } +static bool is_event_blacklisted(u64 ev) +{ + int i; + + for (i=0; i < ppmu->n_blacklist_ev; i++) { + if (ppmu->blacklist_ev[i] == ev) + return true; + } + + return false; +} + static int power_pmu_event_init(struct perf_event *event) { u64 ev; @@ -1864,15 +1876,24 @@ static int power_pmu_event_init(struct perf_event *event) ev = event->attr.config; if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) return -EOPNOTSUPP; + + if (ppmu->blacklist_ev && is_event_blacklisted(ev)) + return -EINVAL; ev = ppmu->generic_events[ev]; break; case PERF_TYPE_HW_CACHE: err = hw_perf_cache_event(event->attr.config, &ev); if (err) return err; + + if (ppmu->blacklist_ev && is_event_blacklisted(ev)) + return -EINVAL; break; case PERF_TYPE_RAW: ev = event->attr.config; + + if (ppmu->blacklist_ev && is_event_blacklisted(ev)) + return -EINVAL; break; default: return -ENOENT; -- cgit v1.2.3 From 64acab4e4fca19706e907bec435cc2acb65c83f3 Mon Sep 17 00:00:00 2001 From: Madhavan Srinivasan Date: Sun, 4 Mar 2018 17:26:27 +0530 Subject: powerpc/perf: Add blacklisted events for Power9 DD2.1 These events either do not count, or do not count correctly, so to prevent user confusion block counting 
them at all. Signed-off-by: Madhavan Srinivasan [mpe: Change log] Signed-off-by: Michael Ellerman --- arch/powerpc/perf/power9-events-list.h | 13 +++++++++++++ arch/powerpc/perf/power9-pmu.c | 26 ++++++++++++++++++++++++++ 2 files changed, 39 insertions(+) diff --git a/arch/powerpc/perf/power9-events-list.h b/arch/powerpc/perf/power9-events-list.h index e99c6bf4d391..9d7a16a943a8 100644 --- a/arch/powerpc/perf/power9-events-list.h +++ b/arch/powerpc/perf/power9-events-list.h @@ -69,3 +69,16 @@ EVENT(PM_BR_CMPL_ALT, 0x10012) EVENT(PM_BR_2PATH, 0x20036) /* ALternate branch event that are not strongly biased */ EVENT(PM_BR_2PATH_ALT, 0x40036) + +/* Blacklisted events */ +EVENT(PM_MRK_ST_DONE_L2, 0x10134) +EVENT(PM_RADIX_PWC_L1_HIT, 0x1f056) +EVENT(PM_FLOP_CMPL, 0x100f4) +EVENT(PM_MRK_NTF_FIN, 0x20112) +EVENT(PM_RADIX_PWC_L2_HIT, 0x2d024) +EVENT(PM_IFETCH_THROTTLE, 0x3405e) +EVENT(PM_MRK_L2_TM_ST_ABORT_SISTER, 0x3e15c) +EVENT(PM_RADIX_PWC_L3_HIT, 0x3f056) +EVENT(PM_RUN_CYC_SMT2_MODE, 0x3006c) +EVENT(PM_TM_TX_PASS_RUN_INST, 0x4e014) +EVENT(PM_DISP_HELD_SYNC_HOLD, 0x4003c) diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c index 24b5b5b7a206..3847607c16f0 100644 --- a/arch/powerpc/perf/power9-pmu.c +++ b/arch/powerpc/perf/power9-pmu.c @@ -101,9 +101,26 @@ enum { #define POWER9_MMCRA_IFM2 0x0000000080000000UL #define POWER9_MMCRA_IFM3 0x00000000C0000000UL +/* Nasty Power9 specific hack */ +#define PVR_POWER9_CUMULUS 0x00002000 + /* PowerISA v2.07 format attribute structure*/ extern struct attribute_group isa207_pmu_format_group; +int p9_dd21_bl_ev[] = { + PM_MRK_ST_DONE_L2, + PM_RADIX_PWC_L1_HIT, + PM_FLOP_CMPL, + PM_MRK_NTF_FIN, + PM_RADIX_PWC_L2_HIT, + PM_IFETCH_THROTTLE, + PM_MRK_L2_TM_ST_ABORT_SISTER, + PM_RADIX_PWC_L3_HIT, + PM_RUN_CYC_SMT2_MODE, + PM_TM_TX_PASS_RUN_INST, + PM_DISP_HELD_SYNC_HOLD, +}; + /* Table of alternatives, sorted by column 0 */ static const unsigned int power9_event_alternatives[][MAX_ALT] = { { PM_INST_DISP, PM_INST_DISP_ALT }, @@ -446,12 +463,21 @@ static struct power_pmu power9_pmu = { static int __init init_power9_pmu(void) { int rc = 0; + unsigned int pvr = mfspr(SPRN_PVR); /* Comes from cpu_specs[] */ if (!cur_cpu_spec->oprofile_cpu_type || strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power9")) return -ENODEV; + /* Blacklist events */ + if (!(pvr & PVR_POWER9_CUMULUS)) { + if ((PVR_CFG(pvr) == 2) && (PVR_MIN(pvr) == 1)) { + power9_pmu.blacklist_ev = p9_dd21_bl_ev; + power9_pmu.n_blacklist_ev = ARRAY_SIZE(p9_dd21_bl_ev); + } + } + if (cpu_has_feature(CPU_FTR_POWER9_DD1)) { /* * Since PM_INST_CMPL may not provide right counts in all -- cgit v1.2.3 From ac96588d9831bd047ab4cf54850cc69b44855337 Mon Sep 17 00:00:00 2001 From: Madhavan Srinivasan Date: Sun, 4 Mar 2018 17:26:28 +0530 Subject: powerpc/perf: Add blacklisted events for Power9 DD2.2 These events either do not count, or do not count correctly, so to prevent user confusion block counting them at all. 
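Which list applies is decided from the PVR, as the init_power9_pmu() hunks above and below show. As a standalone sketch (PVR_CFG/PVR_MIN assumed to extract the major/minor revision nibbles as in asm/reg.h; the example PVR values are hypothetical):

#include <stdio.h>
#include <stdint.h>

#define PVR_POWER9_CUMULUS 0x00002000	/* from the patch above */
#define PVR_CFG(v) (((v) >> 8) & 0xF)	/* assumed: major (DD) revision */
#define PVR_MIN(v) ((v) & 0xF)		/* assumed: minor revision */

static const char *p9_blacklist_for(uint32_t pvr)
{
	if (!(pvr & PVR_POWER9_CUMULUS)) {	/* "Nimbus" parts only */
		if (PVR_CFG(pvr) == 2 && PVR_MIN(pvr) == 1)
			return "p9_dd21_bl_ev";
		if (PVR_CFG(pvr) == 2 && PVR_MIN(pvr) == 2)
			return "p9_dd22_bl_ev";
	}
	return "none";
}

int main(void)
{
	printf("%s\n", p9_blacklist_for(0x004e0201));	/* hypothetical DD2.1 */
	printf("%s\n", p9_blacklist_for(0x004e0202));	/* hypothetical DD2.2 */
	return 0;
}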
Signed-off-by: Madhavan Srinivasan [mpe: Change log] Signed-off-by: Michael Ellerman --- arch/powerpc/perf/power9-events-list.h | 15 +++++++++++++++ arch/powerpc/perf/power9-pmu.c | 22 ++++++++++++++++++++++ 2 files changed, 37 insertions(+) diff --git a/arch/powerpc/perf/power9-events-list.h b/arch/powerpc/perf/power9-events-list.h index 9d7a16a943a8..7de344b7d9cc 100644 --- a/arch/powerpc/perf/power9-events-list.h +++ b/arch/powerpc/perf/power9-events-list.h @@ -82,3 +82,18 @@ EVENT(PM_RADIX_PWC_L3_HIT, 0x3f056) EVENT(PM_RUN_CYC_SMT2_MODE, 0x3006c) EVENT(PM_TM_TX_PASS_RUN_INST, 0x4e014) EVENT(PM_DISP_HELD_SYNC_HOLD, 0x4003c) +EVENT(PM_DTLB_MISS_16G, 0x1c058) +EVENT(PM_DERAT_MISS_2M, 0x1c05a) +EVENT(PM_DTLB_MISS_2M, 0x1c05c) +EVENT(PM_MRK_DTLB_MISS_1G, 0x1d15c) +EVENT(PM_DTLB_MISS_4K, 0x2c056) +EVENT(PM_DERAT_MISS_1G, 0x2c05a) +EVENT(PM_MRK_DERAT_MISS_2M, 0x2d152) +EVENT(PM_MRK_DTLB_MISS_4K, 0x2d156) +EVENT(PM_MRK_DTLB_MISS_16G, 0x2d15e) +EVENT(PM_DTLB_MISS_64K, 0x3c056) +EVENT(PM_MRK_DERAT_MISS_1G, 0x3d152) +EVENT(PM_MRK_DTLB_MISS_64K, 0x3d156) +EVENT(PM_DTLB_MISS_16M, 0x4c056) +EVENT(PM_DTLB_MISS_1G, 0x4c05a) +EVENT(PM_MRK_DTLB_MISS_16M, 0x4c15e) diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c index 3847607c16f0..2ca0b33b4efb 100644 --- a/arch/powerpc/perf/power9-pmu.c +++ b/arch/powerpc/perf/power9-pmu.c @@ -121,6 +121,25 @@ int p9_dd21_bl_ev[] = { PM_DISP_HELD_SYNC_HOLD, }; +int p9_dd22_bl_ev[] = { + PM_DTLB_MISS_16G, + PM_DERAT_MISS_2M, + PM_DTLB_MISS_2M, + PM_MRK_DTLB_MISS_1G, + PM_DTLB_MISS_4K, + PM_DERAT_MISS_1G, + PM_MRK_DERAT_MISS_2M, + PM_MRK_DTLB_MISS_4K, + PM_MRK_DTLB_MISS_16G, + PM_DTLB_MISS_64K, + PM_MRK_DERAT_MISS_1G, + PM_MRK_DTLB_MISS_64K, + PM_DISP_HELD_SYNC_HOLD, + PM_DTLB_MISS_16M, + PM_DTLB_MISS_1G, + PM_MRK_DTLB_MISS_16M, +}; + /* Table of alternatives, sorted by column 0 */ static const unsigned int power9_event_alternatives[][MAX_ALT] = { { PM_INST_DISP, PM_INST_DISP_ALT }, @@ -475,6 +494,9 @@ static int __init init_power9_pmu(void) if ((PVR_CFG(pvr) == 2) && (PVR_MIN(pvr) == 1)) { power9_pmu.blacklist_ev = p9_dd21_bl_ev; power9_pmu.n_blacklist_ev = ARRAY_SIZE(p9_dd21_bl_ev); + } else if ((PVR_CFG(pvr) == 2) && (PVR_MIN(pvr) == 2)) { + power9_pmu.blacklist_ev = p9_dd22_bl_ev; + power9_pmu.n_blacklist_ev = ARRAY_SIZE(p9_dd22_bl_ev); } } -- cgit v1.2.3 From 1e2a9fc7496955faacbbed49461d611b704a7505 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 14 Mar 2018 19:40:38 -0300 Subject: powerpc/rfi-flush: Move the logic to avoid a redo into the debugfs code rfi_flush_enable() includes a check to see if we're already enabled (or disabled), and in that case does nothing. But that means calling setup_rfi_flush() a 2nd time doesn't actually work, which is a bit confusing. Move that check into the debugfs code, where it really belongs. 
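Stripped of kernel plumbing, the resulting handler logic is the standalone sketch below, with the actual flush patching stubbed out by a printf.

#include <stdio.h>
#include <stdbool.h>

static bool rfi_flush;	/* current state */

static void rfi_flush_enable(bool enable)	/* stand-in for the real routine */
{
	rfi_flush = enable;
	printf("flush sites patched: %s\n", enable ? "on" : "off");
}

static int rfi_flush_set(unsigned long long val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -1;	/* -EINVAL in the kernel */

	/* Only do anything if we're changing state */
	if (enable != rfi_flush)
		rfi_flush_enable(enable);
	return 0;
}

int main(void)
{
	rfi_flush_set(1);
	rfi_flush_set(1);	/* no-op: state unchanged */
	rfi_flush_set(0);
	return 0;
}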
Signed-off-by: Michael Ellerman Signed-off-by: Mauricio Faria de Oliveira Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/setup_64.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index c388cc3357fa..3efc01a570e8 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -846,9 +846,6 @@ static void do_nothing(void *unused) void rfi_flush_enable(bool enable) { - if (rfi_flush == enable) - return; - if (enable) { do_rfi_flush_fixups(enabled_flush_types); on_each_cpu(do_nothing, NULL, 1); @@ -902,13 +899,19 @@ void __init setup_rfi_flush(enum l1d_flush_type types, bool enable) #ifdef CONFIG_DEBUG_FS static int rfi_flush_set(void *data, u64 val) { + bool enable; + if (val == 1) - rfi_flush_enable(true); + enable = true; else if (val == 0) - rfi_flush_enable(false); + enable = false; else return -EINVAL; + /* Only do anything if we're changing state */ + if (enable != rfi_flush) + rfi_flush_enable(enable); + return 0; } -- cgit v1.2.3 From abf110f3e1cea40f5ea15e85f5d67c39c14568a7 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 14 Mar 2018 19:40:39 -0300 Subject: powerpc/rfi-flush: Make it possible to call setup_rfi_flush() again For PowerVM migration we want to be able to call setup_rfi_flush() again after we've migrated the partition. To support that we need to check that we're not trying to allocate the fallback flush area after memblock has gone away (i.e., boot-time only). Signed-off-by: Michael Ellerman Signed-off-by: Mauricio Faria de Oliveira Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/setup.h | 2 +- arch/powerpc/kernel/setup_64.c | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index 469b7fdc9be4..bbcdf929be54 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h @@ -49,7 +49,7 @@ enum l1d_flush_type { L1D_FLUSH_MTTRIG = 0x8, }; -void __init setup_rfi_flush(enum l1d_flush_type, bool enable); +void setup_rfi_flush(enum l1d_flush_type, bool enable); void do_rfi_flush_fixups(enum l1d_flush_type types); #endif /* !__ASSEMBLY__ */ diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 3efc01a570e8..d60e2f7eff1b 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -860,6 +860,10 @@ static void init_fallback_flush(void) u64 l1d_size, limit; int cpu; + /* Only allocate the fallback flush area once (at boot time). */ + if (l1d_flush_fallback_area) + return; + l1d_size = ppc64_caches.l1d.size; limit = min(ppc64_bolted_size(), ppc64_rma_size); @@ -877,7 +881,7 @@ static void init_fallback_flush(void) } } -void __init setup_rfi_flush(enum l1d_flush_type types, bool enable) +void setup_rfi_flush(enum l1d_flush_type types, bool enable) { if (types & L1D_FLUSH_FALLBACK) { pr_info("rfi-flush: Using fallback displacement flush\n"); -- cgit v1.2.3 From 84749a58b6e382f109abf1e734bc4dd43c2c25bb Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 14 Mar 2018 19:40:40 -0300 Subject: powerpc/rfi-flush: Always enable fallback flush on pseries This ensures the fallback flush area is always allocated on pseries, so in case a LPAR is migrated from a patched to an unpatched system, it is possible to enable the fallback flush in the target system. 
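The resulting type selection is easy to sketch standalone; the H_CPU_CHAR_* bit positions match the hvcall.h additions later in this series, while the L1D_FLUSH_* values other than MTTRIG are assumed from asm/setup.h.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define L1D_FLUSH_FALLBACK 0x2	/* assumed enum l1d_flush_type values */
#define L1D_FLUSH_ORI      0x4
#define L1D_FLUSH_MTTRIG   0x8

#define H_CPU_CHAR_L1D_FLUSH_ORI30 (1ULL << 61)	/* IBM bit 2 */
#define H_CPU_CHAR_L1D_FLUSH_TRIG2 (1ULL << 60)	/* IBM bit 3 */

static unsigned int pseries_flush_types(bool hcall_ok, uint64_t character)
{
	/* Fallback is set unconditionally now, so a migrated LPAR can
	 * always drop back to the displacement flush. */
	unsigned int types = L1D_FLUSH_FALLBACK;

	if (hcall_ok) {
		if (character & H_CPU_CHAR_L1D_FLUSH_TRIG2)
			types |= L1D_FLUSH_MTTRIG;
		if (character & H_CPU_CHAR_L1D_FLUSH_ORI30)
			types |= L1D_FLUSH_ORI;
	}
	return types;
}

int main(void)
{
	printf("no hcall:    %#x\n", pseries_flush_types(false, 0));
	printf("with mttrig: %#x\n",
	       pseries_flush_types(true, H_CPU_CHAR_L1D_FLUSH_TRIG2));
	return 0;
}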
Signed-off-by: Michael Ellerman Signed-off-by: Mauricio Faria de Oliveira Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/pseries/setup.c | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 4642e48d1c2e..b20d1074acb9 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -468,26 +468,18 @@ static void pseries_setup_rfi_flush(void) /* Enable by default */ enable = true; + types = L1D_FLUSH_FALLBACK; rc = plpar_get_cpu_characteristics(&result); if (rc == H_SUCCESS) { - types = L1D_FLUSH_NONE; - if (result.character & H_CPU_CHAR_L1D_FLUSH_TRIG2) types |= L1D_FLUSH_MTTRIG; if (result.character & H_CPU_CHAR_L1D_FLUSH_ORI30) types |= L1D_FLUSH_ORI; - /* Use fallback if nothing set in hcall */ - if (types == L1D_FLUSH_NONE) - types = L1D_FLUSH_FALLBACK; - if ((!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR)) || (!(result.behaviour & H_CPU_BEHAV_FAVOUR_SECURITY))) enable = false; - } else { - /* Default to fallback if case hcall is not available */ - types = L1D_FLUSH_FALLBACK; } setup_rfi_flush(types, enable); -- cgit v1.2.3 From 0063d61ccfc011f379a31acaeba6de7c926fed2c Mon Sep 17 00:00:00 2001 From: Mauricio Faria de Oliveira Date: Wed, 14 Mar 2018 19:40:41 -0300 Subject: powerpc/rfi-flush: Differentiate enabled and patched flush types Currently the rfi-flush messages print 'Using flush' for all enabled_flush_types, but that is not necessarily true -- as now the fallback flush is always enabled on pseries, but the fixup function overwrites its nop/branch slot with other flush types, if available. So, replace the 'Using flush' messages with ' flush is available'. Also, print the patched flush types in the fixup function, so users can know what is (not) being used (e.g., the slower, fallback flush, or no flush type at all if flush is disabled via the debugfs switch). Suggested-by: Michael Ellerman Signed-off-by: Mauricio Faria de Oliveira Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/setup_64.c | 6 +++--- arch/powerpc/lib/feature-fixups.c | 9 ++++++++- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index d60e2f7eff1b..4ec4a27b36a9 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -884,15 +884,15 @@ static void init_fallback_flush(void) void setup_rfi_flush(enum l1d_flush_type types, bool enable) { if (types & L1D_FLUSH_FALLBACK) { - pr_info("rfi-flush: Using fallback displacement flush\n"); + pr_info("rfi-flush: fallback displacement flush available\n"); init_fallback_flush(); } if (types & L1D_FLUSH_ORI) - pr_info("rfi-flush: Using ori type flush\n"); + pr_info("rfi-flush: ori type flush available\n"); if (types & L1D_FLUSH_MTTRIG) - pr_info("rfi-flush: Using mttrig type flush\n"); + pr_info("rfi-flush: mttrig type flush available\n"); enabled_flush_types = types; diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index 73697c4e3468..35f80ab7cbd8 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -153,7 +153,14 @@ void do_rfi_flush_fixups(enum l1d_flush_type types) patch_instruction(dest + 2, instrs[2]); } - printk(KERN_DEBUG "rfi-flush: patched %d locations\n", i); + printk(KERN_DEBUG "rfi-flush: patched %d locations (%s flush)\n", i, + (types == L1D_FLUSH_NONE) ? "no" : + (types == L1D_FLUSH_FALLBACK) ? 
"fallback displacement" : + (types & L1D_FLUSH_ORI) ? (types & L1D_FLUSH_MTTRIG) + ? "ori+mttrig type" + : "ori type" : + (types & L1D_FLUSH_MTTRIG) ? "mttrig type" + : "unknown"); } #endif /* CONFIG_PPC_BOOK3S_64 */ -- cgit v1.2.3 From 921bc6cf807ceb2ab8005319cf39f33494d6b100 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 14 Mar 2018 19:40:42 -0300 Subject: powerpc/rfi-flush: Call setup_rfi_flush() after LPM migration We might have migrated to a machine that uses a different flush type, or doesn't need flushing at all. Signed-off-by: Michael Ellerman Signed-off-by: Mauricio Faria de Oliveira Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/pseries/mobility.c | 3 +++ arch/powerpc/platforms/pseries/pseries.h | 2 ++ arch/powerpc/platforms/pseries/setup.c | 2 +- 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c index 0f7fb7170b03..8a8033a249c7 100644 --- a/arch/powerpc/platforms/pseries/mobility.c +++ b/arch/powerpc/platforms/pseries/mobility.c @@ -348,6 +348,9 @@ void post_mobility_fixup(void) printk(KERN_ERR "Post-mobility device tree update " "failed: %d\n", rc); + /* Possibly switch to a new RFI flush type */ + pseries_setup_rfi_flush(); + return; } diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h index c73351cea276..60db2ee511fb 100644 --- a/arch/powerpc/platforms/pseries/pseries.h +++ b/arch/powerpc/platforms/pseries/pseries.h @@ -108,4 +108,6 @@ static inline unsigned long cmo_get_page_size(void) int dlpar_workqueue_init(void); +void pseries_setup_rfi_flush(void); + #endif /* _PSERIES_PSERIES_H */ diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index b20d1074acb9..f34f9081ec60 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -459,7 +459,7 @@ static void __init find_and_init_phbs(void) of_pci_check_probe_only(); } -static void pseries_setup_rfi_flush(void) +void pseries_setup_rfi_flush(void) { struct h_cpu_char_result result; enum l1d_flush_type types; -- cgit v1.2.3 From c4bc36628d7f8b664657d8bd6ad1c44c177880b7 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 27 Mar 2018 23:01:45 +1100 Subject: powerpc/pseries: Add new H_GET_CPU_CHARACTERISTICS flags Add some additional values which have been defined for the H_GET_CPU_CHARACTERISTICS hypercall. 
Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/hvcall.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index eca3f9c68907..5a740feb7bd7 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -337,6 +337,9 @@ #define H_CPU_CHAR_L1D_FLUSH_ORI30 (1ull << 61) // IBM bit 2 #define H_CPU_CHAR_L1D_FLUSH_TRIG2 (1ull << 60) // IBM bit 3 #define H_CPU_CHAR_L1D_THREAD_PRIV (1ull << 59) // IBM bit 4 +#define H_CPU_CHAR_BRANCH_HINTS_HONORED (1ull << 58) // IBM bit 5 +#define H_CPU_CHAR_THREAD_RECONFIG_CTRL (1ull << 57) // IBM bit 6 +#define H_CPU_CHAR_COUNT_CACHE_DISABLED (1ull << 56) // IBM bit 7 #define H_CPU_BEHAV_FAVOUR_SECURITY (1ull << 63) // IBM bit 0 #define H_CPU_BEHAV_L1D_FLUSH_PR (1ull << 62) // IBM bit 1 -- cgit v1.2.3 From 9a868f634349e62922c226834aa23e3d1329ae7f Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 27 Mar 2018 23:01:44 +1100 Subject: powerpc: Add security feature flags for Spectre/Meltdown This commit adds security feature flags to reflect the settings we receive from firmware regarding Spectre/Meltdown mitigations. The feature names reflect the names we are given by firmware on bare metal machines. See the hostboot source for details. Arguably these could be firmware features, but that then requires them to be read early in boot so they're available prior to asm feature patching, but we don't actually want to use them for patching. We may also want to dynamically update them in future, which would be incompatible with the way firmware features work (at the moment at least). So for now just make them separate flags. Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/security_features.h | 65 ++++++++++++++++++++++++++++ arch/powerpc/kernel/Makefile | 2 +- arch/powerpc/kernel/security.c | 15 +++++++ 3 files changed, 81 insertions(+), 1 deletion(-) create mode 100644 arch/powerpc/include/asm/security_features.h create mode 100644 arch/powerpc/kernel/security.c diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h new file mode 100644 index 000000000000..db00ad2c72c2 --- /dev/null +++ b/arch/powerpc/include/asm/security_features.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Security related feature bit definitions. + * + * Copyright 2018, Michael Ellerman, IBM Corporation. 
+ */ + +#ifndef _ASM_POWERPC_SECURITY_FEATURES_H +#define _ASM_POWERPC_SECURITY_FEATURES_H + + +extern unsigned long powerpc_security_features; + +static inline void security_ftr_set(unsigned long feature) +{ + powerpc_security_features |= feature; +} + +static inline void security_ftr_clear(unsigned long feature) +{ + powerpc_security_features &= ~feature; +} + +static inline bool security_ftr_enabled(unsigned long feature) +{ + return !!(powerpc_security_features & feature); +} + + +// Features indicating support for Spectre/Meltdown mitigations + +// The L1-D cache can be flushed with ori r30,r30,0 +#define SEC_FTR_L1D_FLUSH_ORI30 0x0000000000000001ull + +// The L1-D cache can be flushed with mtspr 882,r0 (aka SPRN_TRIG2) +#define SEC_FTR_L1D_FLUSH_TRIG2 0x0000000000000002ull + +// ori r31,r31,0 acts as a speculation barrier +#define SEC_FTR_SPEC_BAR_ORI31 0x0000000000000004ull + +// Speculation past bctr is disabled +#define SEC_FTR_BCCTRL_SERIALISED 0x0000000000000008ull + +// Entries in L1-D are private to a SMT thread +#define SEC_FTR_L1D_THREAD_PRIV 0x0000000000000010ull + +// Indirect branch prediction cache disabled +#define SEC_FTR_COUNT_CACHE_DISABLED 0x0000000000000020ull + + +// Features indicating need for Spectre/Meltdown mitigations + +// The L1-D cache should be flushed on MSR[HV] 1->0 transition (hypervisor to guest) +#define SEC_FTR_L1D_FLUSH_HV 0x0000000000000040ull + +// The L1-D cache should be flushed on MSR[PR] 0->1 transition (kernel to userspace) +#define SEC_FTR_L1D_FLUSH_PR 0x0000000000000080ull + +// A speculation barrier should be used for bounds checks (Spectre variant 1) +#define SEC_FTR_BNDS_CHK_SPEC_BAR 0x0000000000000100ull + +// Firmware configuration indicates user favours security over performance +#define SEC_FTR_FAVOUR_SECURITY 0x0000000000000200ull + +#endif /* _ASM_POWERPC_SECURITY_FEATURES_H */ diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 1b6bc7fba996..d458c45e5004 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -42,7 +42,7 @@ obj-$(CONFIG_VDSO32) += vdso32/ obj-$(CONFIG_PPC_WATCHDOG) += watchdog.o obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o -obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o +obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o security.o obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o obj-$(CONFIG_PPC64) += vdso64/ diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c new file mode 100644 index 000000000000..4ccba00d224c --- /dev/null +++ b/arch/powerpc/kernel/security.c @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0+ +// +// Security related flags and so on. +// +// Copyright 2018, Michael Ellerman, IBM Corporation. + +#include +#include + + +unsigned long powerpc_security_features __read_mostly = \ + SEC_FTR_L1D_FLUSH_HV | \ + SEC_FTR_L1D_FLUSH_PR | \ + SEC_FTR_BNDS_CHK_SPEC_BAR | \ + SEC_FTR_FAVOUR_SECURITY; -- cgit v1.2.3 From f636c14790ead6cc22cf62279b1f8d7e11a67116 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 27 Mar 2018 23:01:46 +1100 Subject: powerpc/pseries: Set or clear security feature flags Now that we have feature flags for security related things, set or clear them based on what we receive from the hypercall. 
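The convention the flags follow — mitigation-support bits default off and get set, behaviour bits default on and get cleared — can be exercised standalone; the helper bodies below mirror asm/security_features.h, and the flag value matches SEC_FTR_L1D_FLUSH_PR above. The firmware answer is a hypothetical stand-in for the hcall result.

#include <stdio.h>
#include <stdbool.h>

#define SEC_FTR_L1D_FLUSH_PR 0x0000000000000080ULL

/* Default: enabled, matching the initializer in security.c above. */
static unsigned long long powerpc_security_features = SEC_FTR_L1D_FLUSH_PR;

static void security_ftr_clear(unsigned long long f)
{
	powerpc_security_features &= ~f;
}

static bool security_ftr_enabled(unsigned long long f)
{
	return !!(powerpc_security_features & f);
}

int main(void)
{
	bool fw_says_flush_pr = false;	/* hypothetical hcall result */

	/* Firmware can only *disable* this one, so we clear on demand. */
	if (!fw_says_flush_pr)
		security_ftr_clear(SEC_FTR_L1D_FLUSH_PR);

	printf("L1D flush kernel->user: %d\n",
	       security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR));	/* 0 */
	return 0;
}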
Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/pseries/setup.c | 43 ++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index f34f9081ec60..fb84c1df6ed7 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -68,6 +68,7 @@ #include #include #include +#include #include "pseries.h" @@ -459,6 +460,40 @@ static void __init find_and_init_phbs(void) of_pci_check_probe_only(); } +static void init_cpu_char_feature_flags(struct h_cpu_char_result *result) +{ + if (result->character & H_CPU_CHAR_SPEC_BAR_ORI31) + security_ftr_set(SEC_FTR_SPEC_BAR_ORI31); + + if (result->character & H_CPU_CHAR_BCCTRL_SERIALISED) + security_ftr_set(SEC_FTR_BCCTRL_SERIALISED); + + if (result->character & H_CPU_CHAR_L1D_FLUSH_ORI30) + security_ftr_set(SEC_FTR_L1D_FLUSH_ORI30); + + if (result->character & H_CPU_CHAR_L1D_FLUSH_TRIG2) + security_ftr_set(SEC_FTR_L1D_FLUSH_TRIG2); + + if (result->character & H_CPU_CHAR_L1D_THREAD_PRIV) + security_ftr_set(SEC_FTR_L1D_THREAD_PRIV); + + if (result->character & H_CPU_CHAR_COUNT_CACHE_DISABLED) + security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED); + + /* + * The features below are enabled by default, so we instead look to see + * if firmware has *disabled* them, and clear them if so. + */ + if (!(result->character & H_CPU_BEHAV_FAVOUR_SECURITY)) + security_ftr_clear(SEC_FTR_FAVOUR_SECURITY); + + if (!(result->character & H_CPU_BEHAV_L1D_FLUSH_PR)) + security_ftr_clear(SEC_FTR_L1D_FLUSH_PR); + + if (!(result->character & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR)) + security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR); +} + void pseries_setup_rfi_flush(void) { struct h_cpu_char_result result; @@ -472,6 +507,8 @@ void pseries_setup_rfi_flush(void) rc = plpar_get_cpu_characteristics(&result); if (rc == H_SUCCESS) { + init_cpu_char_feature_flags(&result); + if (result.character & H_CPU_CHAR_L1D_FLUSH_TRIG2) types |= L1D_FLUSH_MTTRIG; if (result.character & H_CPU_CHAR_L1D_FLUSH_ORI30) @@ -482,6 +519,12 @@ void pseries_setup_rfi_flush(void) enable = false; } + /* + * We're the guest so this doesn't apply to us, clear it to simplify + * handling of it elsewhere. + */ + security_ftr_clear(SEC_FTR_L1D_FLUSH_HV); + setup_rfi_flush(types, enable); } -- cgit v1.2.3 From 77addf6e95c8689e478d607176b399a6242a777e Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 27 Mar 2018 23:01:47 +1100 Subject: powerpc/powernv: Set or clear security feature flags Now that we have feature flags for security related things, set or clear them based on what we see in the device tree provided by firmware. 
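For orientation, a hedged sketch of the device-tree query this relies on (mirroring the fw_feature_is() helper in the diff below): each firmware feature is a child node of the "fw-features" node carrying an "enabled" or "disabled" boolean property:

    struct device_node *np;
    bool enabled = false;

    np = of_get_child_by_name(fw_features, "fw-count-cache-disabled");
    if (np) {
            enabled = of_property_read_bool(np, "enabled");
            of_node_put(np);        /* drop the reference taken by the lookup */
    }

    if (enabled)
            security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED);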
Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/powernv/setup.c | 56 ++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index 092715b9674b..f5c0c7d91db1 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c @@ -38,9 +38,63 @@ #include #include #include +#include #include "powernv.h" + +static bool fw_feature_is(const char *state, const char *name, + struct device_node *fw_features) +{ + struct device_node *np; + bool rc = false; + + np = of_get_child_by_name(fw_features, name); + if (np) { + rc = of_property_read_bool(np, state); + of_node_put(np); + } + + return rc; +} + +static void init_fw_feat_flags(struct device_node *np) +{ + if (fw_feature_is("enabled", "inst-spec-barrier-ori31,31,0", np)) + security_ftr_set(SEC_FTR_SPEC_BAR_ORI31); + + if (fw_feature_is("enabled", "fw-bcctrl-serialized", np)) + security_ftr_set(SEC_FTR_BCCTRL_SERIALISED); + + if (fw_feature_is("enabled", "inst-spec-barrier-ori31,31,0", np)) + security_ftr_set(SEC_FTR_L1D_FLUSH_ORI30); + + if (fw_feature_is("enabled", "inst-l1d-flush-trig2", np)) + security_ftr_set(SEC_FTR_L1D_FLUSH_TRIG2); + + if (fw_feature_is("enabled", "fw-l1d-thread-split", np)) + security_ftr_set(SEC_FTR_L1D_THREAD_PRIV); + + if (fw_feature_is("enabled", "fw-count-cache-disabled", np)) + security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED); + + /* + * The features below are enabled by default, so we instead look to see + * if firmware has *disabled* them, and clear them if so. + */ + if (fw_feature_is("disabled", "speculation-policy-favor-security", np)) + security_ftr_clear(SEC_FTR_FAVOUR_SECURITY); + + if (fw_feature_is("disabled", "needs-l1d-flush-msr-pr-0-to-1", np)) + security_ftr_clear(SEC_FTR_L1D_FLUSH_PR); + + if (fw_feature_is("disabled", "needs-l1d-flush-msr-hv-1-to-0", np)) + security_ftr_clear(SEC_FTR_L1D_FLUSH_HV); + + if (fw_feature_is("disabled", "needs-spec-barrier-for-bound-checks", np)) + security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR); +} + static void pnv_setup_rfi_flush(void) { struct device_node *np, *fw_features; @@ -56,6 +110,8 @@ static void pnv_setup_rfi_flush(void) of_node_put(np); if (fw_features) { + init_fw_feat_flags(fw_features); + np = of_get_child_by_name(fw_features, "inst-l1d-flush-trig2"); if (np && of_property_read_bool(np, "enabled")) type = L1D_FLUSH_MTTRIG; -- cgit v1.2.3 From 8ad33041563a10b34988800c682ada14b2612533 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 27 Mar 2018 23:01:48 +1100 Subject: powerpc/64s: Move cpu_show_meltdown() This landed in setup_64.c for no good reason other than we had nowhere else to put it. Now that we have a security-related file, that is a better place for it so move it. Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/security.c | 11 +++++++++++ arch/powerpc/kernel/setup_64.c | 8 -------- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c index 4ccba00d224c..564e7f182a16 100644 --- a/arch/powerpc/kernel/security.c +++ b/arch/powerpc/kernel/security.c @@ -5,6 +5,8 @@ // Copyright 2018, Michael Ellerman, IBM Corporation. 
#include +#include + #include @@ -13,3 +15,12 @@ unsigned long powerpc_security_features __read_mostly = \ SEC_FTR_L1D_FLUSH_PR | \ SEC_FTR_BNDS_CHK_SPEC_BAR | \ SEC_FTR_FAVOUR_SECURITY; + + +ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) +{ + if (rfi_flush) + return sprintf(buf, "Mitigation: RFI Flush\n"); + + return sprintf(buf, "Vulnerable\n"); +} diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 4ec4a27b36a9..7f7621668613 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -934,12 +934,4 @@ static __init int rfi_flush_debugfs_init(void) } device_initcall(rfi_flush_debugfs_init); #endif - -ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) -{ - if (rfi_flush) - return sprintf(buf, "Mitigation: RFI Flush\n"); - - return sprintf(buf, "Vulnerable\n"); -} #endif /* CONFIG_PPC_BOOK3S_64 */ -- cgit v1.2.3 From ff348355e9c72493947be337bb4fae4fc1a41eba Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 27 Mar 2018 23:01:49 +1100 Subject: powerpc/64s: Enhance the information in cpu_show_meltdown() Now that we have the security feature flags we can make the information displayed in the "meltdown" file more informative. Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/security_features.h | 1 + arch/powerpc/kernel/security.c | 30 ++++++++++++++++++++++++++-- 2 files changed, 29 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h index db00ad2c72c2..400a9050e035 100644 --- a/arch/powerpc/include/asm/security_features.h +++ b/arch/powerpc/include/asm/security_features.h @@ -10,6 +10,7 @@ extern unsigned long powerpc_security_features; +extern bool rfi_flush; static inline void security_ftr_set(unsigned long feature) { diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c index 564e7f182a16..865db6f8bcca 100644 --- a/arch/powerpc/kernel/security.c +++ b/arch/powerpc/kernel/security.c @@ -6,6 +6,7 @@ #include #include +#include #include @@ -19,8 +20,33 @@ unsigned long powerpc_security_features __read_mostly = \ ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) { - if (rfi_flush) - return sprintf(buf, "Mitigation: RFI Flush\n"); + bool thread_priv; + + thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV); + + if (rfi_flush || thread_priv) { + struct seq_buf s; + seq_buf_init(&s, buf, PAGE_SIZE - 1); + + seq_buf_printf(&s, "Mitigation: "); + + if (rfi_flush) + seq_buf_printf(&s, "RFI Flush"); + + if (rfi_flush && thread_priv) + seq_buf_printf(&s, ", "); + + if (thread_priv) + seq_buf_printf(&s, "L1D private per thread"); + + seq_buf_printf(&s, "\n"); + + return s.len; + } + + if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && + !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR)) + return sprintf(buf, "Not affected\n"); return sprintf(buf, "Vulnerable\n"); } -- cgit v1.2.3 From 37c0bdd00d3ae83369ab60a6712c28e11e6458d5 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 27 Mar 2018 23:01:50 +1100 Subject: powerpc/powernv: Use the security flags in pnv_setup_rfi_flush() Now that we have the security flags we can significantly simplify the code in pnv_setup_rfi_flush(), because we can use the flags instead of checking device tree properties and because the security flags have pessimistic defaults. 
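The decision then reduces to a few flag tests; roughly (condensed from the diff below):

    /* Enable the flush only when firmware favours security and at least
       one of the flush-needed conditions holds. */
    enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
             (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) ||
              security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV));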
Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/powernv/setup.c | 41 +++++++++------------------------- 1 file changed, 10 insertions(+), 31 deletions(-) diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index f5c0c7d91db1..7de050a3736b 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c @@ -66,7 +66,7 @@ static void init_fw_feat_flags(struct device_node *np) if (fw_feature_is("enabled", "fw-bcctrl-serialized", np)) security_ftr_set(SEC_FTR_BCCTRL_SERIALISED); - if (fw_feature_is("enabled", "inst-spec-barrier-ori31,31,0", np)) + if (fw_feature_is("enabled", "inst-l1d-flush-ori30,30,0", np)) security_ftr_set(SEC_FTR_L1D_FLUSH_ORI30); if (fw_feature_is("enabled", "inst-l1d-flush-trig2", np)) @@ -99,11 +99,10 @@ static void pnv_setup_rfi_flush(void) { struct device_node *np, *fw_features; enum l1d_flush_type type; - int enable; + bool enable; /* Default to fallback in case fw-features are not available */ type = L1D_FLUSH_FALLBACK; - enable = 1; np = of_find_node_by_name(NULL, "ibm,opal"); fw_features = of_get_child_by_name(np, "fw-features"); @@ -111,40 +110,20 @@ static void pnv_setup_rfi_flush(void) if (fw_features) { init_fw_feat_flags(fw_features); + of_node_put(fw_features); - np = of_get_child_by_name(fw_features, "inst-l1d-flush-trig2"); - if (np && of_property_read_bool(np, "enabled")) + if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_TRIG2)) type = L1D_FLUSH_MTTRIG; - of_node_put(np); - - np = of_get_child_by_name(fw_features, "inst-l1d-flush-ori30,30,0"); - if (np && of_property_read_bool(np, "enabled")) + if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_ORI30)) type = L1D_FLUSH_ORI; - - of_node_put(np); - - /* Enable unless firmware says NOT to */ - enable = 2; - np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-hv-1-to-0"); - if (np && of_property_read_bool(np, "disabled")) - enable--; - - of_node_put(np); - - np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-pr-0-to-1"); - if (np && of_property_read_bool(np, "disabled")) - enable--; - - np = of_get_child_by_name(fw_features, "speculation-policy-favor-security"); - if (np && of_property_read_bool(np, "disabled")) - enable = 0; - - of_node_put(np); - of_node_put(fw_features); } - setup_rfi_flush(type, enable > 0); + enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \ + (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) || \ + security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV)); + + setup_rfi_flush(type, enable); } static void __init pnv_setup_arch(void) -- cgit v1.2.3 From 2e4a16161fcd324b1f9bf6cb6856529f7eaf0689 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 27 Mar 2018 23:01:51 +1100 Subject: powerpc/pseries: Use the security flags in pseries_setup_rfi_flush() Now that we have the security flags we can simplify the code in pseries_setup_rfi_flush() because the security flags have pessimistic defaults. 
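Roughly (condensed from the diff below), the guest-side decision becomes:

    /* SEC_FTR_L1D_FLUSH_HV was already cleared above, since a guest never
       performs the MSR[HV] 1->0 transition itself, so only the
       kernel-to-userspace (PR) case matters here. */
    enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
             security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR);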
Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/pseries/setup.c | 27 ++++++++++++--------------- 1 file changed, 12 insertions(+), 15 deletions(-) diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index fb84c1df6ed7..1f122359cd8f 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -501,30 +501,27 @@ void pseries_setup_rfi_flush(void) bool enable; long rc; - /* Enable by default */ - enable = true; - types = L1D_FLUSH_FALLBACK; - rc = plpar_get_cpu_characteristics(&result); - if (rc == H_SUCCESS) { + if (rc == H_SUCCESS) init_cpu_char_feature_flags(&result); - if (result.character & H_CPU_CHAR_L1D_FLUSH_TRIG2) - types |= L1D_FLUSH_MTTRIG; - if (result.character & H_CPU_CHAR_L1D_FLUSH_ORI30) - types |= L1D_FLUSH_ORI; - - if ((!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR)) || - (!(result.behaviour & H_CPU_BEHAV_FAVOUR_SECURITY))) - enable = false; - } - /* * We're the guest so this doesn't apply to us, clear it to simplify * handling of it elsewhere. */ security_ftr_clear(SEC_FTR_L1D_FLUSH_HV); + types = L1D_FLUSH_FALLBACK; + + if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_TRIG2)) + types |= L1D_FLUSH_MTTRIG; + + if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_ORI30)) + types |= L1D_FLUSH_ORI; + + enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \ security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR); + setup_rfi_flush(types, enable); } -- cgit v1.2.3 From 56986016cb8cd9050e601831fe89f332b4e3c46e Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 27 Mar 2018 23:01:52 +1100 Subject: powerpc/64s: Wire up cpu_show_spectre_v1() Add a definition for cpu_show_spectre_v1() to override the generic version. Currently this just prints "Not affected" or "Vulnerable" based on the firmware flag. Although the kernel does have array_index_nospec() in a few places, we haven't yet audited all the powerpc code to see where it's necessary, so for now we don't list that as a mitigation. Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/security.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c index 865db6f8bcca..0eace3cac818 100644 --- a/arch/powerpc/kernel/security.c +++ b/arch/powerpc/kernel/security.c @@ -50,3 +50,11 @@ ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, cha return sprintf(buf, "Vulnerable\n"); } + +ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) +{ + if (!security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) + return sprintf(buf, "Not affected\n"); + + return sprintf(buf, "Vulnerable\n"); +} -- cgit v1.2.3 From d6fbe1c55c55c6937cbea3531af7da84ab7473c3 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 27 Mar 2018 23:01:53 +1100 Subject: powerpc/64s: Wire up cpu_show_spectre_v2() Add a definition for cpu_show_spectre_v2() to override the generic version. This has several permutations; though in practice some may not occur, we cater for any combination. The most verbose is: Mitigation: Indirect branch serialisation (kernel only), Indirect branch cache disabled, ori31 speculation barrier enabled We don't treat the ori31 speculation barrier as a mitigation on its own, because it has to be *used* by code in order to be a mitigation and we don't know if userspace is doing that.
So if that's all we see we say: Vulnerable, ori31 speculation barrier enabled Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/security.c | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c index 0eace3cac818..2cee3dcd231b 100644 --- a/arch/powerpc/kernel/security.c +++ b/arch/powerpc/kernel/security.c @@ -58,3 +58,36 @@ ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, c return sprintf(buf, "Vulnerable\n"); } + +ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) +{ + bool bcs, ccd, ori; + struct seq_buf s; + + seq_buf_init(&s, buf, PAGE_SIZE - 1); + + bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED); + ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED); + ori = security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31); + + if (bcs || ccd) { + seq_buf_printf(&s, "Mitigation: "); + + if (bcs) + seq_buf_printf(&s, "Indirect branch serialisation (kernel only)"); + + if (bcs && ccd) + seq_buf_printf(&s, ", "); + + if (ccd) + seq_buf_printf(&s, "Indirect branch cache disabled"); + } else + seq_buf_printf(&s, "Vulnerable"); + + if (ori) + seq_buf_printf(&s, ", ori31 speculation barrier enabled"); + + seq_buf_printf(&s, "\n"); + + return s.len; +} -- cgit v1.2.3 From bde709a70884bfc790da6fbc4467c91e8d41c51b Mon Sep 17 00:00:00 2001 From: Mauricio Faria de Oliveira Date: Fri, 9 Mar 2018 17:45:58 -0300 Subject: powerpc/mm: Fix section mismatch warning in stop_machine_change_mapping() Fix the warning messages for stop_machine_change_mapping(), and a number of other affected functions in its call chain. All modified functions are under CONFIG_MEMORY_HOTPLUG, so __meminit is okay (keeps them / does not discard them). Boot-tested on powernv/power9/radix-mmu and pseries/power8/hash-mmu. $ make -j$(nproc) CONFIG_DEBUG_SECTION_MISMATCH=y vmlinux ... MODPOST vmlinux.o WARNING: vmlinux.o(.text+0x6b130): Section mismatch in reference from the function stop_machine_change_mapping() to the function .meminit.text:create_physical_mapping() The function stop_machine_change_mapping() references the function __meminit create_physical_mapping(). This is often because stop_machine_change_mapping lacks a __meminit annotation or the annotation of create_physical_mapping is wrong. WARNING: vmlinux.o(.text+0x6b13c): Section mismatch in reference from the function stop_machine_change_mapping() to the function .meminit.text:create_physical_mapping() The function stop_machine_change_mapping() references the function __meminit create_physical_mapping(). This is often because stop_machine_change_mapping lacks a __meminit annotation or the annotation of create_physical_mapping is wrong. ... 
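A minimal illustration of the fix pattern (function names are hypothetical, not from the patch): every function on a memory-hotplug-only call chain carries __meminit, so the reference stays within .meminit.text and MODPOST has nothing to warn about:

    #include <linux/init.h>

    static int __meminit example_create_mapping(unsigned long start,
                                                unsigned long end)
    {
            return 0;       /* illustrative stub */
    }

    int __meminit example_add_memory(unsigned long start, unsigned long end)
    {
            /* Caller and callee share the annotation, so this call never
               references .meminit.text from plain .text. */
            return example_create_mapping(start, end);
    }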
Signed-off-by: Mauricio Faria de Oliveira Acked-by: Balbir Singh Signed-off-by: Michael Ellerman --- arch/powerpc/mm/mem.c | 4 ++-- arch/powerpc/mm/pgtable-book3s64.c | 4 ++-- arch/powerpc/mm/pgtable-radix.c | 12 ++++++------ 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index fe8c61149fb8..85245ef97e72 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -127,7 +127,7 @@ int __weak remove_section_mapping(unsigned long start, unsigned long end) return -ENODEV; } -int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap, +int __meminit arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap, bool want_memblock) { unsigned long start_pfn = start >> PAGE_SHIFT; @@ -148,7 +148,7 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap, } #ifdef CONFIG_MEMORY_HOTREMOVE -int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) +int __meminit arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c index 422e80253a33..bd6ca74acf9e 100644 --- a/arch/powerpc/mm/pgtable-book3s64.c +++ b/arch/powerpc/mm/pgtable-book3s64.c @@ -155,7 +155,7 @@ void mmu_cleanup_all(void) } #ifdef CONFIG_MEMORY_HOTPLUG -int create_section_mapping(unsigned long start, unsigned long end) +int __meminit create_section_mapping(unsigned long start, unsigned long end) { if (radix_enabled()) return radix__create_section_mapping(start, end); @@ -163,7 +163,7 @@ int create_section_mapping(unsigned long start, unsigned long end) return hash__create_section_mapping(start, end); } -int remove_section_mapping(unsigned long start, unsigned long end) +int __meminit remove_section_mapping(unsigned long start, unsigned long end) { if (radix_enabled()) return radix__remove_section_mapping(start, end); diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c index 2e10a964e290..ab9db0afd2c8 100644 --- a/arch/powerpc/mm/pgtable-radix.c +++ b/arch/powerpc/mm/pgtable-radix.c @@ -695,7 +695,7 @@ struct change_mapping_params { unsigned long aligned_end; }; -static int stop_machine_change_mapping(void *data) +static int __meminit stop_machine_change_mapping(void *data) { struct change_mapping_params *params = (struct change_mapping_params *)data; @@ -742,7 +742,7 @@ static void remove_pte_table(pte_t *pte_start, unsigned long addr, /* * clear the pte and potentially split the mapping helper */ -static void split_kernel_mapping(unsigned long addr, unsigned long end, +static void __meminit split_kernel_mapping(unsigned long addr, unsigned long end, unsigned long size, pte_t *pte) { unsigned long mask = ~(size - 1); @@ -835,7 +835,7 @@ static void remove_pud_table(pud_t *pud_start, unsigned long addr, } } -static void remove_pagetable(unsigned long start, unsigned long end) +static void __meminit remove_pagetable(unsigned long start, unsigned long end) { unsigned long addr, next; pud_t *pud_base; @@ -863,12 +863,12 @@ static void remove_pagetable(unsigned long start, unsigned long end) radix__flush_tlb_kernel_range(start, end); } -int __ref radix__create_section_mapping(unsigned long start, unsigned long end) +int __meminit radix__create_section_mapping(unsigned long start, unsigned long end) { return create_physical_mapping(start, end); } -int radix__remove_section_mapping(unsigned long 
start, unsigned long end) +int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end) { remove_pagetable(start, end); return 0; @@ -888,7 +888,7 @@ int __meminit radix__vmemmap_create_mapping(unsigned long start, } #ifdef CONFIG_MEMORY_HOTPLUG -void radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size) +void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size) { remove_pagetable(start, start + page_size); } -- cgit v1.2.3 From 79b4686857029cdea97d0102d179aef2f58e5acb Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Tue, 9 Jan 2018 16:45:20 +1100 Subject: powerpc/init: Do not advertise radix during client-architecture-support Currently the pseries kernel advertises radix MMU support even if the actual support is disabled via the CONFIG_PPC_RADIX_MMU option. This adds a check for CONFIG_PPC_RADIX_MMU to avoid advertising radix to the hypervisor. Suggested-by: Paul Mackerras Signed-off-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/prom_init.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index d22c41c26bb3..8ca3f4c915e3 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -1111,7 +1111,8 @@ static void __init prom_check_platform_support(void) } } - if (supported.radix_mmu && supported.radix_gtse) { + if (supported.radix_mmu && supported.radix_gtse && + IS_ENABLED(CONFIG_PPC_RADIX_MMU)) { /* Radix preferred - but we require GTSE for now */ prom_debug("Asking for radix with GTSE\n"); ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_RADIX); -- cgit v1.2.3 From a8c0bf3c621e0acc01451e27fe47c41138e13d0d Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Tue, 9 Jan 2018 16:52:14 +1100 Subject: powerpc/lpar/debug: Initialize flags before printing debug message With DEBUG enabled, there is a compile error: "error: ‘flags’ is used uninitialized in this function". This moves the pr_devel() call a little further down, to after @flags is initialized.
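Condensed from the diff below, the fixed ordering is simply compute-then-log:

    flags = (newpp & 7) | H_AVPN;
    if (mmu_has_feature(MMU_FTR_KERNEL_RO))
            /* Move pp0 into bit 8 (IBM 55) */
            flags |= (newpp & HPTE_R_PP0) >> 55;

    pr_devel(" update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
             want_v, slot, flags, psize);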
Signed-off-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/pseries/lpar.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index d11f3c14c21e..238b55fb8007 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -306,14 +306,14 @@ static long pSeries_lpar_hpte_updatepp(unsigned long slot, want_v = hpte_encode_avpn(vpn, psize, ssize); - pr_devel(" update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...", - want_v, slot, flags, psize); - flags = (newpp & 7) | H_AVPN; if (mmu_has_feature(MMU_FTR_KERNEL_RO)) /* Move pp0 into bit 8 (IBM 55) */ flags |= (newpp & HPTE_R_PP0) >> 55; + pr_devel(" update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...", + want_v, slot, flags, psize); + lpar_rc = plpar_pte_protect(flags, slot, want_v); if (lpar_rc == H_NOT_FOUND) { -- cgit v1.2.3 From b574df94883df4d37f1b9d648867d623496ca3b1 Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Thu, 1 Feb 2018 16:07:25 +1100 Subject: powerpc/mm: Fix typo in comments Fixes: 912cc87a6 "powerpc/mm/radix: Add LPID based tlb flush helpers" Signed-off-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman --- arch/powerpc/mm/tlb-radix.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c index 71d1b19ad1c0..001c1f6458b6 100644 --- a/arch/powerpc/mm/tlb-radix.c +++ b/arch/powerpc/mm/tlb-radix.c @@ -98,7 +98,7 @@ static inline void __tlbiel_pid(unsigned long pid, int set, rb |= set << PPC_BITLSHIFT(51); rs = ((unsigned long)pid) << PPC_BITLSHIFT(31); prs = 1; /* process scoped */ - r = 1; /* raidx format */ + r = 1; /* radix format */ asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1) : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); @@ -112,7 +112,7 @@ static inline void __tlbie_pid(unsigned long pid, unsigned long ric) rb = PPC_BIT(53); /* IS = 1 */ rs = pid << PPC_BITLSHIFT(31); prs = 1; /* process scoped */ - r = 1; /* raidx format */ + r = 1; /* radix format */ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); @@ -164,7 +164,7 @@ static inline void __tlbiel_va(unsigned long va, unsigned long pid, rb |= ap << PPC_BITLSHIFT(58); rs = pid << PPC_BITLSHIFT(31); prs = 1; /* process scoped */ - r = 1; /* raidx format */ + r = 1; /* radix format */ asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1) : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); @@ -212,7 +212,7 @@ static inline void __tlbie_va(unsigned long va, unsigned long pid, rb |= ap << PPC_BITLSHIFT(58); rs = pid << PPC_BITLSHIFT(31); prs = 1; /* process scoped */ - r = 1; /* raidx format */ + r = 1; /* radix format */ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); @@ -615,7 +615,7 @@ void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa, rb |= ap << PPC_BITLSHIFT(58); rs = lpid & ((1UL << 32) - 1); prs = 0; /* process scoped */ - r = 1; /* raidx format */ + r = 1; /* radix format */ asm volatile("ptesync": : :"memory"); asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) @@ -633,7 +633,7 @@ void radix__flush_tlb_lpid(unsigned long lpid) rb = 0x2 << PPC_BITLSHIFT(53); /* IS = 2 */ rs = lpid & ((1UL << 32) - 1); prs = 0; /* partition scoped */ - r = 1; /* raidx format */ + r = 1; /* radix format */ asm volatile("ptesync": : :"memory"); asm volatile(PPC_TLBIE_5(%0, %4, 
%3, %2, %1) @@ -657,7 +657,7 @@ void radix__flush_tlb_all(void) rb = 0x3 << PPC_BITLSHIFT(53); /* IS = 3 */ prs = 0; /* partition scoped */ - r = 1; /* raidx format */ + r = 1; /* radix format */ rs = 1 & ((1UL << 32) - 1); /* any LPID value to flush guest mappings */ asm volatile("ptesync": : :"memory"); -- cgit v1.2.3 From d41ce7b1bcc3e1d02cc9da3b83c0fe355fcb68e0 Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Tue, 13 Feb 2018 16:51:35 +1100 Subject: powerpc/powernv/npu: Do not try invalidating 32bit table when 64bit table is enabled GPUs and the corresponding NVLink bridges get different PEs as they have separate translation validation entries (TVEs). We put these PEs to the same IOMMU group so they cannot be passed through separately. So the iommu_table_group_ops::set_window/unset_window for GPUs do set tables to the NPU PEs as well which means that iommu_table's list of attached PEs (iommu_table_group_link) has both GPU and NPU PEs linked. This list is used for TCE cache invalidation. The problem is that NPU PE has just a single TVE and can be programmed to point to 32bit or 64bit windows while GPU PE has two (as any other PCI device). So we end up having an 32bit iommu_table struct linked to both PEs even though only the 64bit TCE table cache can be invalidated on NPU. And a relatively recent skiboot detects this and prints errors. This changes GPU's iommu_table_group_ops::set_window/unset_window to make sure that NPU PE is only linked to the table actually used by the hardware. If there are two tables used by an IOMMU group, the NPU PE will use the last programmed one which with the current use scenarios is expected to be a 64bit one. Signed-off-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/powernv/pci-ioda.c | 27 ++++++++++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 6c307f0650bb..3f9c69d7623a 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -2681,14 +2681,23 @@ static struct pnv_ioda_pe *gpe_table_group_to_npe( static long pnv_pci_ioda2_npu_set_window(struct iommu_table_group *table_group, int num, struct iommu_table *tbl) { + struct pnv_ioda_pe *npe = gpe_table_group_to_npe(table_group); + int num2 = (num == 0) ? 1 : 0; long ret = pnv_pci_ioda2_set_window(table_group, num, tbl); if (ret) return ret; - ret = pnv_npu_set_window(gpe_table_group_to_npe(table_group), num, tbl); - if (ret) + if (table_group->tables[num2]) + pnv_npu_unset_window(npe, num2); + + ret = pnv_npu_set_window(npe, num, tbl); + if (ret) { pnv_pci_ioda2_unset_window(table_group, num); + if (table_group->tables[num2]) + pnv_npu_set_window(npe, num2, + table_group->tables[num2]); + } return ret; } @@ -2697,12 +2706,24 @@ static long pnv_pci_ioda2_npu_unset_window( struct iommu_table_group *table_group, int num) { + struct pnv_ioda_pe *npe = gpe_table_group_to_npe(table_group); + int num2 = (num == 0) ? 
1 : 0; long ret = pnv_pci_ioda2_unset_window(table_group, num); if (ret) return ret; - return pnv_npu_unset_window(gpe_table_group_to_npe(table_group), num); + if (!npe->table_group.tables[num]) + return 0; + + ret = pnv_npu_unset_window(npe, num); + if (ret) + return ret; + + if (table_group->tables[num2]) + ret = pnv_npu_set_window(npe, num2, table_group->tables[num2]); + + return ret; } static void pnv_ioda2_npu_take_ownership(struct iommu_table_group *table_group) -- cgit v1.2.3 From 68701780712f7ddb2fa81032aa1b4a949949ddf8 Mon Sep 17 00:00:00 2001 From: Sam Bobroff Date: Mon, 19 Mar 2018 13:46:20 +1100 Subject: powerpc/eeh: Remove eeh_handle_event() The function eeh_handle_event(pe) does nothing other than switching between calling eeh_handle_normal_event(pe) and eeh_handle_special_event(). However it is only called in two places, one where pe can't be NULL and the other where it must be NULL (see eeh_event_handler()) so it does nothing but obscure the flow of control. So, remove it. Signed-off-by: Sam Bobroff Reviewed-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/eeh_event.h | 3 ++- arch/powerpc/kernel/eeh_driver.c | 42 +++++++++++++----------------------- arch/powerpc/kernel/eeh_event.c | 4 ++-- 3 files changed, 19 insertions(+), 30 deletions(-) diff --git a/arch/powerpc/include/asm/eeh_event.h b/arch/powerpc/include/asm/eeh_event.h index 1e551a2d6f82..0a168038882d 100644 --- a/arch/powerpc/include/asm/eeh_event.h +++ b/arch/powerpc/include/asm/eeh_event.h @@ -34,7 +34,8 @@ struct eeh_event { int eeh_event_init(void); int eeh_send_failure_event(struct eeh_pe *pe); void eeh_remove_event(struct eeh_pe *pe, bool force); -void eeh_handle_event(struct eeh_pe *pe); +bool eeh_handle_normal_event(struct eeh_pe *pe); +void eeh_handle_special_event(void); #endif /* __KERNEL__ */ #endif /* ASM_POWERPC_EEH_EVENT_H */ diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index 0c0b66fc5bfb..51b21c97910f 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -738,9 +738,22 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus, * Attempts to recover the given PE. If recovery fails or the PE has failed * too many times, remove the PE. * + * While PHB detects address or data parity errors on particular PCI + * slot, the associated PE will be frozen. Besides, DMA's occurring + * to wild addresses (which usually happen due to bugs in device + * drivers or in PCI adapter firmware) can cause EEH error. #SERR, + * #PERR or other misc PCI-related errors also can trigger EEH errors. + * + * Recovery process consists of unplugging the device driver (which + * generated hotplug events to userspace), then issuing a PCI #RST to + * the device, then reconfiguring the PCI config space for all bridges + * & devices under this slot, and then finally restarting the device + * drivers (which cause a second set of hotplug events to go out to + * userspace). + * * Returns true if @pe should no longer be used, else false. */ -static bool eeh_handle_normal_event(struct eeh_pe *pe) +bool eeh_handle_normal_event(struct eeh_pe *pe) { struct pci_bus *frozen_bus; struct eeh_dev *edev, *tmp; @@ -942,7 +955,7 @@ hard_fail: * specific PE. Iterates through possible failures and handles them as * necessary. 
*/ -static void eeh_handle_special_event(void) +void eeh_handle_special_event(void) { struct eeh_pe *pe, *phb_pe; struct pci_bus *bus; @@ -1049,28 +1062,3 @@ static void eeh_handle_special_event(void) break; } while (rc != EEH_NEXT_ERR_NONE); } - -/** - * eeh_handle_event - Reset a PCI device after hard lockup. - * @pe: EEH PE - * - * While PHB detects address or data parity errors on particular PCI - * slot, the associated PE will be frozen. Besides, DMA's occurring - * to wild addresses (which usually happen due to bugs in device - * drivers or in PCI adapter firmware) can cause EEH error. #SERR, - * #PERR or other misc PCI-related errors also can trigger EEH errors. - * - * Recovery process consists of unplugging the device driver (which - * generated hotplug events to userspace), then issuing a PCI #RST to - * the device, then reconfiguring the PCI config space for all bridges - * & devices under this slot, and then finally restarting the device - * drivers (which cause a second set of hotplug events to go out to - * userspace). - */ -void eeh_handle_event(struct eeh_pe *pe) -{ - if (pe) - eeh_handle_normal_event(pe); - else - eeh_handle_special_event(); -} diff --git a/arch/powerpc/kernel/eeh_event.c b/arch/powerpc/kernel/eeh_event.c index accbf8b5fd46..872bcfe8f90e 100644 --- a/arch/powerpc/kernel/eeh_event.c +++ b/arch/powerpc/kernel/eeh_event.c @@ -81,10 +81,10 @@ static int eeh_event_handler(void * dummy) pr_info("EEH: Detected PCI bus error on " "PHB#%x-PE#%x\n", pe->phb->global_number, pe->addr); - eeh_handle_event(pe); + eeh_handle_normal_event(pe); eeh_pe_state_clear(pe, EEH_PE_RECOVERING); } else { - eeh_handle_event(NULL); + eeh_handle_special_event(); } kfree(event); -- cgit v1.2.3 From 37fd8125873031d5c450293bce827c3bf397287d Mon Sep 17 00:00:00 2001 From: Sam Bobroff Date: Mon, 19 Mar 2018 13:46:30 +1100 Subject: powerpc/eeh: Manage EEH_PE_RECOVERING inside eeh_handle_normal_event() Currently the EEH_PE_RECOVERING flag for a PE is managed by both the caller and callee of eeh_handle_normal_event() (among other places not considered here). This is complicated by the fact that the PE may or may not have been invalidated by the call. So move the callee's handling into eeh_handle_normal_event(), which clarifies it and allows the return type to be changed to void (because it no longer needs to indicate that the PE has been invalidated). This should not change behaviour except in eeh_event_handler() where it was previously possible to cause eeh_pe_state_clear() to be called on an invalid PE, which is now avoided.
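A hedged condensation of the resulting flag ownership (recover() and teardown() are hypothetical stand-ins for the real recovery and removal steps):

    void eeh_handle_normal_event(struct eeh_pe *pe)
    {
            if (!eeh_pe_bus_get(pe))
                    return;                         /* no bus: flag is never set */

            eeh_pe_state_mark(pe, EEH_PE_RECOVERING);

            if (!recover(pe) && teardown(pe))       /* hypothetical helpers */
                    return;                         /* PE invalidated: don't touch it */

            eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
    }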
Signed-off-by: Sam Bobroff Reviewed-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/eeh_event.h | 2 +- arch/powerpc/kernel/eeh_driver.c | 29 +++++++++++------------------ arch/powerpc/kernel/eeh_event.c | 2 -- 3 files changed, 12 insertions(+), 21 deletions(-) diff --git a/arch/powerpc/include/asm/eeh_event.h b/arch/powerpc/include/asm/eeh_event.h index 0a168038882d..9884e872686f 100644 --- a/arch/powerpc/include/asm/eeh_event.h +++ b/arch/powerpc/include/asm/eeh_event.h @@ -34,7 +34,7 @@ struct eeh_event { int eeh_event_init(void); int eeh_send_failure_event(struct eeh_pe *pe); void eeh_remove_event(struct eeh_pe *pe, bool force); -bool eeh_handle_normal_event(struct eeh_pe *pe); +void eeh_handle_normal_event(struct eeh_pe *pe); void eeh_handle_special_event(void); #endif /* __KERNEL__ */ diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index 51b21c97910f..5b7a5ed4db4d 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -733,7 +733,8 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus, /** * eeh_handle_normal_event - Handle EEH events on a specific PE - * @pe: EEH PE + * @pe: EEH PE - which should not be used after we return, as it may + * have been invalidated. * * Attempts to recover the given PE. If recovery fails or the PE has failed * too many times, remove the PE. @@ -750,10 +751,8 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus, * & devices under this slot, and then finally restarting the device * drivers (which cause a second set of hotplug events to go out to * userspace). - * - * Returns true if @pe should no longer be used, else false. */ -bool eeh_handle_normal_event(struct eeh_pe *pe) +void eeh_handle_normal_event(struct eeh_pe *pe) { struct pci_bus *frozen_bus; struct eeh_dev *edev, *tmp; @@ -765,9 +764,11 @@ bool eeh_handle_normal_event(struct eeh_pe *pe) if (!frozen_bus) { pr_err("%s: Cannot find PCI bus for PHB#%x-PE#%x\n", __func__, pe->phb->global_number, pe->addr); - return false; + return; } + eeh_pe_state_mark(pe, EEH_PE_RECOVERING); + eeh_pe_update_time_stamp(pe); pe->freeze_count++; if (pe->freeze_count > eeh_max_freezes) { @@ -904,7 +905,7 @@ bool eeh_handle_normal_event(struct eeh_pe *pe) pr_info("EEH: Notify device driver to resume\n"); eeh_pe_dev_traverse(pe, eeh_report_resume, NULL); - return false; + goto final; hard_fail: /* @@ -940,12 +941,12 @@ hard_fail: pci_lock_rescan_remove(); pci_hp_remove_devices(frozen_bus); pci_unlock_rescan_remove(); - /* The passed PE should no longer be used */ - return true; + return; } } - return false; +final: + eeh_pe_state_clear(pe, EEH_PE_RECOVERING); } /** @@ -1018,15 +1019,7 @@ void eeh_handle_special_event(void) */ if (rc == EEH_NEXT_ERR_FROZEN_PE || rc == EEH_NEXT_ERR_FENCED_PHB) { - /* - * eeh_handle_normal_event() can make the PE stale if it - * determines that the PE cannot possibly be recovered. - * Don't modify the PE state if that's the case. 
- */ - if (eeh_handle_normal_event(pe)) - continue; - - eeh_pe_state_clear(pe, EEH_PE_RECOVERING); + eeh_handle_normal_event(pe); } else { pci_lock_rescan_remove(); list_for_each_entry(hose, &hose_list, list_node) { diff --git a/arch/powerpc/kernel/eeh_event.c b/arch/powerpc/kernel/eeh_event.c index 872bcfe8f90e..61c9356bf9c9 100644 --- a/arch/powerpc/kernel/eeh_event.c +++ b/arch/powerpc/kernel/eeh_event.c @@ -73,7 +73,6 @@ static int eeh_event_handler(void * dummy) /* We might have event without binding PE */ pe = event->pe; if (pe) { - eeh_pe_state_mark(pe, EEH_PE_RECOVERING); if (pe->type & EEH_PE_PHB) pr_info("EEH: Detected error on PHB#%x\n", pe->phb->global_number); @@ -82,7 +81,6 @@ static int eeh_event_handler(void * dummy) "PHB#%x-PE#%x\n", pe->phb->global_number, pe->addr); eeh_handle_normal_event(pe); - eeh_pe_state_clear(pe, EEH_PE_RECOVERING); } else { eeh_handle_special_event(); } -- cgit v1.2.3 From 63457b144b0e3dccb9482bfe7506deb0b958e20d Mon Sep 17 00:00:00 2001 From: Sam Bobroff Date: Mon, 19 Mar 2018 13:46:40 +1100 Subject: powerpc/eeh: Fix misleading comment in __eeh_addr_cache_get_device() Commit "0ba178888b05 powerpc/eeh: Remove reference to PCI device" removed a call to pci_dev_get() from __eeh_addr_cache_get_device() but did not update the comment to match. Signed-off-by: Sam Bobroff Reviewed-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/eeh_cache.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/powerpc/kernel/eeh_cache.c b/arch/powerpc/kernel/eeh_cache.c index d4cc26618809..201943d54a6e 100644 --- a/arch/powerpc/kernel/eeh_cache.c +++ b/arch/powerpc/kernel/eeh_cache.c @@ -84,8 +84,7 @@ static inline struct eeh_dev *__eeh_addr_cache_get_device(unsigned long addr) * @addr: mmio (PIO) phys address or i/o port number * * Given an mmio phys address, or a port number, find a pci device - * that implements this address. Be sure to pci_dev_put the device - * when finished. I/O port numbers are assumed to be offset + * that implements this address. I/O port numbers are assumed to be offset * from zero (that is, they do *not* have pci_io_addr added in). * It is safe to call this function within an interrupt. */ -- cgit v1.2.3 From 5b86ac9e91715224ae9e8ee2b544f7392b7fc8b2 Mon Sep 17 00:00:00 2001 From: Sam Bobroff Date: Mon, 19 Mar 2018 13:46:51 +1100 Subject: powerpc/eeh: Remove misleading test in eeh_handle_normal_event() Remove a test that checks if "frozen_bus" is NULL, because it cannot have changed since it was tested at the start of the function and so must be true here. Signed-off-by: Sam Bobroff Reviewed-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/eeh_driver.c | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index 5b7a5ed4db4d..04a5d9db5499 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -930,20 +930,18 @@ hard_fail: * all removed devices correctly to avoid access * the their PCI config any more. 
*/ - if (frozen_bus) { - if (pe->type & EEH_PE_VF) { - eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL); - eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED); - } else { - eeh_pe_state_clear(pe, EEH_PE_PRI_BUS); - eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED); + if (pe->type & EEH_PE_VF) { + eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL); + eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED); + } else { + eeh_pe_state_clear(pe, EEH_PE_PRI_BUS); + eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED); - pci_lock_rescan_remove(); - pci_hp_remove_devices(frozen_bus); - pci_unlock_rescan_remove(); - /* The passed PE should no longer be used */ - return; - } + pci_lock_rescan_remove(); + pci_hp_remove_devices(frozen_bus); + pci_unlock_rescan_remove(); + /* The passed PE should no longer be used */ + return; } final: eeh_pe_state_clear(pe, EEH_PE_RECOVERING); -- cgit v1.2.3 From cd95f804ac49c457c483309a58362acbc8654063 Mon Sep 17 00:00:00 2001 From: Sam Bobroff Date: Mon, 19 Mar 2018 13:47:02 +1100 Subject: powerpc/eeh: Rename frozen_bus to bus in eeh_handle_normal_event() The name "frozen_bus" is misleading: it's not necessarily frozen, it's just the PE's PCI bus. Signed-off-by: Sam Bobroff Reviewed-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/eeh_driver.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index 04a5d9db5499..cb584d72b0a5 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -754,14 +754,14 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus, */ void eeh_handle_normal_event(struct eeh_pe *pe) { - struct pci_bus *frozen_bus; + struct pci_bus *bus; struct eeh_dev *edev, *tmp; int rc = 0; enum pci_ers_result result = PCI_ERS_RESULT_NONE; struct eeh_rmv_data rmv_data = {LIST_HEAD_INIT(rmv_data.edev_list), 0}; - frozen_bus = eeh_pe_bus_get(pe); - if (!frozen_bus) { + bus = eeh_pe_bus_get(pe); + if (!bus) { pr_err("%s: Cannot find PCI bus for PHB#%x-PE#%x\n", __func__, pe->phb->global_number, pe->addr); return; @@ -820,7 +820,7 @@ void eeh_handle_normal_event(struct eeh_pe *pe) */ if (result == PCI_ERS_RESULT_NONE) { pr_info("EEH: Reset with hotplug activity\n"); - rc = eeh_reset_device(pe, frozen_bus, NULL); + rc = eeh_reset_device(pe, bus, NULL); if (rc) { pr_warn("%s: Unable to reset, err=%d\n", __func__, rc); @@ -938,7 +938,7 @@ hard_fail: eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED); pci_lock_rescan_remove(); - pci_hp_remove_devices(frozen_bus); + pci_hp_remove_devices(bus); pci_unlock_rescan_remove(); /* The passed PE should no longer be used */ return; -- cgit v1.2.3 From 5fd13460af36f5c6c958165d697950e676fad7c6 Mon Sep 17 00:00:00 2001 From: Sam Bobroff Date: Mon, 19 Mar 2018 13:48:55 +1100 Subject: powerpc/eeh: Clarify arguments to eeh_reset_device() It is currently difficult to understand the behaviour of eeh_reset_device() due to the way its parameters are used. In particular, when 'bus' is NULL, its value is still necessary so the same value is looked up again locally under a different name ('frozen_bus') but behaviour is changed. To clarify this, add a new parameter 'driver_eeh_aware', and have the caller set it when it would have passed NULL for 'bus' and always pass a value for 'bus'. Then change any test that was on 'bus' to one on '!driver_eeh_aware' and replace uses of 'frozen_bus' with 'bus'. Also update the function's comment. This should not change behaviour.
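Condensed from the diff below, the two call sites become:

    /* Driver is not EEH aware: full hotplug-style reset. */
    rc = eeh_reset_device(pe, bus, NULL, false);

    /* Driver participates in EEH recovery: reset without hotplug. */
    rc = eeh_reset_device(pe, bus, &rmv_data, true);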
Signed-off-by: Sam Bobroff Reviewed-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/eeh_driver.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index cb584d72b0a5..07437d765434 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -619,17 +619,19 @@ int eeh_pe_reset_and_recover(struct eeh_pe *pe) /** * eeh_reset_device - Perform actual reset of a pci slot + * @driver_eeh_aware: Does the device's driver provide EEH support? * @pe: EEH PE * @bus: PCI bus corresponding to the isolcated slot + * @rmv_data: Optional, list to record removed devices * * This routine must be called to do reset on the indicated PE. * During the reset, udev might be invoked because those affected * PCI devices will be removed and then added. */ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus, - struct eeh_rmv_data *rmv_data) + struct eeh_rmv_data *rmv_data, + bool driver_eeh_aware) { - struct pci_bus *frozen_bus = eeh_pe_bus_get(pe); time64_t tstamp; int cnt, rc; struct eeh_dev *edev; @@ -645,7 +647,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus, * into pci_hp_add_devices(). */ eeh_pe_state_mark(pe, EEH_PE_KEEP); - if (bus) { + if (!driver_eeh_aware) { if (pe->type & EEH_PE_VF) { eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL); } else { @@ -653,7 +655,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus, pci_hp_remove_devices(bus); pci_unlock_rescan_remove(); } - } else if (frozen_bus) { + } else if (bus) { eeh_pe_dev_traverse(pe, eeh_rmv_device, rmv_data); } @@ -689,7 +691,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus, * the device up before the scripts have taken it down, * potentially weird things happen. 
*/ - if (bus) { + if (!driver_eeh_aware) { pr_info("EEH: Sleep 5s ahead of complete hotplug\n"); ssleep(5); /* @@ -706,7 +708,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus, eeh_pe_state_clear(pe, EEH_PE_PRI_BUS); pci_hp_add_devices(bus); } - } else if (frozen_bus && rmv_data->removed) { + } else if (bus && rmv_data->removed) { pr_info("EEH: Sleep 5s ahead of partial hotplug\n"); ssleep(5); @@ -715,7 +717,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus, if (pe->type & EEH_PE_VF) eeh_add_virt_device(edev, NULL); else - pci_hp_add_devices(frozen_bus); + pci_hp_add_devices(bus); } eeh_pe_state_clear(pe, EEH_PE_KEEP); @@ -820,7 +822,7 @@ void eeh_handle_normal_event(struct eeh_pe *pe) */ if (result == PCI_ERS_RESULT_NONE) { pr_info("EEH: Reset with hotplug activity\n"); - rc = eeh_reset_device(pe, bus, NULL); + rc = eeh_reset_device(pe, bus, NULL, false); if (rc) { pr_warn("%s: Unable to reset, err=%d\n", __func__, rc); @@ -872,7 +874,7 @@ void eeh_handle_normal_event(struct eeh_pe *pe) /* If any device called out for a reset, then reset the slot */ if (result == PCI_ERS_RESULT_NEED_RESET) { pr_info("EEH: Reset without hotplug activity\n"); - rc = eeh_reset_device(pe, NULL, &rmv_data); + rc = eeh_reset_device(pe, bus, &rmv_data, true); if (rc) { pr_warn("%s: Cannot reset, err=%d\n", __func__, rc); -- cgit v1.2.3 From d3136d771292b87cfc217a528aa50acec0b2b84f Mon Sep 17 00:00:00 2001 From: Sam Bobroff Date: Mon, 19 Mar 2018 13:49:04 +1100 Subject: powerpc/eeh: Remove always-true tests in eeh_reset_device() eeh_reset_device() tests the value of 'bus' more than once but the only caller, eeh_handle_normal_event(), does this test itself and will never pass NULL. So, remove the dead tests. This should not change behaviour. Signed-off-by: Sam Bobroff Reviewed-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/eeh_driver.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index 07437d765434..93fc22e791fa 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -655,7 +655,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus, pci_hp_remove_devices(bus); pci_unlock_rescan_remove(); } - } else if (bus) { + } else { eeh_pe_dev_traverse(pe, eeh_rmv_device, rmv_data); } @@ -708,7 +708,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus, eeh_pe_state_clear(pe, EEH_PE_PRI_BUS); pci_hp_add_devices(bus); } - } else if (bus && rmv_data->removed) { + } else if (rmv_data->removed) { pr_info("EEH: Sleep 5s ahead of partial hotplug\n"); ssleep(5); -- cgit v1.2.3 From 54048cf876615285363f6f6c21014bf2462ebe0d Mon Sep 17 00:00:00 2001 From: Sam Bobroff Date: Wed, 21 Mar 2018 13:06:40 +1100 Subject: powerpc/eeh: Factor out common code eeh_reset_device() The caller will always pass NULL for 'rmv_data' when 'driver_eeh_aware' is false, so the first two calls to eeh_pe_dev_traverse() can be combined without changing behaviour as can the two arms of the final 'if' block. This should not change behaviour.
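The merged form (condensed from the diff below) is safe because 'rmv_data' is NULL exactly when 'driver_eeh_aware' is false, so passing it through the combined traverse reproduces the old NULL argument in the VF case:

    if (driver_eeh_aware || (pe->type & EEH_PE_VF)) {
            eeh_pe_dev_traverse(pe, eeh_rmv_device, rmv_data);
    } else {
            pci_lock_rescan_remove();
            pci_hp_remove_devices(bus);
            pci_unlock_rescan_remove();
    }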
Signed-off-by: Sam Bobroff Reviewed-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/eeh_driver.c | 32 ++++++++++---------------------- 1 file changed, 10 insertions(+), 22 deletions(-) diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index 93fc22e791fa..43ceb6263cd8 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -647,16 +647,12 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus, * into pci_hp_add_devices(). */ eeh_pe_state_mark(pe, EEH_PE_KEEP); - if (!driver_eeh_aware) { - if (pe->type & EEH_PE_VF) { - eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL); - } else { - pci_lock_rescan_remove(); - pci_hp_remove_devices(bus); - pci_unlock_rescan_remove(); - } - } else { + if (driver_eeh_aware || (pe->type & EEH_PE_VF)) { eeh_pe_dev_traverse(pe, eeh_rmv_device, rmv_data); + } else { + pci_lock_rescan_remove(); + pci_hp_remove_devices(bus); + pci_unlock_rescan_remove(); } /* @@ -691,8 +687,9 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus, * the device up before the scripts have taken it down, * potentially weird things happen. */ - if (!driver_eeh_aware) { - pr_info("EEH: Sleep 5s ahead of complete hotplug\n"); + if (!driver_eeh_aware || rmv_data->removed) { + pr_info("EEH: Sleep 5s ahead of %s hotplug\n", + (driver_eeh_aware ? "partial" : "complete")); ssleep(5); /* @@ -705,19 +702,10 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus, if (pe->type & EEH_PE_VF) { eeh_add_virt_device(edev, NULL); } else { - eeh_pe_state_clear(pe, EEH_PE_PRI_BUS); + if (!driver_eeh_aware) + eeh_pe_state_clear(pe, EEH_PE_PRI_BUS); pci_hp_add_devices(bus); } - } else if (rmv_data->removed) { - pr_info("EEH: Sleep 5s ahead of partial hotplug\n"); - ssleep(5); - - edev = list_first_entry(&pe->edevs, struct eeh_dev, list); - eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL); - if (pe->type & EEH_PE_VF) - eeh_add_virt_device(edev, NULL); - else - pci_hp_add_devices(bus); } eeh_pe_state_clear(pe, EEH_PE_KEEP); -- cgit v1.2.3 From 34a286a4ac576d3d9ea3ac2bc7bbd4216a1f7ac7 Mon Sep 17 00:00:00 2001 From: Sam Bobroff Date: Mon, 19 Mar 2018 13:49:23 +1100 Subject: powerpc/eeh: Add eeh_state_active() helper Checking for a "fully active" device state requires testing two flag bits, which is open coded in several places, so add a function to do it. 
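For illustration, the open-coded test and its replacement (condensed from the diffs below):

    /* Before: two flag bits tested by hand at every site. */
    if ((ret & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) ==
        (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE))
            rc = 0;         /* PE fully active */

    /* After: one self-describing helper. */
    if (eeh_state_active(ret))
            rc = 0;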
Signed-off-by: Sam Bobroff Reviewed-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/eeh.h | 6 ++++++ arch/powerpc/kernel/eeh.c | 19 ++++++------------- arch/powerpc/platforms/powernv/eeh-powernv.c | 9 ++------- 3 files changed, 14 insertions(+), 20 deletions(-) diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h index fd37cc101f4f..c2266ca61853 100644 --- a/arch/powerpc/include/asm/eeh.h +++ b/arch/powerpc/include/asm/eeh.h @@ -256,6 +256,12 @@ static inline void eeh_serialize_unlock(unsigned long flags) raw_spin_unlock_irqrestore(&confirm_error_lock, flags); } +static inline bool eeh_state_active(int state) +{ + return (state & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) + == (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE); +} + typedef void *(*eeh_traverse_func)(void *data, void *flag); void eeh_set_pe_aux_size(int size); int eeh_phb_pe_create(struct pci_controller *phb); diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index 2b9df0040d6b..bc640e4c5ca5 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -394,9 +394,7 @@ static int eeh_phb_check_failure(struct eeh_pe *pe) /* Check PHB state */ ret = eeh_ops->get_state(phb_pe, NULL); if ((ret < 0) || - (ret == EEH_STATE_NOT_SUPPORT) || - (ret & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) == - (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) { + (ret == EEH_STATE_NOT_SUPPORT) || eeh_state_active(ret)) { ret = 0; goto out; } @@ -433,7 +431,6 @@ out: int eeh_dev_check_failure(struct eeh_dev *edev) { int ret; - int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE); unsigned long flags; struct device_node *dn; struct pci_dev *dev; @@ -525,8 +522,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev) * state, PE is in good state. */ if ((ret < 0) || - (ret == EEH_STATE_NOT_SUPPORT) || - ((ret & active_flags) == active_flags)) { + (ret == EEH_STATE_NOT_SUPPORT) || eeh_state_active(ret)) { eeh_stats.false_positives++; pe->false_positives++; rc = 0; @@ -546,8 +542,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev) /* Frozen parent PE ? 
*/ ret = eeh_ops->get_state(parent_pe, NULL); - if (ret > 0 && - (ret & active_flags) != active_flags) + if (ret > 0 && !eeh_state_active(ret)) pe = parent_pe; /* Next parent level */ @@ -888,7 +883,6 @@ static void *eeh_set_dev_freset(void *data, void *flag) */ int eeh_pe_reset_full(struct eeh_pe *pe) { - int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE); int reset_state = (EEH_PE_RESET | EEH_PE_CFG_BLOCKED); int type = EEH_RESET_HOT; unsigned int freset = 0; @@ -919,7 +913,7 @@ int eeh_pe_reset_full(struct eeh_pe *pe) /* Wait until the PE is in a functioning state */ state = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC); - if ((state & active_flags) == active_flags) + if (eeh_state_active(state)) break; if (state < 0) { @@ -1352,16 +1346,15 @@ static int eeh_pe_change_owner(struct eeh_pe *pe) struct eeh_dev *edev, *tmp; struct pci_dev *pdev; struct pci_device_id *id; - int flags, ret; + int ret; /* Check PE state */ - flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE); ret = eeh_ops->get_state(pe, NULL); if (ret < 0 || ret == EEH_STATE_NOT_SUPPORT) return 0; /* Unfrozen PE, nothing to do */ - if ((ret & flags) == flags) + if (eeh_state_active(ret)) return 0; /* Frozen PE, check if it needs PE level reset */ diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c index 33c86c1a1720..ddfc3544d285 100644 --- a/arch/powerpc/platforms/powernv/eeh-powernv.c +++ b/arch/powerpc/platforms/powernv/eeh-powernv.c @@ -1425,11 +1425,8 @@ static int pnv_eeh_get_pe(struct pci_controller *hose, dev_pe = dev_pe->parent; while (dev_pe && !(dev_pe->type & EEH_PE_PHB)) { int ret; - int active_flags = (EEH_STATE_MMIO_ACTIVE | - EEH_STATE_DMA_ACTIVE); - ret = eeh_ops->get_state(dev_pe, NULL); - if (ret <= 0 || (ret & active_flags) == active_flags) { + if (ret <= 0 || eeh_state_active(ret)) { dev_pe = dev_pe->parent; continue; } @@ -1463,7 +1460,6 @@ static int pnv_eeh_next_error(struct eeh_pe **pe) struct eeh_pe *phb_pe, *parent_pe; __be64 frozen_pe_no; __be16 err_type, severity; - int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE); long rc; int state, ret = EEH_NEXT_ERR_NONE; @@ -1626,8 +1622,7 @@ static int pnv_eeh_next_error(struct eeh_pe **pe) /* Frozen parent PE ? */ state = eeh_ops->get_state(parent_pe, NULL); - if (state > 0 && - (state & active_flags) != active_flags) + if (state > 0 && !eeh_state_active(state)) *pe = parent_pe; /* Next parent level */ -- cgit v1.2.3 From 404b27d66ed657ebccb08a9c8f8f65523e9b666b Mon Sep 17 00:00:00 2001 From: Michael Neuling Date: Tue, 27 Mar 2018 15:37:17 +1100 Subject: powerpc: Add ppc_breakpoint_available() Add ppc_breakpoint_available() to determine if a breakpoint is available currently via the DAWR or DABR. 
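A hedged usage sketch (the caller here is hypothetical; the helper itself is in the diff below):

    #include <asm/debug.h>

    static int example_install_watchpoint(struct arch_hw_breakpoint *brk)
    {
            /* On a POWER9 with the DAWR disabled there is no usable
               hardware breakpoint, so bail out early. */
            if (!ppc_breakpoint_available())
                    return -ENODEV;

            __set_breakpoint(brk);
            return 0;
    }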
Signed-off-by: Michael Neuling Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/debug.h | 1 + arch/powerpc/kernel/process.c | 12 ++++++++++++ 2 files changed, 13 insertions(+) diff --git a/arch/powerpc/include/asm/debug.h b/arch/powerpc/include/asm/debug.h index fc97404de0a3..ce5da214ffe5 100644 --- a/arch/powerpc/include/asm/debug.h +++ b/arch/powerpc/include/asm/debug.h @@ -47,6 +47,7 @@ static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; } void set_breakpoint(struct arch_hw_breakpoint *brk); void __set_breakpoint(struct arch_hw_breakpoint *brk); +bool ppc_breakpoint_available(void); #ifdef CONFIG_PPC_ADV_DEBUG_REGS extern void do_send_trap(struct pt_regs *regs, unsigned long address, unsigned long error_code, int brkpt); diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 1738c4127b32..4466e3db46d4 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -827,6 +827,18 @@ void set_breakpoint(struct arch_hw_breakpoint *brk) preempt_enable(); } +/* Check if we have DAWR or DABR hardware */ +bool ppc_breakpoint_available(void) +{ + if (cpu_has_feature(CPU_FTR_DAWR)) + return true; /* POWER8 DAWR */ + if (cpu_has_feature(CPU_FTR_ARCH_207S)) + return false; /* POWER9 with DAWR disabled */ + /* DABR: Everything but POWER8 and POWER9 */ + return true; +} +EXPORT_SYMBOL_GPL(ppc_breakpoint_available); + #ifdef CONFIG_PPC64 DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array); #endif -- cgit v1.2.3 From 85ce9a5d57bec126d19610d6e77f9e6e4eaea635 Mon Sep 17 00:00:00 2001 From: Michael Neuling Date: Tue, 27 Mar 2018 15:37:18 +1100 Subject: powerpc: Update ptrace to use ppc_breakpoint_available() This updates the ptrace code to use ppc_breakpoint_available(). We now advertise zero breakpoints via PPC_PTRACE_GETHWDBGINFO when the DAWR is missing (i.e. POWER9). This results in GDB falling back to software emulation of the breakpoint (which is slow). For the features advertised by PPC_PTRACE_GETHWDBGINFO, we keep advertising the DAWR: if we don't, GDB assumes one breakpoint irrespective of the number of breakpoints advertised, and then fails later when trying to set that one breakpoint. Signed-off-by: Michael Neuling Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/hw_breakpoint.c | 3 +++ arch/powerpc/kernel/ptrace.c | 16 ++++++++++++++-- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index 53b9c1dfd7d9..4c1012b80d3b 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ b/arch/powerpc/kernel/hw_breakpoint.c @@ -33,6 +33,7 @@ #include #include #include +#include #include /* @@ -171,6 +172,8 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) * HW_BREAKPOINT_ALIGN by rounding off to the lower address, the * 'symbolsize' should satisfy the check below.
*/ + if (!ppc_breakpoint_available()) + return -ENODEV; length_max = 8; /* DABR */ if (cpu_has_feature(CPU_FTR_DAWR)) { length_max = 512 ; /* 64 doublewords */ diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index ca72d7391d40..d23cf632edf0 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -41,6 +41,7 @@ #include #include #include +#include #define CREATE_TRACE_POINTS #include @@ -2378,6 +2379,7 @@ static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, struct perf_event_attr attr; #endif /* CONFIG_HAVE_HW_BREAKPOINT */ #ifndef CONFIG_PPC_ADV_DEBUG_REGS + bool set_bp = true; struct arch_hw_breakpoint hw_brk; #endif @@ -2411,9 +2413,10 @@ static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, hw_brk.address = data & (~HW_BRK_TYPE_DABR); hw_brk.type = (data & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL; hw_brk.len = 8; + set_bp = (data) && (hw_brk.type & HW_BRK_TYPE_RDWR); #ifdef CONFIG_HAVE_HW_BREAKPOINT bp = thread->ptrace_bps[0]; - if ((!data) || !(hw_brk.type & HW_BRK_TYPE_RDWR)) { + if (!set_bp) { if (bp) { unregister_hw_breakpoint(bp); thread->ptrace_bps[0] = NULL; @@ -2450,6 +2453,9 @@ static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, return PTR_ERR(bp); } +#else /* !CONFIG_HAVE_HW_BREAKPOINT */ + if (set_bp && (!ppc_breakpoint_available())) + return -ENODEV; #endif /* CONFIG_HAVE_HW_BREAKPOINT */ task->thread.hw_brk = hw_brk; #else /* CONFIG_PPC_ADV_DEBUG_REGS */ @@ -2904,6 +2910,9 @@ static long ppc_set_hwdebug(struct task_struct *child, if (child->thread.hw_brk.address) return -ENOSPC; + if (!ppc_breakpoint_available()) + return -ENODEV; + child->thread.hw_brk = brk; return 1; @@ -3052,7 +3061,10 @@ long arch_ptrace(struct task_struct *child, long request, #endif #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ dbginfo.num_instruction_bps = 0; - dbginfo.num_data_bps = 1; + if (ppc_breakpoint_available()) + dbginfo.num_data_bps = 1; + else + dbginfo.num_data_bps = 0; dbginfo.num_condition_regs = 0; #ifdef CONFIG_PPC64 dbginfo.data_bp_alignment = 8; -- cgit v1.2.3 From 9bc2bd5d9d8d3eddf410075e2eea70bb493dfa26 Mon Sep 17 00:00:00 2001 From: Michael Neuling Date: Tue, 27 Mar 2018 15:37:19 +1100 Subject: powerpc: Update xmon to use ppc_breakpoint_available() The 'bd' command will now print an error and not set the breakpoint on P9. Signed-off-by: Michael Neuling [mpe: Unsplit quoted string] Signed-off-by: Michael Ellerman --- arch/powerpc/xmon/xmon.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 82e1a3ee6e0f..b481f9f48489 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -1297,6 +1297,10 @@ bpt_cmds(void) static const char badaddr[] = "Only kernel addresses are permitted for breakpoints\n"; int mode; case 'd': /* bd - hardware data breakpoint */ + if (!ppc_breakpoint_available()) { + printf("Hardware data breakpoint not supported on this cpu\n"); + break; + } mode = 7; cmd = inchar(); if (cmd == 'r') -- cgit v1.2.3 From 398e712c007fbd0bf996d25eb6b39d8314c50db4 Mon Sep 17 00:00:00 2001 From: Michael Neuling Date: Tue, 27 Mar 2018 15:37:20 +1100 Subject: KVM: PPC: Book3S HV: Return error from h_set_mode(SET_DAWR) on POWER9 Return H_P2 on a h_set_mode(SET_DAWR) on POWER9 where the DAWR is disabled. Current Linux guests ignore this error, so they will silently not get the DAWR (sigh). The same error code is being used by POWERVM in this case. 
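As a hedged sketch of the guest-visible behaviour (example_guest_set_dawr() is hypothetical; plpar_set_mode() is the existing pseries wrapper around H_SET_MODE, and mflags must be zero for SET_DAWR):

/*
 * Hypothetical guest-side call: on a POWER9 hypervisor with the DAWR
 * disabled, H_SET_MODE(SET_DAWR) now fails with H_P2.
 */
static int example_guest_set_dawr(unsigned long dawr, unsigned long dawrx)
{
	long rc;

	rc = plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_DAWR, dawr, dawrx);
	if (rc == H_P2)
		return -ENODEV;	/* no usable DAWR on this hypervisor */

	return rc == H_SUCCESS ? 0 : -EIO;
}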
Signed-off-by: Michael Neuling Signed-off-by: Michael Ellerman --- arch/powerpc/kvm/book3s_hv.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index a043bde4952c..55c1022733c3 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -741,6 +741,8 @@ static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags, case H_SET_MODE_RESOURCE_SET_DAWR: if (!kvmppc_power8_compatible(vcpu)) return H_P2; + if (!ppc_breakpoint_available()) + return H_P2; if (mflags) return H_UNSUPPORTED_FLAG_START; if (value2 & DABRX_HYP) -- cgit v1.2.3 From e8ebedbf3131ce2db0c7092a27f752ab365eef53 Mon Sep 17 00:00:00 2001 From: Michael Neuling Date: Tue, 27 Mar 2018 15:37:21 +1100 Subject: KVM: PPC: Book3S HV: Return error from h_set_dabr() on POWER9 POWER7 compat mode guests can use h_set_dabr on POWER9. POWER9 should use the DAWR but since it's disabled there we can't. This returns H_UNSUPPORTED on a h_set_dabr() on POWER9 where the DAWR is disabled. Current Linux guests ignore this error, so they will silently not get the DAWR (sigh). The same error code is being used by POWERVM in this case. Signed-off-by: Michael Neuling Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/hvcall.h | 1 + arch/powerpc/kvm/book3s_hv_rmhandlers.S | 8 +++++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index eca3f9c68907..e87d465af4f8 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -88,6 +88,7 @@ #define H_P8 -61 #define H_P9 -62 #define H_TOO_BIG -64 +#define H_UNSUPPORTED -67 #define H_OVERLAP -68 #define H_INTERRUPT -69 #define H_BAD_DATA -70 diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 736809fba912..3d1023b03d5a 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -2563,8 +2563,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) li r3,0 blr +2: +BEGIN_FTR_SECTION + /* POWER9 with disabled DAWR */ + li r3, H_UNSUPPORTED + blr +END_FTR_SECTION_IFCLR(CPU_FTR_DAWR) /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */ -2: rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW + rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW rlwimi r5, r4, 2, DAWRX_WT clrrdi r4, r4, 3 std r4, VCPU_DAWR(r3) -- cgit v1.2.3 From b53221e7042764d7456933d47a83b31372ce9dac Mon Sep 17 00:00:00 2001 From: Michael Neuling Date: Tue, 27 Mar 2018 15:37:22 +1100 Subject: KVM: PPC: Book3S HV: Handle migration with POWER9 disabled DAWR POWER9 with the DAWR disabled causes problems for partition migration. Either we have to fail the migration (since we lose the DAWR) or we silently drop the DAWR and allow the migration to pass. This patch does the latter and allows the migration to pass (at the cost of silently losing the DAWR). This is not ideal but hopefully the best overall solution. This approach has been acked by Paulus. With this patch kvmppc_set_one_reg() will store the DAWR in the vcpu but won't actually set it on POWER9 hardware. 
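A rough C rendering of the resulting semantics; the actual change below is assembly, and example_restore_dawr() is a hypothetical simplification of the feature section it adds:

/*
 * Hypothetical sketch: the vcpu's DAWR value is always stored (so it
 * survives migration), but the SPRs are only written on CPUs with a
 * working DAWR.
 */
static void example_restore_dawr(struct kvm_vcpu *vcpu)
{
	if (!cpu_has_feature(CPU_FTR_DAWR))
		return;	/* POWER9: keep the value, skip the broken register */

	mtspr(SPRN_DAWR, vcpu->arch.dawr);
	mtspr(SPRN_DAWRX, vcpu->arch.dawrx);
}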
Signed-off-by: Michael Neuling Signed-off-by: Michael Ellerman --- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 3d1023b03d5a..af1772169eff 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -892,8 +892,14 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) ld r6, VCPU_DAWRX(r4) ld r7, VCPU_CIABR(r4) ld r8, VCPU_TAR(r4) + /* + * Handle broken DAWR case by not writing it. This means we + * can still store the DAWR register for migration. + */ +BEGIN_FTR_SECTION mtspr SPRN_DAWR, r5 mtspr SPRN_DAWRX, r6 +END_FTR_SECTION_IFSET(CPU_FTR_DAWR) mtspr SPRN_CIABR, r7 mtspr SPRN_TAR, r8 ld r5, VCPU_IC(r4) @@ -1855,6 +1861,10 @@ BEGIN_FTR_SECTION ld r6, STACK_SLOT_DAWR(r1) ld r7, STACK_SLOT_DAWRX(r1) mtspr SPRN_CIABR, r5 + /* + * If the DAWR doesn't work, it's ok to write these here as + * this value should always be zero + */ mtspr SPRN_DAWR, r6 mtspr SPRN_DAWRX, r7 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) -- cgit v1.2.3 From 622aa35e8f6a077f034fe4ad053b6a2e9d278414 Mon Sep 17 00:00:00 2001 From: Michael Neuling Date: Tue, 27 Mar 2018 15:37:23 +1100 Subject: powerpc: Disable DAWR on POWER9 via CPU feature quirk This disables the DAWR on all POWER9 CPUs via cpu feature quirk. Using the DAWR on POWER9 can cause xstops, hence we need to disable it. Signed-off-by: Michael Neuling Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/dt_cpu_ftrs.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index 0a0c601c6ade..0af2c5dc5162 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -713,6 +713,9 @@ static __init void cpufeatures_cpu_quirks(void) else if ((version & 0xffffefff) == 0x004e0202) cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_HV_ASSIST | CPU_FTR_P9_TM_XER_SO_BUG; + + if ((version & 0xffff0000) == 0x004e0000) + cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR); } static void __init cpufeatures_setup_finished(void) -- cgit v1.2.3 From 9654153158d3e0684a1bdb76dbababdb7111d5a0 Mon Sep 17 00:00:00 2001 From: Michael Neuling Date: Tue, 27 Mar 2018 15:37:24 +1100 Subject: powerpc: Disable DAWR in the base POWER9 CPU features Using the DAWR on POWER9 can cause xstops, hence we need to disable it. 
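Taken together with the dt_cpu_ftrs change above, the pattern is a PVR-keyed quirk. As a standalone sketch (example_disable_dawr_quirk() is hypothetical; the mask and PVR value are the ones used in the quirk hunk above):

/* Sketch: clear CPU_FTR_DAWR for every POWER9 revision (PVR 0x004e). */
static void example_disable_dawr_quirk(void)
{
	unsigned int version = mfspr(SPRN_PVR);

	if ((version & 0xffff0000) == 0x004e0000)
		cur_cpu_spec->cpu_features &= ~CPU_FTR_DAWR;
}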
Signed-off-by: Michael Neuling Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/cputable.h | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index ecee84dea7e7..734ff976b82f 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -464,9 +464,8 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTR_DSCR | CPU_FTR_SAO | \ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \ - CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \ - CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | \ - CPU_FTR_PKEY) + CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \ + CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | CPU_FTR_PKEY) #define CPU_FTRS_POWER9_DD1 ((CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD1) & \ (~CPU_FTR_SAO)) #define CPU_FTRS_POWER9_DD2_0 CPU_FTRS_POWER9 -- cgit v1.2.3 From 0834d627fbea00c1444075eb3e448e1974da452d Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 30 Mar 2018 23:27:25 +1100 Subject: powerpc/mpic: Check if cpu_possible() in mpic_physmask() In mpic_physmask() we loop over all CPUs up to 32, then get the hard SMP processor id of that CPU. Currently that's possibly walking off the end of the paca array, but in a future patch we will change the paca array to be an array of pointers, and in that case we will get a NULL for missing CPUs and oops. e.g.: Unable to handle kernel paging request for data at address 0x88888888888888b8 Faulting instruction address: 0xc00000000004e380 Oops: Kernel access of bad area, sig: 11 [#1] ... NIP .mpic_set_affinity+0x60/0x1a0 LR .irq_do_set_affinity+0x48/0x100 Fix it by checking that the CPU is possible; this also fixes the code if there are gaps in the CPU numbering, which probably never happens on mpic systems, but who knows. Debugged-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/sysdev/mpic.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index 73067805300a..1d4e0ef658d3 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c @@ -626,7 +626,7 @@ static inline u32 mpic_physmask(u32 cpumask) int i; u32 mask = 0; - for (i = 0; i < min(32, NR_CPUS); ++i, cpumask >>= 1) + for (i = 0; i < min(32, NR_CPUS) && cpu_possible(i); ++i, cpumask >>= 1) mask |= (cpumask & 1) << get_hard_smp_processor_id(i); return mask; } -- cgit v1.2.3 From 8e0b634b132752ec3eba50afb952502b1a87d6ba Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 14 Feb 2018 01:08:11 +1000 Subject: powerpc/64s: Do not allocate lppaca if we are not virtualized The "lppaca" is a structure registered with the hypervisor. This is unnecessary when running on non-virtualised platforms. One field from the lppaca (pmcregs_in_use) is also used by the host, so move the host part out into the paca (lppaca field is still updated in guest mode).
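In outline, the pmcregs_in_use split works as below; this is a simplified restatement of the ppc_set_pmu_inuse() hunk that follows, with the CONFIG_PPC_PSERIES / CONFIG_KVM_BOOK3S_HV_POSSIBLE guards elided:

/*
 * Guests flag PMU use in the hypervisor-visible lppaca; bare-metal
 * hosts keep the flag in their own paca instead.
 */
static inline void example_set_pmu_inuse(int inuse)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		get_lppaca()->pmcregs_in_use = inuse;
	else
		get_paca()->pmcregs_in_use = inuse;
}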
Signed-off-by: Nicholas Piggin [mpe: Fix non-pseries build with some #ifdefs] Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/paca.h | 12 ++++++++++-- arch/powerpc/include/asm/plpar_wrappers.h | 4 ++++ arch/powerpc/include/asm/pmc.h | 13 ++++++++++++- arch/powerpc/kernel/asm-offsets.c | 5 +++++ arch/powerpc/kernel/paca.c | 16 +++++++++++++--- arch/powerpc/kvm/book3s_hv_interrupts.S | 3 +-- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 3 +-- 7 files changed, 46 insertions(+), 10 deletions(-) diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index b62c31037cad..6db5ab2a29a3 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -46,7 +46,10 @@ extern unsigned int debug_smp_processor_id(void); /* from linux/smp.h */ #define get_paca() local_paca #endif +#ifdef CONFIG_PPC_PSERIES #define get_lppaca() (get_paca()->lppaca_ptr) +#endif + #define get_slb_shadow() (get_paca()->slb_shadow_ptr) struct task_struct; @@ -58,7 +61,7 @@ struct task_struct; * processor. */ struct paca_struct { -#ifdef CONFIG_PPC_BOOK3S +#ifdef CONFIG_PPC_PSERIES /* * Because hw_cpu_id, unlike other paca fields, is accessed * routinely from other CPUs (from the IRQ code), we stick to @@ -67,7 +70,8 @@ struct paca_struct { */ struct lppaca *lppaca_ptr; /* Pointer to LpPaca for PLIC */ -#endif /* CONFIG_PPC_BOOK3S */ +#endif /* CONFIG_PPC_PSERIES */ + /* * MAGIC: the spinlock functions in arch/powerpc/lib/locks.c * load lock_token and paca_index with a single lwz @@ -160,10 +164,14 @@ struct paca_struct { u64 saved_msr; /* MSR saved here by enter_rtas */ u16 trap_save; /* Used when bad stack is encountered */ u8 irq_soft_mask; /* mask for irq soft masking */ + u8 soft_enabled; /* irq soft-enable flag */ u8 irq_happened; /* irq happened while soft-disabled */ u8 io_sync; /* writel() needs spin_unlock sync */ u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disable */ u8 nap_state_lost; /* NV GPR values lost in power7_idle */ +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE + u8 pmcregs_in_use; /* pseries puts this in lppaca */ +#endif u64 sprg_vdso; /* Saved user-visible sprg */ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM u64 tm_scratch; /* TM scratch area for reclaim */ diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h index 55eddf50d149..540785d01f96 100644 --- a/arch/powerpc/include/asm/plpar_wrappers.h +++ b/arch/powerpc/include/asm/plpar_wrappers.h @@ -2,6 +2,8 @@ #ifndef _ASM_POWERPC_PLPAR_WRAPPERS_H #define _ASM_POWERPC_PLPAR_WRAPPERS_H +#ifdef CONFIG_PPC_PSERIES + #include #include @@ -340,4 +342,6 @@ static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p) return rc; } +#endif /* CONFIG_PPC_PSERIES */ + #endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */ diff --git a/arch/powerpc/include/asm/pmc.h b/arch/powerpc/include/asm/pmc.h index 5a9ede4962cb..7ac3586c38ab 100644 --- a/arch/powerpc/include/asm/pmc.h +++ b/arch/powerpc/include/asm/pmc.h @@ -31,10 +31,21 @@ void ppc_enable_pmcs(void); #ifdef CONFIG_PPC_BOOK3S_64 #include +#include static inline void ppc_set_pmu_inuse(int inuse) { - get_lppaca()->pmcregs_in_use = inuse; +#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) + if (firmware_has_feature(FW_FEATURE_LPAR)) { +#ifdef CONFIG_PPC_PSERIES + get_lppaca()->pmcregs_in_use = inuse; +#endif + } else { +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE + get_paca()->pmcregs_in_use = inuse; +#endif + } +#endif } extern void power4_enable_pmcs(void); diff --git 
a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index ea5eb91b836e..bbde55f408c7 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -221,12 +221,17 @@ int main(void) OFFSET(PACA_EXMC, paca_struct, exmc); OFFSET(PACA_EXSLB, paca_struct, exslb); OFFSET(PACA_EXNMI, paca_struct, exnmi); +#ifdef CONFIG_PPC_PSERIES OFFSET(PACALPPACAPTR, paca_struct, lppaca_ptr); +#endif OFFSET(PACA_SLBSHADOWPTR, paca_struct, slb_shadow_ptr); OFFSET(SLBSHADOW_STACKVSID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid); OFFSET(SLBSHADOW_STACKESID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid); OFFSET(SLBSHADOW_SAVEAREA, slb_shadow, save_area); OFFSET(LPPACA_PMCINUSE, lppaca, pmcregs_in_use); +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE + OFFSET(PACA_PMCINUSE, paca_struct, pmcregs_in_use); +#endif OFFSET(LPPACA_DTLIDX, lppaca, dtl_idx); OFFSET(LPPACA_YIELDCOUNT, lppaca, yield_count); OFFSET(PACA_DTL_RIDX, paca_struct, dtl_ridx); diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index 95ffedf14885..5900540e2ff8 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -20,7 +20,7 @@ #include "setup.h" -#ifdef CONFIG_PPC_BOOK3S +#ifdef CONFIG_PPC_PSERIES /* * The structure which the hypervisor knows about - this structure @@ -47,6 +47,9 @@ static long __initdata lppaca_size; static void __init allocate_lppacas(int nr_cpus, unsigned long limit) { + if (early_cpu_has_feature(CPU_FTR_HVMODE)) + return; + if (nr_cpus <= NR_LPPACAS) return; @@ -60,6 +63,9 @@ static struct lppaca * __init new_lppaca(int cpu) { struct lppaca *lp; + if (early_cpu_has_feature(CPU_FTR_HVMODE)) + return NULL; + if (cpu < NR_LPPACAS) return &lppaca[cpu]; @@ -73,6 +79,9 @@ static void __init free_lppacas(void) { long new_size = 0, nr; + if (early_cpu_has_feature(CPU_FTR_HVMODE)) + return; + if (!lppaca_size) return; nr = num_possible_cpus() - NR_LPPACAS; @@ -157,9 +166,10 @@ EXPORT_SYMBOL(paca); void __init initialise_paca(struct paca_struct *new_paca, int cpu) { -#ifdef CONFIG_PPC_BOOK3S +#ifdef CONFIG_PPC_PSERIES new_paca->lppaca_ptr = new_lppaca(cpu); -#else +#endif +#ifdef CONFIG_PPC_BOOK3E new_paca->kernel_pgd = swapper_pg_dir; #endif new_paca->lock_token = 0x8000; diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S index dc54373c8780..0e8493033288 100644 --- a/arch/powerpc/kvm/book3s_hv_interrupts.S +++ b/arch/powerpc/kvm/book3s_hv_interrupts.S @@ -79,8 +79,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) li r5, 0 mtspr SPRN_MMCRA, r5 isync - ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */ - lbz r5, LPPACA_PMCINUSE(r3) + lbz r5, PACA_PMCINUSE(r13) /* is the host using the PMU? */ cmpwi r5, 0 beq 31f /* skip if not */ mfspr r5, SPRN_MMCR1 diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index f31f357b8c5a..a1c6ea26f568 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -113,8 +113,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) mtspr SPRN_SPRG_VDSO_WRITE,r3 /* Reload the host's PMU registers */ - ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */ - lbz r4, LPPACA_PMCINUSE(r3) + lbz r4, PACA_PMCINUSE(r13) /* is the host using the PMU? 
*/ cmpwi r4, 0 beq 23f /* skip if not */ BEGIN_FTR_SECTION -- cgit v1.2.3 From d2e60075a3d4422dc54b919f3b125d8066b839d4 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 14 Feb 2018 01:08:12 +1000 Subject: powerpc/64: Use array of paca pointers and allocate pacas individually Change the paca array into an array of pointers to pacas. Allocate pacas individually. This allows flexibility in where the PACAs are allocated. Future work will allocate them node-local. Platforms that don't have address limits on PACAs would be able to defer PACA allocations until later in boot rather than allocate all possible ones up-front then freeing unused. This is slightly more overhead (one additional indirection) for cross CPU paca references, but those aren't too common. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/kvm_ppc.h | 8 ++-- arch/powerpc/include/asm/lppaca.h | 2 +- arch/powerpc/include/asm/paca.h | 4 +- arch/powerpc/include/asm/smp.h | 4 +- arch/powerpc/kernel/crash.c | 2 +- arch/powerpc/kernel/head_64.S | 19 ++++---- arch/powerpc/kernel/machine_kexec_64.c | 22 ++++----- arch/powerpc/kernel/paca.c | 70 +++++++++++++++++++--------- arch/powerpc/kernel/setup_64.c | 23 ++++----- arch/powerpc/kernel/smp.c | 10 ++-- arch/powerpc/kernel/sysfs.c | 2 +- arch/powerpc/kvm/book3s_hv.c | 31 ++++++------ arch/powerpc/kvm/book3s_hv_builtin.c | 2 +- arch/powerpc/mm/tlb-radix.c | 2 +- arch/powerpc/platforms/85xx/smp.c | 8 ++-- arch/powerpc/platforms/cell/smp.c | 4 +- arch/powerpc/platforms/powernv/idle.c | 13 +++--- arch/powerpc/platforms/powernv/setup.c | 4 +- arch/powerpc/platforms/powernv/smp.c | 2 +- arch/powerpc/platforms/powernv/subcore.c | 2 +- arch/powerpc/platforms/pseries/hotplug-cpu.c | 2 +- arch/powerpc/platforms/pseries/lpar.c | 4 +- arch/powerpc/platforms/pseries/setup.c | 2 +- arch/powerpc/platforms/pseries/smp.c | 4 +- arch/powerpc/sysdev/xics/icp-native.c | 2 +- arch/powerpc/xmon/xmon.c | 2 +- 26 files changed, 143 insertions(+), 107 deletions(-) diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 7765a800ddae..b7d066b037da 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -436,15 +436,15 @@ struct openpic; extern void kvm_cma_reserve(void) __init; static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr) { - paca[cpu].kvm_hstate.xics_phys = (void __iomem *)addr; + paca_ptrs[cpu]->kvm_hstate.xics_phys = (void __iomem *)addr; } static inline void kvmppc_set_xive_tima(int cpu, unsigned long phys_addr, void __iomem *virt_addr) { - paca[cpu].kvm_hstate.xive_tima_phys = (void __iomem *)phys_addr; - paca[cpu].kvm_hstate.xive_tima_virt = virt_addr; + paca_ptrs[cpu]->kvm_hstate.xive_tima_phys = (void __iomem *)phys_addr; + paca_ptrs[cpu]->kvm_hstate.xive_tima_virt = virt_addr; } static inline u32 kvmppc_get_xics_latch(void) @@ -458,7 +458,7 @@ static inline u32 kvmppc_get_xics_latch(void) static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi) { - paca[cpu].kvm_hstate.host_ipi = host_ipi; + paca_ptrs[cpu]->kvm_hstate.host_ipi = host_ipi; } static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu) diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h index d0a2a2f99564..6e4589eee2da 100644 --- a/arch/powerpc/include/asm/lppaca.h +++ b/arch/powerpc/include/asm/lppaca.h @@ -103,7 +103,7 @@ struct lppaca { extern struct lppaca lppaca[]; -#define lppaca_of(cpu) (*paca[cpu].lppaca_ptr) +#define lppaca_of(cpu) 
(*paca_ptrs[cpu]->lppaca_ptr) /* * We are using a non architected field to determine if a partition is diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index 6db5ab2a29a3..e89887f5e56f 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -249,10 +249,10 @@ struct paca_struct { void *rfi_flush_fallback_area; u64 l1d_flush_size; #endif -}; +} ____cacheline_aligned; extern void copy_mm_to_paca(struct mm_struct *mm); -extern struct paca_struct *paca; +extern struct paca_struct **paca_ptrs; extern void initialise_paca(struct paca_struct *new_paca, int cpu); extern void setup_paca(struct paca_struct *new_paca); extern void allocate_pacas(void); diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h index fac963e10d39..ec7b299350d9 100644 --- a/arch/powerpc/include/asm/smp.h +++ b/arch/powerpc/include/asm/smp.h @@ -170,12 +170,12 @@ static inline const struct cpumask *cpu_sibling_mask(int cpu) #ifdef CONFIG_PPC64 static inline int get_hard_smp_processor_id(int cpu) { - return paca[cpu].hw_cpu_id; + return paca_ptrs[cpu]->hw_cpu_id; } static inline void set_hard_smp_processor_id(int cpu, int phys) { - paca[cpu].hw_cpu_id = phys; + paca_ptrs[cpu]->hw_cpu_id = phys; } #else /* 32-bit */ diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c index 00b215125d3e..17c8b99680f2 100644 --- a/arch/powerpc/kernel/crash.c +++ b/arch/powerpc/kernel/crash.c @@ -238,7 +238,7 @@ static void __maybe_unused crash_kexec_wait_realmode(int cpu) if (i == cpu) continue; - while (paca[i].kexec_state < KEXEC_STATE_REAL_MODE) { + while (paca_ptrs[i]->kexec_state < KEXEC_STATE_REAL_MODE) { barrier(); if (!cpu_possible(i) || !cpu_online(i) || (msecs <= 0)) break; diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index a61151a6ea5e..6eca15f25c73 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -392,19 +392,20 @@ generic_secondary_common_init: * physical cpu id in r24, we need to search the pacas to find * which logical id maps to our physical one. */ - LOAD_REG_ADDR(r13, paca) /* Load paca pointer */ - ld r13,0(r13) /* Get base vaddr of paca array */ #ifndef CONFIG_SMP - addi r13,r13,PACA_SIZE /* know r13 if used accidentally */ b kexec_wait /* wait for next kernel if !SMP */ #else + LOAD_REG_ADDR(r8, paca_ptrs) /* Load paca_ptrs pointer */ + ld r8,0(r8) /* Get base vaddr of array */ LOAD_REG_ADDR(r7, nr_cpu_ids) /* Load nr_cpu_ids address */ lwz r7,0(r7) /* also the max paca allocated */ li r5,0 /* logical cpu id */ -1: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */ +1: + sldi r9,r5,3 /* get paca_ptrs[] index from cpu id */ + ldx r13,r9,r8 /* r13 = paca_ptrs[cpu id] */ + lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */ cmpw r6,r24 /* Compare to our id */ beq 2f - addi r13,r13,PACA_SIZE /* Loop to next PACA on miss */ addi r5,r5,1 cmpw r5,r7 /* Check if more pacas exist */ blt 1b @@ -756,10 +757,10 @@ _GLOBAL(pmac_secondary_start) mtmsrd r3 /* RI on */ /* Set up a paca value for this processor. */ - LOAD_REG_ADDR(r4,paca) /* Load paca pointer */ - ld r4,0(r4) /* Get base vaddr of paca array */ - mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */ - add r13,r13,r4 /* for this processor.
*/ + LOAD_REG_ADDR(r4,paca_ptrs) /* Load paca pointer */ + ld r4,0(r4) /* Get base vaddr of paca_ptrs array */ + sldi r5,r24,3 /* get paca_ptrs[] index from cpu id */ + ldx r13,r5,r4 /* r13 = paca_ptrs[cpu id] */ SET_PACA(r13) /* Save vaddr of paca in an SPRG*/ /* Mark interrupts soft and hard disabled (they might be enabled diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c index 49d34d7271e7..a250e3331f94 100644 --- a/arch/powerpc/kernel/machine_kexec_64.c +++ b/arch/powerpc/kernel/machine_kexec_64.c @@ -168,24 +168,25 @@ static void kexec_prepare_cpus_wait(int wait_state) * are correctly onlined. If somehow we start a CPU on boot with RTAS * start-cpu, but somehow that CPU doesn't write callin_cpu_map[] in * time, the boot CPU will timeout. If it does eventually execute - * stuff, the secondary will start up (paca[].cpu_start was written) and - * get into a peculiar state. If the platform supports - * smp_ops->take_timebase(), the secondary CPU will probably be spinning - * in there. If not (i.e. pseries), the secondary will continue on and - * try to online itself/idle/etc. If it survives that, we need to find - * these possible-but-not-online-but-should-be CPUs and chaperone them - * into kexec_smp_wait(). + * stuff, the secondary will start up (paca_ptrs[]->cpu_start was + * written) and get into a peculiar state. + * If the platform supports smp_ops->take_timebase(), the secondary CPU + * will probably be spinning in there. If not (i.e. pseries), the + * secondary will continue on and try to online itself/idle/etc. If it + * survives that, we need to find these + * possible-but-not-online-but-should-be CPUs and chaperone them into + * kexec_smp_wait(). */ for_each_online_cpu(i) { if (i == my_cpu) continue; - while (paca[i].kexec_state < wait_state) { + while (paca_ptrs[i]->kexec_state < wait_state) { barrier(); if (i != notified) { printk(KERN_INFO "kexec: waiting for cpu %d " "(physical %d) to enter %i state\n", - i, paca[i].hw_cpu_id, wait_state); + i, paca_ptrs[i]->hw_cpu_id, wait_state); notified = i; } } @@ -327,8 +328,7 @@ void default_machine_kexec(struct kimage *image) */ memcpy(&kexec_paca, get_paca(), sizeof(struct paca_struct)); kexec_paca.data_offset = 0xedeaddeadeeeeeeeUL; - paca = (struct paca_struct *)RELOC_HIDE(&kexec_paca, 0) - - kexec_paca.paca_index; + paca_ptrs[kexec_paca.paca_index] = &kexec_paca; setup_paca(&kexec_paca); /* XXX: If anyone does 'dynamic lppacas' this will also need to be diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index 5900540e2ff8..eef4891c9af6 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -161,8 +161,8 @@ static void __init allocate_slb_shadows(int nr_cpus, int limit) { } * processors. The processor VPD array needs one entry per physical * processor (not thread). 
*/ -struct paca_struct *paca; -EXPORT_SYMBOL(paca); +struct paca_struct **paca_ptrs __read_mostly; +EXPORT_SYMBOL(paca_ptrs); void __init initialise_paca(struct paca_struct *new_paca, int cpu) { @@ -213,11 +213,13 @@ void setup_paca(struct paca_struct *new_paca) } -static int __initdata paca_size; +static int __initdata paca_nr_cpu_ids; +static int __initdata paca_ptrs_size; void __init allocate_pacas(void) { u64 limit; + unsigned long size = 0; int cpu; #ifdef CONFIG_PPC_BOOK3S_64 @@ -230,13 +232,27 @@ void __init allocate_pacas(void) limit = ppc64_rma_size; #endif - paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids); + paca_nr_cpu_ids = nr_cpu_ids; - paca = __va(memblock_alloc_base(paca_size, PAGE_SIZE, limit)); - memset(paca, 0, paca_size); + paca_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids; + paca_ptrs = __va(memblock_alloc_base(paca_ptrs_size, 0, limit)); + memset(paca_ptrs, 0, paca_ptrs_size); - printk(KERN_DEBUG "Allocated %u bytes for %u pacas at %p\n", - paca_size, nr_cpu_ids, paca); + size += paca_ptrs_size; + + for (cpu = 0; cpu < nr_cpu_ids; cpu++) { + unsigned long pa; + + pa = memblock_alloc_base(sizeof(struct paca_struct), + L1_CACHE_BYTES, limit); + paca_ptrs[cpu] = __va(pa); + memset(paca_ptrs[cpu], 0, sizeof(struct paca_struct)); + + size += sizeof(struct paca_struct); + } + + printk(KERN_DEBUG "Allocated %lu bytes for %u pacas\n", + size, nr_cpu_ids); allocate_lppacas(nr_cpu_ids, limit); @@ -244,26 +260,38 @@ void __init allocate_pacas(void) /* Can't use for_each_*_cpu, as they aren't functional yet */ for (cpu = 0; cpu < nr_cpu_ids; cpu++) - initialise_paca(&paca[cpu], cpu); + initialise_paca(paca_ptrs[cpu], cpu); } void __init free_unused_pacas(void) { - int new_size; - - new_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids); - - if (new_size >= paca_size) - return; - - memblock_free(__pa(paca) + new_size, paca_size - new_size); - - printk(KERN_DEBUG "Freed %u bytes for unused pacas\n", - paca_size - new_size); + unsigned long size = 0; + int new_ptrs_size; + int cpu; - paca_size = new_size; + for (cpu = 0; cpu < paca_nr_cpu_ids; cpu++) { + if (!cpu_possible(cpu)) { + unsigned long pa = __pa(paca_ptrs[cpu]); + memblock_free(pa, sizeof(struct paca_struct)); + paca_ptrs[cpu] = NULL; + size += sizeof(struct paca_struct); + } + } + + new_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids; + if (new_ptrs_size < paca_ptrs_size) { + memblock_free(__pa(paca_ptrs) + new_ptrs_size, + paca_ptrs_size - new_ptrs_size); + size += paca_ptrs_size - new_ptrs_size; + } + + if (size) + printk(KERN_DEBUG "Freed %lu bytes for unused pacas\n", size); free_lppacas(); + + paca_nr_cpu_ids = nr_cpu_ids; + paca_ptrs_size = new_ptrs_size; } void copy_mm_to_paca(struct mm_struct *mm) diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index c388cc3357fa..3ce12af4906f 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -110,7 +110,7 @@ void __init setup_tlb_core_data(void) if (cpu_first_thread_sibling(boot_cpuid) == first) first = boot_cpuid; - paca[cpu].tcd_ptr = &paca[first].tcd; + paca_ptrs[cpu]->tcd_ptr = &paca_ptrs[first]->tcd; /* * If we have threads, we need either tlbsrx. @@ -304,7 +304,7 @@ void __init early_setup(unsigned long dt_ptr) early_init_devtree(__va(dt_ptr)); /* Now we know the logical id of our boot cpu, setup the paca. 
*/ - setup_paca(&paca[boot_cpuid]); + setup_paca(paca_ptrs[boot_cpuid]); fixup_boot_paca(); /* @@ -628,15 +628,15 @@ void __init exc_lvl_early_init(void) for_each_possible_cpu(i) { sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE); critirq_ctx[i] = (struct thread_info *)__va(sp); - paca[i].crit_kstack = __va(sp + THREAD_SIZE); + paca_ptrs[i]->crit_kstack = __va(sp + THREAD_SIZE); sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE); dbgirq_ctx[i] = (struct thread_info *)__va(sp); - paca[i].dbg_kstack = __va(sp + THREAD_SIZE); + paca_ptrs[i]->dbg_kstack = __va(sp + THREAD_SIZE); sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE); mcheckirq_ctx[i] = (struct thread_info *)__va(sp); - paca[i].mc_kstack = __va(sp + THREAD_SIZE); + paca_ptrs[i]->mc_kstack = __va(sp + THREAD_SIZE); } if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) @@ -693,20 +693,20 @@ void __init emergency_stack_init(void) ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit)); memset(ti, 0, THREAD_SIZE); emerg_stack_init_thread_info(ti, i); - paca[i].emergency_sp = (void *)ti + THREAD_SIZE; + paca_ptrs[i]->emergency_sp = (void *)ti + THREAD_SIZE; #ifdef CONFIG_PPC_BOOK3S_64 /* emergency stack for NMI exception handling. */ ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit)); memset(ti, 0, THREAD_SIZE); emerg_stack_init_thread_info(ti, i); - paca[i].nmi_emergency_sp = (void *)ti + THREAD_SIZE; + paca_ptrs[i]->nmi_emergency_sp = (void *)ti + THREAD_SIZE; /* emergency stack for machine check exception handling. */ ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit)); memset(ti, 0, THREAD_SIZE); emerg_stack_init_thread_info(ti, i); - paca[i].mc_emergency_sp = (void *)ti + THREAD_SIZE; + paca_ptrs[i]->mc_emergency_sp = (void *)ti + THREAD_SIZE; #endif } } @@ -762,7 +762,7 @@ void __init setup_per_cpu_areas(void) delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; for_each_possible_cpu(cpu) { __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; - paca[cpu].data_offset = __per_cpu_offset[cpu]; + paca_ptrs[cpu]->data_offset = __per_cpu_offset[cpu]; } } #endif @@ -875,8 +875,9 @@ static void init_fallback_flush(void) memset(l1d_flush_fallback_area, 0, l1d_size * 2); for_each_possible_cpu(cpu) { - paca[cpu].rfi_flush_fallback_area = l1d_flush_fallback_area; - paca[cpu].l1d_flush_size = l1d_size; + struct paca_struct *paca = paca_ptrs[cpu]; + paca->rfi_flush_fallback_area = l1d_flush_fallback_area; + paca->l1d_flush_size = l1d_size; } } diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index bbe7634b3a43..cfc08b099c49 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -123,8 +123,8 @@ int smp_generic_kick_cpu(int nr) * cpu_start field to become non-zero After we set cpu_start, * the processor will continue on to secondary_start */ - if (!paca[nr].cpu_start) { - paca[nr].cpu_start = 1; + if (!paca_ptrs[nr]->cpu_start) { + paca_ptrs[nr]->cpu_start = 1; smp_mb(); return 0; } @@ -657,7 +657,7 @@ void smp_prepare_boot_cpu(void) { BUG_ON(smp_processor_id() != boot_cpuid); #ifdef CONFIG_PPC64 - paca[boot_cpuid].__current = current; + paca_ptrs[boot_cpuid]->__current = current; #endif set_numa_node(numa_cpu_lookup_table[boot_cpuid]); current_set[boot_cpuid] = task_thread_info(current); @@ -748,8 +748,8 @@ static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle) struct thread_info *ti = task_thread_info(idle); #ifdef CONFIG_PPC64 - paca[cpu].__current = idle; - paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD; + 
paca_ptrs[cpu]->__current = idle; + paca_ptrs[cpu]->kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD; #endif ti->cpu = cpu; secondary_ti = current_set[cpu] = ti; diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index 5a8bfee6e187..1f9d94dac3a6 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c @@ -600,7 +600,7 @@ void __init record_spr_defaults(void) if (cpu_has_feature(CPU_FTR_DSCR)) { dscr_default = mfspr(SPRN_DSCR); for (cpu = 0; cpu < nr_cpu_ids; cpu++) - paca[cpu].dscr_default = dscr_default; + paca_ptrs[cpu]->dscr_default = dscr_default; } } #endif /* CONFIG_PPC64 */ diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 89707354c2ef..41fce69714d5 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -170,7 +170,7 @@ static bool kvmppc_ipi_thread(int cpu) #if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP) if (cpu >= 0 && cpu < nr_cpu_ids) { - if (paca[cpu].kvm_hstate.xics_phys) { + if (paca_ptrs[cpu]->kvm_hstate.xics_phys) { xics_wake_cpu(cpu); return true; } @@ -2140,7 +2140,7 @@ static int kvmppc_grab_hwthread(int cpu) struct paca_struct *tpaca; long timeout = 10000; - tpaca = &paca[cpu]; + tpaca = paca_ptrs[cpu]; /* Ensure the thread won't go into the kernel if it wakes */ tpaca->kvm_hstate.kvm_vcpu = NULL; @@ -2173,7 +2173,7 @@ static void kvmppc_release_hwthread(int cpu) { struct paca_struct *tpaca; - tpaca = &paca[cpu]; + tpaca = paca_ptrs[cpu]; tpaca->kvm_hstate.hwthread_req = 0; tpaca->kvm_hstate.kvm_vcpu = NULL; tpaca->kvm_hstate.kvm_vcore = NULL; @@ -2239,7 +2239,7 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc) vcpu->arch.thread_cpu = cpu; cpumask_set_cpu(cpu, &kvm->arch.cpu_in_guest); } - tpaca = &paca[cpu]; + tpaca = paca_ptrs[cpu]; tpaca->kvm_hstate.kvm_vcpu = vcpu; tpaca->kvm_hstate.ptid = cpu - vc->pcpu; /* Order stores to hstate.kvm_vcpu etc. before store to kvm_vcore */ @@ -2264,7 +2264,7 @@ static void kvmppc_wait_for_nap(int n_threads) * for any threads that still have a non-NULL vcore ptr. */ for (i = 1; i < n_threads; ++i) - if (paca[cpu + i].kvm_hstate.kvm_vcore) + if (paca_ptrs[cpu + i]->kvm_hstate.kvm_vcore) break; if (i == n_threads) { HMT_medium(); @@ -2274,7 +2274,7 @@ static void kvmppc_wait_for_nap(int n_threads) } HMT_medium(); for (i = 1; i < n_threads; ++i) - if (paca[cpu + i].kvm_hstate.kvm_vcore) + if (paca_ptrs[cpu + i]->kvm_hstate.kvm_vcore) pr_err("KVM: CPU %d seems to be stuck\n", cpu + i); } @@ -2806,9 +2806,11 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) } for (thr = 0; thr < controlled_threads; ++thr) { - paca[pcpu + thr].kvm_hstate.tid = thr; - paca[pcpu + thr].kvm_hstate.napping = 0; - paca[pcpu + thr].kvm_hstate.kvm_split_mode = sip; + struct paca_struct *paca = paca_ptrs[pcpu + thr]; + + paca->kvm_hstate.tid = thr; + paca->kvm_hstate.napping = 0; + paca->kvm_hstate.kvm_split_mode = sip; } /* Initiate micro-threading (split-core) on POWER8 if required */ @@ -2925,7 +2927,9 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) } else if (hpt_on_radix) { /* Wait for all threads to have seen final sync */ for (thr = 1; thr < controlled_threads; ++thr) { - while (paca[pcpu + thr].kvm_hstate.kvm_split_mode) { + struct paca_struct *paca = paca_ptrs[pcpu + thr]; + + while (paca->kvm_hstate.kvm_split_mode) { HMT_low(); barrier(); } @@ -4387,7 +4391,7 @@ static int kvm_init_subcore_bitmap(void) int node = cpu_to_node(first_cpu); /* Ignore if it is already allocated. 
*/ - if (paca[first_cpu].sibling_subcore_state) + if (paca_ptrs[first_cpu]->sibling_subcore_state) continue; sibling_subcore_state = @@ -4402,7 +4406,8 @@ static int kvm_init_subcore_bitmap(void) for (j = 0; j < threads_per_core; j++) { int cpu = first_cpu + j; - paca[cpu].sibling_subcore_state = sibling_subcore_state; + paca_ptrs[cpu]->sibling_subcore_state = + sibling_subcore_state; } } return 0; @@ -4429,7 +4434,7 @@ static int kvmppc_book3s_init_hv(void) /* * We need a way of accessing the XICS interrupt controller, - * either directly, via paca[cpu].kvm_hstate.xics_phys, or + * either directly, via paca_ptrs[cpu]->kvm_hstate.xics_phys, or * indirectly, via OPAL. */ #ifdef CONFIG_SMP diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c index 49a2c7825e04..de18299f92b7 100644 --- a/arch/powerpc/kvm/book3s_hv_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_builtin.c @@ -251,7 +251,7 @@ void kvmhv_rm_send_ipi(int cpu) return; /* Else poke the target with an IPI */ - xics_phys = paca[cpu].kvm_hstate.xics_phys; + xics_phys = paca_ptrs[cpu]->kvm_hstate.xics_phys; if (xics_phys) __raw_rm_writeb(IPI_PRIORITY, xics_phys + XICS_MFRR); else diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c index 71d1b19ad1c0..e6016f4466f3 100644 --- a/arch/powerpc/mm/tlb-radix.c +++ b/arch/powerpc/mm/tlb-radix.c @@ -723,7 +723,7 @@ extern void radix_kvm_prefetch_workaround(struct mm_struct *mm) for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) { if (sib == cpu) continue; - if (paca[sib].kvm_hstate.kvm_vcpu) + if (paca_ptrs[sib]->kvm_hstate.kvm_vcpu) flush = true; } if (flush) diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c index f51fd35f4618..7e966f4cf19a 100644 --- a/arch/powerpc/platforms/85xx/smp.c +++ b/arch/powerpc/platforms/85xx/smp.c @@ -147,7 +147,7 @@ static void qoriq_cpu_kill(unsigned int cpu) for (i = 0; i < 500; i++) { if (is_cpu_dead(cpu)) { #ifdef CONFIG_PPC64 - paca[cpu].cpu_start = 0; + paca_ptrs[cpu]->cpu_start = 0; #endif return; } @@ -328,7 +328,7 @@ static int smp_85xx_kick_cpu(int nr) return ret; done: - paca[nr].cpu_start = 1; + paca_ptrs[nr]->cpu_start = 1; generic_set_cpu_up(nr); return ret; @@ -409,14 +409,14 @@ void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary) } if (disable_threadbit) { - while (paca[disable_cpu].kexec_state < KEXEC_STATE_REAL_MODE) { + while (paca_ptrs[disable_cpu]->kexec_state < KEXEC_STATE_REAL_MODE) { barrier(); now = mftb(); if (!notified && now - start > 1000000) { pr_info("%s/%d: waiting for cpu %d to enter KEXEC_STATE_REAL_MODE (%d)\n", __func__, smp_processor_id(), disable_cpu, - paca[disable_cpu].kexec_state); + paca_ptrs[disable_cpu]->kexec_state); notified = true; } } diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c index f84d52a2db40..1aeac5761e0b 100644 --- a/arch/powerpc/platforms/cell/smp.c +++ b/arch/powerpc/platforms/cell/smp.c @@ -83,7 +83,7 @@ static inline int smp_startup_cpu(unsigned int lcpu) pcpu = get_hard_smp_processor_id(lcpu); /* Fixup atomic count: it exited inside IRQ handler. 
*/ - task_thread_info(paca[lcpu].__current)->preempt_count = 0; + task_thread_info(paca_ptrs[lcpu]->__current)->preempt_count = 0; /* * If the RTAS start-cpu token does not exist then presume the @@ -126,7 +126,7 @@ static int smp_cell_kick_cpu(int nr) * cpu_start field to become non-zero After we set cpu_start, * the processor will continue on to secondary_start */ - paca[nr].cpu_start = 1; + paca_ptrs[nr]->cpu_start = 1; return 0; } diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c index 443d5ca71995..5b2ca71ee551 100644 --- a/arch/powerpc/platforms/powernv/idle.c +++ b/arch/powerpc/platforms/powernv/idle.c @@ -80,7 +80,7 @@ static int pnv_save_sprs_for_deep_states(void) for_each_possible_cpu(cpu) { uint64_t pir = get_hard_smp_processor_id(cpu); - uint64_t hsprg0_val = (uint64_t)&paca[cpu]; + uint64_t hsprg0_val = (uint64_t)paca_ptrs[cpu]; rc = opal_slw_set_reg(pir, SPRN_HSPRG0, hsprg0_val); if (rc != 0) @@ -173,12 +173,12 @@ static void pnv_alloc_idle_core_states(void) for (j = 0; j < threads_per_core; j++) { int cpu = first_cpu + j; - paca[cpu].core_idle_state_ptr = core_idle_state; - paca[cpu].thread_idle_state = PNV_THREAD_RUNNING; - paca[cpu].thread_mask = 1 << j; + paca_ptrs[cpu]->core_idle_state_ptr = core_idle_state; + paca_ptrs[cpu]->thread_idle_state = PNV_THREAD_RUNNING; + paca_ptrs[cpu]->thread_mask = 1 << j; if (!cpu_has_feature(CPU_FTR_POWER9_DD1)) continue; - paca[cpu].thread_sibling_pacas = + paca_ptrs[cpu]->thread_sibling_pacas = kmalloc_node(paca_ptr_array_size, GFP_KERNEL, node); } @@ -749,7 +749,8 @@ static int __init pnv_init_idle_states(void) for (i = 0; i < threads_per_core; i++) { int j = base_cpu + i; - paca[j].thread_sibling_pacas[idx] = &paca[cpu]; + paca_ptrs[j]->thread_sibling_pacas[idx] = + paca_ptrs[cpu]; } } } diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index 4fb21e17504a..b62ca0220ea5 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c @@ -254,7 +254,7 @@ static void pnv_kexec_wait_secondaries_down(void) if (i != notified) { printk(KERN_INFO "kexec: waiting for cpu %d " "(physical %d) to enter OPAL\n", - i, paca[i].hw_cpu_id); + i, paca_ptrs[i]->hw_cpu_id); notified = i; } @@ -266,7 +266,7 @@ static void pnv_kexec_wait_secondaries_down(void) if (timeout-- == 0) { printk(KERN_ERR "kexec: timed out waiting for " "cpu %d (physical %d) to enter OPAL\n", - i, paca[i].hw_cpu_id); + i, paca_ptrs[i]->hw_cpu_id); break; } } diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c index 9664c8461f03..19af6de6b6f0 100644 --- a/arch/powerpc/platforms/powernv/smp.c +++ b/arch/powerpc/platforms/powernv/smp.c @@ -80,7 +80,7 @@ static int pnv_smp_kick_cpu(int nr) * If we already started or OPAL is not supported, we just * kick the CPU via the PACA */ - if (paca[nr].cpu_start || !firmware_has_feature(FW_FEATURE_OPAL)) + if (paca_ptrs[nr]->cpu_start || !firmware_has_feature(FW_FEATURE_OPAL)) goto kick; /* diff --git a/arch/powerpc/platforms/powernv/subcore.c b/arch/powerpc/platforms/powernv/subcore.c index 596ae2e98040..45563004feda 100644 --- a/arch/powerpc/platforms/powernv/subcore.c +++ b/arch/powerpc/platforms/powernv/subcore.c @@ -280,7 +280,7 @@ void update_subcore_sibling_mask(void) int offset = (tid / threads_per_subcore) * threads_per_subcore; int mask = sibling_mask_first_cpu << offset; - paca[cpu].subcore_sibling_mask = mask; + paca_ptrs[cpu]->subcore_sibling_mask = mask; } } diff --git 
a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index dceb51454d8d..357471aa99a6 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -233,7 +233,7 @@ static void pseries_cpu_die(unsigned int cpu) * done here. Change isolate state to Isolate and * change allocation-state to Unusable. */ - paca[cpu].cpu_start = 0; + paca_ptrs[cpu]->cpu_start = 0; } /* diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 0ee4a469a4ae..b6d2ecce33eb 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -99,7 +99,7 @@ void vpa_init(int cpu) * reports that. All SPLPAR support SLB shadow buffer. */ if (!radix_enabled() && firmware_has_feature(FW_FEATURE_SPLPAR)) { - addr = __pa(paca[cpu].slb_shadow_ptr); + addr = __pa(paca_ptrs[cpu]->slb_shadow_ptr); ret = register_slb_shadow(hwcpu, addr); if (ret) pr_err("WARNING: SLB shadow buffer registration for " @@ -111,7 +111,7 @@ void vpa_init(int cpu) /* * Register dispatch trace log, if one has been allocated. */ - pp = &paca[cpu]; + pp = paca_ptrs[cpu]; dtl = pp->dispatch_log; if (dtl) { pp->dtl_ridx = 0; diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 372d7ada1a0c..a66005a25c55 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -246,7 +246,7 @@ static int alloc_dispatch_logs(void) return 0; for_each_possible_cpu(cpu) { - pp = &paca[cpu]; + pp = paca_ptrs[cpu]; dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL); if (!dtl) { pr_warn("Failed to allocate dispatch trace log for cpu %d\n", diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c index 2e184829e5d4..d506bf661f0f 100644 --- a/arch/powerpc/platforms/pseries/smp.c +++ b/arch/powerpc/platforms/pseries/smp.c @@ -110,7 +110,7 @@ static inline int smp_startup_cpu(unsigned int lcpu) } /* Fixup atomic count: it exited inside IRQ handler. */ - task_thread_info(paca[lcpu].__current)->preempt_count = 0; + task_thread_info(paca_ptrs[lcpu]->__current)->preempt_count = 0; #ifdef CONFIG_HOTPLUG_CPU if (get_cpu_current_state(lcpu) == CPU_STATE_INACTIVE) goto out; @@ -165,7 +165,7 @@ static int smp_pSeries_kick_cpu(int nr) * cpu_start field to become non-zero After we set cpu_start, * the processor will continue on to secondary_start */ - paca[nr].cpu_start = 1; + paca_ptrs[nr]->cpu_start = 1; #ifdef CONFIG_HOTPLUG_CPU set_preferred_offline_state(nr, CPU_STATE_ONLINE); diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c index 1459f4e8b698..37bfbc54aacb 100644 --- a/arch/powerpc/sysdev/xics/icp-native.c +++ b/arch/powerpc/sysdev/xics/icp-native.c @@ -164,7 +164,7 @@ void icp_native_cause_ipi_rm(int cpu) * Just like the cause_ipi functions, it is required to * include a full barrier before causing the IPI. 
*/ - xics_phys = paca[cpu].kvm_hstate.xics_phys; + xics_phys = paca_ptrs[cpu]->kvm_hstate.xics_phys; mb(); __raw_rm_writeb(IPI_PRIORITY, xics_phys + XICS_MFRR); } diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 82e1a3ee6e0f..b6574b6f7d4a 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -2327,7 +2327,7 @@ static void dump_one_paca(int cpu) catch_memory_errors = 1; sync(); - p = &paca[cpu]; + p = paca_ptrs[cpu]; printf("paca for cpu 0x%x @ %px:\n", cpu, p); -- cgit v1.2.3 From 499dcd41378ebab2a37a0df65735748d66e75599 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 14 Feb 2018 01:08:13 +1000 Subject: powerpc/64s: Allocate LPPACAs individually We no longer allocate lppacas in an array, so this patch removes the 1kB static alignment for the structure, and enforces the PAPR alignment requirements at allocation time. We can not reduce the 1kB allocation size however, due to existing KVM hypervisors. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/lppaca.h | 24 ++++----- arch/powerpc/kernel/machine_kexec_64.c | 15 ++++-- arch/powerpc/kernel/paca.c | 89 ++++++++++++---------------------- arch/powerpc/kvm/book3s_hv.c | 3 +- arch/powerpc/mm/numa.c | 4 +- arch/powerpc/platforms/pseries/kexec.c | 7 ++- 6 files changed, 63 insertions(+), 79 deletions(-) diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h index 6e4589eee2da..65d589689f01 100644 --- a/arch/powerpc/include/asm/lppaca.h +++ b/arch/powerpc/include/asm/lppaca.h @@ -36,14 +36,16 @@ #include /* - * We only have to have statically allocated lppaca structs on - * legacy iSeries, which supports at most 64 cpus. - */ -#define NR_LPPACAS 1 - -/* - * The Hypervisor barfs if the lppaca crosses a page boundary. A 1k - * alignment is sufficient to prevent this + * The lppaca is the "virtual processor area" registered with the hypervisor, + * H_REGISTER_VPA etc. + * + * According to PAPR, the structure is 640 bytes long, must be L1 cache line + * aligned, and must not cross a 4kB boundary. Its size field must be at + * least 640 bytes (but may be more). + * + * Pre-v4.14 KVM hypervisors reject the VPA if its size field is smaller than + * 1kB, so we dynamically allocate 1kB and advertise size as 1kB, but keep + * this structure as the canonical 640 byte size. */ struct lppaca { /* cacheline 1 contains read-only data */ @@ -97,11 +99,9 @@ struct lppaca { __be32 page_ins; /* CMO Hint - # page ins by OS */ u8 reserved11[148]; - volatile __be64 dtl_idx; /* Dispatch Trace Log head index */ + volatile __be64 dtl_idx; /* Dispatch Trace Log head index */ u8 reserved12[96]; -} __attribute__((__aligned__(0x400))); - -extern struct lppaca lppaca[]; +} ____cacheline_aligned; #define lppaca_of(cpu) (*paca_ptrs[cpu]->lppaca_ptr) diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c index a250e3331f94..1044bf15d5ed 100644 --- a/arch/powerpc/kernel/machine_kexec_64.c +++ b/arch/powerpc/kernel/machine_kexec_64.c @@ -323,17 +323,24 @@ void default_machine_kexec(struct kimage *image) kexec_stack.thread_info.cpu = current_thread_info()->cpu; /* We need a static PACA, too; copy this CPU's PACA over and switch to - * it. Also poison per_cpu_offset to catch anyone using non-static - * data. + * it. Also poison per_cpu_offset and NULL lppaca to catch anyone using + * non-static data. 
*/ memcpy(&kexec_paca, get_paca(), sizeof(struct paca_struct)); kexec_paca.data_offset = 0xedeaddeadeeeeeeeUL; +#ifdef CONFIG_PPC_PSERIES + kexec_paca.lppaca_ptr = NULL; +#endif paca_ptrs[kexec_paca.paca_index] = &kexec_paca; + setup_paca(&kexec_paca); - /* XXX: If anyone does 'dynamic lppacas' this will also need to be - * switched to a static version! + /* + * The lppaca should be unregistered at this point so the HV won't + * touch it. In the case of a crash, none of the lppacas are + * unregistered so there is not much we can do about it here. */ + /* * On Book3S, the copy must happen with the MMU off if we are either * using Radix page tables or we are not in an LPAR since we can diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index eef4891c9af6..6cddb9bdc151 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -23,82 +23,50 @@ #ifdef CONFIG_PPC_PSERIES /* - * The structure which the hypervisor knows about - this structure - * should not cross a page boundary. The vpa_init/register_vpa call - * is now known to fail if the lppaca structure crosses a page - * boundary. The lppaca is also used on POWER5 pSeries boxes. - * The lppaca is 640 bytes long, and cannot readily - * change since the hypervisor knows its layout, so a 1kB alignment - * will suffice to ensure that it doesn't cross a page boundary. + * See asm/lppaca.h for more detail. + * + * lppaca structures must be 1kB in size, L1 cache line aligned, + * and not cross 4kB boundary. A 1kB size and 1kB alignment will satisfy + * these requirements. */ -struct lppaca lppaca[] = { - [0 ... (NR_LPPACAS-1)] = { +static inline void init_lppaca(struct lppaca *lppaca) +{ + BUILD_BUG_ON(sizeof(struct lppaca) != 640); + + *lppaca = (struct lppaca) { .desc = cpu_to_be32(0xd397d781), /* "LpPa" */ - .size = cpu_to_be16(sizeof(struct lppaca)), + .size = cpu_to_be16(0x400), .fpregs_in_use = 1, .slb_count = cpu_to_be16(64), .vmxregs_in_use = 0, - .page_ins = 0, - }, + .page_ins = 0, }; }; -static struct lppaca *extra_lppacas; -static long __initdata lppaca_size; - -static void __init allocate_lppacas(int nr_cpus, unsigned long limit) -{ - if (early_cpu_has_feature(CPU_FTR_HVMODE)) - return; - - if (nr_cpus <= NR_LPPACAS) - return; - - lppaca_size = PAGE_ALIGN(sizeof(struct lppaca) * - (nr_cpus - NR_LPPACAS)); - extra_lppacas = __va(memblock_alloc_base(lppaca_size, - PAGE_SIZE, limit)); -} - -static struct lppaca * __init new_lppaca(int cpu) +static struct lppaca * __init new_lppaca(int cpu, unsigned long limit) { struct lppaca *lp; + size_t size = 0x400; + + BUILD_BUG_ON(size < sizeof(struct lppaca)); if (early_cpu_has_feature(CPU_FTR_HVMODE)) return NULL; - if (cpu < NR_LPPACAS) - return &lppaca[cpu]; - - lp = extra_lppacas + (cpu - NR_LPPACAS); - *lp = lppaca[0]; + lp = __va(memblock_alloc_base(size, 0x400, limit)); + init_lppaca(lp); return lp; } -static void __init free_lppacas(void) +static void __init free_lppaca(struct lppaca *lp) { - long new_size = 0, nr; + size_t size = 0x400; if (early_cpu_has_feature(CPU_FTR_HVMODE)) return; - if (!lppaca_size) - return; - nr = num_possible_cpus() - NR_LPPACAS; - if (nr > 0) - new_size = PAGE_ALIGN(nr * sizeof(struct lppaca)); - if (new_size >= lppaca_size) - return; - - memblock_free(__pa(extra_lppacas) + new_size, lppaca_size - new_size); - lppaca_size = new_size; + memblock_free(__pa(lp), size); } - -#else - -static inline void allocate_lppacas(int nr_cpus, unsigned long limit) { } -static inline void free_lppacas(void) { } - #endif /*
CONFIG_PPC_BOOK3S */ #ifdef CONFIG_PPC_BOOK3S_64 @@ -167,7 +135,7 @@ EXPORT_SYMBOL(paca_ptrs); void __init initialise_paca(struct paca_struct *new_paca, int cpu) { #ifdef CONFIG_PPC_PSERIES - new_paca->lppaca_ptr = new_lppaca(cpu); + new_paca->lppaca_ptr = NULL; #endif #ifdef CONFIG_PPC_BOOK3E new_paca->kernel_pgd = swapper_pg_dir; @@ -254,13 +222,15 @@ void __init allocate_pacas(void) printk(KERN_DEBUG "Allocated %lu bytes for %u pacas\n", size, nr_cpu_ids); - allocate_lppacas(nr_cpu_ids, limit); - allocate_slb_shadows(nr_cpu_ids, limit); /* Can't use for_each_*_cpu, as they aren't functional yet */ - for (cpu = 0; cpu < nr_cpu_ids; cpu++) + for (cpu = 0; cpu < nr_cpu_ids; cpu++) { initialise_paca(paca_ptrs[cpu], cpu); +#ifdef CONFIG_PPC_PSERIES + paca_ptrs[cpu]->lppaca_ptr = new_lppaca(cpu, limit); +#endif + } } void __init free_unused_pacas(void) @@ -272,6 +242,9 @@ void __init free_unused_pacas(void) for (cpu = 0; cpu < paca_nr_cpu_ids; cpu++) { if (!cpu_possible(cpu)) { unsigned long pa = __pa(paca_ptrs[cpu]); +#ifdef CONFIG_PPC_PSERIES + free_lppaca(paca_ptrs[cpu]->lppaca_ptr); +#endif memblock_free(pa, sizeof(struct paca_struct)); paca_ptrs[cpu] = NULL; size += sizeof(struct paca_struct); @@ -288,8 +261,6 @@ void __init free_unused_pacas(void) if (size) printk(KERN_DEBUG "Freed %lu bytes for unused pacas\n", size); - free_lppacas(); - paca_nr_cpu_ids = nr_cpu_ids; paca_ptrs_size = new_ptrs_size; } diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 41fce69714d5..9b48d4a191ff 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -498,7 +498,8 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu, * use 640 bytes of the structure though, so we should accept * clients that set a size of 640. */ - if (len < 640) + BUILD_BUG_ON(sizeof(struct lppaca) != 640); + if (len < sizeof(struct lppaca)) break; vpap = &tvcpu->arch.vpa; err = 0; diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 314d19ab9385..e9ec465068f1 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -1110,7 +1110,7 @@ static void setup_cpu_associativity_change_counters(void) for_each_possible_cpu(cpu) { int i; u8 *counts = vphn_cpu_change_counts[cpu]; - volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts; + volatile u8 *hypervisor_counts = lppaca_of(cpu).vphn_assoc_counts; for (i = 0; i < distance_ref_points_depth; i++) counts[i] = hypervisor_counts[i]; @@ -1136,7 +1136,7 @@ static int update_cpu_associativity_changes_mask(void) for_each_possible_cpu(cpu) { int i, changed = 0; u8 *counts = vphn_cpu_change_counts[cpu]; - volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts; + volatile u8 *hypervisor_counts = lppaca_of(cpu).vphn_assoc_counts; for (i = 0; i < distance_ref_points_depth; i++) { if (hypervisor_counts[i] != counts[i]) { diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c index eeb13429d685..3fe126796975 100644 --- a/arch/powerpc/platforms/pseries/kexec.c +++ b/arch/powerpc/platforms/pseries/kexec.c @@ -23,7 +23,12 @@ void pseries_kexec_cpu_down(int crash_shutdown, int secondary) { - /* Don't risk a hypervisor call if we're crashing */ + /* + * Don't risk a hypervisor call if we're crashing + * XXX: Why? The hypervisor is not crashing. It might be better + * to at least attempt unregister to avoid the hypervisor stepping + * on our memory. 
+ */ if (firmware_has_feature(FW_FEATURE_SPLPAR) && !crash_shutdown) { int ret; int cpu = smp_processor_id(); -- cgit v1.2.3 From 384e8067844fc19f9c067d6b03cbc8781f98bd5d Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 14 Feb 2018 01:08:14 +1000 Subject: powerpc/64s: Allocate slb_shadow structures individually slb_shadow structures are not needed in a radix environment. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/paca.c | 65 +++++++++++++++++++++------------------------- 1 file changed, 30 insertions(+), 35 deletions(-) diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index 6cddb9bdc151..2699f9009286 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -72,41 +72,28 @@ static void __init free_lppaca(struct lppaca *lp) #ifdef CONFIG_PPC_BOOK3S_64 /* - * 3 persistent SLBs are registered here. The buffer will be zero + * 3 persistent SLBs are allocated here. The buffer will be zero * initially, hence will all be invalid until we actually write them. * * If you make the number of persistent SLB entries dynamic, please also * update PR KVM to flush and restore them accordingly. */ -static struct slb_shadow * __initdata slb_shadow; - -static void __init allocate_slb_shadows(int nr_cpus, int limit) -{ - int size = PAGE_ALIGN(sizeof(struct slb_shadow) * nr_cpus); - - if (early_radix_enabled()) - return; - - slb_shadow = __va(memblock_alloc_base(size, PAGE_SIZE, limit)); - memset(slb_shadow, 0, size); -} - -static struct slb_shadow * __init init_slb_shadow(int cpu) +static struct slb_shadow * __init new_slb_shadow(int cpu, unsigned long limit) { struct slb_shadow *s; - if (early_radix_enabled()) - return NULL; + if (cpu != boot_cpuid) { + /* + * Boot CPU comes here before early_radix_enabled + * is parsed (e.g., for disable_radix). So allocate + * always and this will be fixed up in free_unused_pacas. + */ + if (early_radix_enabled()) + return NULL; + } - s = &slb_shadow[cpu]; - - /* - * When we come through here to initialise boot_paca, the slb_shadow - * buffers are not allocated yet. That's OK, we'll get one later in - * boot, but make sure we don't corrupt memory at 0. - */ - if (!slb_shadow) - return NULL; + s = __va(memblock_alloc_base(sizeof(*s), L1_CACHE_BYTES, limit)); + memset(s, 0, sizeof(*s)); s->persistent = cpu_to_be32(SLB_NUM_BOLTED); s->buffer_length = cpu_to_be32(sizeof(*s)); @@ -114,10 +101,6 @@ static struct slb_shadow * __init init_slb_shadow(int cpu) return s; } -#else /* !CONFIG_PPC_BOOK3S_64 */ - -static void __init allocate_slb_shadows(int nr_cpus, int limit) { } - #endif /* CONFIG_PPC_BOOK3S_64 */ /* The Paca is an array with one entry per processor.
Each contains an @@ -151,7 +134,7 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu) new_paca->__current = &init_task; new_paca->data_offset = 0xfeeeeeeeeeeeeeeeULL; #ifdef CONFIG_PPC_BOOK3S_64 - new_paca->slb_shadow_ptr = init_slb_shadow(cpu); + new_paca->slb_shadow_ptr = NULL; #endif #ifdef CONFIG_PPC_BOOK3E @@ -222,13 +205,16 @@ void __init allocate_pacas(void) printk(KERN_DEBUG "Allocated %lu bytes for %u pacas\n", size, nr_cpu_ids); - allocate_slb_shadows(nr_cpu_ids, limit); - /* Can't use for_each_*_cpu, as they aren't functional yet */ for (cpu = 0; cpu < nr_cpu_ids; cpu++) { - initialise_paca(paca_ptrs[cpu], cpu); + struct paca_struct *paca = paca_ptrs[cpu]; + + initialise_paca(paca, cpu); #ifdef CONFIG_PPC_PSERIES - paca_ptrs[cpu]->lppaca_ptr = new_lppaca(cpu, limit); + paca->lppaca_ptr = new_lppaca(cpu, limit); +#endif +#ifdef CONFIG_PPC_BOOK3S_64 + paca->slb_shadow_ptr = new_slb_shadow(cpu, limit); #endif } } @@ -263,6 +249,15 @@ void __init free_unused_pacas(void) paca_nr_cpu_ids = nr_cpu_ids; paca_ptrs_size = new_ptrs_size; + +#ifdef CONFIG_PPC_BOOK3S_64 + if (early_radix_enabled()) { + /* Ugly fixup, see new_slb_shadow() */ + memblock_free(__pa(paca_ptrs[boot_cpuid]->slb_shadow_ptr), + sizeof(struct slb_shadow)); + paca_ptrs[boot_cpuid]->slb_shadow_ptr = NULL; + } +#endif } void copy_mm_to_paca(struct mm_struct *mm) -- cgit v1.2.3 From b575454fa330aab2d65cf17812ca8e1f405ae80d Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 14 Feb 2018 01:08:15 +1000 Subject: mm: make memblock_alloc_base_nid() non-static This will be used by powerpc to allocate per-cpu stacks and other data structures node-local where possible. Signed-off-by: Nicholas Piggin [mpe: Drop stray change to memblock_alloc_range() as noticed by akpm] Signed-off-by: Michael Ellerman --- include/linux/memblock.h | 3 +++ mm/memblock.c | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 8be5077efb5f..4e1e3d0b002a 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -319,6 +319,9 @@ static inline bool memblock_bottom_up(void) phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align, phys_addr_t start, phys_addr_t end, ulong flags); +phys_addr_t memblock_alloc_base_nid(phys_addr_t size, + phys_addr_t align, phys_addr_t max_addr, + int nid, ulong flags); phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr); phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align, diff --git a/mm/memblock.c b/mm/memblock.c index 5a9ca2a1751b..cea2af494da0 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -1190,7 +1190,7 @@ phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align, flags); } -static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size, +phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr, int nid, ulong flags) { -- cgit v1.2.3 From 9bd9be006c8ec0ccf7cb0422d35033af39d3f969 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 14 Feb 2018 01:08:16 +1000 Subject: powerpc/mm/numa: move numa topology discovery earlier Split sparsemem initialisation from basic numa topology discovery. Move the parsing earlier in boot, before pacas are allocated. 
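For reference, a condensed sketch of the resulting split, taken from the numa.c diff below (the per-node pfn range setup and the hotplug callback registration are elided):

/* Runs early in setup_arch(), before pacas are allocated. */
void __init mem_topology_setup(void)
{
	int cpu;

	if (parse_numa_properties())
		setup_nonnuma();

	find_possible_nodes();
	setup_node_to_cpumask_map();
	reset_numa_cpu_lookup_table();

	for_each_present_cpu(cpu)
		numa_setup_cpu(cpu);
}

/* Only the sparsemem pieces stay behind. */
void __init initmem_init(void)
{
	max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	memblock_dump_all();
	/* ... present each online node's regions, then sparse_init() ... */
	sparse_init();
}

With this ordering, the CPU-to-node mapping is known by the time later patches allocate pacas and per-cpu stacks.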
Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/setup.h | 1 + arch/powerpc/kernel/setup-common.c | 3 +++ arch/powerpc/mm/mem.c | 5 ++++- arch/powerpc/mm/numa.c | 32 +++++++++++++++++++------------- 4 files changed, 27 insertions(+), 14 deletions(-) diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index 469b7fdc9be4..d2bf233aebd5 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h @@ -23,6 +23,7 @@ extern void reloc_got2(unsigned long); #define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x))) void check_for_initrd(void); +void mem_topology_setup(void); void initmem_init(void); void setup_panic(void); #define ARCH_PANIC_TIMEOUT 180 diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index d73ec518ef80..9eaf26318d20 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -888,6 +888,9 @@ void __init setup_arch(char **cmdline_p) /* Check the SMT related command line arguments (ppc64). */ check_smt_enabled(); + /* Parse memory topology */ + mem_topology_setup(); + /* On BookE, setup per-core TLB data structures. */ setup_tlb_core_data(); diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index fe8c61149fb8..4eee46ea4d96 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -212,7 +212,7 @@ walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, EXPORT_SYMBOL_GPL(walk_system_ram_range); #ifndef CONFIG_NEED_MULTIPLE_NODES -void __init initmem_init(void) +void __init mem_topology_setup(void) { max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT; min_low_pfn = MEMORY_START >> PAGE_SHIFT; @@ -224,7 +224,10 @@ void __init initmem_init(void) * memblock_regions */ memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0); +} +void __init initmem_init(void) +{ /* XXX need to clip this if using highmem? 
*/ sparse_memory_present_with_active_regions(0); sparse_init(); diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index e9ec465068f1..1eec1bcc03a6 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -836,18 +836,13 @@ out: of_node_put(rtas); } -void __init initmem_init(void) +void __init mem_topology_setup(void) { - int nid, cpu; - - max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT; - max_pfn = max_low_pfn; + int cpu; if (parse_numa_properties()) setup_nonnuma(); - memblock_dump_all(); - /* * Modify the set of possible NUMA nodes to reflect information * available about the set of online nodes, and the set of nodes @@ -858,6 +853,23 @@ void __init initmem_init(void) find_possible_nodes(); + setup_node_to_cpumask_map(); + + reset_numa_cpu_lookup_table(); + + for_each_present_cpu(cpu) + numa_setup_cpu(cpu); +} + +void __init initmem_init(void) +{ + int nid; + + max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT; + max_pfn = max_low_pfn; + + memblock_dump_all(); + for_each_online_node(nid) { unsigned long start_pfn, end_pfn; @@ -868,10 +880,6 @@ void __init initmem_init(void) sparse_init(); - setup_node_to_cpumask_map(); - - reset_numa_cpu_lookup_table(); - /* * We need the numa_cpu_lookup_table to be accurate for all CPUs, * even before we online them, so that we can use cpu_to_{node,mem} @@ -881,8 +889,6 @@ void __init initmem_init(void) */ cpuhp_setup_state_nocalls(CPUHP_POWER_NUMA_PREPARE, "powerpc/numa:prepare", ppc_numa_cpu_prepare, ppc_numa_cpu_dead); - for_each_present_cpu(cpu) - numa_setup_cpu(cpu); } static int __init early_numa(char *p) -- cgit v1.2.3 From c0abd0c745bdabe027a8f013a866f385fba717b1 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 14 Feb 2018 01:08:17 +1000 Subject: powerpc/64: move default SPR recording Move this into the early setup code, and don't iterate over CPU masks. We don't want to call into sysfs so early from setup, and a future patch won't initialize CPU masks by the time this is called. Signed-off-by: Nicholas Piggin [mpe: Fold in incremental fix from Nick for DSCR handling] Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/setup.h | 9 +++------ arch/powerpc/kernel/setup_64.c | 8 ++++++++ arch/powerpc/kernel/sysfs.c | 20 +++++++++----------- 3 files changed, 20 insertions(+), 17 deletions(-) diff --git a/arch/powerpc/kernel/setup.h b/arch/powerpc/kernel/setup.h index 3fc11e30308f..d144df54ad40 100644 --- a/arch/powerpc/kernel/setup.h +++ b/arch/powerpc/kernel/setup.h @@ -45,14 +45,11 @@ void emergency_stack_init(void); static inline void emergency_stack_init(void) { }; #endif -#ifdef CONFIG_PPC64 -void record_spr_defaults(void); -#else -static inline void record_spr_defaults(void) { }; -#endif - #ifdef CONFIG_PPC64 u64 ppc64_bolted_size(void); + +/* Default SPR values from firmware/kexec */ +extern unsigned long spr_default_dscr; #endif /* diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 3ce12af4906f..dde34d35d1e7 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -254,6 +254,14 @@ static void cpu_ready_for_interrupts(void) get_paca()->kernel_msr = MSR_KERNEL; } +unsigned long spr_default_dscr = 0; + +void __init record_spr_defaults(void) +{ + if (early_cpu_has_feature(CPU_FTR_DSCR)) + spr_default_dscr = mfspr(SPRN_DSCR); +} + /* * Early initialization entry point. This is called by head.S * with MMU translation disabled. 
We rely on the "feature" of diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index 1f9d94dac3a6..9f327483008c 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c @@ -20,6 +20,7 @@ #include #include "cacheinfo.h" +#include "setup.h" #ifdef CONFIG_PPC64 #include @@ -588,21 +589,18 @@ static DEVICE_ATTR(dscr_default, 0600, static void sysfs_create_dscr_default(void) { - int err = 0; - if (cpu_has_feature(CPU_FTR_DSCR)) - err = device_create_file(cpu_subsys.dev_root, &dev_attr_dscr_default); -} - -void __init record_spr_defaults(void) -{ - int cpu; - if (cpu_has_feature(CPU_FTR_DSCR)) { - dscr_default = mfspr(SPRN_DSCR); - for (cpu = 0; cpu < nr_cpu_ids; cpu++) + int err = 0; + int cpu; + + dscr_default = spr_default_dscr; + for_each_possible_cpu(cpu) paca_ptrs[cpu]->dscr_default = dscr_default; + + err = device_create_file(cpu_subsys.dev_root, &dev_attr_dscr_default); } } + #endif /* CONFIG_PPC64 */ #ifdef HAS_PPC_PMC_PA6T -- cgit v1.2.3 From 9f593f131ed463dc571290980dd12cb9e56d8ea5 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 14 Feb 2018 01:08:18 +1000 Subject: powerpc/setup: Add cpu_to_phys_id array Build an array that finds hardware CPU number from logical CPU number in firmware CPU discovery. Use that rather than setting paca of other CPUs directly, to begin with. Subsequent patch will not have pacas allocated at this point. Signed-off-by: Nicholas Piggin [mpe: Fix SMP=n build by adding #ifdef in arch_match_cpu_phys_id()] Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/smp.h | 1 + arch/powerpc/kernel/prom.c | 10 ++++++++++ arch/powerpc/kernel/setup-common.c | 15 ++++++++++++++- 3 files changed, 25 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h index ec7b299350d9..cfecfee1194b 100644 --- a/arch/powerpc/include/asm/smp.h +++ b/arch/powerpc/include/asm/smp.h @@ -31,6 +31,7 @@ extern int boot_cpuid; extern int spinning_secondaries; +extern u32 *cpu_to_phys_id; extern void cpu_die(void); extern int cpu_to_chip_id(int cpu); diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 4dffef947b8a..0d59a7128deb 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -874,5 +874,15 @@ EXPORT_SYMBOL(cpu_to_chip_id); bool arch_match_cpu_phys_id(int cpu, u64 phys_id) { +#ifdef CONFIG_SMP + /* + * Early firmware scanning must use this rather than + * get_hard_smp_processor_id because we don't have pacas allocated + * until memory topology is discovered. 
+ */ + if (cpu_to_phys_id != NULL) + return (int)phys_id == cpu_to_phys_id[cpu]; +#endif + return (int)phys_id == get_hard_smp_processor_id(cpu); } diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 9eaf26318d20..bd79a5644c78 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -437,6 +437,8 @@ static void __init cpu_init_thread_core_maps(int tpc) } +u32 *cpu_to_phys_id = NULL; + /** * setup_cpu_maps - initialize the following cpu maps: * cpu_possible_mask @@ -463,6 +465,10 @@ void __init smp_setup_cpu_maps(void) DBG("smp_setup_cpu_maps()\n"); + cpu_to_phys_id = __va(memblock_alloc(nr_cpu_ids * sizeof(u32), + __alignof__(u32))); + memset(cpu_to_phys_id, 0, nr_cpu_ids * sizeof(u32)); + for_each_node_by_type(dn, "cpu") { const __be32 *intserv; __be32 cpu_be; @@ -480,6 +486,7 @@ void __init smp_setup_cpu_maps(void) intserv = of_get_property(dn, "reg", &len); if (!intserv) { cpu_be = cpu_to_be32(cpu); + /* XXX: what is this? uninitialized?? */ intserv = &cpu_be; /* assume logical == phys */ len = 4; } @@ -499,8 +506,8 @@ void __init smp_setup_cpu_maps(void) "enable-method", "spin-table"); set_cpu_present(cpu, avail); - set_hard_smp_processor_id(cpu, be32_to_cpu(intserv[j])); set_cpu_possible(cpu, true); + cpu_to_phys_id[cpu] = be32_to_cpu(intserv[j]); cpu++; } @@ -570,6 +577,12 @@ void __init smp_setup_cpu_maps(void) setup_nr_cpu_ids(); free_unused_pacas(); + + for_each_possible_cpu(cpu) { + if (cpu == smp_processor_id()) + continue; + set_hard_smp_processor_id(cpu, cpu_to_phys_id[cpu]); + } } #endif /* CONFIG_SMP */ -- cgit v1.2.3 From 59f577743d71bf796ceac10961bf6cfa5ca26786 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 14 Feb 2018 01:08:19 +1000 Subject: powerpc/64: Defer paca allocation until memory topology is discovered Signed-off-by: Nicholas Piggin [mpe: Rename the dummy allocate_pacas() to fix 32-bit build] Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/paca.h | 6 ++- arch/powerpc/kernel/paca.c | 90 ++++++++++++-------------------------- arch/powerpc/kernel/prom.c | 5 ++- arch/powerpc/kernel/setup-common.c | 24 +++++++--- 4 files changed, 53 insertions(+), 72 deletions(-) diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index e89887f5e56f..2d04c5575631 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -255,12 +255,14 @@ extern void copy_mm_to_paca(struct mm_struct *mm); extern struct paca_struct **paca_ptrs; extern void initialise_paca(struct paca_struct *new_paca, int cpu); extern void setup_paca(struct paca_struct *new_paca); -extern void allocate_pacas(void); +extern void allocate_paca_ptrs(void); +extern void allocate_paca(int cpu); extern void free_unused_pacas(void); #else /* CONFIG_PPC64 */ -static inline void allocate_pacas(void) { }; +static inline void allocate_paca_ptrs(void) { }; +static inline void allocate_paca(int cpu) { }; static inline void free_unused_pacas(void) { }; #endif /* CONFIG_PPC64 */ diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index 2699f9009286..a186911791c7 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -57,16 +57,6 @@ static struct lppaca * __init new_lppaca(int cpu, unsigned long limit) return lp; } - -static void __init free_lppaca(struct lppaca *lp) -{ - size_t size = 0x400; - - if (early_cpu_has_feature(CPU_FTR_HVMODE)) - return; - - memblock_free(__pa(lp), size); -} #endif /* CONFIG_PPC_BOOK3S */ #ifdef CONFIG_PPC_BOOK3S_64 @@ 
-166,12 +156,24 @@ void setup_paca(struct paca_struct *new_paca) static int __initdata paca_nr_cpu_ids; static int __initdata paca_ptrs_size; +static int __initdata paca_struct_size; + +void __init allocate_paca_ptrs(void) +{ + paca_nr_cpu_ids = nr_cpu_ids; + + paca_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids; + paca_ptrs = __va(memblock_alloc(paca_ptrs_size, 0)); + memset(paca_ptrs, 0x88, paca_ptrs_size); +} -void __init allocate_pacas(void) +void __init allocate_paca(int cpu) { u64 limit; - unsigned long size = 0; - int cpu; + unsigned long pa; + struct paca_struct *paca; + + BUG_ON(cpu >= paca_nr_cpu_ids); #ifdef CONFIG_PPC_BOOK3S_64 /* @@ -183,69 +185,30 @@ void __init allocate_pacas(void) limit = ppc64_rma_size; #endif - paca_nr_cpu_ids = nr_cpu_ids; - - paca_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids; - paca_ptrs = __va(memblock_alloc_base(paca_ptrs_size, 0, limit)); - memset(paca_ptrs, 0, paca_ptrs_size); - - size += paca_ptrs_size; - - for (cpu = 0; cpu < nr_cpu_ids; cpu++) { - unsigned long pa; - - pa = memblock_alloc_base(sizeof(struct paca_struct), - L1_CACHE_BYTES, limit); - paca_ptrs[cpu] = __va(pa); - memset(paca_ptrs[cpu], 0, sizeof(struct paca_struct)); - - size += sizeof(struct paca_struct); - } - - printk(KERN_DEBUG "Allocated %lu bytes for %u pacas\n", - size, nr_cpu_ids); - - /* Can't use for_each_*_cpu, as they aren't functional yet */ - for (cpu = 0; cpu < nr_cpu_ids; cpu++) { - struct paca_struct *paca = paca_ptrs[cpu]; + pa = memblock_alloc_base(sizeof(struct paca_struct), + L1_CACHE_BYTES, limit); + paca = __va(pa); + paca_ptrs[cpu] = paca; + memset(paca, 0, sizeof(struct paca_struct)); - initialise_paca(paca, cpu); + initialise_paca(paca, cpu); #ifdef CONFIG_PPC_PSERIES - paca->lppaca_ptr = new_lppaca(cpu, limit); + paca->lppaca_ptr = new_lppaca(cpu, limit); #endif #ifdef CONFIG_PPC_BOOK3S_64 - paca->slb_shadow_ptr = new_slb_shadow(cpu, limit); + paca->slb_shadow_ptr = new_slb_shadow(cpu, limit); #endif - } + paca_struct_size += sizeof(struct paca_struct); } void __init free_unused_pacas(void) { - unsigned long size = 0; int new_ptrs_size; - int cpu; - - for (cpu = 0; cpu < paca_nr_cpu_ids; cpu++) { - if (!cpu_possible(cpu)) { - unsigned long pa = __pa(paca_ptrs[cpu]); -#ifdef CONFIG_PPC_PSERIES - free_lppaca(paca_ptrs[cpu]->lppaca_ptr); -#endif - memblock_free(pa, sizeof(struct paca_struct)); - paca_ptrs[cpu] = NULL; - size += sizeof(struct paca_struct); - } - } new_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids; - if (new_ptrs_size < paca_ptrs_size) { + if (new_ptrs_size < paca_ptrs_size) memblock_free(__pa(paca_ptrs) + new_ptrs_size, paca_ptrs_size - new_ptrs_size); - size += paca_ptrs_size - new_ptrs_size; - } - - if (size) - printk(KERN_DEBUG "Freed %lu bytes for unused pacas\n", size); paca_nr_cpu_ids = nr_cpu_ids; paca_ptrs_size = new_ptrs_size; @@ -258,6 +221,9 @@ void __init free_unused_pacas(void) paca_ptrs[boot_cpuid]->slb_shadow_ptr = NULL; } #endif + + printk(KERN_DEBUG "Allocated %u bytes for %u pacas\n", + paca_ptrs_size + paca_struct_size, nr_cpu_ids); } void copy_mm_to_paca(struct mm_struct *mm) diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 0d59a7128deb..e19f5e374200 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -365,7 +365,6 @@ static int __init early_init_dt_scan_cpus(unsigned long node, DBG("boot cpu: logical %d physical %d\n", found, be32_to_cpu(intserv[found_thread])); boot_cpuid = found; - set_hard_smp_processor_id(found, 
be32_to_cpu(intserv[found_thread])); /* * PAPR defines "logical" PVR values for cpus that @@ -403,7 +402,9 @@ static int __init early_init_dt_scan_cpus(unsigned long node, cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT; else if (!dt_cpu_ftrs_in_use()) cur_cpu_spec->cpu_features |= CPU_FTR_SMT; + allocate_paca(boot_cpuid); #endif + set_hard_smp_processor_id(found, be32_to_cpu(intserv[found_thread])); return 0; } @@ -744,7 +745,7 @@ void __init early_init_devtree(void *params) * FIXME .. and the initrd too? */ move_device_tree(); - allocate_pacas(); + allocate_paca_ptrs(); DBG("Scanning CPUs ...\n"); diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index bd79a5644c78..af7a47c8fe10 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -577,12 +577,6 @@ void __init smp_setup_cpu_maps(void) setup_nr_cpu_ids(); free_unused_pacas(); - - for_each_possible_cpu(cpu) { - if (cpu == smp_processor_id()) - continue; - set_hard_smp_processor_id(cpu, cpu_to_phys_id[cpu]); - } } #endif /* CONFIG_SMP */ @@ -848,6 +842,23 @@ static __init void print_system_info(void) pr_info("-----------------------------------------------------\n"); } +#ifdef CONFIG_SMP +static void smp_setup_pacas(void) +{ + int cpu; + + for_each_possible_cpu(cpu) { + if (cpu == smp_processor_id()) + continue; + allocate_paca(cpu); + set_hard_smp_processor_id(cpu, cpu_to_phys_id[cpu]); + } + + memblock_free(__pa(cpu_to_phys_id), nr_cpu_ids * sizeof(u32)); + cpu_to_phys_id = NULL; +} +#endif + /* * Called into from start_kernel this initializes memblock, which is used * to manage page allocation until mem_init is called. @@ -915,6 +926,7 @@ void __init setup_arch(char **cmdline_p) * so smp_release_cpus() does nothing for them. */ #ifdef CONFIG_SMP + smp_setup_pacas(); smp_release_cpus(); #endif -- cgit v1.2.3 From 4890aea65ae7b5d424b5020e8be193b08a545990 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 14 Feb 2018 01:08:20 +1000 Subject: powerpc/64: Allocate pacas per node Per-node allocations are possible on 64s with radix that does not have the bolted SLB limitation. Hash would be able to do the same if all CPUs had the bottom of their node-local memory bolted as well. This is left as an exercise for the reader. Signed-off-by: Nicholas Piggin [mpe: Add dummy definition of boot_cpuid for !SMP] Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/paca.c | 45 ++++++++++++++++++++++++++++++++++++------ arch/powerpc/kernel/setup_64.c | 4 ++++ 2 files changed, 43 insertions(+), 6 deletions(-) diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index a186911791c7..0f7e2be23fa2 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -20,6 +20,41 @@ #include "setup.h" +#ifndef CONFIG_SMP +#define boot_cpuid 0 +#endif + +static void *__init alloc_paca_data(unsigned long size, unsigned long align, + unsigned long limit, int cpu) +{ + unsigned long pa; + int nid; + + /* + * boot_cpuid paca is allocated very early before cpu_to_node is up. + * Set bottom-up mode, because the boot CPU should be on node-0, + * which will put its paca in the right place. 
+ */ + if (cpu == boot_cpuid) { + nid = -1; + memblock_set_bottom_up(true); + } else { + nid = early_cpu_to_node(cpu); + } + + pa = memblock_alloc_base_nid(size, align, limit, nid, MEMBLOCK_NONE); + if (!pa) { + pa = memblock_alloc_base(size, align, limit); + if (!pa) + panic("cannot allocate paca data"); + } + + if (cpu == boot_cpuid) + memblock_set_bottom_up(false); + + return __va(pa); +} + #ifdef CONFIG_PPC_PSERIES /* @@ -52,7 +87,7 @@ static struct lppaca * __init new_lppaca(int cpu, unsigned long limit) if (early_cpu_has_feature(CPU_FTR_HVMODE)) return NULL; - lp = __va(memblock_alloc_base(size, 0x400, limit)); + lp = alloc_paca_data(size, 0x400, limit, cpu); init_lppaca(lp); return lp; @@ -82,7 +117,7 @@ static struct slb_shadow * __init new_slb_shadow(int cpu, unsigned long limit) return NULL; } - s = __va(memblock_alloc_base(sizeof(*s), L1_CACHE_BYTES, limit)); + s = alloc_paca_data(sizeof(*s), L1_CACHE_BYTES, limit, cpu); memset(s, 0, sizeof(*s)); s->persistent = cpu_to_be32(SLB_NUM_BOLTED); @@ -170,7 +205,6 @@ void __init allocate_paca_ptrs(void) void __init allocate_paca(int cpu) { u64 limit; - unsigned long pa; struct paca_struct *paca; BUG_ON(cpu >= paca_nr_cpu_ids); @@ -185,9 +219,8 @@ void __init allocate_paca(int cpu) limit = ppc64_rma_size; #endif - pa = memblock_alloc_base(sizeof(struct paca_struct), - L1_CACHE_BYTES, limit); - paca = __va(pa); + paca = alloc_paca_data(sizeof(struct paca_struct), L1_CACHE_BYTES, + limit, cpu); paca_ptrs[cpu] = paca; memset(paca, 0, sizeof(struct paca_struct)); diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index dde34d35d1e7..02fa358982e6 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -312,6 +312,10 @@ void __init early_setup(unsigned long dt_ptr) early_init_devtree(__va(dt_ptr)); /* Now we know the logical id of our boot cpu, setup the paca. */ + if (boot_cpuid != 0) { + /* Poison paca_ptrs[0] again if it's not the boot cpu */ + memset(&paca_ptrs[0], 0x88, sizeof(paca_ptrs[0])); + } setup_paca(paca_ptrs[boot_cpuid]); fixup_boot_paca(); -- cgit v1.2.3 From f3865f9a7112590f0cae02dce05ec3c3a09ff405 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 14 Feb 2018 01:08:21 +1000 Subject: powerpc/64: Allocate per-cpu stacks node-local if possible Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/setup_64.c | 51 ++++++++++++++++++++++++++---------------- 1 file changed, 32 insertions(+), 19 deletions(-) diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 02fa358982e6..16ea71fa1ead 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -611,6 +611,21 @@ __init u64 ppc64_bolted_size(void) #endif } +static void *__init alloc_stack(unsigned long limit, int cpu) +{ + unsigned long pa; + + pa = memblock_alloc_base_nid(THREAD_SIZE, THREAD_SIZE, limit, + early_cpu_to_node(cpu), MEMBLOCK_NONE); + if (!pa) { + pa = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit); + if (!pa) + panic("cannot allocate stacks"); + } + + return __va(pa); +} + void __init irqstack_early_init(void) { u64 limit = ppc64_bolted_size(); @@ -622,12 +637,8 @@ void __init irqstack_early_init(void) * accessed in realmode. 
*/ for_each_possible_cpu(i) { - softirq_ctx[i] = (struct thread_info *) - __va(memblock_alloc_base(THREAD_SIZE, - THREAD_SIZE, limit)); - hardirq_ctx[i] = (struct thread_info *) - __va(memblock_alloc_base(THREAD_SIZE, - THREAD_SIZE, limit)); + softirq_ctx[i] = alloc_stack(limit, i); + hardirq_ctx[i] = alloc_stack(limit, i); } } @@ -635,20 +646,21 @@ void __init irqstack_early_init(void) void __init exc_lvl_early_init(void) { unsigned int i; - unsigned long sp; for_each_possible_cpu(i) { - sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE); - critirq_ctx[i] = (struct thread_info *)__va(sp); - paca_ptrs[i]->crit_kstack = __va(sp + THREAD_SIZE); + void *sp; - sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE); - dbgirq_ctx[i] = (struct thread_info *)__va(sp); - paca_ptrs[i]->dbg_kstack = __va(sp + THREAD_SIZE); + sp = alloc_stack(ULONG_MAX, i); + critirq_ctx[i] = sp; + paca_ptrs[i]->crit_kstack = sp + THREAD_SIZE; - sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE); - mcheckirq_ctx[i] = (struct thread_info *)__va(sp); - paca_ptrs[i]->mc_kstack = __va(sp + THREAD_SIZE); + sp = alloc_stack(ULONG_MAX, i); + dbgirq_ctx[i] = sp; + paca_ptrs[i]->dbg_kstack = sp + THREAD_SIZE; + + sp = alloc_stack(ULONG_MAX, i); + mcheckirq_ctx[i] = sp; + paca_ptrs[i]->mc_kstack = sp + THREAD_SIZE; } if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) @@ -702,20 +714,21 @@ void __init emergency_stack_init(void) for_each_possible_cpu(i) { struct thread_info *ti; - ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit)); + + ti = alloc_stack(limit, i); memset(ti, 0, THREAD_SIZE); emerg_stack_init_thread_info(ti, i); paca_ptrs[i]->emergency_sp = (void *)ti + THREAD_SIZE; #ifdef CONFIG_PPC_BOOK3S_64 /* emergency stack for NMI exception handling. */ - ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit)); + ti = alloc_stack(limit, i); memset(ti, 0, THREAD_SIZE); emerg_stack_init_thread_info(ti, i); paca_ptrs[i]->nmi_emergency_sp = (void *)ti + THREAD_SIZE; /* emergency stack for machine check exception handling. 
*/ - ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit)); + ti = alloc_stack(limit, i); memset(ti, 0, THREAD_SIZE); emerg_stack_init_thread_info(ti, i); paca_ptrs[i]->mc_emergency_sp = (void *)ti + THREAD_SIZE; -- cgit v1.2.3 From 0633dafcf8921048eb1ddc8c6fcfbe1b1cf3e42c Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 14 Feb 2018 01:08:23 +1000 Subject: powerpc/64s/radix: Split early page table mapping to its own function Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/mm/pgtable-radix.c | 112 +++++++++++++++++++++++----------------- 1 file changed, 65 insertions(+), 47 deletions(-) diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c index 573a9a2ee455..5afff0eb57d4 100644 --- a/arch/powerpc/mm/pgtable-radix.c +++ b/arch/powerpc/mm/pgtable-radix.c @@ -56,6 +56,50 @@ static __ref void *early_alloc_pgtable(unsigned long size) return pt; } +static int early_map_kernel_page(unsigned long ea, unsigned long pa, + pgprot_t flags, + unsigned int map_page_size) +{ + pgd_t *pgdp; + pud_t *pudp; + pmd_t *pmdp; + pte_t *ptep; + + pgdp = pgd_offset_k(ea); + if (pgd_none(*pgdp)) { + pudp = early_alloc_pgtable(PUD_TABLE_SIZE); + BUG_ON(pudp == NULL); + pgd_populate(&init_mm, pgdp, pudp); + } + pudp = pud_offset(pgdp, ea); + if (map_page_size == PUD_SIZE) { + ptep = (pte_t *)pudp; + goto set_the_pte; + } + if (pud_none(*pudp)) { + pmdp = early_alloc_pgtable(PMD_TABLE_SIZE); + BUG_ON(pmdp == NULL); + pud_populate(&init_mm, pudp, pmdp); + } + pmdp = pmd_offset(pudp, ea); + if (map_page_size == PMD_SIZE) { + ptep = pmdp_ptep(pmdp); + goto set_the_pte; + } + if (!pmd_present(*pmdp)) { + ptep = early_alloc_pgtable(PAGE_SIZE); + BUG_ON(ptep == NULL); + pmd_populate_kernel(&init_mm, pmdp, ptep); + } + ptep = pte_offset_kernel(pmdp, ea); + +set_the_pte: + set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, flags)); + smp_wmb(); + return 0; +} + + int radix__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t flags, unsigned int map_page_size) @@ -68,54 +112,28 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa, * Make sure task size is correct as per the max adddr */ BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE); - if (slab_is_available()) { - pgdp = pgd_offset_k(ea); - pudp = pud_alloc(&init_mm, pgdp, ea); - if (!pudp) - return -ENOMEM; - if (map_page_size == PUD_SIZE) { - ptep = (pte_t *)pudp; - goto set_the_pte; - } - pmdp = pmd_alloc(&init_mm, pudp, ea); - if (!pmdp) - return -ENOMEM; - if (map_page_size == PMD_SIZE) { - ptep = pmdp_ptep(pmdp); - goto set_the_pte; - } - ptep = pte_alloc_kernel(pmdp, ea); - if (!ptep) - return -ENOMEM; - } else { - pgdp = pgd_offset_k(ea); - if (pgd_none(*pgdp)) { - pudp = early_alloc_pgtable(PUD_TABLE_SIZE); - BUG_ON(pudp == NULL); - pgd_populate(&init_mm, pgdp, pudp); - } - pudp = pud_offset(pgdp, ea); - if (map_page_size == PUD_SIZE) { - ptep = (pte_t *)pudp; - goto set_the_pte; - } - if (pud_none(*pudp)) { - pmdp = early_alloc_pgtable(PMD_TABLE_SIZE); - BUG_ON(pmdp == NULL); - pud_populate(&init_mm, pudp, pmdp); - } - pmdp = pmd_offset(pudp, ea); - if (map_page_size == PMD_SIZE) { - ptep = pmdp_ptep(pmdp); - goto set_the_pte; - } - if (!pmd_present(*pmdp)) { - ptep = early_alloc_pgtable(PAGE_SIZE); - BUG_ON(ptep == NULL); - pmd_populate_kernel(&init_mm, pmdp, ptep); - } - ptep = pte_offset_kernel(pmdp, ea); + + if (!slab_is_available()) + return early_map_kernel_page(ea, pa, flags, map_page_size); + + pgdp = pgd_offset_k(ea); + pudp = pud_alloc(&init_mm, 
pgdp, ea); + if (!pudp) + return -ENOMEM; + if (map_page_size == PUD_SIZE) { + ptep = (pte_t *)pudp; + goto set_the_pte; + } + pmdp = pmd_alloc(&init_mm, pudp, ea); + if (!pmdp) + return -ENOMEM; + if (map_page_size == PMD_SIZE) { + ptep = pmdp_ptep(pmdp); + goto set_the_pte; } + ptep = pte_alloc_kernel(pmdp, ea); + if (!ptep) + return -ENOMEM; set_the_pte: set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, flags)); -- cgit v1.2.3 From 2ad452ffaaa8d2f1124208e507e7b045e8ee98a6 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 14 Feb 2018 01:08:24 +1000 Subject: powerpc/64s/radix: Allocate kernel page tables node-local if possible Try to allocate kernel page tables for direct mapping and vmemmap according to the node of the memory they will map. The node is not available for the linear map in early boot, so use range allocation to allocate the page tables from the region they map, which is effectively node-local. Signed-off-by: Nicholas Piggin [mpe: Fix build error in radix__create_section_mapping()] Signed-off-by: Michael Ellerman --- arch/powerpc/mm/pgtable-radix.c | 104 ++++++++++++++++++++++++++++++---------- 1 file changed, 80 insertions(+), 24 deletions(-) diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c index 5afff0eb57d4..5f4b75315990 100644 --- a/arch/powerpc/mm/pgtable-radix.c +++ b/arch/powerpc/mm/pgtable-radix.c @@ -46,11 +46,26 @@ static int native_register_process_table(unsigned long base, unsigned long pg_sz return 0; } -static __ref void *early_alloc_pgtable(unsigned long size) +static __ref void *early_alloc_pgtable(unsigned long size, int nid, + unsigned long region_start, unsigned long region_end) { + unsigned long pa = 0; void *pt; - pt = __va(memblock_alloc_base(size, size, MEMBLOCK_ALLOC_ANYWHERE)); + if (region_start || region_end) /* has region hint */ + pa = memblock_alloc_range(size, size, region_start, region_end, + MEMBLOCK_NONE); + else if (nid != -1) /* has node hint */ + pa = memblock_alloc_base_nid(size, size, + MEMBLOCK_ALLOC_ANYWHERE, + nid, MEMBLOCK_NONE); + + if (!pa) + pa = memblock_alloc_base(size, size, MEMBLOCK_ALLOC_ANYWHERE); + + BUG_ON(!pa); + + pt = __va(pa); memset(pt, 0, size); return pt; @@ -58,8 +73,11 @@ static __ref void *early_alloc_pgtable(unsigned long size) static int early_map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t flags, - unsigned int map_page_size) + unsigned int map_page_size, + int nid, + unsigned long region_start, unsigned long region_end) { + unsigned long pfn = pa >> PAGE_SHIFT; pgd_t *pgdp; pud_t *pudp; pmd_t *pmdp; @@ -67,8 +85,8 @@ static int early_map_kernel_page(unsigned long ea, unsigned long pa, pgdp = pgd_offset_k(ea); if (pgd_none(*pgdp)) { - pudp = early_alloc_pgtable(PUD_TABLE_SIZE); - BUG_ON(pudp == NULL); + pudp = early_alloc_pgtable(PUD_TABLE_SIZE, nid, + region_start, region_end); pgd_populate(&init_mm, pgdp, pudp); } pudp = pud_offset(pgdp, ea); @@ -77,8 +95,8 @@ static int early_map_kernel_page(unsigned long ea, unsigned long pa, goto set_the_pte; } if (pud_none(*pudp)) { - pmdp = early_alloc_pgtable(PMD_TABLE_SIZE); - BUG_ON(pmdp == NULL); + pmdp = early_alloc_pgtable(PMD_TABLE_SIZE, nid, + region_start, region_end); pud_populate(&init_mm, pudp, pmdp); } pmdp = pmd_offset(pudp, ea); @@ -87,23 +105,29 @@ static int early_map_kernel_page(unsigned long ea, unsigned long pa, goto set_the_pte; } if (!pmd_present(*pmdp)) { - ptep = early_alloc_pgtable(PAGE_SIZE); - BUG_ON(ptep == NULL); + ptep = early_alloc_pgtable(PAGE_SIZE, nid, + region_start, 
region_end); pmd_populate_kernel(&init_mm, pmdp, ptep); } ptep = pte_offset_kernel(pmdp, ea); set_the_pte: - set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, flags)); + set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags)); smp_wmb(); return 0; } -int radix__map_kernel_page(unsigned long ea, unsigned long pa, +/* + * nid, region_start, and region_end are hints to try to place the page + * table memory in the same node or region. + */ +static int __map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t flags, - unsigned int map_page_size) + unsigned int map_page_size, + int nid, + unsigned long region_start, unsigned long region_end) { + unsigned long pfn = pa >> PAGE_SHIFT; pgd_t *pgdp; pud_t *pudp; pmd_t *pmdp; @@ -113,9 +137,15 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa, */ BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE); - if (!slab_is_available()) - return early_map_kernel_page(ea, pa, flags, map_page_size); + if (unlikely(!slab_is_available())) + return early_map_kernel_page(ea, pa, flags, map_page_size, + nid, region_start, region_end); + /* + * Should make page table allocation functions be able to take a + * node, so we can place kernel page tables on the right nodes after + * boot. + */ pgdp = pgd_offset_k(ea); pudp = pud_alloc(&init_mm, pgdp, ea); if (!pudp) @@ -136,11 +166,25 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa, return -ENOMEM; set_the_pte: - set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, flags)); + set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags)); smp_wmb(); return 0; } +static int __map_kernel_page_nid(unsigned long ea, unsigned long pa, + pgprot_t flags, + unsigned int map_page_size, int nid) +{ + return __map_kernel_page(ea, pa, flags, map_page_size, nid, 0, 0); +} + +int radix__map_kernel_page(unsigned long ea, unsigned long pa, + pgprot_t flags, + unsigned int map_page_size) +{ + return __map_kernel_page(ea, pa, flags, map_page_size, -1, 0, 0); +} + #ifdef CONFIG_STRICT_KERNEL_RWX void radix__change_memory_range(unsigned long start, unsigned long end, unsigned long clear) @@ -227,7 +271,8 @@ static inline void __meminit print_mapping(unsigned long start, } static int __meminit create_physical_mapping(unsigned long start, - unsigned long end) + unsigned long end, + int nid) { unsigned long vaddr, addr, mapping_size = 0; pgprot_t prot; @@ -283,7 +328,7 @@ retry: else prot = PAGE_KERNEL; - rc = radix__map_kernel_page(vaddr, addr, prot, mapping_size); + rc = __map_kernel_page(vaddr, addr, prot, mapping_size, nid, start, end); if (rc) return rc; } @@ -292,7 +337,7 @@ retry: return 0; } -static void __init radix_init_pgtable(void) +void __init radix_init_pgtable(void) { unsigned long rts_field; struct memblock_region *reg; @@ -302,9 +347,16 @@ static void __init radix_init_pgtable(void) /* * Create the linear mapping, using standard page size for now */ - for_each_memblock(memory, reg) + for_each_memblock(memory, reg) { + /* + * The memblock allocator is up at this point, so the + * page tables will be allocated within the range. No + * need for a node (which we don't have yet). + */ WARN_ON(create_physical_mapping(reg->base, - reg->base + reg->size)); + reg->base + reg->size, + -1)); + } /* Find out how many PID bits are supported */ if (cpu_has_feature(CPU_FTR_HVMODE)) { @@ -333,7 +385,7 @@ static void __init radix_init_pgtable(void) * host.
*/ BUG_ON(PRTB_SIZE_SHIFT > 36); - process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT); + process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT, -1, 0, 0); /* * Fill in the process table. */ @@ -810,7 +862,7 @@ static void remove_pagetable(unsigned long start, unsigned long end) int __ref radix__create_section_mapping(unsigned long start, unsigned long end) { - return create_physical_mapping(start, end); + return create_physical_mapping(start, end, -1); } int radix__remove_section_mapping(unsigned long start, unsigned long end) @@ -827,8 +879,12 @@ int __meminit radix__vmemmap_create_mapping(unsigned long start, { /* Create a PTE encoding */ unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW; + int nid = early_pfn_to_nid(phys >> PAGE_SHIFT); + int ret; + + ret = __map_kernel_page_nid(start, phys, __pgprot(flags), page_size, nid); + BUG_ON(ret); - BUG_ON(radix__map_kernel_page(start, phys, __pgprot(flags), page_size)); return 0; } -- cgit v1.2.3 From 29ab6c4708a587bc27ea0c765ac36aef9c1a77c9 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 14 Feb 2018 01:08:22 +1000 Subject: powerpc/mm: Pass node id into create_section_mapping Signed-off-by: Nicholas Piggin [mpe: Move __map_kernel_page_nid() inside #ifdef SPARSEMEM_VMEMMAP] Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/book3s/64/hash.h | 2 +- arch/powerpc/include/asm/book3s/64/radix.h | 2 +- arch/powerpc/include/asm/sparsemem.h | 2 +- arch/powerpc/mm/hash_utils_64.c | 2 +- arch/powerpc/mm/mem.c | 4 ++-- arch/powerpc/mm/pgtable-book3s64.c | 6 +++--- arch/powerpc/mm/pgtable-radix.c | 18 +++++++++--------- 7 files changed, 18 insertions(+), 18 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h index 0920eff731b3..b1ace9619e94 100644 --- a/arch/powerpc/include/asm/book3s/64/hash.h +++ b/arch/powerpc/include/asm/book3s/64/hash.h @@ -201,7 +201,7 @@ extern int __meminit hash__vmemmap_create_mapping(unsigned long start, extern void hash__vmemmap_remove_mapping(unsigned long start, unsigned long page_size); -int hash__create_section_mapping(unsigned long start, unsigned long end); +int hash__create_section_mapping(unsigned long start, unsigned long end, int nid); int hash__remove_section_mapping(unsigned long start, unsigned long end); #endif /* !__ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h index 365010f66570..705193e7192f 100644 --- a/arch/powerpc/include/asm/book3s/64/radix.h +++ b/arch/powerpc/include/asm/book3s/64/radix.h @@ -313,7 +313,7 @@ static inline unsigned long radix__get_tree_size(void) } #ifdef CONFIG_MEMORY_HOTPLUG -int radix__create_section_mapping(unsigned long start, unsigned long end); +int radix__create_section_mapping(unsigned long start, unsigned long end, int nid); int radix__remove_section_mapping(unsigned long start, unsigned long end); #endif /* CONFIG_MEMORY_HOTPLUG */ #endif /* __ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/sparsemem.h b/arch/powerpc/include/asm/sparsemem.h index a7916ee6dfb6..bc66712bdc3c 100644 --- a/arch/powerpc/include/asm/sparsemem.h +++ b/arch/powerpc/include/asm/sparsemem.h @@ -17,7 +17,7 @@ #endif /* CONFIG_SPARSEMEM */ #ifdef CONFIG_MEMORY_HOTPLUG -extern int create_section_mapping(unsigned long start, unsigned long end); +extern int create_section_mapping(unsigned long start, unsigned long end, int nid); extern int remove_section_mapping(unsigned long start, unsigned long end); #ifdef CONFIG_PPC_BOOK3S_64 
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 7d07c7e17db6..ceb5494804b2 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -781,7 +781,7 @@ void resize_hpt_for_hotplug(unsigned long new_mem_size) } } -int hash__create_section_mapping(unsigned long start, unsigned long end) +int hash__create_section_mapping(unsigned long start, unsigned long end, int nid) { int rc = htab_bolt_mapping(start, end, __pa(start), pgprot_val(PAGE_KERNEL), mmu_linear_psize, diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 4eee46ea4d96..f50ce66dd6bd 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -117,7 +117,7 @@ int memory_add_physaddr_to_nid(u64 start) } #endif -int __weak create_section_mapping(unsigned long start, unsigned long end) +int __weak create_section_mapping(unsigned long start, unsigned long end, int nid) { return -ENODEV; } @@ -137,7 +137,7 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap, resize_hpt_for_hotplug(memblock_phys_mem_size()); start = (unsigned long)__va(start); - rc = create_section_mapping(start, start + size); + rc = create_section_mapping(start, start + size, nid); if (rc) { pr_warn("Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n", start, start + size, rc); diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c index 422e80253a33..c736280068ce 100644 --- a/arch/powerpc/mm/pgtable-book3s64.c +++ b/arch/powerpc/mm/pgtable-book3s64.c @@ -155,12 +155,12 @@ void mmu_cleanup_all(void) } #ifdef CONFIG_MEMORY_HOTPLUG -int create_section_mapping(unsigned long start, unsigned long end) +int create_section_mapping(unsigned long start, unsigned long end, int nid) { if (radix_enabled()) - return radix__create_section_mapping(start, end); + return radix__create_section_mapping(start, end, nid); - return hash__create_section_mapping(start, end); + return hash__create_section_mapping(start, end, nid); } int remove_section_mapping(unsigned long start, unsigned long end) diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c index 5f4b75315990..a425636af8b4 100644 --- a/arch/powerpc/mm/pgtable-radix.c +++ b/arch/powerpc/mm/pgtable-radix.c @@ -171,13 +171,6 @@ set_the_pte: return 0; } -static int __map_kernel_page_nid(unsigned long ea, unsigned long pa, - pgprot_t flags, - unsigned int map_page_size, int nid) -{ - return __map_kernel_page(ea, pa, flags, map_page_size, nid, 0, 0); -} - int radix__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t flags, unsigned int map_page_size) @@ -860,9 +853,9 @@ static void remove_pagetable(unsigned long start, unsigned long end) radix__flush_tlb_kernel_range(start, end); } -int __ref radix__create_section_mapping(unsigned long start, unsigned long end) +int __ref radix__create_section_mapping(unsigned long start, unsigned long end, int nid) { - return create_physical_mapping(start, end, -1); + return create_physical_mapping(start, end, nid); } int radix__remove_section_mapping(unsigned long start, unsigned long end) @@ -873,6 +866,13 @@ int radix__remove_section_mapping(unsigned long start, unsigned long end) #endif /* CONFIG_MEMORY_HOTPLUG */ #ifdef CONFIG_SPARSEMEM_VMEMMAP +static int __map_kernel_page_nid(unsigned long ea, unsigned long pa, + pgprot_t flags, unsigned int map_page_size, + int nid) +{ + return __map_kernel_page(ea, pa, flags, map_page_size, nid, 0, 0); +} + int __meminit radix__vmemmap_create_mapping(unsigned long start, 
unsigned long page_size, unsigned long phys) -- cgit v1.2.3 From 0f9bdfe3c77091e8704d2e510eb7c2c2c6cde524 Mon Sep 17 00:00:00 2001 From: Mauricio Faria de Oliveira Date: Thu, 29 Mar 2018 15:32:11 -0300 Subject: powerpc/pseries: Fix clearing of security feature flags The H_CPU_BEHAV_* flags should be checked for in the 'behaviour' field of 'struct h_cpu_char_result' -- 'character' is for H_CPU_CHAR_* flags. Found by playing around with QEMU's implementation of the hypercall: H_CPU_CHAR=0xf000000000000000 H_CPU_BEHAV=0x0000000000000000 This clears H_CPU_BEHAV_FAVOUR_SECURITY and H_CPU_BEHAV_L1D_FLUSH_PR so pseries_setup_rfi_flush() disables 'rfi_flush'; and it also clears H_CPU_CHAR_L1D_THREAD_PRIV flag. So there is no RFI flush mitigation at all for cpu_show_meltdown() to report; but currently it does: Original kernel: # cat /sys/devices/system/cpu/vulnerabilities/meltdown Mitigation: RFI Flush Patched kernel: # cat /sys/devices/system/cpu/vulnerabilities/meltdown Not affected H_CPU_CHAR=0x0000000000000000 H_CPU_BEHAV=0xf000000000000000 This sets H_CPU_BEHAV_BNDS_CHK_SPEC_BAR so cpu_show_spectre_v1() should report vulnerable; but currently it doesn't: Original kernel: # cat /sys/devices/system/cpu/vulnerabilities/spectre_v1 Not affected Patched kernel: # cat /sys/devices/system/cpu/vulnerabilities/spectre_v1 Vulnerable Brown-paper-bag-by: Michael Ellerman Fixes: f636c14790ea ("powerpc/pseries: Set or clear security feature flags") Signed-off-by: Mauricio Faria de Oliveira Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/pseries/setup.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 1f122359cd8f..b11564f2a4c7 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -484,13 +484,13 @@ static void init_cpu_char_feature_flags(struct h_cpu_char_result *result) * The features below are enabled by default, so we instead look to see * if firmware has *disabled* them, and clear them if so. */ - if (!(result->character & H_CPU_BEHAV_FAVOUR_SECURITY)) + if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY)) security_ftr_clear(SEC_FTR_FAVOUR_SECURITY); - if (!(result->character & H_CPU_BEHAV_L1D_FLUSH_PR)) + if (!(result->behaviour & H_CPU_BEHAV_L1D_FLUSH_PR)) security_ftr_clear(SEC_FTR_L1D_FLUSH_PR); - if (!(result->character & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR)) + if (!(result->behaviour & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR)) security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR); } -- cgit v1.2.3 From 741de617661794246f84a21a02fc5e327bffc9ad Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Tue, 27 Mar 2018 01:02:33 +1000 Subject: powerpc/powernv: Handle unknown OPAL errors in opal_nvram_write() opal_nvram_write currently just assumes success if it encounters an error other than OPAL_BUSY or OPAL_BUSY_EVENT. Have it return -EIO on other errors instead. 
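With the fix applied, the tail of the function looks like this (a condensed sketch based on the hunk below; the nvram size/offset bounds checks at the top of the real function are elided):

static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
{
	s64 rc = OPAL_BUSY;

	/* Retry while firmware reports it is busy. */
	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
		rc = opal_write_nvram(__pa(buf), count, *index);
		if (rc == OPAL_BUSY_EVENT)
			opal_poll_events(NULL);
	}

	/* New: any status other than OPAL_SUCCESS is reported as an error. */
	if (rc)
		return -EIO;

	*index += count;
	return count;
}

Previously the final check was missing, so an unknown OPAL status fell through and was reported to the caller as a successful write of count bytes.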
Fixes: 628daa8d5abf ("powerpc/powernv: Add RTC and NVRAM support plus RTAS fallbacks") Cc: stable@vger.kernel.org # v3.2+ Signed-off-by: Nicholas Piggin Reviewed-by: Vasant Hegde Acked-by: Stewart Smith Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/powernv/opal-nvram.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/powerpc/platforms/powernv/opal-nvram.c b/arch/powerpc/platforms/powernv/opal-nvram.c index 9db4398ded5d..ba2ff06a2c98 100644 --- a/arch/powerpc/platforms/powernv/opal-nvram.c +++ b/arch/powerpc/platforms/powernv/opal-nvram.c @@ -59,6 +59,10 @@ static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index) if (rc == OPAL_BUSY_EVENT) opal_poll_events(NULL); } + + if (rc) + return -EIO; + *index += count; return count; } -- cgit v1.2.3 From 3a52f6f980c45b0dfa667f30d3ff034d033d260f Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Thu, 29 Mar 2018 11:36:04 +1100 Subject: macintosh/adb: Use C99 initializers for struct adb_driver instances No change to object files. Cc: Benjamin Herrenschmidt Signed-off-by: Finn Thain Signed-off-by: Michael Ellerman --- drivers/macintosh/adb-iop.c | 14 +++++++------- drivers/macintosh/macio-adb.c | 15 +++++++-------- drivers/macintosh/via-macii.c | 14 +++++++------- drivers/macintosh/via-pmu.c | 14 +++++++------- drivers/macintosh/via-pmu68k.c | 14 +++++++------- 5 files changed, 35 insertions(+), 36 deletions(-) diff --git a/drivers/macintosh/adb-iop.c b/drivers/macintosh/adb-iop.c index 15db69d8ba69..ca623e6446e4 100644 --- a/drivers/macintosh/adb-iop.c +++ b/drivers/macintosh/adb-iop.c @@ -53,13 +53,13 @@ static void adb_iop_poll(void); static int adb_iop_reset_bus(void); struct adb_driver adb_iop_driver = { - "ISM IOP", - adb_iop_probe, - adb_iop_init, - adb_iop_send_request, - adb_iop_autopoll, - adb_iop_poll, - adb_iop_reset_bus + .name = "ISM IOP", + .probe = adb_iop_probe, + .init = adb_iop_init, + .send_request = adb_iop_send_request, + .autopoll = adb_iop_autopoll, + .poll = adb_iop_poll, + .reset_bus = adb_iop_reset_bus }; static void adb_iop_end_req(struct adb_request *req, int state) diff --git a/drivers/macintosh/macio-adb.c b/drivers/macintosh/macio-adb.c index 9a6223add30e..eb3adfb7f88d 100644 --- a/drivers/macintosh/macio-adb.c +++ b/drivers/macintosh/macio-adb.c @@ -70,14 +70,13 @@ static void macio_adb_poll(void); static int macio_adb_reset_bus(void); struct adb_driver macio_adb_driver = { - "MACIO", - macio_probe, - macio_init, - macio_send_request, - /*macio_write,*/ - macio_adb_autopoll, - macio_adb_poll, - macio_adb_reset_bus + .name = "MACIO", + .probe = macio_probe, + .init = macio_init, + .send_request = macio_send_request, + .autopoll = macio_adb_autopoll, + .poll = macio_adb_poll, + .reset_bus = macio_adb_reset_bus, }; int macio_probe(void) diff --git a/drivers/macintosh/via-macii.c b/drivers/macintosh/via-macii.c index 4ba06a1695ea..cf6f7d52d6be 100644 --- a/drivers/macintosh/via-macii.c +++ b/drivers/macintosh/via-macii.c @@ -91,13 +91,13 @@ static void macii_poll(void); static int macii_reset_bus(void); struct adb_driver via_macii_driver = { - "Mac II", - macii_probe, - macii_init, - macii_send_request, - macii_autopoll, - macii_poll, - macii_reset_bus + .name = "Mac II", + .probe = macii_probe, + .init = macii_init, + .send_request = macii_send_request, + .autopoll = macii_autopoll, + .poll = macii_poll, + .reset_bus = macii_reset_bus, }; static enum macii_state { diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c index fc56c7067732..433dbeddfcf9 100644 --- 
a/drivers/macintosh/via-pmu.c +++ b/drivers/macintosh/via-pmu.c @@ -199,13 +199,13 @@ static const struct file_operations pmu_options_proc_fops; #ifdef CONFIG_ADB const struct adb_driver via_pmu_driver = { - "PMU", - pmu_probe, - pmu_init, - pmu_send_request, - pmu_adb_autopoll, - pmu_poll_adb, - pmu_adb_reset_bus + .name = "PMU", + .probe = pmu_probe, + .init = pmu_init, + .send_request = pmu_send_request, + .autopoll = pmu_adb_autopoll, + .poll = pmu_poll_adb, + .reset_bus = pmu_adb_reset_bus, }; #endif /* CONFIG_ADB */ diff --git a/drivers/macintosh/via-pmu68k.c b/drivers/macintosh/via-pmu68k.c index 25465fb91ec9..de69369cdb2d 100644 --- a/drivers/macintosh/via-pmu68k.c +++ b/drivers/macintosh/via-pmu68k.c @@ -119,13 +119,13 @@ static void pmu_enable_backlight(int on); static void pmu_set_brightness(int level); struct adb_driver via_pmu_driver = { - "68K PMU", - pmu_probe, - pmu_init, - pmu_send_request, - pmu_autopoll, - pmu_poll, - pmu_reset_bus + .name = "68K PMU", + .probe = pmu_probe, + .init = pmu_init, + .send_request = pmu_send_request, + .autopoll = pmu_autopoll, + .poll = pmu_poll, + .reset_bus = pmu_reset_bus, }; /* -- cgit v1.2.3 From e6e133c47e6bd4d5dac05b35d06634a8e5648615 Mon Sep 17 00:00:00 2001 From: "Naveen N. Rao" Date: Wed, 17 Jan 2018 17:52:24 +0530 Subject: powerpc/kprobes: Fix call trace due to incorrect preempt count Michael Ellerman reported the following call trace when running ftracetest: BUG: using __this_cpu_write() in preemptible [00000000] code: ftracetest/6178 caller is opt_pre_handler+0xc4/0x110 CPU: 1 PID: 6178 Comm: ftracetest Not tainted 4.15.0-rc7-gcc6x-gb2cd1df #1 Call Trace: [c0000000f9ec39c0] [c000000000ac4304] dump_stack+0xb4/0x100 (unreliable) [c0000000f9ec3a00] [c00000000061159c] check_preemption_disabled+0x15c/0x170 [c0000000f9ec3a90] [c000000000217e84] opt_pre_handler+0xc4/0x110 [c0000000f9ec3af0] [c00000000004cf68] optimized_callback+0x148/0x170 [c0000000f9ec3b40] [c00000000004d954] optinsn_slot+0xec/0x10000 [c0000000f9ec3e30] [c00000000004bae0] kretprobe_trampoline+0x0/0x10 This is showing up since OPTPROBES is now enabled with CONFIG_PREEMPT. trampoline_probe_handler() considers itself to be a special kprobe handler for kretprobes. In doing so, it expects to be called from kprobe_handler() on a trap, and re-enables preemption before returning a non-zero return value so as to suppress any subsequent processing of the trap by the kprobe_handler(). However, with optprobes, we don't deal with special handlers (we ignore the return code) and just try to re-enable preemption causing the above trace. To address this, modify trampoline_probe_handler() to not be special. The only additional processing done in kprobe_handler() is to emulate the instruction (in this case, a 'nop'). We adjust the value of regs->nip for the purpose and delegate the job of re-enabling preemption and resetting current kprobe to the probe handlers (kprobe_handler() or optimized_callback()). Fixes: 8a2d71a3f273 ("powerpc/kprobes: Disable preemption before invoking probe handler for optprobes") Cc: stable@vger.kernel.org # v4.15+ Reported-by: Michael Ellerman Signed-off-by: Naveen N. 
Rao Acked-by: Ananth N Mavinakayanahalli Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/kprobes.c | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index ca5d5a081e75..e4c5bf33970b 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -455,29 +455,33 @@ static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) } kretprobe_assert(ri, orig_ret_address, trampoline_address); - regs->nip = orig_ret_address; + /* - * Make LR point to the orig_ret_address. - * When the 'nop' inside the kretprobe_trampoline - * is optimized, we can do a 'blr' after executing the - * detour buffer code. + * We get here through one of two paths: + * 1. by taking a trap -> kprobe_handler() -> here + * 2. by optprobe branch -> optimized_callback() -> opt_pre_handler() -> here + * + * When going back through (1), we need regs->nip to be setup properly + * as it is used to determine the return address from the trap. + * For (2), since nip is not honoured with optprobes, we instead setup + * the link register properly so that the subsequent 'blr' in + * kretprobe_trampoline jumps back to the right instruction. + * + * For nip, we should set the address to the previous instruction since + * we end up emulating it in kprobe_handler(), which increments the nip + * again. */ + regs->nip = orig_ret_address - 4; regs->link = orig_ret_address; - reset_current_kprobe(); kretprobe_hash_unlock(current, &flags); - preempt_enable_no_resched(); hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { hlist_del(&ri->hlist); kfree(ri); } - /* - * By returning a non-zero value, we are telling - * kprobe_handler() that we don't want the post_handler - * to run (and have re-enabled preemption) - */ - return 1; + + return 0; } NOKPROBE_SYMBOL(trampoline_probe_handler); -- cgit v1.2.3 From f208638680e5cb4eb0d2871ce8c29a6cfbe44dce Mon Sep 17 00:00:00 2001 From: Ram Pai Date: Mon, 26 Mar 2018 19:36:54 -0700 Subject: powerpc/mm: Fix thread_pkey_regs_init() thread_pkey_regs_init() initializes the pkey-related registers instead of initializing the fields in the task structure. Fortunately, those key-related registers are reset to zero when the task gets scheduled on the cpu. However, it's good to fix this glaringly visible error.
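To see why storing into the thread_struct is the right fix, consider the context-switch side: the saved fields are what gets written back into the SPRs when the task is next scheduled. A minimal sketch of that restore step, using an illustrative helper name (the kernel's actual restore logic differs in detail):

    static void pkey_regs_restore_sketch(struct thread_struct *next)
    {
            if (static_branch_likely(&pkey_disabled))
                    return;

            /* The scheduler-side path writes the task's *saved* pkey state
             * into the live registers, so init must populate the
             * thread_struct fields rather than scribble on the SPRs of
             * whatever task happens to be running. */
            write_amr(next->amr);
            write_iamr(next->iamr);
            write_uamor(next->uamor);
    }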
Fixes: 06bb53b33804 ("powerpc: store and restore the pkey state across context switches") Signed-off-by: Ram Pai Signed-off-by: Thiago Jung Bauermann Acked-by: Balbir Singh Signed-off-by: Michael Ellerman --- arch/powerpc/mm/pkeys.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/mm/pkeys.c b/arch/powerpc/mm/pkeys.c index ba71c5481f42..328737b4d73c 100644 --- a/arch/powerpc/mm/pkeys.c +++ b/arch/powerpc/mm/pkeys.c @@ -308,9 +308,9 @@ void thread_pkey_regs_init(struct thread_struct *thread) if (static_branch_likely(&pkey_disabled)) return; - write_amr(read_amr() & pkey_amr_uamor_mask); - write_iamr(read_iamr() & pkey_iamr_mask); - write_uamor(read_uamor() & pkey_amr_uamor_mask); + thread->amr = read_amr() & pkey_amr_uamor_mask; + thread->iamr = read_iamr() & pkey_iamr_mask; + thread->uamor = read_uamor() & pkey_amr_uamor_mask; } static inline bool pkey_allows_readwrite(int pkey) -- cgit v1.2.3 From 9a2c1d31e6910b9d5e0205f9167d0b1abeea1413 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Mon, 26 Mar 2018 14:43:09 +0000 Subject: powerpc/4xx: Fix error return code in ppc4xx_msi_probe() Fix to return a negative error code from the error handling case instead of 0, as done elsewhere in this function. Signed-off-by: Wei Yongjun [mpe: Add missing ';' to make it compile] Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/4xx/msi.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/platforms/4xx/msi.c b/arch/powerpc/platforms/4xx/msi.c index 4b859c840ea9..96aaae678928 100644 --- a/arch/powerpc/platforms/4xx/msi.c +++ b/arch/powerpc/platforms/4xx/msi.c @@ -241,7 +241,8 @@ static int ppc4xx_msi_probe(struct platform_device *dev) if (!msi_irqs) return -ENODEV; - if (ppc4xx_setup_pcieh_hw(dev, res, msi)) + err = ppc4xx_setup_pcieh_hw(dev, res, msi); + if (err) goto error_out; err = ppc4xx_msi_init_allocator(dev, msi); -- cgit v1.2.3 From 0bfdf598900fd62869659f360d3387ed80eb71cf Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 22 Mar 2018 20:41:46 +1000 Subject: powerpc/64: Fix smp_wmb barrier definition to use lwsync consistently asm/barrier.h is not always included after asm/synch.h, which meant it was missing __SUBARCH_HAS_LWSYNC, so in some files smp_wmb() would be eieio when it should be lwsync. kernel/time/hrtimer.c is one case. __SUBARCH_HAS_LWSYNC is only used in one place, so just fold it in to where it's used. Previously, with my small simulator config, there were 377 instances of eieio in the tree. After this patch there are 55.
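The failure mode is a classic include-order hazard, reduced here to a hedged illustration (macro bodies simplified, not the kernel headers verbatim):

    /* asm/synch.h used to provide the guard macro: */
    #define __SUBARCH_HAS_LWSYNC

    /* asm/barrier.h then tested it. If synch.h had not been included
     * first, the guard was undefined and the #ifdef silently took the
     * fallback branch: */
    #ifdef __SUBARCH_HAS_LWSYNC
    # define SMPWMB LWSYNC  /* intended: lightweight sync */
    #else
    # define SMPWMB eieio   /* what some files silently got */
    #endif

Folding the CPU feature test directly into barrier.h, as the hunk below does, removes the include-order dependency entirely.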
Fixes: 46d075be585e ("powerpc: Optimise smp_wmb") Cc: stable@vger.kernel.org # v2.6.29+ Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/barrier.h | 3 ++- arch/powerpc/include/asm/synch.h | 4 ---- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h index 10daa1d56e0a..c7c63959ba91 100644 --- a/arch/powerpc/include/asm/barrier.h +++ b/arch/powerpc/include/asm/barrier.h @@ -35,7 +35,8 @@ #define rmb() __asm__ __volatile__ ("sync" : : : "memory") #define wmb() __asm__ __volatile__ ("sync" : : : "memory") -#ifdef __SUBARCH_HAS_LWSYNC +/* The sub-arch has lwsync */ +#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC) # define SMPWMB LWSYNC #else # define SMPWMB eieio diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h index 63e7f5a1f105..6ec546090ba1 100644 --- a/arch/powerpc/include/asm/synch.h +++ b/arch/powerpc/include/asm/synch.h @@ -6,10 +6,6 @@ #include #include -#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC) -#define __SUBARCH_HAS_LWSYNC -#endif - #ifndef __ASSEMBLY__ extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup; extern void do_lwsync_fixups(unsigned long value, void *fixup_start, -- cgit v1.2.3 From e283655b5abe26462d53d5196f186c5e8863af3b Mon Sep 17 00:00:00 2001 From: Aaro Koskinen Date: Fri, 16 Mar 2018 22:17:28 +0200 Subject: drivers: macintosh: rack-meter: really fix bogus memsets We should zero an array using sizeof instead of number of elements. Fixes the following compiler (GCC 7.3.0) warnings: drivers/macintosh/rack-meter.c: In function 'rackmeter_do_pause': drivers/macintosh/rack-meter.c:157:2: warning: 'memset' used with length equal to number of elements without multiplication by element size [-Wmemset-elt-size] drivers/macintosh/rack-meter.c:158:2: warning: 'memset' used with length equal to number of elements without multiplication by element size [-Wmemset-elt-size] Fixes: 4f7bef7a9f69 ("drivers: macintosh: rack-meter: fix bogus memsets") Reported-by: Stephen Rothwell Signed-off-by: Aaro Koskinen Signed-off-by: Michael Ellerman --- drivers/macintosh/rack-meter.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c index 190c9efeace5..1f29d2413c74 100644 --- a/drivers/macintosh/rack-meter.c +++ b/drivers/macintosh/rack-meter.c @@ -154,8 +154,8 @@ static void rackmeter_do_pause(struct rackmeter *rm, int pause) DBDMA_DO_STOP(rm->dma_regs); return; } - memset(rdma->buf1, 0, ARRAY_SIZE(rdma->buf1)); - memset(rdma->buf2, 0, ARRAY_SIZE(rdma->buf2)); + memset(rdma->buf1, 0, sizeof(rdma->buf1)); + memset(rdma->buf2, 0, sizeof(rdma->buf2)); rm->dma_buf_v->mark = 0; -- cgit v1.2.3 From 16b19f1a03f6a49618611b76818f04ea9cd15fb5 Mon Sep 17 00:00:00 2001 From: Frederic Barrat Date: Wed, 14 Mar 2018 18:01:14 +0100 Subject: powerpc/xive: Fix wrong xmon output caused by typo Signed-off-by: Frederic Barrat Signed-off-by: Michael Ellerman --- arch/powerpc/sysdev/xive/common.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c index 40c06110821c..3459015092fa 100644 --- a/arch/powerpc/sysdev/xive/common.c +++ b/arch/powerpc/sysdev/xive/common.c @@ -246,7 +246,7 @@ notrace void xmon_xive_do_dump(int cpu) u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET); xmon_printf(" IPI state: %x:%c%c\n", xc->hw_ipi, val & XIVE_ESB_VAL_P ? 
'P' : 'p', - val & XIVE_ESB_VAL_P ? 'Q' : 'q'); + val & XIVE_ESB_VAL_Q ? 'Q' : 'q'); } #endif } -- cgit v1.2.3 From 1a2f778970c81273c121be160fb6610be8fa423e Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Wed, 7 Mar 2018 19:06:44 +0530 Subject: powerpc/mm/keys: Move pte bits to correct headers Memory keys are supported only with hash translation mode. Instead of using #ifdef in generic code, move the key-related pte bits into the respective headers. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/book3s/64/hash-4k.h | 8 ++++++++ arch/powerpc/include/asm/book3s/64/hash-64k.h | 7 +++++++ arch/powerpc/include/asm/book3s/64/pgtable.h | 19 ------------------- 3 files changed, 15 insertions(+), 19 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h index 67c5475311ee..4cbec8195f85 100644 --- a/arch/powerpc/include/asm/book3s/64/hash-4k.h +++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h @@ -34,6 +34,14 @@ #define H_PAGE_COMBO 0x0 #define H_PTE_FRAG_NR 0 #define H_PTE_FRAG_SIZE_SHIFT 0 + +/* memory key bits, only 8 keys supported */ +#define H_PTE_PKEY_BIT0 0 +#define H_PTE_PKEY_BIT1 0 +#define H_PTE_PKEY_BIT2 _RPAGE_RSV3 +#define H_PTE_PKEY_BIT3 _RPAGE_RSV4 +#define H_PTE_PKEY_BIT4 _RPAGE_RSV5 + /* * On all 4K setups, remap_4k_pfn() equates to remap_pfn_range() */ diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h index 3bcf269f8f55..fcca6de62f3a 100644 --- a/arch/powerpc/include/asm/book3s/64/hash-64k.h +++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h @@ -16,6 +16,13 @@ #define H_PAGE_BUSY _RPAGE_RPN44 /* software: PTE & hash are busy */ #define H_PAGE_HASHPTE _RPAGE_RPN43 /* PTE has associated HPTE */ +/* memory key bits. */ +#define H_PTE_PKEY_BIT0 _RPAGE_RSV1 +#define H_PTE_PKEY_BIT1 _RPAGE_RSV2 +#define H_PTE_PKEY_BIT2 _RPAGE_RSV3 +#define H_PTE_PKEY_BIT3 _RPAGE_RSV4 +#define H_PTE_PKEY_BIT4 _RPAGE_RSV5 + /* * We need to differentiate between explicit huge page and THP huge * page, since THP huge page also need to track real subpage details diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index a6b9f1d74600..47b5ffc8715d 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -60,25 +60,6 @@ /* Max physical address bit as per radix table */ #define _RPAGE_PA_MAX 57 -#ifdef CONFIG_PPC_MEM_KEYS -#ifdef CONFIG_PPC_64K_PAGES -#define H_PTE_PKEY_BIT0 _RPAGE_RSV1 -#define H_PTE_PKEY_BIT1 _RPAGE_RSV2 -#else /* CONFIG_PPC_64K_PAGES */ -#define H_PTE_PKEY_BIT0 0 /* _RPAGE_RSV1 is not available */ -#define H_PTE_PKEY_BIT1 0 /* _RPAGE_RSV2 is not available */ -#endif /* CONFIG_PPC_64K_PAGES */ -#define H_PTE_PKEY_BIT2 _RPAGE_RSV3 -#define H_PTE_PKEY_BIT3 _RPAGE_RSV4 -#define H_PTE_PKEY_BIT4 _RPAGE_RSV5 -#else /* CONFIG_PPC_MEM_KEYS */ -#define H_PTE_PKEY_BIT0 0 -#define H_PTE_PKEY_BIT1 0 -#define H_PTE_PKEY_BIT2 0 -#define H_PTE_PKEY_BIT3 0 -#define H_PTE_PKEY_BIT4 0 -#endif /* CONFIG_PPC_MEM_KEYS */ - /* * Max physical address bit we will use for now. * -- cgit v1.2.3 From 0dea04b288c06654b0de3563c5b8cdfe6130e450 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Mon, 26 Mar 2018 15:34:47 +0530 Subject: powerpc/mm/slice: Consolidate return path in slice_get_unmapped_area() In a following patch, on finding a free area we will need to allocate extra contexts as needed.
Consolidating the return path for slice_get_unmapped_area() will make that easier. Split into a separate patch to make review easy. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/mm/slice.c | 36 ++++++++++++++++++++---------------- 1 file changed, 20 insertions(+), 16 deletions(-) diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index 1297b3ad7dd2..09ac1a709d0c 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -574,7 +574,8 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, */ if (slice_check_range_fits(mm, &good_mask, addr, len)) { slice_dbg(" fits good !\n"); - return addr; + newaddr = addr; + goto return_addr; } } else { /* Now let's see if we can find something in the existing @@ -587,7 +588,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, * we thus return directly */ slice_dbg(" found area at 0x%lx\n", newaddr); - return newaddr; + goto return_addr; } } /* @@ -601,6 +602,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, if (addr != 0 || fixed) { if (slice_check_range_fits(mm, &potential_mask, addr, len)) { slice_dbg(" fits potential !\n"); + newaddr = addr; goto convert; } } @@ -615,34 +617,34 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, * anywhere in the good area. */ if (addr) { - addr = slice_find_area(mm, len, &good_mask, - psize, topdown, high_limit); - if (addr != -ENOMEM) { - slice_dbg(" found area at 0x%lx\n", addr); - return addr; + newaddr = slice_find_area(mm, len, &good_mask, + psize, topdown, high_limit); + if (newaddr != -ENOMEM) { + slice_dbg(" found area at 0x%lx\n", newaddr); + goto return_addr; } } /* Now let's see if we can find something in the existing slices * for that size plus free slices */ - addr = slice_find_area(mm, len, &potential_mask, - psize, topdown, high_limit); + newaddr = slice_find_area(mm, len, &potential_mask, + psize, topdown, high_limit); #ifdef CONFIG_PPC_64K_PAGES - if (addr == -ENOMEM && psize == MMU_PAGE_64K) { + if (newaddr == -ENOMEM && psize == MMU_PAGE_64K) { /* retry the search with 4k-page slices included */ slice_or_mask(&potential_mask, &potential_mask, compat_maskp); - addr = slice_find_area(mm, len, &potential_mask, - psize, topdown, high_limit); + newaddr = slice_find_area(mm, len, &potential_mask, + psize, topdown, high_limit); } #endif - if (addr == -ENOMEM) + if (newaddr == -ENOMEM) return -ENOMEM; - slice_range_to_mask(addr, len, &potential_mask); - slice_dbg(" found potential area at 0x%lx\n", addr); + slice_range_to_mask(newaddr, len, &potential_mask); + slice_dbg(" found potential area at 0x%lx\n", newaddr); slice_print_mask(" mask", &potential_mask); convert: @@ -656,7 +658,9 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, if (psize > MMU_PAGE_BASE) on_each_cpu(slice_flush_segments, mm, 1); } - return addr; + +return_addr: + return newaddr; } EXPORT_SYMBOL_GPL(slice_get_unmapped_area); -- cgit v1.2.3 From f384796c40dc55b3dba25e0ee9c1afd98c6d24d1 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Mon, 26 Mar 2018 15:34:48 +0530 Subject: powerpc/mm: Add support for handling > 512TB address in SLB miss For addresses above 512TB we allocate additional mmu contexts. To make it all easy, addresses above 512TB are handled with IR/DR=1 and with stack frame setup. The mmu_context_t is also updated to track the new extended_ids. To support up to 4PB we need a total of 8 contexts.
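The arithmetic behind that count: one context covers 2^49 bytes (512TB) and the target range is 2^52 bytes (4PB), so 2^(52-49) = 8 context slots are needed. A sketch of the index computation the series uses (mirroring get_ea_context() in the hunks below):

    #define MAX_EA_BITS_PER_CONTEXT 49      /* one context = 512TB = 2^49 */

    static inline int ea_to_context_index(unsigned long ea)
    {
            /* EAs below 4PB (2^52) yield indices 0..7 */
            return ea >> MAX_EA_BITS_PER_CONTEXT;
    }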
Signed-off-by: Aneesh Kumar K.V [mpe: Minor formatting tweaks and comment wording, switch BUG to WARN in get_ea_context().] Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/book3s/64/hash-4k.h | 6 ++ arch/powerpc/include/asm/book3s/64/hash-64k.h | 6 ++ arch/powerpc/include/asm/book3s/64/mmu.h | 33 +++++++- arch/powerpc/include/asm/mmu_context.h | 39 ++++++++++ arch/powerpc/include/asm/processor.h | 6 ++ arch/powerpc/kernel/exceptions-64s.S | 11 ++- arch/powerpc/kernel/traps.c | 12 --- arch/powerpc/mm/copro_fault.c | 2 +- arch/powerpc/mm/hash_utils_64.c | 4 +- arch/powerpc/mm/mmu_context_book3s64.c | 15 +++- arch/powerpc/mm/pgtable-hash64.c | 2 +- arch/powerpc/mm/slb.c | 108 ++++++++++++++++++++++++++ arch/powerpc/mm/slb_low.S | 11 ++- arch/powerpc/mm/slice.c | 15 +++- arch/powerpc/mm/tlb_hash64.c | 2 +- 15 files changed, 245 insertions(+), 27 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h index 4cbec8195f85..4b5423030d4b 100644 --- a/arch/powerpc/include/asm/book3s/64/hash-4k.h +++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h @@ -11,6 +11,12 @@ #define H_PUD_INDEX_SIZE 9 #define H_PGD_INDEX_SIZE 9 +/* + * Each context is 512TB. But on 4k we restrict our max TASK size to 64TB + * Hence also limit max EA bits to 64TB. + */ +#define MAX_EA_BITS_PER_CONTEXT 46 + #ifndef __ASSEMBLY__ #define H_PTE_TABLE_SIZE (sizeof(pte_t) << H_PTE_INDEX_SIZE) #define H_PMD_TABLE_SIZE (sizeof(pmd_t) << H_PMD_INDEX_SIZE) diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h index fcca6de62f3a..f7a4f5cbf63e 100644 --- a/arch/powerpc/include/asm/book3s/64/hash-64k.h +++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h @@ -7,6 +7,12 @@ #define H_PUD_INDEX_SIZE 7 #define H_PGD_INDEX_SIZE 8 +/* + * Each context is 512TB size. SLB miss for first context/default context + * is handled in the hotpath. + */ +#define MAX_EA_BITS_PER_CONTEXT 49 + /* * 64k aligned address free up few of the lower bits of RPN for us * We steal that here. For more deatils look at pte_pfn/pfn_pte() diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h index c8c836e8ad1b..5094696eecd6 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu.h +++ b/arch/powerpc/include/asm/book3s/64/mmu.h @@ -91,7 +91,18 @@ struct slice_mask { }; typedef struct { - mm_context_id_t id; + union { + /* + * We use id as the PIDR content for radix. On hash we can use + * more than one id. The extended ids are used when we start + * having address above 512TB. We allocate one extended id + * for each 512TB. The new id is then used with the 49 bit + * EA to build a new VA. We always use ESID_BITS_1T_MASK bits + * from EA and new context ids to build the new VAs. 
+ */ + mm_context_id_t id; + mm_context_id_t extended_id[TASK_SIZE_USER64/TASK_CONTEXT_SIZE]; + }; u16 user_psize; /* page size index */ /* Number of bits in the mm_cpumask */ @@ -196,5 +207,25 @@ extern void radix_init_pseries(void); static inline void radix_init_pseries(void) { }; #endif +static inline int get_ea_context(mm_context_t *ctx, unsigned long ea) +{ + int index = ea >> MAX_EA_BITS_PER_CONTEXT; + + if (likely(index < ARRAY_SIZE(ctx->extended_id))) + return ctx->extended_id[index]; + + /* should never happen */ + WARN_ON(1); + return 0; +} + +static inline unsigned long get_user_vsid(mm_context_t *ctx, + unsigned long ea, int ssize) +{ + unsigned long context = get_ea_context(ctx, ea); + + return get_vsid(context, ea, ssize); +} + #endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_BOOK3S_64_MMU_H_ */ diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index 3a15b6db9501..1835ca1505d6 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h @@ -60,12 +60,51 @@ extern int hash__alloc_context_id(void); extern void hash__reserve_context_id(int id); extern void __destroy_context(int context_id); static inline void mmu_context_init(void) { } + +static inline int alloc_extended_context(struct mm_struct *mm, + unsigned long ea) +{ + int context_id; + + int index = ea >> MAX_EA_BITS_PER_CONTEXT; + + context_id = hash__alloc_context_id(); + if (context_id < 0) + return context_id; + + VM_WARN_ON(mm->context.extended_id[index]); + mm->context.extended_id[index] = context_id; + return context_id; +} + +static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea) +{ + int context_id; + + context_id = get_ea_context(&mm->context, ea); + if (!context_id) + return true; + return false; +} + #else extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk); extern unsigned long __init_new_context(void); extern void __destroy_context(unsigned long context_id); extern void mmu_context_init(void); +static inline int alloc_extended_context(struct mm_struct *mm, + unsigned long ea) +{ + /* non book3s_64 should never find this called */ + WARN_ON(1); + return -ENOMEM; +} + +static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea) +{ + return false; +} #endif #if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU) diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index 01299cdc9806..75b084486ce1 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -119,9 +119,15 @@ void release_thread(struct task_struct *); */ #define TASK_SIZE_USER64 TASK_SIZE_512TB #define DEFAULT_MAP_WINDOW_USER64 TASK_SIZE_128TB +#define TASK_CONTEXT_SIZE TASK_SIZE_512TB #else #define TASK_SIZE_USER64 TASK_SIZE_64TB #define DEFAULT_MAP_WINDOW_USER64 TASK_SIZE_64TB +/* + * We don't need to allocate extended context ids for 4K page size, because + * we limit the max effective address on this config to 64TB. 
+ */ +#define TASK_CONTEXT_SIZE TASK_SIZE_64TB #endif /* diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 6bee20c43feb..1a0aa70bcb2b 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -621,7 +621,10 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX) lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ mtlr r10 - beq- 8f /* if bad address, make full stack frame */ + /* + * Large address, check whether we have to allocate new contexts. + */ + beq- 8f bne- cr5,2f /* if unrecoverable exception, oops */ @@ -685,7 +688,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX) mr r3,r12 mfspr r11,SPRN_SRR0 mfspr r12,SPRN_SRR1 - LOAD_HANDLER(r10,bad_addr_slb) + LOAD_HANDLER(r10, large_addr_slb) mtspr SPRN_SRR0,r10 ld r10,PACAKMSR(r13) mtspr SPRN_SRR1,r10 @@ -700,7 +703,7 @@ EXC_COMMON_BEGIN(unrecov_slb) bl unrecoverable_exception b 1b -EXC_COMMON_BEGIN(bad_addr_slb) +EXC_COMMON_BEGIN(large_addr_slb) EXCEPTION_PROLOG_COMMON(0x380, PACA_EXSLB) RECONCILE_IRQ_STATE(r10, r11) ld r3, PACA_EXSLB+EX_DAR(r13) @@ -710,7 +713,7 @@ EXC_COMMON_BEGIN(bad_addr_slb) std r10, _TRAP(r1) 2: bl save_nvgprs addi r3, r1, STACK_FRAME_OVERHEAD - bl slb_miss_bad_addr + bl slb_miss_large_addr b ret_from_except EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x100) diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 1e48d157196a..f200bfd98b17 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -1495,18 +1495,6 @@ bail: exception_exit(prev_state); } -void slb_miss_bad_addr(struct pt_regs *regs) -{ - enum ctx_state prev_state = exception_enter(); - - if (user_mode(regs)) - _exception(SIGSEGV, regs, SEGV_BNDERR, regs->dar); - else - bad_page_fault(regs, regs->dar, SIGSEGV); - - exception_exit(prev_state); -} - void StackOverflow(struct pt_regs *regs) { printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n", diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c index 697b70ad1195..7d0945bd3a61 100644 --- a/arch/powerpc/mm/copro_fault.c +++ b/arch/powerpc/mm/copro_fault.c @@ -112,7 +112,7 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb) return 1; psize = get_slice_psize(mm, ea); ssize = user_segment_size(ea); - vsid = get_vsid(mm->context.id, ea, ssize); + vsid = get_user_vsid(&mm->context, ea, ssize); vsidkey = SLB_VSID_USER; break; case VMALLOC_REGION_ID: diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 17fc13cab8dc..4180b89b8922 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -1267,7 +1267,7 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea, } psize = get_slice_psize(mm, ea); ssize = user_segment_size(ea); - vsid = get_vsid(mm->context.id, ea, ssize); + vsid = get_user_vsid(&mm->context, ea, ssize); break; case VMALLOC_REGION_ID: vsid = get_kernel_vsid(ea, mmu_kernel_ssize); @@ -1532,7 +1532,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea, /* Get VSID */ ssize = user_segment_size(ea); - vsid = get_vsid(mm->context.id, ea, ssize); + vsid = get_user_vsid(&mm->context, ea, ssize); if (!vsid) return; /* diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c index 422be81bf69f..b75194dff64c 100644 --- a/arch/powerpc/mm/mmu_context_book3s64.c +++ b/arch/powerpc/mm/mmu_context_book3s64.c @@ -179,6 +179,19 @@ void __destroy_context(int context_id) } EXPORT_SYMBOL_GPL(__destroy_context); +static void 
destroy_contexts(mm_context_t *ctx) +{ + int index, context_id; + + spin_lock(&mmu_context_lock); + for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) { + context_id = ctx->extended_id[index]; + if (context_id) + ida_remove(&mmu_context_ida, context_id); + } + spin_unlock(&mmu_context_lock); +} + #ifdef CONFIG_PPC_64K_PAGES static void destroy_pagetable_page(struct mm_struct *mm) { @@ -217,7 +230,7 @@ void destroy_context(struct mm_struct *mm) else subpage_prot_free(mm); destroy_pagetable_page(mm); - __destroy_context(mm->context.id); + destroy_contexts(&mm->context); mm->context.id = MMU_NO_CONTEXT; } diff --git a/arch/powerpc/mm/pgtable-hash64.c b/arch/powerpc/mm/pgtable-hash64.c index 469808e77e58..a87b18cf6749 100644 --- a/arch/powerpc/mm/pgtable-hash64.c +++ b/arch/powerpc/mm/pgtable-hash64.c @@ -320,7 +320,7 @@ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr, if (!is_kernel_addr(addr)) { ssize = user_segment_size(addr); - vsid = get_vsid(mm->context.id, addr, ssize); + vsid = get_user_vsid(&mm->context, addr, ssize); WARN_ON(vsid == 0); } else { vsid = get_kernel_vsid(addr, mmu_kernel_ssize); diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c index 13cfe413b40d..66577cc66dc9 100644 --- a/arch/powerpc/mm/slb.c +++ b/arch/powerpc/mm/slb.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -340,3 +341,110 @@ void slb_initialize(void) asm volatile("isync":::"memory"); } + +static void insert_slb_entry(unsigned long vsid, unsigned long ea, + int bpsize, int ssize) +{ + unsigned long flags, vsid_data, esid_data; + enum slb_index index; + int slb_cache_index; + + /* + * We are irq disabled, hence should be safe to access PACA. + */ + index = get_paca()->stab_rr; + + /* + * simple round-robin replacement of slb starting at SLB_NUM_BOLTED. + */ + if (index < (mmu_slb_size - 1)) + index++; + else + index = SLB_NUM_BOLTED; + + get_paca()->stab_rr = index; + + flags = SLB_VSID_USER | mmu_psize_defs[bpsize].sllp; + vsid_data = (vsid << slb_vsid_shift(ssize)) | flags | + ((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT); + esid_data = mk_esid_data(ea, ssize, index); + + asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data) + : "memory"); + + /* + * Now update slb cache entries + */ + slb_cache_index = get_paca()->slb_cache_ptr; + if (slb_cache_index < SLB_CACHE_ENTRIES) { + /* + * We have space in slb cache for optimized switch_slb(). + * Top 36 bits from esid_data as per ISA + */ + get_paca()->slb_cache[slb_cache_index++] = esid_data >> 28; + get_paca()->slb_cache_ptr++; + } else { + /* + * Our cache is full and the current cache content strictly + * doesn't indicate the active SLB conents. Bump the ptr + * so that switch_slb() will ignore the cache. + */ + get_paca()->slb_cache_ptr = SLB_CACHE_ENTRIES + 1; + } +} + +static void handle_multi_context_slb_miss(int context_id, unsigned long ea) +{ + struct mm_struct *mm = current->mm; + unsigned long vsid; + int bpsize; + + /* + * We are always above 1TB, hence use high user segment size. + */ + vsid = get_vsid(context_id, ea, mmu_highuser_ssize); + bpsize = get_slice_psize(mm, ea); + insert_slb_entry(vsid, ea, bpsize, mmu_highuser_ssize); +} + +void slb_miss_large_addr(struct pt_regs *regs) +{ + enum ctx_state prev_state = exception_enter(); + unsigned long ea = regs->dar; + int context; + + if (REGION_ID(ea) != USER_REGION_ID) + goto slb_bad_addr; + + /* + * Are we beyound what the page table layout supports ? 
+ */ + if ((ea & ~REGION_MASK) >= H_PGTABLE_RANGE) + goto slb_bad_addr; + + /* Lower address should have been handled by asm code */ + if (ea < (1UL << MAX_EA_BITS_PER_CONTEXT)) + goto slb_bad_addr; + + /* + * consider this as bad access if we take a SLB miss + * on an address above addr limit. + */ + if (ea >= current->mm->context.slb_addr_limit) + goto slb_bad_addr; + + context = get_ea_context(¤t->mm->context, ea); + if (!context) + goto slb_bad_addr; + + handle_multi_context_slb_miss(context, ea); + exception_exit(prev_state); + return; + +slb_bad_addr: + if (user_mode(regs)) + _exception(SIGSEGV, regs, SEGV_BNDERR, ea); + else + bad_page_fault(regs, ea, SIGSEGV); + exception_exit(prev_state); +} diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S index 2c7c717fd2ea..a83fbd2a4a24 100644 --- a/arch/powerpc/mm/slb_low.S +++ b/arch/powerpc/mm/slb_low.S @@ -75,10 +75,15 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_68_BIT_VA) */ _GLOBAL(slb_allocate) /* - * check for bad kernel/user address - * (ea & ~REGION_MASK) >= PGTABLE_RANGE + * Check if the address falls within the range of the first context, or + * if we may need to handle multi context. For the first context we + * allocate the slb entry via the fast path below. For large address we + * branch out to C-code and see if additional contexts have been + * allocated. + * The test here is: + * (ea & ~REGION_MASK) >= (1ull << MAX_EA_BITS_PER_CONTEXT) */ - rldicr. r9,r3,4,(63 - H_PGTABLE_EADDR_SIZE - 4) + rldicr. r9,r3,4,(63 - MAX_EA_BITS_PER_CONTEXT - 4) bne- 8f srdi r9,r3,60 /* get region */ diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index 09ac1a709d0c..9cd87d11fe4e 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -648,6 +648,15 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, slice_print_mask(" mask", &potential_mask); convert: + /* + * Try to allocate the context before we do slice convert + * so that we handle the context allocation failure gracefully. + */ + if (need_extra_context(mm, newaddr)) { + if (alloc_extended_context(mm, newaddr) < 0) + return -ENOMEM; + } + slice_andnot_mask(&potential_mask, &potential_mask, &good_mask); if (compat_maskp && !fixed) slice_andnot_mask(&potential_mask, &potential_mask, compat_maskp); @@ -658,10 +667,14 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, if (psize > MMU_PAGE_BASE) on_each_cpu(slice_flush_segments, mm, 1); } + return newaddr; return_addr: + if (need_extra_context(mm, newaddr)) { + if (alloc_extended_context(mm, newaddr) < 0) + return -ENOMEM; + } return newaddr; - } EXPORT_SYMBOL_GPL(slice_get_unmapped_area); diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c index 9b23f12e863c..87d71dd25441 100644 --- a/arch/powerpc/mm/tlb_hash64.c +++ b/arch/powerpc/mm/tlb_hash64.c @@ -89,7 +89,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr, /* Build full vaddr */ if (!is_kernel_addr(addr)) { ssize = user_segment_size(addr); - vsid = get_vsid(mm->context.id, addr, ssize); + vsid = get_user_vsid(&mm->context, addr, ssize); } else { vsid = get_kernel_vsid(addr, mmu_kernel_ssize); ssize = mmu_kernel_ssize; -- cgit v1.2.3 From c2b4d8b7417a59b7f9a52d0d8402f5257cbbd398 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Mon, 26 Mar 2018 15:34:49 +0530 Subject: powerpc/mm/hash64: Increase the VA range This patch increases the max virtual (effective) address value to 4PB. With 4K page size config we continue to limit ourself to 64TB. 
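The 4PB figure follows directly from the 64K page-table geometry set up below; a back-of-the-envelope check, with the index sizes copied from the hunk and packaged as a hypothetical compile-time test:

    #define H_PTE_INDEX_SIZE  8
    #define H_PMD_INDEX_SIZE 10
    #define H_PUD_INDEX_SIZE 10     /* raised from 7 by this patch */
    #define H_PGD_INDEX_SIZE  8
    #define PAGE_SHIFT       16     /* 64K pages */

    _Static_assert(H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + H_PUD_INDEX_SIZE +
                   H_PGD_INDEX_SIZE + PAGE_SHIFT == 52,
                   "52 bits of effective address => 2^52 bytes = 4PB");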
Signed-off-by: Aneesh Kumar K.V [mpe: Keep the H_PGTABLE_RANGE test, update it to work] Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/book3s/64/hash-64k.h | 2 +- arch/powerpc/include/asm/processor.h | 9 ++++++++- arch/powerpc/mm/init_64.c | 6 ------ arch/powerpc/mm/pgtable-hash64.c | 4 ++++ arch/powerpc/mm/pgtable_64.c | 5 ----- 5 files changed, 13 insertions(+), 13 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h index f7a4f5cbf63e..eb393135d054 100644 --- a/arch/powerpc/include/asm/book3s/64/hash-64k.h +++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h @@ -4,7 +4,7 @@ #define H_PTE_INDEX_SIZE 8 #define H_PMD_INDEX_SIZE 10 -#define H_PUD_INDEX_SIZE 7 +#define H_PUD_INDEX_SIZE 10 #define H_PGD_INDEX_SIZE 8 /* diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index 75b084486ce1..bb9cb25ffb20 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -109,6 +109,13 @@ void release_thread(struct task_struct *); #define TASK_SIZE_64TB (0x0000400000000000UL) #define TASK_SIZE_128TB (0x0000800000000000UL) #define TASK_SIZE_512TB (0x0002000000000000UL) +#define TASK_SIZE_1PB (0x0004000000000000UL) +#define TASK_SIZE_2PB (0x0008000000000000UL) +/* + * With 52 bits in the address we can support + * upto 4PB of range. + */ +#define TASK_SIZE_4PB (0x0010000000000000UL) /* * For now 512TB is only supported with book3s and 64K linux page size. @@ -117,7 +124,7 @@ void release_thread(struct task_struct *); /* * Max value currently used: */ -#define TASK_SIZE_USER64 TASK_SIZE_512TB +#define TASK_SIZE_USER64 TASK_SIZE_4PB #define DEFAULT_MAP_WINDOW_USER64 TASK_SIZE_128TB #define TASK_CONTEXT_SIZE TASK_SIZE_512TB #else diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index fdb424a29f03..63470b06c502 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -68,12 +68,6 @@ #include "mmu_decl.h" -#ifdef CONFIG_PPC_BOOK3S_64 -#if H_PGTABLE_RANGE > USER_VSID_RANGE -#warning Limited user VSID range means pagetable space is wasted -#endif -#endif /* CONFIG_PPC_BOOK3S_64 */ - phys_addr_t memstart_addr = ~0; EXPORT_SYMBOL_GPL(memstart_addr); phys_addr_t kernstart_addr; diff --git a/arch/powerpc/mm/pgtable-hash64.c b/arch/powerpc/mm/pgtable-hash64.c index a87b18cf6749..199bfda5f0d9 100644 --- a/arch/powerpc/mm/pgtable-hash64.c +++ b/arch/powerpc/mm/pgtable-hash64.c @@ -24,6 +24,10 @@ #define CREATE_TRACE_POINTS #include +#if H_PGTABLE_RANGE > (USER_VSID_RANGE * (TASK_SIZE_USER64 / TASK_CONTEXT_SIZE)) +#warning Limited user VSID range means pagetable space is wasted +#endif + #ifdef CONFIG_SPARSEMEM_VMEMMAP /* * vmemmap is the starting address of the virtual address space where diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c index adf469f312f2..9bf659d5078c 100644 --- a/arch/powerpc/mm/pgtable_64.c +++ b/arch/powerpc/mm/pgtable_64.c @@ -57,11 +57,6 @@ #include "mmu_decl.h" -#ifdef CONFIG_PPC_BOOK3S_64 -#if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT)) -#error TASK_SIZE_USER64 exceeds user VSID range -#endif -#endif #ifdef CONFIG_PPC_BOOK3S_64 /* -- cgit v1.2.3 From 872a100a49c3785b4577e002580ca5689d1be7a1 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Mon, 26 Mar 2018 15:34:50 +0530 Subject: powerpc/mm/hash: Don't memset pgd table if not needed We need to zero-out pgd table only if we share the slab cache with pud/pmd level caches. 
With the support of 4PB, we don't share the slab cache anymore. Instead of removing the code completely, hide it within an #ifdef. We don't need to do this with any other page table level, because they all allocate a table of double the size and we take care of initializing the first half correctly during page table zap. Signed-off-by: Aneesh Kumar K.V [mpe: Consolidate multiple #if / #ifdef into one] Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/book3s/64/pgalloc.h | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h index 4746bc68d446..558a159600ad 100644 --- a/arch/powerpc/include/asm/book3s/64/pgalloc.h +++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h @@ -80,8 +80,18 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm) pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), pgtable_gfp_flags(mm, GFP_KERNEL)); + /* + * With hugetlb, we don't clear the second half of the page table. + * If we share the same slab cache with the pmd or pud level table, + * we need to make sure we zero out the full table on alloc. + * With 4K we don't store slot in the second half. Hence we don't + * need to do this for 4k. + */ +#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_64K_PAGES) && \ + ((H_PGD_INDEX_SIZE == H_PUD_CACHE_INDEX) || \ + (H_PGD_INDEX_SIZE == H_PMD_CACHE_INDEX)) memset(pgd, 0, PGD_TABLE_SIZE); - +#endif return pgd; } -- cgit v1.2.3 From ca9a16c3bc1582da62ef9c47fc22008447f7ae0e Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Fri, 30 Mar 2018 17:27:24 +0530 Subject: powerpc/kvm: Fix guest boot failure on Power9 since DAWR changes SLOF checks for 'sc 1' (hypercall) support by issuing a hcall with H_SET_DABR. Since the recent commit e8ebedbf3131 ("KVM: PPC: Book3S HV: Return error from h_set_dabr() on POWER9") changed H_SET_DABR to return H_UNSUPPORTED on Power9, we see guest boot failures; the symptom is that the boot seems to just stop in SLOF, eg: SLOF *************************************************************** QEMU Starting Build Date = Sep 24 2017 12:23:07 FW Version = buildd@ release 20170724 SLOF can cope if H_SET_DABR returns H_HARDWARE. So switch the return value to H_HARDWARE instead of H_UNSUPPORTED so that we don't break the guest boot. That does mean we return a different error to PowerVM in this case, but that's probably not a big concern. Fixes: e8ebedbf3131 ("KVM: PPC: Book3S HV: Return error from h_set_dabr() on POWER9") Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 95c616f2da22..4e66f578e3cd 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -2575,7 +2575,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 2: BEGIN_FTR_SECTION /* POWER9 with disabled DAWR */ - li r3, H_UNSUPPORTED + li r3, H_HARDWARE blr END_FTR_SECTION_IFCLR(CPU_FTR_DAWR) /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */ -- cgit v1.2.3 From 1d0afc0d5a7c281f8ced3bd39f61f3328d5d3822 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Sat, 31 Mar 2018 20:57:10 +1100 Subject: powerpc/64e: Fix oops due to deferral of paca allocation On 64-bit Book3E systems, in setup_tlb_core_data() we reference other CPUs' pacas.
But in commit 59f577743d71 ("powerpc/64: Defer paca allocation until memory topology is discovered") the allocation of non-boot-CPU pacas was deferred until later in boot. This leads to an oops: CPU maps initialized for 1 thread per core Unable to handle kernel paging request for data at address 0x8888888888888918 Faulting instruction address: 0xc000000000e2f0d0 Oops: Kernel access of bad area, sig: 11 [#1] NIP .setup_tlb_core_data+0xdc/0x160 Call Trace: .setup_tlb_core_data+0x5c/0x160 (unreliable) .setup_arch+0x80/0x348 .start_kernel+0x7c/0x598 start_here_common+0x1c/0x40 Luckily setup_tlb_core_data() is called immediately prior to smp_setup_pacas(). So simply switching their order is sufficient to fix the oops and seems unlikely to have any other unwanted side effects. Fixes: 59f577743d71 ("powerpc/64: Defer paca allocation until memory topology is discovered") Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/setup-common.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 56f7a2b793e0..0af5c11b9e78 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -915,9 +915,6 @@ void __init setup_arch(char **cmdline_p) /* Parse memory topology */ mem_topology_setup(); - /* On BookE, setup per-core TLB data structures. */ - setup_tlb_core_data(); - /* * Release secondary cpus out of their spinloops at 0x60 now that * we can map physical -> logical CPU ids. @@ -927,6 +924,10 @@ void __init setup_arch(char **cmdline_p) */ #ifdef CONFIG_SMP smp_setup_pacas(); + + /* On BookE, setup per-core TLB data structures. */ + setup_tlb_core_data(); + smp_release_cpus(); #endif -- cgit v1.2.3 From 7ab96c0a083f3a298ae595ea142058a20c6e34f6 Mon Sep 17 00:00:00 2001 From: Jonathan Neuschäfer Date: Tue, 6 Feb 2018 13:37:04 +0100 Subject: powerpc/wii: Probe the whole devicetree MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Previously, wii_device_probe would only initialize devices under the /hollywood node. After this patch, platform devices placed outside of /hollywood will also be initialized. The intended usecase for this are devices located outside of the Hollywood chip, such as GPIO LEDs and GPIO buttons. Signed-off-by: Jonathan Neuschäfer Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/embedded6xx/wii.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c index 3fd683e40bc9..ef2b02b5e9d3 100644 --- a/arch/powerpc/platforms/embedded6xx/wii.c +++ b/arch/powerpc/platforms/embedded6xx/wii.c @@ -239,7 +239,7 @@ static int __init wii_device_probe(void) if (!machine_is(wii)) return 0; - of_platform_bus_probe(NULL, wii_of_bus, NULL); + of_platform_populate(NULL, wii_of_bus, NULL, NULL); return 0; } device_initcall(wii_device_probe); -- cgit v1.2.3 From 9cbaaec1cf0c9f4861c4c1dd65f3ede218c1ab2f Mon Sep 17 00:00:00 2001 From: Jonathan Neuschäfer Date: Fri, 9 Feb 2018 13:07:28 +0100 Subject: powerpc/wii: Explicitly configure GPIO owner for poweroff pin MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The Hollywood chipset's GPIO controller has two sets of registers: One for access by the PowerPC CPU, and one for access by the ARM coprocessor (but both are accessible from the PPC because the memory firewall (AHBPROT) is usually disabled when booting Linux, today). 
The wii_power_off function currently assumes that the poweroff GPIO pin is configured for use via the ARM side, but the upcoming GPIO driver configures all pins for use via the PPC side, breaking poweroff. Configure the owner register explicitly in wii_power_off to make wii_power_off work with and without the new GPIO driver. I think the Wii can be switched to the generic gpio-poweroff driver, after the GPIO driver is merged. Signed-off-by: Jonathan Neuschäfer Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/embedded6xx/wii.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c index ef2b02b5e9d3..419a88938b0a 100644 --- a/arch/powerpc/platforms/embedded6xx/wii.c +++ b/arch/powerpc/platforms/embedded6xx/wii.c @@ -44,6 +44,7 @@ #define HW_GPIO_BASE(idx) (idx * 0x20) #define HW_GPIO_OUT(idx) (HW_GPIO_BASE(idx) + 0) #define HW_GPIO_DIR(idx) (HW_GPIO_BASE(idx) + 4) +#define HW_GPIO_OWNER (HW_GPIO_BASE(1) + 0x1c) #define HW_GPIO_SHUTDOWN (1<<1) #define HW_GPIO_SLOT_LED (1<<5) @@ -176,6 +177,12 @@ static void wii_power_off(void) local_irq_disable(); if (hw_gpio) { + /* + * set the owner of the shutdown pin to ARM, because it is + * accessed through the registers for the ARM, below + */ + clrbits32(hw_gpio + HW_GPIO_OWNER, HW_GPIO_SHUTDOWN); + /* make sure that the poweroff GPIO is configured as output */ setbits32(hw_gpio + HW_GPIO_DIR(1), HW_GPIO_SHUTDOWN); -- cgit v1.2.3 From 9693d5709f8e771c21114ebca59b11e61db9774c Mon Sep 17 00:00:00 2001 From: Jonathan Neuschäfer Date: Fri, 9 Feb 2018 13:07:30 +0100 Subject: powerpc/wii.dts: Add ngpios property MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The Hollywood GPIO controller supports 32 GPIOs, but on the Wii, only 24 are used. Signed-off-by: Jonathan Neuschäfer Signed-off-by: Michael Ellerman --- arch/powerpc/boot/dts/wii.dts | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/powerpc/boot/dts/wii.dts b/arch/powerpc/boot/dts/wii.dts index 17a5babb098d..aa4064000b9f 100644 --- a/arch/powerpc/boot/dts/wii.dts +++ b/arch/powerpc/boot/dts/wii.dts @@ -176,6 +176,7 @@ compatible = "nintendo,hollywood-gpio"; reg = <0x0d8000c0 0x40>; gpio-controller; + ngpios = <24>; /* * This is commented out while a standard binding -- cgit v1.2.3 From 80873a0b3a826bbd5db3c76601a38bfcaaa23c9a Mon Sep 17 00:00:00 2001 From: Jonathan Neuschäfer Date: Fri, 9 Feb 2018 13:07:31 +0100 Subject: powerpc/wii.dts: Add GPIO line names MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit These are the GPIO line names on a Nintendo Wii, as documented in: https://wiibrew.org/wiki/Hardware/Hollywood_GPIOs Signed-off-by: Jonathan Neuschäfer Signed-off-by: Michael Ellerman --- arch/powerpc/boot/dts/wii.dts | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/arch/powerpc/boot/dts/wii.dts b/arch/powerpc/boot/dts/wii.dts index aa4064000b9f..d7094804e796 100644 --- a/arch/powerpc/boot/dts/wii.dts +++ b/arch/powerpc/boot/dts/wii.dts @@ -178,6 +178,14 @@ gpio-controller; ngpios = <24>; + gpio-line-names = + "POWER", "SHUTDOWN", "FAN", "DC_DC", + "DI_SPIN", "SLOT_LED", "EJECT_BTN", "SLOT_IN", + "SENSOR_BAR", "DO_EJECT", "EEP_CS", "EEP_CLK", + "EEP_MOSI", "EEP_MISO", "AVE_SCL", "AVE_SDA", + "DEBUG0", "DEBUG1", "DEBUG2", "DEBUG3", + "DEBUG4", "DEBUG5", "DEBUG6", "DEBUG7"; + /* * This is commented out while a standard binding * for i2c over gpio is defined. 
-- cgit v1.2.3 From 041413b88d6e9c66582d0a38d0e82f8933d42d9a Mon Sep 17 00:00:00 2001 From: Jonathan Neuschäfer Date: Sat, 17 Mar 2018 16:06:17 +0100 Subject: powerpc/wii.dts: Add drive slot LED MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The Wii has a blue LED in the disk drive slot, which is controlled via a GPIO line. Add this LED to wii.dts, and mark it as a panic-indicator. Signed-off-by: Jonathan Neuschäfer Signed-off-by: Michael Ellerman --- arch/powerpc/boot/dts/wii.dts | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/arch/powerpc/boot/dts/wii.dts b/arch/powerpc/boot/dts/wii.dts index d7094804e796..104b1d6d5695 100644 --- a/arch/powerpc/boot/dts/wii.dts +++ b/arch/powerpc/boot/dts/wii.dts @@ -13,6 +13,7 @@ */ /dts-v1/; +#include /* * This is commented-out for now. @@ -223,5 +224,16 @@ interrupts = <2>; }; }; + + gpio-leds { + compatible = "gpio-leds"; + + /* This is the blue LED in the disk drive slot */ + drive-slot { + label = "wii:blue:drive_slot"; + gpios = <&GPIO 5 GPIO_ACTIVE_HIGH>; + panic-indicator; + }; + }; }; -- cgit v1.2.3 From 2615c93e5f52db62586112793d889face99eb905 Mon Sep 17 00:00:00 2001 From: Jonathan Neuschäfer Date: Wed, 28 Mar 2018 02:25:40 +0200 Subject: powerpc/mm: Simplify page_is_ram by using memblock_is_memory MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of open-coding the search in page_is_ram, call memblock_is_memory. Signed-off-by: Jonathan Neuschäfer Signed-off-by: Michael Ellerman --- arch/powerpc/mm/mem.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index e2f5025b03b0..8f335cf052f8 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -85,13 +85,7 @@ int page_is_ram(unsigned long pfn) #ifndef CONFIG_PPC64 /* XXX for now */ return pfn < max_pfn; #else - unsigned long paddr = (pfn << PAGE_SHIFT); - struct memblock_region *reg; - - for_each_memblock(memory, reg) - if (paddr >= reg->base && paddr < (reg->base + reg->size)) - return 1; - return 0; + return memblock_is_memory(__pfn_to_phys(pfn)); #endif } -- cgit v1.2.3 From f65e67c7e3308c0af08080782d79a8cb95c44929 Mon Sep 17 00:00:00 2001 From: Jonathan Neuschäfer Date: Wed, 28 Mar 2018 02:25:41 +0200 Subject: powerpc/mm: Use memblock API for PPC32 page_is_ram MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To support accurate checking for different blocks of memory on PPC32, use the same memblock-based approach that's already used on PPC64 also on PPC32. 
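A hedged sketch of the check both patches converge on, with a hypothetical Wii-like layout showing why 'pfn < max_pfn' was wrong on PPC32:

    /* Hypothetical PPC32 layout with an MMIO hole between RAM blocks:
     *   MEM1: 0x00000000 - 0x017fffff  (RAM)
     *   hole: 0x0d000000 - ...         (GPIO, MMC, USB registers)
     *   MEM2: 0x10000000 - 0x13ffffff  (RAM)
     * max_pfn covers everything up to the end of MEM2, so the old test
     * also claimed the hole was RAM; the memblock test does not: */
    int page_is_ram(unsigned long pfn)
    {
            return memblock_is_memory(__pfn_to_phys(pfn));
    }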
Signed-off-by: Jonathan Neuschäfer Signed-off-by: Michael Ellerman --- arch/powerpc/mm/mem.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 8f335cf052f8..737f8a4632cc 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -82,11 +82,7 @@ static inline pte_t *virt_to_kpte(unsigned long vaddr) int page_is_ram(unsigned long pfn) { -#ifndef CONFIG_PPC64 /* XXX for now */ - return pfn < max_pfn; -#else return memblock_is_memory(__pfn_to_phys(pfn)); -#endif } pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, -- cgit v1.2.3 From 2bbf63264ab2e8cbc740c738f66984b2aafa29c5 Mon Sep 17 00:00:00 2001 From: Jonathan Neuschäfer Date: Wed, 28 Mar 2018 02:25:42 +0200 Subject: powerpc/mm/32: Use page_is_ram to check for RAM MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On systems where there is MMIO space between different blocks of RAM in the physical address space, __ioremap_caller did not allow mapping these MMIO areas, because they were below the end RAM and thus considered RAM as well. Use the memblock-based page_is_ram function, which returns false for such MMIO holes. v2: Keep the check for p < virt_to_phys(high_memory). On 32-bit systems with high memory (memory above physical address 4GiB), the high memory is expected to be available though ioremap. The high_memory variable marks the end of low memory; comparing against it means that only ioremap requests for low RAM will be denied. Reported by Michael Ellerman. Signed-off-by: Jonathan Neuschäfer Signed-off-by: Michael Ellerman --- arch/powerpc/mm/pgtable_32.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index d35d9ad3c1cd..6668ecc041ad 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c @@ -148,6 +148,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags, * mem_init() sets high_memory so only do the check after that. */ if (slab_is_available() && (p < virt_to_phys(high_memory)) && + page_is_ram(__phys_to_pfn(p)) && !(__allow_ioremap_reserved && memblock_is_region_reserved(p, size))) { printk("__ioremap(): phys addr 0x%llx is RAM lr %ps\n", (unsigned long long)p, __builtin_return_address(0)); -- cgit v1.2.3 From 57deb8fea01f732d83711ab3b3310204d0026ba8 Mon Sep 17 00:00:00 2001 From: Jonathan Neuschäfer Date: Wed, 28 Mar 2018 02:25:43 +0200 Subject: powerpc/wii: Don't rely on the reserved memory hack MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Because the two memory blocks (usually called MEM1 and MEM2) are not merged anymore, __request_region in kernel/resource.c will correctly allow reserving regions in the physical address space between MEM1 and MEM2, where many important peripherals are (GPIO, MMC, USB, ...). A previous change to __ioremap_caller in arch/powerpc/mm/pgtable_32.c ensures that multiple memblocks are properly considered in ioremap; this makes it unnecessary to set __allow_ioremap_reserved. 
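As a usage example: the Hollywood GPIO block at 0x0d8000c0 (the reg value from wii.dts above) sits in the hole between MEM1 and MEM2, so with the hack removed a driver should be able to map it with a plain ioremap(); a sketch:

    static void __iomem *wii_gpio_regs_sketch(void)
    {
            /* The hole is no longer part of memblock.memory, so
             * page_is_ram() is false here and __ioremap_caller()
             * no longer rejects the request. */
            return ioremap(0x0d8000c0, 0x40);
    }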
Signed-off-by: Jonathan Neuschäfer Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/embedded6xx/wii.c | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c index 419a88938b0a..8bb46dcbebd8 100644 --- a/arch/powerpc/platforms/embedded6xx/wii.c +++ b/arch/powerpc/platforms/embedded6xx/wii.c @@ -80,21 +80,9 @@ void __init wii_memory_fixups(void) BUG_ON(memblock.memory.cnt != 2); BUG_ON(!page_aligned(p[0].base) || !page_aligned(p[1].base)); - /* trim unaligned tail */ - memblock_remove(ALIGN(p[1].base + p[1].size, PAGE_SIZE), - (phys_addr_t)ULLONG_MAX); - - /* determine hole, add & reserve them */ + /* determine hole */ wii_hole_start = ALIGN(p[0].base + p[0].size, PAGE_SIZE); wii_hole_size = p[1].base - wii_hole_start; - memblock_add(wii_hole_start, wii_hole_size); - memblock_reserve(wii_hole_start, wii_hole_size); - - BUG_ON(memblock.memory.cnt != 1); - __memblock_dump_all(); - - /* allow ioremapping the address space in the hole */ - __allow_ioremap_reserved = 1; } unsigned long __init wii_mmu_mapin_mem2(unsigned long top) -- cgit v1.2.3 From 7e1405917c145edbb7d4cd520e890e44161dd7be Mon Sep 17 00:00:00 2001 From: Jonathan Neuschäfer Date: Wed, 28 Mar 2018 02:25:44 +0200 Subject: powerpc/mm/32: Remove the reserved memory hack MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This hack, introduced in commit c5df7f775148 ("powerpc: allow ioremap within reserved memory regions") is now unnecessary. Signed-off-by: Jonathan Neuschäfer Signed-off-by: Michael Ellerman --- arch/powerpc/mm/init_32.c | 5 ----- arch/powerpc/mm/mmu_decl.h | 1 - arch/powerpc/mm/pgtable_32.c | 3 +-- 3 files changed, 1 insertion(+), 8 deletions(-) diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c index a2bf6965d04f..3e59e5d64b01 100644 --- a/arch/powerpc/mm/init_32.c +++ b/arch/powerpc/mm/init_32.c @@ -88,11 +88,6 @@ void MMU_init(void); int __map_without_bats; int __map_without_ltlbs; -/* - * This tells the system to allow ioremapping memory marked as reserved. - */ -int __allow_ioremap_reserved; - /* max amount of low RAM to map in */ unsigned long __max_low_memory = MAX_LOW_MEM; diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h index 57fbc554c785..c4c0a09a7775 100644 --- a/arch/powerpc/mm/mmu_decl.h +++ b/arch/powerpc/mm/mmu_decl.h @@ -98,7 +98,6 @@ extern void setbat(int index, unsigned long virt, phys_addr_t phys, unsigned int size, pgprot_t prot); extern int __map_without_bats; -extern int __allow_ioremap_reserved; extern unsigned int rtas_data, rtas_size; struct hash_pte; diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index 6668ecc041ad..120a49bfb9c6 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c @@ -148,8 +148,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags, * mem_init() sets high_memory so only do the check after that. 
*/ if (slab_is_available() && (p < virt_to_phys(high_memory)) && - page_is_ram(__phys_to_pfn(p)) && - !(__allow_ioremap_reserved && memblock_is_region_reserved(p, size))) { + page_is_ram(__phys_to_pfn(p))) { printk("__ioremap(): phys addr 0x%llx is RAM lr %ps\n", (unsigned long long)p, __builtin_return_address(0)); return NULL; -- cgit v1.2.3 From bf8a1abc3ddbd6e9a8312ea7d96e5dd89c140f18 Mon Sep 17 00:00:00 2001 From: Thiago Jung Bauermann Date: Thu, 29 Mar 2018 16:05:43 -0300 Subject: powerpc/kexec_file: Fix error code when trying to load kdump kernel kexec_file_load() on powerpc doesn't support kdump kernels yet, so it returns -ENOTSUPP in that case. I've recently learned that this errno is internal to the kernel and isn't supposed to be exposed to userspace. Therefore, change it to -EOPNOTSUPP, which is defined in a uapi header. This does indeed make kexec-tools happier. Before the patch, on ppc64le: # ~bauermann/src/kexec-tools/build/sbin/kexec -s -p /boot/vmlinuz kexec_file_load failed: Unknown error 524 After the patch: # ~bauermann/src/kexec-tools/build/sbin/kexec -s -p /boot/vmlinuz kexec_file_load failed: Operation not supported Fixes: a0458284f062 ("powerpc: Add support code for kexec_file_load()") Cc: stable@vger.kernel.org # v4.10+ Reported-by: Dave Young Signed-off-by: Thiago Jung Bauermann Reviewed-by: Simon Horman Reviewed-by: Dave Young Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/machine_kexec_file_64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/machine_kexec_file_64.c b/arch/powerpc/kernel/machine_kexec_file_64.c index e4395f937d63..45e0b7d5f200 100644 --- a/arch/powerpc/kernel/machine_kexec_file_64.c +++ b/arch/powerpc/kernel/machine_kexec_file_64.c @@ -43,7 +43,7 @@ int arch_kexec_kernel_image_probe(struct kimage *image, void *buf, /* We don't support crash kernels yet. */ if (image->type == KEXEC_TYPE_CRASH) - return -ENOTSUPP; + return -EOPNOTSUPP; for (i = 0; i < ARRAY_SIZE(kexec_file_loaders); i++) { fops = kexec_file_loaders[i]; -- cgit v1.2.3 From f0295e047fcf52ccb42561fb7de6942f5201b676 Mon Sep 17 00:00:00 2001 From: Michael Neuling Date: Mon, 26 Mar 2018 15:17:07 +1100 Subject: powerpc/eeh: Fix race with driver un/bind The current EEH callbacks can race with a driver unbind. This can result in a backtrace like this: EEH: Frozen PHB#0-PE#1fc detected EEH: PE location: S000009, PHB location: N/A CPU: 2 PID: 2312 Comm: kworker/u258:3 Not tainted 4.15.6-openpower1 #2 Workqueue: nvme-wq nvme_reset_work [nvme] Call Trace: dump_stack+0x9c/0xd0 (unreliable) eeh_dev_check_failure+0x420/0x470 eeh_check_failure+0xa0/0xa4 nvme_reset_work+0x138/0x1414 [nvme] process_one_work+0x1ec/0x328 worker_thread+0x2e4/0x3a8 kthread+0x14c/0x154 ret_from_kernel_thread+0x5c/0xc8 nvme nvme1: Removing after probe failure status: -19 cpu 0x23: Vector: 300 (Data Access) at [c000000ff50f3800] pc: c0080000089a0eb0: nvme_error_detected+0x4c/0x90 [nvme] lr: c000000000026564: eeh_report_error+0xe0/0x110 sp: c000000ff50f3a80 msr: 9000000000009033 dar: 400 dsisr: 40000000 current = 0xc000000ff507c000 paca = 0xc00000000fdc9d80 softe: 0 irq_happened: 0x01 pid = 782, comm = eehd Linux version 4.15.6-openpower1 (smc@smc-desktop) (gcc version 6.4.0 (Buildroot 2017.11.2-00008-g4b6188e)) #2 SMP Tue Feb 27 12:33:27 PST 2018 enter ?
for help eeh_report_error+0xe0/0x110 eeh_pe_dev_traverse+0xc0/0xdc eeh_handle_normal_event+0x184/0x4c4 eeh_handle_event+0x30/0x288 eeh_event_handler+0x124/0x170 kthread+0x14c/0x154 ret_from_kernel_thread+0x5c/0xc8 The first part is an EEH (on boot), the second part is the resulting crash. nvme probe starts the nvme_reset_work() worker thread. This worker thread starts touching the device, which sees a device error (EEH) and hence queues up an event in the powerpc EEH worker thread. nvme_reset_work() then continues and runs nvme_remove_dead_ctrl_work() which results in unbinding the driver from the device and hence releases all resources. At the same time, the EEH worker thread starts doing the EEH .error_detected() driver callback, which no longer works since the resources have been freed. This fixes the problem in the same way the generic PCIe AER code (in drivers/pci/pcie/aer/aerdrv_core.c) does. It makes the EEH code hold the device_lock() while performing the driver EEH callbacks and associated code. This ensures either the callbacks are no longer registered, or if they are registered the driver will not be removed from underneath us. This has been broken forever. The EEH callbacks were first introduced in 2005 (in 77bd7415610) but it's not clear if a lock was needed back then. Fixes: 77bd74156101 ("[PATCH] powerpc: PCI Error Recovery: PPC64 core recovery routines") Cc: stable@vger.kernel.org # v2.6.16+ Signed-off-by: Michael Neuling Reviewed-by: Benjamin Herrenschmidt Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/eeh_driver.c | 68 +++++++++++++++++++++++++--------------- 1 file changed, 42 insertions(+), 26 deletions(-) diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index 43ceb6263cd8..b8a329f04814 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -207,18 +207,18 @@ static void *eeh_report_error(void *data, void *userdata) if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe)) return NULL; + + device_lock(&dev->dev); dev->error_state = pci_channel_io_frozen; driver = eeh_pcid_get(dev); - if (!driver) return NULL; + if (!driver) goto out_no_dev; eeh_disable_irq(dev); if (!driver->err_handler || - !driver->err_handler->error_detected) { - eeh_pcid_put(dev); - return NULL; - } + !driver->err_handler->error_detected) + goto out; rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen); @@ -227,8 +227,12 @@ static void *eeh_report_error(void *data, void *userdata) if (*res == PCI_ERS_RESULT_NONE) *res = rc; edev->in_error = true; - eeh_pcid_put(dev); pci_uevent_ers(dev, PCI_ERS_RESULT_NONE); + +out: + eeh_pcid_put(dev); +out_no_dev: + device_unlock(&dev->dev); return NULL; } @@ -251,15 +255,14 @@ static void *eeh_report_mmio_enabled(void *data, void *userdata) if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe)) return NULL; + device_lock(&dev->dev); driver = eeh_pcid_get(dev); - if (!driver) return NULL; + if (!driver) goto out_no_dev; if (!driver->err_handler || !driver->err_handler->mmio_enabled || - (edev->mode & EEH_DEV_NO_HANDLER)) { - eeh_pcid_put(dev); - return NULL; - } + (edev->mode & EEH_DEV_NO_HANDLER)) + goto out; rc = driver->err_handler->mmio_enabled(dev); @@ -267,7 +270,10 @@ static void *eeh_report_mmio_enabled(void *data, void *userdata) if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc; if (*res == PCI_ERS_RESULT_NONE) *res = rc; +out: eeh_pcid_put(dev); +out_no_dev: + device_unlock(&dev->dev); return NULL; } @@ -290,20 +296,20 @@ static void *eeh_report_reset(void
*data, void *userdata) if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe)) return NULL; + + device_lock(&dev->dev); dev->error_state = pci_channel_io_normal; driver = eeh_pcid_get(dev); - if (!driver) return NULL; + if (!driver) goto out_no_dev; eeh_enable_irq(dev); if (!driver->err_handler || !driver->err_handler->slot_reset || (edev->mode & EEH_DEV_NO_HANDLER) || - (!edev->in_error)) { - eeh_pcid_put(dev); - return NULL; - } + (!edev->in_error)) + goto out; rc = driver->err_handler->slot_reset(dev); if ((*res == PCI_ERS_RESULT_NONE) || @@ -311,7 +317,10 @@ static void *eeh_report_reset(void *data, void *userdata) if (*res == PCI_ERS_RESULT_DISCONNECT && rc == PCI_ERS_RESULT_NEED_RESET) *res = rc; +out: eeh_pcid_put(dev); +out_no_dev: + device_unlock(&dev->dev); return NULL; } @@ -362,10 +371,12 @@ static void *eeh_report_resume(void *data, void *userdata) if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe)) return NULL; + + device_lock(&dev->dev); dev->error_state = pci_channel_io_normal; driver = eeh_pcid_get(dev); - if (!driver) return NULL; + if (!driver) goto out_no_dev; was_in_error = edev->in_error; edev->in_error = false; @@ -375,18 +386,20 @@ static void *eeh_report_resume(void *data, void *userdata) !driver->err_handler->resume || (edev->mode & EEH_DEV_NO_HANDLER) || !was_in_error) { edev->mode &= ~EEH_DEV_NO_HANDLER; - eeh_pcid_put(dev); - return NULL; + goto out; } driver->err_handler->resume(dev); - eeh_pcid_put(dev); pci_uevent_ers(dev, PCI_ERS_RESULT_RECOVERED); +out: + eeh_pcid_put(dev); #ifdef CONFIG_PCI_IOV if (eeh_ops->notify_resume && eeh_dev_to_pdn(edev)) eeh_ops->notify_resume(eeh_dev_to_pdn(edev)); #endif +out_no_dev: + device_unlock(&dev->dev); return NULL; } @@ -406,23 +419,26 @@ static void *eeh_report_failure(void *data, void *userdata) if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe)) return NULL; + + device_lock(&dev->dev); dev->error_state = pci_channel_io_perm_failure; driver = eeh_pcid_get(dev); - if (!driver) return NULL; + if (!driver) goto out_no_dev; eeh_disable_irq(dev); if (!driver->err_handler || - !driver->err_handler->error_detected) { - eeh_pcid_put(dev); - return NULL; - } + !driver->err_handler->error_detected) + goto out; driver->err_handler->error_detected(dev, pci_channel_io_perm_failure); - eeh_pcid_put(dev); pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT); +out: + eeh_pcid_put(dev); +out_no_dev: + device_unlock(&dev->dev); return NULL; } -- cgit v1.2.3 From 15b4dd7981496f51c5f9262a5e0761e48de6655f Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Tue, 27 Mar 2018 01:01:03 +1000 Subject: powerpc/64s: return more carefully from sreset NMI System Reset, being an NMI, must return more carefully than other interrupts. It has traditionally returned via the normal return-from-exception path, but that has a number of problems. - r13 does not get restored if returning to kernel. That behaviour is for interrupts which may cause a context switch, which sreset will never do. Interrupting OPAL (which uses a different r13) is one place where this causes breakage. - It may cause several other problems returning to kernel with preempt or TIF_EMULATE_STACK_STORE if it hits at the wrong time. It's safer just to have a simple restore and return, like machine check which is the other NMI.
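In C terms, the reconcile that the patch adds at NMI entry (and undoes at exit) amounts to roughly the following model. This is only an illustration of the ADD_RECONCILE_NMI asm in the diff below; the function names are invented, though the paca and pt_regs fields are the ones the asm touches:

    #include <asm/paca.h>      /* struct paca_struct */
    #include <asm/ptrace.h>    /* struct pt_regs */

    /* Entry: hide the NMI from the soft-mask machinery, no irq tracing. */
    static void nmi_entry_reconcile_sketch(struct paca_struct *paca,
                                           struct pt_regs *regs)
    {
            /* arch_irqs_disabled() must report true inside the NMI */
            paca->irq_soft_mask = IRQS_ALL_DISABLED;
            /* a hard disable (e.g. in xmon) may change irq_happened, so
             * stash it in the otherwise-unused DAR save slot */
            regs->dar = paca->irq_happened;
    }

    /* Exit: put the saved soft-mask state back just before the RFI. */
    static void nmi_exit_restore_sketch(struct paca_struct *paca,
                                        struct pt_regs *regs)
    {
            paca->irq_happened = regs->dar;
            paca->irq_soft_mask = regs->softe;
    }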
Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/exceptions-64s.S | 61 ++++++++++++++++++++++++++++++++++-- 1 file changed, 58 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 1a0aa70bcb2b..bc5d927da37a 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -139,6 +139,21 @@ EXC_COMMON_BEGIN(system_reset_idle_common) b pnv_powersave_wakeup #endif +/* + * Set IRQS_ALL_DISABLED unconditionally so arch_irqs_disabled does + * the right thing. We do not want to reconcile because that goes + * through irq tracing which we don't want in NMI. + * + * Save PACAIRQHAPPENED because some code will do a hard disable + * (e.g., xmon). So we want to restore this back to where it was + * when we return. DAR is unused in the stack, so save it there. + */ +#define ADD_RECONCILE_NMI \ + li r10,IRQS_ALL_DISABLED; \ + stb r10,PACAIRQSOFTMASK(r13); \ + lbz r10,PACAIRQHAPPENED(r13); \ + std r10,_DAR(r1) + EXC_COMMON_BEGIN(system_reset_common) /* * Increment paca->in_nmi then enable MSR_RI. SLB or MCE will be able @@ -157,16 +172,56 @@ EXC_COMMON_BEGIN(system_reset_common) subi r1,r1,INT_FRAME_SIZE EXCEPTION_COMMON_NORET_STACK(PACA_EXNMI, 0x100, system_reset, system_reset_exception, - ADD_NVGPRS;ADD_RECONCILE) + ADD_NVGPRS;ADD_RECONCILE_NMI) + + /* This (and MCE) can be simplified with mtmsrd L=1 */ + /* Clear MSR_RI before setting SRR0 and SRR1. */ + li r0,MSR_RI + mfmsr r9 + andc r9,r9,r0 + mtmsrd r9,1 /* - * The stack is no longer in use, decrement in_nmi. + * MSR_RI is clear, now we can decrement paca->in_nmi. */ lhz r10,PACA_IN_NMI(r13) subi r10,r10,1 sth r10,PACA_IN_NMI(r13) - b ret_from_except + /* + * Restore soft mask settings. + */ + ld r10,_DAR(r1) + stb r10,PACAIRQHAPPENED(r13) + ld r10,SOFTE(r1) + stb r10,PACAIRQSOFTMASK(r13) + + /* + * Keep below code in synch with MACHINE_CHECK_HANDLER_WINDUP. + * Should share common bits... + */ + + /* Move original SRR0 and SRR1 into the respective regs */ + ld r9,_MSR(r1) + mtspr SPRN_SRR1,r9 + ld r3,_NIP(r1) + mtspr SPRN_SRR0,r3 + ld r9,_CTR(r1) + mtctr r9 + ld r9,_XER(r1) + mtxer r9 + ld r9,_LINK(r1) + mtlr r9 + REST_GPR(0, r1) + REST_8GPRS(2, r1) + REST_GPR(10, r1) + ld r11,_CCR(r1) + mtcr r11 + REST_GPR(11, r1) + REST_2GPRS(12, r1) + /* restore original r1. */ + ld r1,GPR1(r1) + RFI_TO_USER_OR_KERNEL #ifdef CONFIG_PPC_PSERIES /* -- cgit v1.2.3 From d40b6768e45bd9213139b2d91d30c7692b6007b1 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Tue, 27 Mar 2018 01:01:16 +1000 Subject: powerpc/64s: sreset panic if there is no debugger or crash dump handlers system_reset_exception does most of its own crash handling now, invoking the debugger or crash dumps if they are registered. If not, then it goes through to die() to print stack traces, and then is supposed to panic (according to comments). However after die() prints oopses, it does its own handling which doesn't allow system_reset_exception to panic (e.g., it may just kill the current process). This patch causes sreset exceptions to return from die after it prints messages but before acting. This also stops die from invoking the debugger on 0x100 crashes. system_reset_exception similarly calls the debugger. It had been thought this was harmless (because if the debugger was disabled, neither call would fire, and if it was enabled the first call would return). 
However in some cases like xmon 'X' command, the debugger returns 0, which currently causes it to be entered again (first in system_reset_exception, then in die), which is confusing. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/traps.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index f200bfd98b17..2c1a1d24f0ab 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -208,6 +208,12 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, } raw_local_irq_restore(flags); + /* + * system_reset_excption handles debugger, crash dump, panic, for 0x100 + */ + if (TRAP(regs) == 0x100) + return; + crash_fadump(regs, "die oops"); if (kexec_should_crash(current)) @@ -272,8 +278,13 @@ void die(const char *str, struct pt_regs *regs, long err) { unsigned long flags; - if (debugger(regs)) - return; + /* + * system_reset_excption handles debugger, crash dump, panic, for 0x100 + */ + if (TRAP(regs) != 0x100) { + if (debugger(regs)) + return; + } flags = oops_begin(regs); if (__die(str, regs, err)) -- cgit v1.2.3 From 3d4fbffdd703d2b968db443911f2147c732a4a48 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Sat, 18 Nov 2017 00:08:05 +1000 Subject: powerpc/64s/idle: POWER9 implement a separate idle stop function for hotplug Implement a new function to invoke stop, power9_offline_stop, which is like power9_idle_stop but used by the cpu hotplug code. Move KVM secondary state manipulation code to the offline case. Signed-off-by: Nicholas Piggin Reviewed-by: Vaidyanathan Srinivasan Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/processor.h | 1 + arch/powerpc/kernel/idle_book3s.S | 24 ++++++++++++++++++------ arch/powerpc/platforms/powernv/idle.c | 2 +- 3 files changed, 20 insertions(+), 7 deletions(-) diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index bb9cb25ffb20..c4b36a494a63 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -518,6 +518,7 @@ extern int powersave_nap; /* set if nap mode can be used in idle loop */ extern unsigned long power7_idle_insn(unsigned long type); /* PNV_THREAD_NAP/etc*/ extern void power7_idle_type(unsigned long type); extern unsigned long power9_idle_stop(unsigned long psscr_val); +extern unsigned long power9_offline_stop(unsigned long psscr_val); extern void power9_idle_type(unsigned long stop_psscr_val, unsigned long stop_psscr_mask); diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S index 89157cf452e3..2896ccf3138d 100644 --- a/arch/powerpc/kernel/idle_book3s.S +++ b/arch/powerpc/kernel/idle_book3s.S @@ -325,12 +325,6 @@ enter_winkle: * r3 - PSSCR value corresponding to the requested stop state. */ power_enter_stop: -#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE - /* Tell KVM we're entering idle */ - li r4,KVM_HWTHREAD_IN_IDLE - /* DO THIS IN REAL MODE! See comment above. */ - stb r4,HSTATE_HWTHREAD_STATE(r13) -#endif /* * Check if we are executing the lite variant with ESL=EC=0 */ @@ -425,6 +419,24 @@ ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66); \ ld r3,ORIG_GPR3(r1); /* Restore original r3 */ \ 20: nop; +/* + * Entered with MSR[EE]=0 and no soft-masked interrupts pending. + * r3 contains desired PSSCR register value. 
+ */ +_GLOBAL(power9_offline_stop) + std r3, PACA_REQ_PSSCR(r13) + mtspr SPRN_PSSCR,r3 +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE + /* Tell KVM we're entering idle */ + li r4,KVM_HWTHREAD_IN_IDLE + /* DO THIS IN REAL MODE! See comment above. */ + stb r4,HSTATE_HWTHREAD_STATE(r13) +#endif + LOAD_REG_ADDR(r4,power_enter_stop) + b pnv_powersave_common + /* No return */ + + /* * Entered with MSR[EE]=0 and no soft-masked interrupts pending. * r3 contains desired PSSCR register value. diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c index d9e366bb23da..378fde1f85a8 100644 --- a/arch/powerpc/platforms/powernv/idle.c +++ b/arch/powerpc/platforms/powernv/idle.c @@ -511,7 +511,7 @@ unsigned long pnv_cpu_offline(unsigned int cpu) psscr = mfspr(SPRN_PSSCR); psscr = (psscr & ~pnv_deepest_stop_psscr_mask) | pnv_deepest_stop_psscr_val; - srr1 = power9_idle_stop(psscr); + srr1 = power9_offline_stop(psscr); } else if ((idle_states & OPAL_PM_WINKLE_ENABLED) && (idle_states & OPAL_PM_LOSE_FULL_CONTEXT)) { -- cgit v1.2.3 From 8c1c7fb0b5ec95c392e9b585a6cf8cde254308d3 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Sat, 18 Nov 2017 00:08:06 +1000 Subject: powerpc/64s/idle: avoid sync for KVM state when waking from idle When waking from a CPU idle instruction (e.g., nap or stop), the sync for ordering the KVM secondary thread state can be avoided if the wakeup is coming from a kernel context rather than a KVM context. This improves performance for the ping-pong benchmark with the stop0 idle state by 0.46% for 2 threads in the same core, and 1.02% for different cores. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/idle_book3s.S | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S index 2896ccf3138d..903ec2a5c76c 100644 --- a/arch/powerpc/kernel/idle_book3s.S +++ b/arch/powerpc/kernel/idle_book3s.S @@ -551,6 +551,9 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300) mr r3,r12 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE + lbz r0,HSTATE_HWTHREAD_STATE(r13) + cmpwi r0,KVM_HWTHREAD_IN_KERNEL + beq 1f li r0,KVM_HWTHREAD_IN_KERNEL stb r0,HSTATE_HWTHREAD_STATE(r13) /* Order setting hwthread_state vs. testing hwthread_req */ -- cgit v1.2.3 From 147704534e2de30dd47171d55240c394b24f4053 Mon Sep 17 00:00:00 2001 From: Mark Greer Date: Fri, 16 Mar 2018 14:54:43 -0700 Subject: powerpc/boot: Remove duplicate typedefs from libfdt_env.h When building a uImage or zImage using ppc6xx_defconfig and some other defconfigs, the following error occurs with GCC 4.5.1: /arch/powerpc/boot/libfdt_env.h:10:13: error: redefinition of typedef 'uint32_t' /arch/powerpc/boot/types.h:21:13: note: previous declaration of 'uint32_t' was here /arch/powerpc/boot/libfdt_env.h:11:13: error: redefinition of typedef 'uint64_t' /arch/powerpc/boot/types.h:22:13: note: previous declaration of 'uint64_t' was here The problem is that commit 656ad58ef19e (powerpc/boot: Add OPAL console to epapr wrappers) adds typedefs for uint32_t and uint64_t to types.h but doesn't remove the pre-existing (and now duplicate) typedefs from libfdt_env.h.
Fix the error by removing the duplicate typedefs from libfdt_env.h. Signed-off-by: Mark Greer Signed-off-by: Michael Ellerman --- arch/powerpc/boot/libfdt_env.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/powerpc/boot/libfdt_env.h b/arch/powerpc/boot/libfdt_env.h index f52c31b1f48f..2a0c8b1bf147 100644 --- a/arch/powerpc/boot/libfdt_env.h +++ b/arch/powerpc/boot/libfdt_env.h @@ -7,8 +7,6 @@ #include "of.h" -typedef u32 uint32_t; -typedef u64 uint64_t; typedef unsigned long uintptr_t; typedef __be16 fdt16_t; -- cgit v1.2.3 From b842bd0f7a61b129a672f8b038325e2ca5d36329 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 21 Feb 2018 05:08:24 +1000 Subject: powerpc/64s: Add all POWER9 features to CPU_FTRS_ALWAYS It's not a bug to have features missing in CPU_FTRS_ALWAYS, but it is a missed opportunity for optimisation. Signed-off-by: Nicholas Piggin [mpe: Change log] Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/cputable.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index 66eba1e0007b..4b703fe944c9 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -546,7 +546,7 @@ enum { CPU_FTRS_POWER6 & CPU_FTRS_POWER7 & CPU_FTRS_CELL & \ CPU_FTRS_PA6T & CPU_FTRS_POWER8 & CPU_FTRS_POWER8E & \ CPU_FTRS_POWER8_DD1 & ~CPU_FTR_HVMODE & CPU_FTRS_POSSIBLE & \ - CPU_FTRS_POWER9) + CPU_FTRS_POWER9 & CPU_FTRS_POWER9_DD1 & CPU_FTRS_POWER9_DD2_1) #endif #else enum { -- cgit v1.2.3 From d50614fa45760f5ec1772509625e1bf4abe5d052 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 21 Feb 2018 05:08:25 +1000 Subject: powerpc/64s: Explicitly add vector features to CPU_FTRS_POSSIBLE ALTIVEC and VSX features are not added by default to the POWERx CPU feature sets because they are intended to be enabled by firmware. Currently they end up in CPU_FTRS_POSSIBLE due to their inclusion in the sets for other CPUs, eg. PPC970. But they should be added individually to the CPU_FTRS_POSSIBLE set, because if we reduce the set of CPUs that are built for, they may disappear from the possible mask. It already contains CPU_FTR_VSX, so add ALTIVEC. The _COMP features should be used because they won't be present if compiled out.
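Both of these mask tweaks feed the same compile-time optimisation: feature tests against constant masks can be folded away. Schematically it behaves like this simplified sketch of the logic in asm/cputable.h (the real cpu_has_feature() also layers a jump-label fast path on top):

    /* How CPU_FTRS_ALWAYS / CPU_FTRS_POSSIBLE let the compiler fold tests. */
    static inline bool cpu_has_feature_sketch(unsigned long feature)
    {
            if (CPU_FTRS_ALWAYS & feature)
                    return true;    /* present on every CPU we build for */
            if (!(CPU_FTRS_POSSIBLE & feature))
                    return false;   /* present on no CPU we build for */
            return !!(cur_cpu_spec->cpu_features & feature); /* runtime test */
    }

So a feature missing from CPU_FTRS_ALWAYS, or spuriously present in CPU_FTRS_POSSIBLE, is never a correctness bug; it just forces a runtime test the compiler could otherwise have removed.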
Signed-off-by: Nicholas Piggin [mpe: Add detail to change log] Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/cputable.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index 4b703fe944c9..f6e0d95da004 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -492,8 +492,8 @@ static inline void cpu_feature_keys_init(void) { } (CPU_FTRS_POWER4 | CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | \ CPU_FTRS_POWER6 | CPU_FTRS_POWER7 | CPU_FTRS_POWER8E | \ CPU_FTRS_POWER8 | CPU_FTRS_POWER8_DD1 | CPU_FTRS_CELL | \ - CPU_FTRS_PA6T | CPU_FTR_VSX | CPU_FTRS_POWER9 | \ - CPU_FTRS_POWER9_DD1 | CPU_FTRS_POWER9_DD2_1 | \ + CPU_FTRS_PA6T | CPU_FTR_VSX_COMP | CPU_FTR_ALTIVEC_COMP | \ + CPU_FTRS_POWER9 | CPU_FTRS_POWER9_DD1 | CPU_FTRS_POWER9_DD2_1 | \ CPU_FTRS_POWER9_DD2_2) #endif #else -- cgit v1.2.3 From 15a3204d24a3c01b116fb7c0692b7c670cac631d Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 21 Feb 2018 05:08:26 +1000 Subject: powerpc/64s: Set assembler machine type to POWER4 Rather than override the machine type in .S code (which can hide wrong or ambiguous code generation for the target), set the type to power4 for all assembly. This also means we need to be careful not to build power4-only code when we're not building for Book3S, such as the "power7" versions of copyuser/page/memcpy. Signed-off-by: Nicholas Piggin [mpe: Fix Book3E build, don't build the "power7" variants for non-Book3S] Signed-off-by: Michael Ellerman --- arch/powerpc/Makefile | 1 + arch/powerpc/include/asm/ppc_asm.h | 11 ++++------- arch/powerpc/kernel/entry_64.S | 2 +- arch/powerpc/kernel/exceptions-64s.S | 10 ++-------- arch/powerpc/lib/Makefile | 6 ++++-- arch/powerpc/lib/copypage_64.S | 2 ++ arch/powerpc/lib/copypage_power7.S | 3 --- arch/powerpc/lib/copyuser_64.S | 2 ++ arch/powerpc/lib/copyuser_power7.S | 3 --- arch/powerpc/lib/memcpy_64.S | 2 ++ arch/powerpc/lib/memcpy_power7.S | 3 --- 11 files changed, 18 insertions(+), 27 deletions(-) diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index c7628e973084..af46d22fa660 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -245,6 +245,7 @@ endif cpu-as-$(CONFIG_4xx) += -Wa,-m405 cpu-as-$(CONFIG_ALTIVEC) += $(call as-option,-Wa$(comma)-maltivec) cpu-as-$(CONFIG_E200) += -Wa,-me200 +cpu-as-$(CONFIG_PPC_BOOK3S_64) += -Wa,-mpower4 KBUILD_AFLAGS += $(cpu-as-y) KBUILD_CFLAGS += $(cpu-as-y) diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index ae94b3626b6c..13f7f4c0e1ea 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -439,14 +439,11 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601) /* The following stops all load and store data streams associated with stream * ID (ie. streams created explicitly). The embedded and server mnemonics for - * dcbt are different so we use machine "power4" here explicitly. + * dcbt are different so this must only be used for server. */ -#define DCBT_STOP_ALL_STREAM_IDS(scratch) \ -.machine push ; \ -.machine "power4" ; \ - lis scratch,0x60000000@h; \ - dcbt 0,scratch,0b01010; \ -.machine pop +#define DCBT_BOOK3S_STOP_ALL_STREAM_IDS(scratch) \ + lis scratch,0x60000000@h; \ + dcbt 0,scratch,0b01010 /* * toreal/fromreal/tophys/tovirt macros. 
32-bit BookE makes them diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 2cb5109a7ea3..51695608c68b 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -545,7 +545,7 @@ _GLOBAL(_switch) /* Cancel all explict user streams as they will have no use after context * switch and will stop the HW from creating streams itself */ - DCBT_STOP_ALL_STREAM_IDS(r6) + DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r6) #endif addi r6,r4,-THREAD /* Convert THREAD to 'current' */ diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index bc5d927da37a..ae6a849db60b 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -687,14 +687,11 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX) bne cr4,1f /* returning to kernel */ -.machine push -.machine "power4" mtcrf 0x80,r9 mtcrf 0x08,r9 /* MSR[PR] indication is in cr4 */ mtcrf 0x04,r9 /* MSR[RI] indication is in cr5 */ mtcrf 0x02,r9 /* I/D indication is in cr6 */ mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ -.machine pop RESTORE_CTR(r9, PACA_EXSLB) RESTORE_PPR_PACA(PACA_EXSLB, r9) @@ -707,14 +704,11 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX) RFI_TO_USER b . /* prevent speculative execution */ 1: -.machine push -.machine "power4" mtcrf 0x80,r9 mtcrf 0x08,r9 /* MSR[PR] indication is in cr4 */ mtcrf 0x04,r9 /* MSR[RI] indication is in cr5 */ mtcrf 0x02,r9 /* I/D indication is in cr6 */ mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ -.machine pop RESTORE_CTR(r9, PACA_EXSLB) RESTORE_PPR_PACA(PACA_EXSLB, r9) @@ -1524,7 +1518,7 @@ TRAMP_REAL_BEGIN(rfi_flush_fallback) ld r11,PACA_L1D_FLUSH_SIZE(r13) srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */ mtctr r11 - DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ + DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ /* order ld/st prior to dcbt stop all streams with flushing */ sync @@ -1564,7 +1558,7 @@ TRAMP_REAL_BEGIN(hrfi_flush_fallback) ld r11,PACA_L1D_FLUSH_SIZE(r13) srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */ mtctr r11 - DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ + DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ /* order ld/st prior to dcbt stop all streams with flushing */ sync diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index 3c29c9009bbf..653901042ad7 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -22,9 +22,11 @@ ifeq ($(call ld-ifversion, -lt, 225000000, y),y) extra-$(CONFIG_PPC64) += crtsavres.o endif +obj-$(CONFIG_PPC_BOOK3S_64) += copyuser_power7.o copypage_power7.o \ + memcpy_power7.o + obj64-y += copypage_64.o copyuser_64.o mem_64.o hweight_64.o \ - copyuser_power7.o string_64.o copypage_power7.o memcpy_power7.o \ - memcpy_64.o memcmp_64.o pmem.o + string_64.o memcpy_64.o memcmp_64.o pmem.o obj64-$(CONFIG_SMP) += locks.o obj64-$(CONFIG_ALTIVEC) += vmx-helper.o diff --git a/arch/powerpc/lib/copypage_64.S b/arch/powerpc/lib/copypage_64.S index 4bcc9e76fb55..8d5034f645f3 100644 --- a/arch/powerpc/lib/copypage_64.S +++ b/arch/powerpc/lib/copypage_64.S @@ -21,7 +21,9 @@ _GLOBAL_TOC(copy_page) BEGIN_FTR_SECTION lis r5,PAGE_SIZE@h FTR_SECTION_ELSE +#ifdef CONFIG_PPC_BOOK3S_64 b copypage_power7 +#endif ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY) ori r5,r5,PAGE_SIZE@l BEGIN_FTR_SECTION diff --git a/arch/powerpc/lib/copypage_power7.S b/arch/powerpc/lib/copypage_power7.S index ca5fc8fa7efc..8fa73b7ab20e 100644 --- a/arch/powerpc/lib/copypage_power7.S +++ 
b/arch/powerpc/lib/copypage_power7.S @@ -42,8 +42,6 @@ _GLOBAL(copypage_power7) lis r8,0x8000 /* GO=1 */ clrldi r8,r8,32 -.machine push -.machine "power4" /* setup read stream 0 */ dcbt 0,r4,0b01000 /* addr from */ dcbt 0,r7,0b01010 /* length and depth from */ @@ -52,7 +50,6 @@ _GLOBAL(copypage_power7) dcbtst 0,r10,0b01010 /* length and depth to */ eieio dcbt 0,r8,0b01010 /* all streams GO */ -.machine pop #ifdef CONFIG_ALTIVEC mflr r0 diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S index 08da06e1bd72..506677395681 100644 --- a/arch/powerpc/lib/copyuser_64.S +++ b/arch/powerpc/lib/copyuser_64.S @@ -20,11 +20,13 @@ .align 7 _GLOBAL_TOC(__copy_tofrom_user) +#ifdef CONFIG_PPC_BOOK3S_64 BEGIN_FTR_SECTION nop FTR_SECTION_ELSE b __copy_tofrom_user_power7 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY) +#endif _GLOBAL(__copy_tofrom_user_base) /* first check for a whole page copy on a page boundary */ cmpldi cr1,r5,16 diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S index d416a4a66578..215e4760c09f 100644 --- a/arch/powerpc/lib/copyuser_power7.S +++ b/arch/powerpc/lib/copyuser_power7.S @@ -312,8 +312,6 @@ err1; stb r0,0(r3) lis r8,0x8000 /* GO=1 */ clrldi r8,r8,32 -.machine push -.machine "power4" /* setup read stream 0 */ dcbt 0,r6,0b01000 /* addr from */ dcbt 0,r7,0b01010 /* length and depth from */ @@ -322,7 +320,6 @@ err1; stb r0,0(r3) dcbtst 0,r10,0b01010 /* length and depth to */ eieio dcbt 0,r8,0b01010 /* all streams GO */ -.machine pop beq cr1,.Lunwind_stack_nonvmx_copy diff --git a/arch/powerpc/lib/memcpy_64.S b/arch/powerpc/lib/memcpy_64.S index f4d6088e2d53..8d8265be1a59 100644 --- a/arch/powerpc/lib/memcpy_64.S +++ b/arch/powerpc/lib/memcpy_64.S @@ -19,9 +19,11 @@ BEGIN_FTR_SECTION std r3,-STACKFRAMESIZE+STK_REG(R31)(r1) /* save destination pointer for return value */ #endif FTR_SECTION_ELSE +#ifdef CONFIG_PPC_BOOK3S_64 #ifndef SELFTEST b memcpy_power7 #endif +#endif ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY) #ifdef __LITTLE_ENDIAN__ /* dumb little-endian memcpy that will get replaced at runtime */ diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S index 193909abd18b..df7de9d3da08 100644 --- a/arch/powerpc/lib/memcpy_power7.S +++ b/arch/powerpc/lib/memcpy_power7.S @@ -259,15 +259,12 @@ _GLOBAL(memcpy_power7) lis r8,0x8000 /* GO=1 */ clrldi r8,r8,32 -.machine push -.machine "power4" dcbt 0,r6,0b01000 dcbt 0,r7,0b01010 dcbtst 0,r9,0b01000 dcbtst 0,r10,0b01010 eieio dcbt 0,r8,0b01010 /* GO */ -.machine pop beq cr1,.Lunwind_stack_nonvmx_copy -- cgit v1.2.3 From 9e9626ed3a4affe7fe0e17e98c357849ad299e50 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 21 Feb 2018 05:08:27 +1000 Subject: powerpc/64s: Fix POWER9 DD2.2 and above in DT CPU features The CPU_FTR_POWER9_DD2_1 flag is intended to be set for DD2.1 and above (which is what the cputable setup does). Fix DT CPU features quirk setup to match. 
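The quirk keys entirely off the PVR (processor version register). As a sketch of the decoding used in the hunk below (the helper name is invented; the masks come straight from the patch: the upper half 0x004e identifies POWER9, and bit 0x1000 of the low half varies between POWER9 chip variants, so it is masked out before comparing the DD revision level):

    /* Does this PVR denote a POWER9 at the given DD (revision) level? */
    static bool is_power9_dd_sketch(u32 pvr, u32 dd)
    {
            return (pvr & 0xffffefff) == (0x004e0000 | dd);
    }

    /* e.g. is_power9_dd_sketch(version, 0x0201) matches DD2.1 parts,
     *      is_power9_dd_sketch(version, 0x0202) matches DD2.2 parts */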
Signed-off-by: Nicholas Piggin [mpe: Merge with upstream changes] Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/dt_cpu_ftrs.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index 4313ff07edca..11a3a4fed3fb 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -708,11 +708,16 @@ static __init void cpufeatures_cpu_quirks(void) */ if ((version & 0xffffff00) == 0x004e0100) cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD1; + else if ((version & 0xffffefff) == 0x004e0200) + ; /* DD2.0 has no feature flag */ else if ((version & 0xffffefff) == 0x004e0201) cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1; - else if ((version & 0xffffefff) == 0x004e0202) - cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_HV_ASSIST | - CPU_FTR_P9_TM_XER_SO_BUG; + else if ((version & 0xffffefff) == 0x004e0202) { + cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_HV_ASSIST; + cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_XER_SO_BUG; + cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1; + } else /* DD2.1 and up have DD2_1 */ + cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1; if ((version & 0xffff0000) == 0x004e0000) { cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR); -- cgit v1.2.3 From 3735eb850e6c3e7472329fea368e7d030a4d89dd Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 21 Feb 2018 05:08:28 +1000 Subject: powerpc: Remove unused CPU_FTR_ARCH_201 The last usage was removed in c17b98cf6028 ("KVM: PPC: Book3S HV: Remove code for PPC970 processors") (Dec 2014). Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/cputable.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index f6e0d95da004..17c791277d02 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -183,7 +183,6 @@ static inline void cpu_feature_keys_init(void) { } #define CPU_FTR_REAL_LE LONG_ASM_CONST(0x0000000000001000) #define CPU_FTR_HVMODE LONG_ASM_CONST(0x0000000000002000) -#define CPU_FTR_ARCH_201 LONG_ASM_CONST(0x0000000000004000) #define CPU_FTR_ARCH_206 LONG_ASM_CONST(0x0000000000008000) #define CPU_FTR_ARCH_207S LONG_ASM_CONST(0x0000000000010000) #define CPU_FTR_ARCH_300 LONG_ASM_CONST(0x0000000000020000) @@ -419,7 +418,7 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTR_MMCRA | CPU_FTR_CP_USE_DCBTZ | \ CPU_FTR_STCX_CHECKS_ADDRESS) #define CPU_FTRS_PPC970 (CPU_FTR_LWSYNC | \ - CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_201 | \ + CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \ CPU_FTR_CP_USE_DCBTZ | CPU_FTR_STCX_CHECKS_ADDRESS | \ CPU_FTR_HVMODE | CPU_FTR_DABRX) -- cgit v1.2.3 From 471d7ff8b51b63521c8ea35c51966ab4caa434ee Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 21 Feb 2018 05:08:29 +1000 Subject: powerpc/64s: Remove POWER4 support POWER4 has been broken since at least the change 49d09bf2a6 ("powerpc/64s: Optimise MSR handling in exception handling"), which requires mtmsrd L=1 support. This was introduced in ISA v2.01, and POWER4 supports ISA v2.00. 
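For context, the mtmsrd L=1 form that POWER4 lacks is what lets the 64s kernel flip just MSR[EE] and MSR[RI] without a full MSR read-modify-write. A minimal sketch of the idiom, modelled on __mtmsrd() and __hard_irq_enable() in the tree and simplified here:

    /* Enable external interrupts with the L=1 form of mtmsrd: only
     * MSR[EE] and MSR[RI] are taken from the source register, the rest
     * of the MSR is untouched. ISA v2.00 (POWER4) has no L field, so
     * this encoding is not valid there, hence the breakage above. */
    static inline void hard_irq_enable_sketch(void)
    {
            unsigned long bits = MSR_EE | MSR_RI;

            asm volatile("mtmsrd %0,1" : : "r" (bits) : "memory");
    }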
Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/Makefile | 5 +- arch/powerpc/include/asm/cputable.h | 8 +- arch/powerpc/include/asm/mmu.h | 6 +- arch/powerpc/kernel/cputable.c | 36 +- arch/powerpc/kernel/prom_init.c | 10 +- arch/powerpc/kvm/emulate.c | 6 - arch/powerpc/mm/hash_utils_64.c | 9 +- arch/powerpc/perf/Makefile | 2 +- arch/powerpc/perf/power4-pmu.c | 622 --------------------------------- arch/powerpc/platforms/Kconfig.cputype | 6 +- 10 files changed, 18 insertions(+), 692 deletions(-) delete mode 100644 arch/powerpc/perf/power4-pmu.c diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index af46d22fa660..373d7f5b7cde 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -146,8 +146,8 @@ CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 $(MULTIPLEWORD) CFLAGS-$(CONFIG_PPC32) += $(call cc-option,-mno-readonly-in-sdata) ifeq ($(CONFIG_PPC_BOOK3S_64),y) -CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power7,-mtune=power4) -CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=power4 +CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power7,$(call cc-option,-mtune=power5)) +CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mcpu=power5,-mcpu=power4) else CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=powerpc64 endif @@ -168,7 +168,6 @@ ifdef CONFIG_MPROFILE_KERNEL endif CFLAGS-$(CONFIG_CELL_CPU) += $(call cc-option,-mcpu=cell) -CFLAGS-$(CONFIG_POWER4_CPU) += $(call cc-option,-mcpu=power4) CFLAGS-$(CONFIG_POWER5_CPU) += $(call cc-option,-mcpu=power5) CFLAGS-$(CONFIG_POWER6_CPU) += $(call cc-option,-mcpu=power6) CFLAGS-$(CONFIG_POWER7_CPU) += $(call cc-option,-mcpu=power7) diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index 17c791277d02..7e22607e07b4 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -413,10 +413,6 @@ static inline void cpu_feature_keys_init(void) { } #define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN) /* 64-bit CPUs */ -#define CPU_FTRS_POWER4 (CPU_FTR_LWSYNC | \ - CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ - CPU_FTR_MMCRA | CPU_FTR_CP_USE_DCBTZ | \ - CPU_FTR_STCX_CHECKS_ADDRESS) #define CPU_FTRS_PPC970 (CPU_FTR_LWSYNC | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \ @@ -488,7 +484,7 @@ static inline void cpu_feature_keys_init(void) { } #define CPU_FTRS_POSSIBLE (CPU_FTRS_E6500 | CPU_FTRS_E5500) #else #define CPU_FTRS_POSSIBLE \ - (CPU_FTRS_POWER4 | CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | \ + (CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | \ CPU_FTRS_POWER6 | CPU_FTRS_POWER7 | CPU_FTRS_POWER8E | \ CPU_FTRS_POWER8 | CPU_FTRS_POWER8_DD1 | CPU_FTRS_CELL | \ CPU_FTRS_PA6T | CPU_FTR_VSX_COMP | CPU_FTR_ALTIVEC_COMP | \ @@ -541,7 +537,7 @@ enum { #define CPU_FTRS_ALWAYS (CPU_FTRS_E6500 & CPU_FTRS_E5500) #else #define CPU_FTRS_ALWAYS \ - (CPU_FTRS_POWER4 & CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & \ + (CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & \ CPU_FTRS_POWER6 & CPU_FTRS_POWER7 & CPU_FTRS_CELL & \ CPU_FTRS_PA6T & CPU_FTRS_POWER8 & CPU_FTRS_POWER8E & \ CPU_FTRS_POWER8_DD1 & ~CPU_FTR_HVMODE & CPU_FTRS_POSSIBLE & \ diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index bb38312cff28..61d15ce92278 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -111,9 +111,9 @@ /* MMU feature bit sets for various CPUs */ #define MMU_FTRS_DEFAULT_HPTE_ARCH_V2 \ MMU_FTR_HPTE_TABLE | MMU_FTR_PPCAS_ARCH_V2 -#define MMU_FTRS_POWER4 MMU_FTRS_DEFAULT_HPTE_ARCH_V2 -#define MMU_FTRS_PPC970 
MMU_FTRS_POWER4 | MMU_FTR_TLBIE_CROP_VA -#define MMU_FTRS_POWER5 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE +#define MMU_FTRS_POWER MMU_FTRS_DEFAULT_HPTE_ARCH_V2 +#define MMU_FTRS_PPC970 MMU_FTRS_POWER | MMU_FTR_TLBIE_CROP_VA +#define MMU_FTRS_POWER5 MMU_FTRS_POWER | MMU_FTR_LOCKLESS_TLBIE #define MMU_FTRS_POWER6 MMU_FTRS_POWER5 | MMU_FTR_KERNEL_RO | MMU_FTR_68_BIT_VA #define MMU_FTRS_POWER7 MMU_FTRS_POWER6 #define MMU_FTRS_POWER8 MMU_FTRS_POWER6 diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index b3de017bcd71..c8fc9691f8c7 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -133,36 +133,6 @@ extern void __restore_cpu_e6500(void); static struct cpu_spec __initdata cpu_specs[] = { #ifdef CONFIG_PPC_BOOK3S_64 - { /* Power4 */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x00350000, - .cpu_name = "POWER4 (gp)", - .cpu_features = CPU_FTRS_POWER4, - .cpu_user_features = COMMON_USER_POWER4, - .mmu_features = MMU_FTRS_POWER4 | MMU_FTR_TLBIE_CROP_VA, - .icache_bsize = 128, - .dcache_bsize = 128, - .num_pmcs = 8, - .pmc_type = PPC_PMC_IBM, - .oprofile_cpu_type = "ppc64/power4", - .oprofile_type = PPC_OPROFILE_POWER4, - .platform = "power4", - }, - { /* Power4+ */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x00380000, - .cpu_name = "POWER4+ (gq)", - .cpu_features = CPU_FTRS_POWER4, - .cpu_user_features = COMMON_USER_POWER4, - .mmu_features = MMU_FTRS_POWER4 | MMU_FTR_TLBIE_CROP_VA, - .icache_bsize = 128, - .dcache_bsize = 128, - .num_pmcs = 8, - .pmc_type = PPC_PMC_IBM, - .oprofile_cpu_type = "ppc64/power4", - .oprofile_type = PPC_OPROFILE_POWER4, - .platform = "power4", - }, { /* PPC970 */ .pvr_mask = 0xffff0000, .pvr_value = 0x00390000, @@ -628,15 +598,15 @@ static struct cpu_spec __initdata cpu_specs[] = { { /* default match */ .pvr_mask = 0x00000000, .pvr_value = 0x00000000, - .cpu_name = "POWER4 (compatible)", + .cpu_name = "POWER5 (compatible)", .cpu_features = CPU_FTRS_COMPATIBLE, .cpu_user_features = COMMON_USER_PPC64, - .mmu_features = MMU_FTRS_DEFAULT_HPTE_ARCH_V2, + .mmu_features = MMU_FTRS_POWER, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 6, .pmc_type = PPC_PMC_IBM, - .platform = "power4", + .platform = "power5", } #endif /* CONFIG_PPC_BOOK3S_64 */ diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 0323e073341d..e181fdea3da9 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -1810,16 +1810,8 @@ static void __init prom_initialize_tce_table(void) * size to 4 MB. This is enough to map 2GB of PCI DMA space. * By doing this, we avoid the pitfalls of trying to DMA to * MMIO space and the DMA alias hole. - * - * On POWER4, firmware sets the TCE region by assuming - * each TCE table is 8MB. Using this memory for anything - * else will impact performance, so we always allocate 8MB. - * Anton */ - if (pvr_version_is(PVR_POWER4) || pvr_version_is(PVR_POWER4p)) - minsize = 8UL << 20; - else - minsize = 4UL << 20; + minsize = 4UL << 20; /* Align to the greater of the align or size */ align = max(minalign, minsize); diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c index 4d8b4d6cebff..fa888bfc347e 100644 --- a/arch/powerpc/kvm/emulate.c +++ b/arch/powerpc/kvm/emulate.c @@ -45,12 +45,6 @@ void kvmppc_emulate_dec(struct kvm_vcpu *vcpu) #ifdef CONFIG_PPC_BOOK3S /* mtdec lowers the interrupt line when positive. 
*/ kvmppc_core_dequeue_dec(vcpu); - - /* POWER4+ triggers a dec interrupt if the value is < 0 */ - if (vcpu->arch.dec & 0x80000000) { - kvmppc_core_queue_dec(vcpu); - return; - } #endif #ifdef CONFIG_BOOKE diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 7587a2ec8874..0bd3790d35df 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -132,9 +132,10 @@ EXPORT_SYMBOL(mmu_hash_ops); * is provided by the firmware. */ -/* Pre-POWER4 CPUs (4k pages only) +/* + * Fallback (4k pages only) */ -static struct mmu_psize_def mmu_psize_defaults_old[] = { +static struct mmu_psize_def mmu_psize_defaults[] = { [MMU_PAGE_4K] = { .shift = 12, .sllp = 0, @@ -554,8 +555,8 @@ static void __init htab_scan_page_sizes(void) mmu_psize_set_default_penc(); /* Default to 4K pages only */ - memcpy(mmu_psize_defs, mmu_psize_defaults_old, - sizeof(mmu_psize_defaults_old)); + memcpy(mmu_psize_defs, mmu_psize_defaults, + sizeof(mmu_psize_defaults)); /* * Try to find the available page sizes in the device-tree diff --git a/arch/powerpc/perf/Makefile b/arch/powerpc/perf/Makefile index 57ebc655d2ac..82986d2acd9b 100644 --- a/arch/powerpc/perf/Makefile +++ b/arch/powerpc/perf/Makefile @@ -4,7 +4,7 @@ subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror obj-$(CONFIG_PERF_EVENTS) += callchain.o perf_regs.o obj-$(CONFIG_PPC_PERF_CTRS) += core-book3s.o bhrb.o -obj64-$(CONFIG_PPC_PERF_CTRS) += power4-pmu.o ppc970-pmu.o power5-pmu.o \ +obj64-$(CONFIG_PPC_PERF_CTRS) += ppc970-pmu.o power5-pmu.o \ power5+-pmu.o power6-pmu.o power7-pmu.o \ isa207-common.o power8-pmu.o power9-pmu.o obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o diff --git a/arch/powerpc/perf/power4-pmu.c b/arch/powerpc/perf/power4-pmu.c deleted file mode 100644 index ce6072fa481b..000000000000 --- a/arch/powerpc/perf/power4-pmu.c +++ /dev/null @@ -1,622 +0,0 @@ -/* - * Performance counter support for POWER4 (GP) and POWER4+ (GQ) processors. - * - * Copyright 2009 Paul Mackerras, IBM Corporation. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */ -#include -#include -#include -#include -#include - -/* - * Bits in event code for POWER4 - */ -#define PM_PMC_SH 12 /* PMC number (1-based) for direct events */ -#define PM_PMC_MSK 0xf -#define PM_UNIT_SH 8 /* TTMMUX number and setting - unit select */ -#define PM_UNIT_MSK 0xf -#define PM_LOWER_SH 6 -#define PM_LOWER_MSK 1 -#define PM_LOWER_MSKS 0x40 -#define PM_BYTE_SH 4 /* Byte number of event bus to use */ -#define PM_BYTE_MSK 3 -#define PM_PMCSEL_MSK 7 - -/* - * Unit code values - */ -#define PM_FPU 1 -#define PM_ISU1 2 -#define PM_IFU 3 -#define PM_IDU0 4 -#define PM_ISU1_ALT 6 -#define PM_ISU2 7 -#define PM_IFU_ALT 8 -#define PM_LSU0 9 -#define PM_LSU1 0xc -#define PM_GPS 0xf - -/* - * Bits in MMCR0 for POWER4 - */ -#define MMCR0_PMC1SEL_SH 8 -#define MMCR0_PMC2SEL_SH 1 -#define MMCR_PMCSEL_MSK 0x1f - -/* - * Bits in MMCR1 for POWER4 - */ -#define MMCR1_TTM0SEL_SH 62 -#define MMCR1_TTC0SEL_SH 61 -#define MMCR1_TTM1SEL_SH 59 -#define MMCR1_TTC1SEL_SH 58 -#define MMCR1_TTM2SEL_SH 56 -#define MMCR1_TTC2SEL_SH 55 -#define MMCR1_TTM3SEL_SH 53 -#define MMCR1_TTC3SEL_SH 52 -#define MMCR1_TTMSEL_MSK 3 -#define MMCR1_TD_CP_DBG0SEL_SH 50 -#define MMCR1_TD_CP_DBG1SEL_SH 48 -#define MMCR1_TD_CP_DBG2SEL_SH 46 -#define MMCR1_TD_CP_DBG3SEL_SH 44 -#define MMCR1_DEBUG0SEL_SH 43 -#define MMCR1_DEBUG1SEL_SH 42 -#define MMCR1_DEBUG2SEL_SH 41 -#define MMCR1_DEBUG3SEL_SH 40 -#define MMCR1_PMC1_ADDER_SEL_SH 39 -#define MMCR1_PMC2_ADDER_SEL_SH 38 -#define MMCR1_PMC6_ADDER_SEL_SH 37 -#define MMCR1_PMC5_ADDER_SEL_SH 36 -#define MMCR1_PMC8_ADDER_SEL_SH 35 -#define MMCR1_PMC7_ADDER_SEL_SH 34 -#define MMCR1_PMC3_ADDER_SEL_SH 33 -#define MMCR1_PMC4_ADDER_SEL_SH 32 -#define MMCR1_PMC3SEL_SH 27 -#define MMCR1_PMC4SEL_SH 22 -#define MMCR1_PMC5SEL_SH 17 -#define MMCR1_PMC6SEL_SH 12 -#define MMCR1_PMC7SEL_SH 7 -#define MMCR1_PMC8SEL_SH 2 /* note bit 0 is in MMCRA for GP */ - -static short mmcr1_adder_bits[8] = { - MMCR1_PMC1_ADDER_SEL_SH, - MMCR1_PMC2_ADDER_SEL_SH, - MMCR1_PMC3_ADDER_SEL_SH, - MMCR1_PMC4_ADDER_SEL_SH, - MMCR1_PMC5_ADDER_SEL_SH, - MMCR1_PMC6_ADDER_SEL_SH, - MMCR1_PMC7_ADDER_SEL_SH, - MMCR1_PMC8_ADDER_SEL_SH -}; - -/* - * Bits in MMCRA - */ -#define MMCRA_PMC8SEL0_SH 17 /* PMC8SEL bit 0 for GP */ - -/* - * Layout of constraint bits: - * 6666555555555544444444443333333333222222222211111111110000000000 - * 3210987654321098765432109876543210987654321098765432109876543210 - * |[ >[ >[ >|||[ >[ >< >< >< >< ><><><><><><><><> - * | UC1 UC2 UC3 ||| PS1 PS2 B0 B1 B2 B3 P1P2P3P4P5P6P7P8 - * \SMPL ||\TTC3SEL - * |\TTC_IFU_SEL - * \TTM2SEL0 - * - * SMPL - SAMPLE_ENABLE constraint - * 56: SAMPLE_ENABLE value 0x0100_0000_0000_0000 - * - * UC1 - unit constraint 1: can't have all three of FPU/ISU1/IDU0|ISU2 - * 55: UC1 error 0x0080_0000_0000_0000 - * 54: FPU events needed 0x0040_0000_0000_0000 - * 53: ISU1 events needed 0x0020_0000_0000_0000 - * 52: IDU0|ISU2 events needed 0x0010_0000_0000_0000 - * - * UC2 - unit constraint 2: can't have all three of FPU/IFU/LSU0 - * 51: UC2 error 0x0008_0000_0000_0000 - * 50: FPU events needed 0x0004_0000_0000_0000 - * 49: IFU events needed 0x0002_0000_0000_0000 - * 48: LSU0 events needed 0x0001_0000_0000_0000 - * - * UC3 - unit constraint 3: can't have all four of LSU0/IFU/IDU0|ISU2/ISU1 - * 47: UC3 error 0x8000_0000_0000 - * 46: LSU0 events needed 0x4000_0000_0000 - * 45: IFU events needed 0x2000_0000_0000 - * 44: IDU0|ISU2 events needed 0x1000_0000_0000 - * 43: ISU1 events needed 0x0800_0000_0000 - * - * TTM2SEL0 - * 42: 0 = IDU0 events needed - * 1 = ISU2 events needed 
0x0400_0000_0000 - * - * TTC_IFU_SEL - * 41: 0 = IFU.U events needed - * 1 = IFU.L events needed 0x0200_0000_0000 - * - * TTC3SEL - * 40: 0 = LSU1.U events needed - * 1 = LSU1.L events needed 0x0100_0000_0000 - * - * PS1 - * 39: PS1 error 0x0080_0000_0000 - * 36-38: count of events needing PMC1/2/5/6 0x0070_0000_0000 - * - * PS2 - * 35: PS2 error 0x0008_0000_0000 - * 32-34: count of events needing PMC3/4/7/8 0x0007_0000_0000 - * - * B0 - * 28-31: Byte 0 event source 0xf000_0000 - * 1 = FPU - * 2 = ISU1 - * 3 = IFU - * 4 = IDU0 - * 7 = ISU2 - * 9 = LSU0 - * c = LSU1 - * f = GPS - * - * B1, B2, B3 - * 24-27, 20-23, 16-19: Byte 1, 2, 3 event sources - * - * P8 - * 15: P8 error 0x8000 - * 14-15: Count of events needing PMC8 - * - * P1..P7 - * 0-13: Count of events needing PMC1..PMC7 - * - * Note: this doesn't allow events using IFU.U to be combined with events - * using IFU.L, though that is feasible (using TTM0 and TTM2). However - * there are no listed events for IFU.L (they are debug events not - * verified for performance monitoring) so this shouldn't cause a - * problem. - */ - -static struct unitinfo { - unsigned long value, mask; - int unit; - int lowerbit; -} p4_unitinfo[16] = { - [PM_FPU] = { 0x44000000000000ul, 0x88000000000000ul, PM_FPU, 0 }, - [PM_ISU1] = { 0x20080000000000ul, 0x88000000000000ul, PM_ISU1, 0 }, - [PM_ISU1_ALT] = - { 0x20080000000000ul, 0x88000000000000ul, PM_ISU1, 0 }, - [PM_IFU] = { 0x02200000000000ul, 0x08820000000000ul, PM_IFU, 41 }, - [PM_IFU_ALT] = - { 0x02200000000000ul, 0x08820000000000ul, PM_IFU, 41 }, - [PM_IDU0] = { 0x10100000000000ul, 0x80840000000000ul, PM_IDU0, 1 }, - [PM_ISU2] = { 0x10140000000000ul, 0x80840000000000ul, PM_ISU2, 0 }, - [PM_LSU0] = { 0x01400000000000ul, 0x08800000000000ul, PM_LSU0, 0 }, - [PM_LSU1] = { 0x00000000000000ul, 0x00010000000000ul, PM_LSU1, 40 }, - [PM_GPS] = { 0x00000000000000ul, 0x00000000000000ul, PM_GPS, 0 } -}; - -static unsigned char direct_marked_event[8] = { - (1<<2) | (1<<3), /* PMC1: PM_MRK_GRP_DISP, PM_MRK_ST_CMPL */ - (1<<3) | (1<<5), /* PMC2: PM_THRESH_TIMEO, PM_MRK_BRU_FIN */ - (1<<3), /* PMC3: PM_MRK_ST_CMPL_INT */ - (1<<4) | (1<<5), /* PMC4: PM_MRK_GRP_CMPL, PM_MRK_CRU_FIN */ - (1<<4) | (1<<5), /* PMC5: PM_MRK_GRP_TIMEO */ - (1<<3) | (1<<4) | (1<<5), - /* PMC6: PM_MRK_ST_GPS, PM_MRK_FXU_FIN, PM_MRK_GRP_ISSUED */ - (1<<4) | (1<<5), /* PMC7: PM_MRK_FPU_FIN, PM_MRK_INST_FIN */ - (1<<4), /* PMC8: PM_MRK_LSU_FIN */ -}; - -/* - * Returns 1 if event counts things relating to marked instructions - * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not. - */ -static int p4_marked_instr_event(u64 event) -{ - int pmc, psel, unit, byte, bit; - unsigned int mask; - - pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; - psel = event & PM_PMCSEL_MSK; - if (pmc) { - if (direct_marked_event[pmc - 1] & (1 << psel)) - return 1; - if (psel == 0) /* add events */ - bit = (pmc <= 4)? 
pmc - 1: 8 - pmc; - else if (psel == 6) /* decode events */ - bit = 4; - else - return 0; - } else - bit = psel; - - byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; - unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; - mask = 0; - switch (unit) { - case PM_LSU1: - if (event & PM_LOWER_MSKS) - mask = 1 << 28; /* byte 7 bit 4 */ - else - mask = 6 << 24; /* byte 3 bits 1 and 2 */ - break; - case PM_LSU0: - /* byte 3, bit 3; byte 2 bits 0,2,3,4,5; byte 1 */ - mask = 0x083dff00; - } - return (mask >> (byte * 8 + bit)) & 1; -} - -static int p4_get_constraint(u64 event, unsigned long *maskp, - unsigned long *valp) -{ - int pmc, byte, unit, lower, sh; - unsigned long mask = 0, value = 0; - int grp = -1; - - pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; - if (pmc) { - if (pmc > 8) - return -1; - sh = (pmc - 1) * 2; - mask |= 2 << sh; - value |= 1 << sh; - grp = ((pmc - 1) >> 1) & 1; - } - unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; - byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; - if (unit) { - lower = (event >> PM_LOWER_SH) & PM_LOWER_MSK; - - /* - * Bus events on bytes 0 and 2 can be counted - * on PMC1/2/5/6; bytes 1 and 3 on PMC3/4/7/8. - */ - if (!pmc) - grp = byte & 1; - - if (!p4_unitinfo[unit].unit) - return -1; - mask |= p4_unitinfo[unit].mask; - value |= p4_unitinfo[unit].value; - sh = p4_unitinfo[unit].lowerbit; - if (sh > 1) - value |= (unsigned long)lower << sh; - else if (lower != sh) - return -1; - unit = p4_unitinfo[unit].unit; - - /* Set byte lane select field */ - mask |= 0xfULL << (28 - 4 * byte); - value |= (unsigned long)unit << (28 - 4 * byte); - } - if (grp == 0) { - /* increment PMC1/2/5/6 field */ - mask |= 0x8000000000ull; - value |= 0x1000000000ull; - } else { - /* increment PMC3/4/7/8 field */ - mask |= 0x800000000ull; - value |= 0x100000000ull; - } - - /* Marked instruction events need sample_enable set */ - if (p4_marked_instr_event(event)) { - mask |= 1ull << 56; - value |= 1ull << 56; - } - - /* PMCSEL=6 decode events on byte 2 need sample_enable clear */ - if (pmc && (event & PM_PMCSEL_MSK) == 6 && byte == 2) - mask |= 1ull << 56; - - *maskp = mask; - *valp = value; - return 0; -} - -static unsigned int ppc_inst_cmpl[] = { - 0x1001, 0x4001, 0x6001, 0x7001, 0x8001 -}; - -static int p4_get_alternatives(u64 event, unsigned int flags, u64 alt[]) -{ - int i, j, na; - - alt[0] = event; - na = 1; - - /* 2 possibilities for PM_GRP_DISP_REJECT */ - if (event == 0x8003 || event == 0x0224) { - alt[1] = event ^ (0x8003 ^ 0x0224); - return 2; - } - - /* 2 possibilities for PM_ST_MISS_L1 */ - if (event == 0x0c13 || event == 0x0c23) { - alt[1] = event ^ (0x0c13 ^ 0x0c23); - return 2; - } - - /* several possibilities for PM_INST_CMPL */ - for (i = 0; i < ARRAY_SIZE(ppc_inst_cmpl); ++i) { - if (event == ppc_inst_cmpl[i]) { - for (j = 0; j < ARRAY_SIZE(ppc_inst_cmpl); ++j) - if (j != i) - alt[na++] = ppc_inst_cmpl[j]; - break; - } - } - - return na; -} - -static int p4_compute_mmcr(u64 event[], int n_ev, - unsigned int hwc[], unsigned long mmcr[], struct perf_event *pevents[]) -{ - unsigned long mmcr0 = 0, mmcr1 = 0, mmcra = 0; - unsigned int pmc, unit, byte, psel, lower; - unsigned int ttm, grp; - unsigned int pmc_inuse = 0; - unsigned int pmc_grp_use[2]; - unsigned char busbyte[4]; - unsigned char unituse[16]; - unsigned int unitlower = 0; - int i; - - if (n_ev > 8) - return -1; - - /* First pass to count resource use */ - pmc_grp_use[0] = pmc_grp_use[1] = 0; - memset(busbyte, 0, sizeof(busbyte)); - memset(unituse, 0, sizeof(unituse)); - for (i = 0; i < n_ev; ++i) { - pmc = (event[i] >> PM_PMC_SH) 
& PM_PMC_MSK; - if (pmc) { - if (pmc_inuse & (1 << (pmc - 1))) - return -1; - pmc_inuse |= 1 << (pmc - 1); - /* count 1/2/5/6 vs 3/4/7/8 use */ - ++pmc_grp_use[((pmc - 1) >> 1) & 1]; - } - unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; - byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; - lower = (event[i] >> PM_LOWER_SH) & PM_LOWER_MSK; - if (unit) { - if (!pmc) - ++pmc_grp_use[byte & 1]; - if (unit == 6 || unit == 8) - /* map alt ISU1/IFU codes: 6->2, 8->3 */ - unit = (unit >> 1) - 1; - if (busbyte[byte] && busbyte[byte] != unit) - return -1; - busbyte[byte] = unit; - lower <<= unit; - if (unituse[unit] && lower != (unitlower & lower)) - return -1; - unituse[unit] = 1; - unitlower |= lower; - } - } - if (pmc_grp_use[0] > 4 || pmc_grp_use[1] > 4) - return -1; - - /* - * Assign resources and set multiplexer selects. - * - * Units 1,2,3 are on TTM0, 4,6,7 on TTM1, 8,10 on TTM2. - * Each TTMx can only select one unit, but since - * units 2 and 6 are both ISU1, and 3 and 8 are both IFU, - * we have some choices. - */ - if (unituse[2] & (unituse[1] | (unituse[3] & unituse[9]))) { - unituse[6] = 1; /* Move 2 to 6 */ - unituse[2] = 0; - } - if (unituse[3] & (unituse[1] | unituse[2])) { - unituse[8] = 1; /* Move 3 to 8 */ - unituse[3] = 0; - unitlower = (unitlower & ~8) | ((unitlower & 8) << 5); - } - /* Check only one unit per TTMx */ - if (unituse[1] + unituse[2] + unituse[3] > 1 || - unituse[4] + unituse[6] + unituse[7] > 1 || - unituse[8] + unituse[9] > 1 || - (unituse[5] | unituse[10] | unituse[11] | - unituse[13] | unituse[14])) - return -1; - - /* Set TTMxSEL fields. Note, units 1-3 => TTM0SEL codes 0-2 */ - mmcr1 |= (unsigned long)(unituse[3] * 2 + unituse[2]) - << MMCR1_TTM0SEL_SH; - mmcr1 |= (unsigned long)(unituse[7] * 3 + unituse[6] * 2) - << MMCR1_TTM1SEL_SH; - mmcr1 |= (unsigned long)unituse[9] << MMCR1_TTM2SEL_SH; - - /* Set TTCxSEL fields. */ - if (unitlower & 0xe) - mmcr1 |= 1ull << MMCR1_TTC0SEL_SH; - if (unitlower & 0xf0) - mmcr1 |= 1ull << MMCR1_TTC1SEL_SH; - if (unitlower & 0xf00) - mmcr1 |= 1ull << MMCR1_TTC2SEL_SH; - if (unitlower & 0x7000) - mmcr1 |= 1ull << MMCR1_TTC3SEL_SH; - - /* Set byte lane select fields. 
*/ - for (byte = 0; byte < 4; ++byte) { - unit = busbyte[byte]; - if (!unit) - continue; - if (unit == 0xf) { - /* special case for GPS */ - mmcr1 |= 1ull << (MMCR1_DEBUG0SEL_SH - byte); - } else { - if (!unituse[unit]) - ttm = unit - 1; /* 2->1, 3->2 */ - else - ttm = unit >> 2; - mmcr1 |= (unsigned long)ttm - << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte); - } - } - - /* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */ - for (i = 0; i < n_ev; ++i) { - pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; - unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; - byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; - psel = event[i] & PM_PMCSEL_MSK; - if (!pmc) { - /* Bus event or 00xxx direct event (off or cycles) */ - if (unit) - psel |= 0x10 | ((byte & 2) << 2); - for (pmc = 0; pmc < 8; ++pmc) { - if (pmc_inuse & (1 << pmc)) - continue; - grp = (pmc >> 1) & 1; - if (unit) { - if (grp == (byte & 1)) - break; - } else if (pmc_grp_use[grp] < 4) { - ++pmc_grp_use[grp]; - break; - } - } - pmc_inuse |= 1 << pmc; - } else { - /* Direct event */ - --pmc; - if (psel == 0 && (byte & 2)) - /* add events on higher-numbered bus */ - mmcr1 |= 1ull << mmcr1_adder_bits[pmc]; - else if (psel == 6 && byte == 3) - /* seem to need to set sample_enable here */ - mmcra |= MMCRA_SAMPLE_ENABLE; - psel |= 8; - } - if (pmc <= 1) - mmcr0 |= psel << (MMCR0_PMC1SEL_SH - 7 * pmc); - else - mmcr1 |= psel << (MMCR1_PMC3SEL_SH - 5 * (pmc - 2)); - if (pmc == 7) /* PMC8 */ - mmcra |= (psel & 1) << MMCRA_PMC8SEL0_SH; - hwc[i] = pmc; - if (p4_marked_instr_event(event[i])) - mmcra |= MMCRA_SAMPLE_ENABLE; - } - - if (pmc_inuse & 1) - mmcr0 |= MMCR0_PMC1CE; - if (pmc_inuse & 0xfe) - mmcr0 |= MMCR0_PMCjCE; - - mmcra |= 0x2000; /* mark only one IOP per PPC instruction */ - - /* Return MMCRx values */ - mmcr[0] = mmcr0; - mmcr[1] = mmcr1; - mmcr[2] = mmcra; - return 0; -} - -static void p4_disable_pmc(unsigned int pmc, unsigned long mmcr[]) -{ - /* - * Setting the PMCxSEL field to 0 disables PMC x. - * (Note that pmc is 0-based here, not 1-based.) - */ - if (pmc <= 1) { - mmcr[0] &= ~(0x1fUL << (MMCR0_PMC1SEL_SH - 7 * pmc)); - } else { - mmcr[1] &= ~(0x1fUL << (MMCR1_PMC3SEL_SH - 5 * (pmc - 2))); - if (pmc == 7) - mmcr[2] &= ~(1UL << MMCRA_PMC8SEL0_SH); - } -} - -static int p4_generic_events[] = { - [PERF_COUNT_HW_CPU_CYCLES] = 7, - [PERF_COUNT_HW_INSTRUCTIONS] = 0x1001, - [PERF_COUNT_HW_CACHE_REFERENCES] = 0x8c10, /* PM_LD_REF_L1 */ - [PERF_COUNT_HW_CACHE_MISSES] = 0x3c10, /* PM_LD_MISS_L1 */ - [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x330, /* PM_BR_ISSUED */ - [PERF_COUNT_HW_BRANCH_MISSES] = 0x331, /* PM_BR_MPRED_CR */ -}; - -#define C(x) PERF_COUNT_HW_CACHE_##x - -/* - * Table of generalized cache-related events. - * 0 means not supported, -1 means nonsensical, other values - * are event codes. 
- */ -static int power4_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { - [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */ - [C(OP_READ)] = { 0x8c10, 0x3c10 }, - [C(OP_WRITE)] = { 0x7c10, 0xc13 }, - [C(OP_PREFETCH)] = { 0xc35, 0 }, - }, - [C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */ - [C(OP_READ)] = { 0, 0 }, - [C(OP_WRITE)] = { -1, -1 }, - [C(OP_PREFETCH)] = { 0, 0 }, - }, - [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */ - [C(OP_READ)] = { 0, 0 }, - [C(OP_WRITE)] = { 0, 0 }, - [C(OP_PREFETCH)] = { 0xc34, 0 }, - }, - [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */ - [C(OP_READ)] = { 0, 0x904 }, - [C(OP_WRITE)] = { -1, -1 }, - [C(OP_PREFETCH)] = { -1, -1 }, - }, - [C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */ - [C(OP_READ)] = { 0, 0x900 }, - [C(OP_WRITE)] = { -1, -1 }, - [C(OP_PREFETCH)] = { -1, -1 }, - }, - [C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */ - [C(OP_READ)] = { 0x330, 0x331 }, - [C(OP_WRITE)] = { -1, -1 }, - [C(OP_PREFETCH)] = { -1, -1 }, - }, - [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */ - [C(OP_READ)] = { -1, -1 }, - [C(OP_WRITE)] = { -1, -1 }, - [C(OP_PREFETCH)] = { -1, -1 }, - }, -}; - -static struct power_pmu power4_pmu = { - .name = "POWER4/4+", - .n_counter = 8, - .max_alternatives = 5, - .add_fields = 0x0000001100005555ul, - .test_adder = 0x0011083300000000ul, - .compute_mmcr = p4_compute_mmcr, - .get_constraint = p4_get_constraint, - .get_alternatives = p4_get_alternatives, - .disable_pmc = p4_disable_pmc, - .n_generic = ARRAY_SIZE(p4_generic_events), - .generic_events = p4_generic_events, - .cache_events = &power4_cache_events, - .flags = PPMU_NO_SIPR | PPMU_NO_CONT_SAMPLING, -}; - -static int __init init_power4_pmu(void) -{ - if (!cur_cpu_spec->oprofile_cpu_type || - strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power4")) - return -ENODEV; - - return register_power_pmu(&power4_pmu); -} - -early_initcall(init_power4_pmu); diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 5a8b1bf1e819..fdae4584a016 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -61,7 +61,7 @@ choice help There are two families of 64 bit PowerPC chips supported. The most common ones are the desktop and server CPUs - (POWER4, POWER5, 970, POWER5+, POWER6, POWER7, POWER8 ...) + (POWER5, 970, POWER5+, POWER6, POWER7, POWER8, POWER9 ...) The other are the "embedded" processors compliant with the "Book 3E" variant of the architecture @@ -103,10 +103,6 @@ config CELL_CPU bool "Cell Broadband Engine" depends on PPC_BOOK3S_64 && !CPU_LITTLE_ENDIAN -config POWER4_CPU - bool "POWER4" - depends on PPC_BOOK3S_64 && !CPU_LITTLE_ENDIAN - config POWER5_CPU bool "POWER5" depends on PPC_BOOK3S_64 && !CPU_LITTLE_ENDIAN -- cgit v1.2.3 From a73657ea19aeb92f98438263d42f90188f115d58 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 21 Feb 2018 05:08:30 +1000 Subject: powerpc/64: Add GENERIC_CPU support for little endian Add GENERIC_CPU support for little-endian rather than using POWER8 specific selection for POWER9 and above. Restrict GENERIC_CPU to POWER8 and above on little endian. Signed-off-by: Nicholas Piggin [mpe: Duplicate GENERIC_CPU to avoid a kbuild warning about the prompt being redefined. Spell out that GENERIC means >= POWER4 for BE.] 
Signed-off-by: Michael Ellerman --- arch/powerpc/Makefile | 5 +++++ arch/powerpc/platforms/Kconfig.cputype | 8 ++++++-- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 373d7f5b7cde..d6fa14077763 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -146,8 +146,13 @@ CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 $(MULTIPLEWORD) CFLAGS-$(CONFIG_PPC32) += $(call cc-option,-mno-readonly-in-sdata) ifeq ($(CONFIG_PPC_BOOK3S_64),y) +ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y) +CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=power8 +CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power8) +else CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power7,$(call cc-option,-mtune=power5)) CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mcpu=power5,-mcpu=power4) +endif else CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=powerpc64 endif diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index fdae4584a016..e8eac27db592 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -87,7 +87,6 @@ endchoice choice prompt "CPU selection" depends on PPC64 - default POWER8_CPU if CPU_LITTLE_ENDIAN default GENERIC_CPU help This will create a kernel which is optimised for a particular CPU. @@ -96,9 +95,14 @@ choice If unsure, select Generic. config GENERIC_CPU - bool "Generic" + bool "Generic (POWER4 and above)" depends on !CPU_LITTLE_ENDIAN +config GENERIC_CPU + bool "Generic (POWER8 and above)" + depends on CPU_LITTLE_ENDIAN + select ARCH_HAS_FAST_MULTIPLIER + config CELL_CPU bool "Cell Broadband Engine" depends on PPC_BOOK3S_64 && !CPU_LITTLE_ENDIAN -- cgit v1.2.3 From db5ae1c155af7a76c9f538bf134066303089679d Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 21 Feb 2018 05:08:31 +1000 Subject: powerpc/64s: Refine feature sets for little endian builds This reduces vmlinux text size by 1kB and data by 1.5kB with a small build! Signed-off-by: Nicholas Piggin [mpe: Add the recently added CPU_FTRS_POWER9_DD2_2 to the little endian possible mask as noticed by Nick.] 
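The saving comes from compile-time constant folding: a feature bit outside CPU_FTRS_POSSIBLE always tests false, and a bit inside CPU_FTRS_ALWAYS always tests true, so the compiler can drop or unconditionally keep the guarded code. A rough sketch of the pattern, assuming a simplified helper (the real cpu_has_feature() additionally uses jump labels):

    static inline bool has_feature_sketch(unsigned long feature)
    {
            if (!(CPU_FTRS_POSSIBLE & feature))
                    return false;   /* provably clear: branch folds away */
            if (CPU_FTRS_ALWAYS & feature)
                    return true;    /* provably set: test folds to true */
            return cur_cpu_spec->cpu_features & feature;    /* runtime */
    }

Tightening both masks for little endian builds is what shrinks the text and data as noted above.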
Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/cputable.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index 7e22607e07b4..4e332f3531c5 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -483,6 +483,13 @@ static inline void cpu_feature_keys_init(void) { } #ifdef CONFIG_PPC_BOOK3E #define CPU_FTRS_POSSIBLE (CPU_FTRS_E6500 | CPU_FTRS_E5500) #else +#ifdef CONFIG_CPU_LITTLE_ENDIAN +#define CPU_FTRS_POSSIBLE \ + (CPU_FTRS_POWER7 | CPU_FTRS_POWER8E | CPU_FTRS_POWER8 | \ + CPU_FTRS_POWER8_DD1 | CPU_FTR_ALTIVEC_COMP | CPU_FTR_VSX_COMP | \ + CPU_FTRS_POWER9 | CPU_FTRS_POWER9_DD1 | CPU_FTRS_POWER9_DD2_1 | \ + CPU_FTRS_POWER9_DD2_2) +#else #define CPU_FTRS_POSSIBLE \ (CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | \ CPU_FTRS_POWER6 | CPU_FTRS_POWER7 | CPU_FTRS_POWER8E | \ @@ -490,6 +497,7 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTRS_PA6T | CPU_FTR_VSX_COMP | CPU_FTR_ALTIVEC_COMP | \ CPU_FTRS_POWER9 | CPU_FTRS_POWER9_DD1 | CPU_FTRS_POWER9_DD2_1 | \ CPU_FTRS_POWER9_DD2_2) +#endif /* CONFIG_CPU_LITTLE_ENDIAN */ #endif #else enum { @@ -536,12 +544,19 @@ enum { #ifdef CONFIG_PPC_BOOK3E #define CPU_FTRS_ALWAYS (CPU_FTRS_E6500 & CPU_FTRS_E5500) #else +#ifdef CONFIG_CPU_LITTLE_ENDIAN +#define CPU_FTRS_ALWAYS \ + (CPU_FTRS_POSSIBLE & ~CPU_FTR_HVMODE & CPU_FTRS_POWER7 & \ + CPU_FTRS_POWER8E & CPU_FTRS_POWER8 & CPU_FTRS_POWER8_DD1 & \ + CPU_FTRS_POWER9 & CPU_FTRS_POWER9_DD1 & CPU_FTRS_POWER9_DD2_1) +#else #define CPU_FTRS_ALWAYS \ (CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & \ CPU_FTRS_POWER6 & CPU_FTRS_POWER7 & CPU_FTRS_CELL & \ CPU_FTRS_PA6T & CPU_FTRS_POWER8 & CPU_FTRS_POWER8E & \ CPU_FTRS_POWER8_DD1 & ~CPU_FTR_HVMODE & CPU_FTRS_POSSIBLE & \ CPU_FTRS_POWER9 & CPU_FTRS_POWER9_DD1 & CPU_FTRS_POWER9_DD2_1) +#endif /* CONFIG_CPU_LITTLE_ENDIAN */ #endif #else enum { -- cgit v1.2.3 From 4b7e5532d2113d002aa54bfe581f35b3f1f72306 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 21 Feb 2018 05:08:32 +1000 Subject: powerpc/64s: Add POWER9 CPU type selection Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/Makefile | 3 ++- arch/powerpc/platforms/Kconfig.cputype | 5 +++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index d6fa14077763..95813df90801 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -148,7 +148,7 @@ CFLAGS-$(CONFIG_PPC32) += $(call cc-option,-mno-readonly-in-sdata) ifeq ($(CONFIG_PPC_BOOK3S_64),y) ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y) CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=power8 -CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power8) +CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power9,-mtune=power8) else CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power7,$(call cc-option,-mtune=power5)) CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mcpu=power5,-mcpu=power4) @@ -177,6 +177,7 @@ CFLAGS-$(CONFIG_POWER5_CPU) += $(call cc-option,-mcpu=power5) CFLAGS-$(CONFIG_POWER6_CPU) += $(call cc-option,-mcpu=power6) CFLAGS-$(CONFIG_POWER7_CPU) += $(call cc-option,-mcpu=power7) CFLAGS-$(CONFIG_POWER8_CPU) += $(call cc-option,-mcpu=power8) +CFLAGS-$(CONFIG_POWER9_CPU) += $(call cc-option,-mcpu=power9) # Altivec option not allowed with e500mc64 in GCC. 
ifeq ($(CONFIG_ALTIVEC),y) diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index e8eac27db592..67d3125d0610 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -125,6 +125,11 @@ config POWER8_CPU depends on PPC_BOOK3S_64 select ARCH_HAS_FAST_MULTIPLIER +config POWER9_CPU + bool "POWER9" + depends on PPC_BOOK3S_64 + select ARCH_HAS_FAST_MULTIPLIER + config E5500_CPU bool "Freescale e5500" depends on E500 -- cgit v1.2.3 From 0e524e761fc2157f1037e0f5d616cd39e468d89c Mon Sep 17 00:00:00 2001 From: Matt Evans Date: Mon, 26 Mar 2018 17:55:21 +0100 Subject: powerpc: Clear branch trap (MSR.BE) before delivering SIGTRAP When using SIG_DBG_BRANCH_TRACING, MSR.BE is left enabled in the user context when single_step_exception() prepares the SIGTRAP delivery. The resulting branch-trap-within-the-SIGTRAP-handler isn't healthy. Commit 2538c2d08f46141550a1e68819efa8fe31c6e3dc broke this, by replacing an MSR mask operation of ~(MSR_SE | MSR_BE) with a call to clear_single_step() which only clears MSR_SE. This patch adds a new helper, clear_br_trace(), which clears the debug trap before invoking the signal handler. This helper is a NOP for BookE as SIG_DBG_BRANCH_TRACING isn't supported on BookE. Signed-off-by: Matt Evans Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/traps.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 2c1a1d24f0ab..a2ef0c0e6c31 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -471,7 +471,7 @@ static inline int check_io_access(struct pt_regs *regs) /* single-step stuff */ #define single_stepping(regs) (current->thread.debug.dbcr0 & DBCR0_IC) #define clear_single_step(regs) (current->thread.debug.dbcr0 &= ~DBCR0_IC) - +#define clear_br_trace(regs) do {} while(0) #else /* On non-4xx, the reason for the machine check or program exception is in the MSR. */ @@ -484,6 +484,7 @@ static inline int check_io_access(struct pt_regs *regs) #define single_stepping(regs) ((regs)->msr & MSR_SE) #define clear_single_step(regs) ((regs)->msr &= ~MSR_SE) +#define clear_br_trace(regs) ((regs)->msr &= ~MSR_BE) #endif #if defined(CONFIG_E500) @@ -999,6 +1000,7 @@ void single_step_exception(struct pt_regs *regs) enum ctx_state prev_state = exception_enter(); clear_single_step(regs); + clear_br_trace(regs); if (kprobe_post_handler(regs)) return; -- cgit v1.2.3 From 19e68b2aec3c0a2bd770d3c358a296a1849f308a Mon Sep 17 00:00:00 2001 From: Mathieu Malaterre Date: Thu, 22 Mar 2018 22:03:18 +0100 Subject: powerpc/mm/radix: Fix always false comparison against MMU_NO_CONTEXT In commit 9690c1574268 ("powerpc/mm/radix: Fix always false comparison against MMU_NO_CONTEXT") an issue was discovered where `mm->context.id` was being truncated to an `unsigned int`, while the PID is actually an `unsigned long`. Update the earlier patch by fixing one remaining occurrence. 
Discovered during a compilation with W=1: arch/powerpc/mm/tlb-radix.c:702:19: error: comparison is always false due to limited range of data type [-Werror=type-limits] Signed-off-by: Mathieu Malaterre Signed-off-by: Michael Ellerman --- arch/powerpc/mm/tlb-radix.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c index a8b178dd2e82..2fba6170ab3f 100644 --- a/arch/powerpc/mm/tlb-radix.c +++ b/arch/powerpc/mm/tlb-radix.c @@ -706,7 +706,7 @@ void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm, #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE extern void radix_kvm_prefetch_workaround(struct mm_struct *mm) { - unsigned int pid = mm->context.id; + unsigned long pid = mm->context.id; if (unlikely(pid == MMU_NO_CONTEXT)) return; -- cgit v1.2.3 From a6201da34ff9366680e97392efd06abb9ff15014 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Mon, 2 Apr 2018 13:03:37 +0530 Subject: powerpc: Fix oops due to bad access of lppaca on bare metal Commit 8e0b634b1327 ("powerpc/64s: Do not allocate lppaca if we are not virtualized") removed allocation of lppaca on bare metal platforms. But with CONFIG_PPC_SPLPAR enabled, we still access the lppaca on bare metal in some code paths. Fix this by adding runtime checks for SPLPAR (shared processor LPAR). Fixes: 8e0b634b1327 ("powerpc/64s: Do not allocate lppaca if we are not virtualized") Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/lppaca.h | 3 +++ arch/powerpc/include/asm/spinlock.h | 2 ++ arch/powerpc/kernel/time.c | 3 +++ 3 files changed, 8 insertions(+) diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h index 65d589689f01..7c23ce8a5a4c 100644 --- a/arch/powerpc/include/asm/lppaca.h +++ b/arch/powerpc/include/asm/lppaca.h @@ -34,6 +34,7 @@ #include #include #include +#include /* * The lppaca is the "virtual processor area" registered with the hypervisor, */ @@ -114,6 +115,8 @@ struct lppaca { static inline bool lppaca_shared_proc(struct lppaca *l) { + if (!firmware_has_feature(FW_FEATURE_SPLPAR)) + return false; return !!(l->__old_status & LPPACA_OLD_SHARED_PROC); } diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index b9ebc3085fb7..72dc4ddc2972 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -56,6 +56,8 @@ #define vcpu_is_preempted vcpu_is_preempted static inline bool vcpu_is_preempted(int cpu) { + if (!firmware_has_feature(FW_FEATURE_SPLPAR)) + return false; return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1); } #endif diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index f7d96a68ecaa..360e71d455cc 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -266,6 +266,9 @@ void accumulate_stolen_time(void) static inline u64 calculate_stolen_time(u64 stop_tb) { + if (!firmware_has_feature(FW_FEATURE_SPLPAR)) + return 0; + if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx)) return scan_dispatch_log(stop_tb); -- cgit v1.2.3 From e303c08787c4cbe1ca07912817dff205ed802985 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Sun, 1 Apr 2018 15:50:35 +1000 Subject: KVM: PPC: Book3S HV: Fix ppc_breakpoint_available compile error MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit arch/powerpc/kvm/book3s_hv.c: In function ‘kvmppc_h_set_mode’: arch/powerpc/kvm/book3s_hv.c:745:8: error: implicit declaration of function
‘ppc_breakpoint_available’ if (!ppc_breakpoint_available()) ^~~~~~~~~~~~~~~~~~~~~~~~ Fixes: 398e712c007f ("KVM: PPC: Book3S HV: Return error from h_set_mode(SET_DAWR) on POWER9") Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/kvm/book3s_hv.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 1e1211c66b26..d3486ecfc671 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -49,6 +49,7 @@ #include #include #include +#include #include #include #include -- cgit v1.2.3 From 252988cbf037f3d446eea222afb46cc134d32c71 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Sun, 1 Apr 2018 15:50:36 +1000 Subject: powerpc: Don't write to DABR on >= Power8 if DAWR is disabled flush_thread() calls __set_breakpoint() via set_debug_reg_defaults() without checking ppc_breakpoint_available(). On Power8 or later CPUs which have the DAWR feature disabled that will cause a write to the DABR which is incorrect as those CPUs don't have a DABR. Fix it two ways, by checking ppc_breakpoint_available() in set_debug_reg_defaults(), and also by reworking __set_breakpoint() to only write to DABR on Power7 or earlier. Fixes: 9654153158d3 ("powerpc: Disable DAWR in the base POWER9 CPU features") Signed-off-by: Nicholas Piggin [mpe: Rework the logic in __set_breakpoint()] Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/process.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 24a591b4dbe9..1237f13fed51 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -718,7 +718,8 @@ static void set_debug_reg_defaults(struct thread_struct *thread) { thread->hw_brk.address = 0; thread->hw_brk.type = 0; - set_breakpoint(&thread->hw_brk); + if (ppc_breakpoint_available()) + set_breakpoint(&thread->hw_brk); } #endif /* !CONFIG_HAVE_HW_BREAKPOINT */ #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ @@ -815,9 +816,14 @@ void __set_breakpoint(struct arch_hw_breakpoint *brk) memcpy(this_cpu_ptr(¤t_brk), brk, sizeof(*brk)); if (cpu_has_feature(CPU_FTR_DAWR)) + // Power8 or later set_dawr(brk); - else + else if (!cpu_has_feature(CPU_FTR_ARCH_207S)) + // Power7 or earlier set_dabr(brk); + else + // Shouldn't happen due to higher level checks + WARN_ON_ONCE(1); } void set_breakpoint(struct arch_hw_breakpoint *brk) -- cgit v1.2.3 From e7347a86830f38dc3e40c8f7e28c04412b12a2e7 Mon Sep 17 00:00:00 2001 From: Mauricio Faria de Oliveira Date: Fri, 30 Mar 2018 14:28:24 -0300 Subject: powerpc: Move default security feature flags This moves the definition of the default security feature flags (i.e., enabled by default) closer to the security feature flags. This can be used to restore current flags to the default flags. 
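A sketch of the intended use, assuming a caller that re-queries firmware (the following patch adds exactly this to the pseries setup path):

    /* Reset to the baseline before re-reading the firmware's answer,
     * so stale bits from an earlier query are not carried over. */
    powerpc_security_features = SEC_FTR_DEFAULT;

    rc = plpar_get_cpu_characteristics(&result);
    if (rc == H_SUCCESS)
            init_cpu_char_feature_flags(&result);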
Signed-off-by: Mauricio Faria de Oliveira Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/security_features.h | 8 ++++++++ arch/powerpc/kernel/security.c | 7 +------ 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h index 400a9050e035..fa4d2e1cf772 100644 --- a/arch/powerpc/include/asm/security_features.h +++ b/arch/powerpc/include/asm/security_features.h @@ -63,4 +63,12 @@ static inline bool security_ftr_enabled(unsigned long feature) // Firmware configuration indicates user favours security over performance #define SEC_FTR_FAVOUR_SECURITY 0x0000000000000200ull + +// Features enabled by default +#define SEC_FTR_DEFAULT \ + (SEC_FTR_L1D_FLUSH_HV | \ + SEC_FTR_L1D_FLUSH_PR | \ + SEC_FTR_BNDS_CHK_SPEC_BAR | \ + SEC_FTR_FAVOUR_SECURITY) + #endif /* _ASM_POWERPC_SECURITY_FEATURES_H */ diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c index 2cee3dcd231b..bab5a27ea805 100644 --- a/arch/powerpc/kernel/security.c +++ b/arch/powerpc/kernel/security.c @@ -11,12 +11,7 @@ #include -unsigned long powerpc_security_features __read_mostly = \ - SEC_FTR_L1D_FLUSH_HV | \ - SEC_FTR_L1D_FLUSH_PR | \ - SEC_FTR_BNDS_CHK_SPEC_BAR | \ - SEC_FTR_FAVOUR_SECURITY; - +unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT; ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) { -- cgit v1.2.3 From 6232774f1599028a15418179d17f7df47ede770a Mon Sep 17 00:00:00 2001 From: Mauricio Faria de Oliveira Date: Fri, 30 Mar 2018 14:28:25 -0300 Subject: powerpc/pseries: Restore default security feature flags on setup After migration the security feature flags might have changed (e.g., destination system with unpatched firmware), but some flags are not set/cleared again in init_cpu_char_feature_flags() because it assumes the security flags to be the defaults. Additionally, if the H_GET_CPU_CHARACTERISTICS hypercall fails then init_cpu_char_feature_flags() does not run again, which potentially might leave the system in an insecure or sub-optimal configuration. So, just restore the security feature flags to the defaults assumed by init_cpu_char_feature_flags() so it can set/clear them correctly, and to ensure safe settings are in place in case the hypercall fails. Fixes: f636c14790ea ("powerpc/pseries: Set or clear security feature flags") Depends-on: 19887d6a28e2 ("powerpc: Move default security feature flags") Signed-off-by: Mauricio Faria de Oliveira Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/pseries/setup.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 98bca8d9c9e0..b55ad4286dc7 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -462,6 +462,10 @@ static void __init find_and_init_phbs(void) static void init_cpu_char_feature_flags(struct h_cpu_char_result *result) { + /* + * The features below are disabled by default, so we instead look to see + * if firmware has *enabled* them, and set them if so.
+ */ if (result->character & H_CPU_CHAR_SPEC_BAR_ORI31) security_ftr_set(SEC_FTR_SPEC_BAR_ORI31); @@ -501,6 +505,13 @@ void pseries_setup_rfi_flush(void) bool enable; long rc; + /* + * Set features to the defaults assumed by init_cpu_char_feature_flags() + * so it can set/clear again any features that might have changed after + * migration, and in case the hypercall fails and it is not even called. + */ + powerpc_security_features = SEC_FTR_DEFAULT; + rc = plpar_get_cpu_characteristics(&result); if (rc == H_SUCCESS) init_cpu_char_feature_flags(&result); -- cgit v1.2.3 From b6f534d1a642a9b6263fd52df30806171fbc331e Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 3 Apr 2018 21:24:59 +1000 Subject: selftests/powerpc: Fix copyloops build since Power4 assembler change The recent commit 15a3204d24a3 ("powerpc/64s: Set assembler machine type to POWER4") set the machine type in our ASFLAGS when building the kernel, and removed some ".machine power4" directives from various asm files. This broke the selftests build on old toolchains (that don't assume Power4), because we build the kernel source files into the selftests using different ASFLAGS. The fix is simply to add -mpower4 to the selftest ASFLAGS as well. Fixes: 15a3204d24a3 ("powerpc/64s: Set assembler machine type to POWER4") Signed-off-by: Michael Ellerman --- tools/testing/selftests/powerpc/copyloops/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/powerpc/copyloops/Makefile b/tools/testing/selftests/powerpc/copyloops/Makefile index ac4a52e19e59..eedce3366f64 100644 --- a/tools/testing/selftests/powerpc/copyloops/Makefile +++ b/tools/testing/selftests/powerpc/copyloops/Makefile @@ -5,8 +5,8 @@ CFLAGS += -I$(CURDIR) CFLAGS += -D SELFTEST CFLAGS += -maltivec -# Use our CFLAGS for the implicit .S rule -ASFLAGS = $(CFLAGS) +# Use our CFLAGS for the implicit .S rule & set the asm machine type +ASFLAGS = $(CFLAGS) -Wa,-mpower4 TEST_GEN_PROGS := copyuser_64 copyuser_power7 memcpy_64 memcpy_power7 EXTRA_SOURCES := validate.c ../harness.c -- cgit v1.2.3 From a2b5e056b75ee6ef0777817644a456b36b96ce38 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Sun, 1 Apr 2018 15:38:13 +1000 Subject: powerpc/powernv: Fix SMT4 forcing idle code The PSSCR value is not stored to PACA_REQ_PSSCR if the CPU does not have the XER[SO] bug. Fix this by storing up-front, outside the workaround code. The initial test is not required because it is a slow path. The workaround is made to depend on CONFIG_KVM_BOOK3S_HV_POSSIBLE, to match pnv_power9_force_smt4_catch() where it is used. Drop the comment on pnv_power9_force_smt4_catch() as it's no longer true. Fixes: 7672691a08c8 ("powerpc/powernv: Provide a way to force a core into SMT4 mode") Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/idle_book3s.S | 9 +++++---- arch/powerpc/platforms/powernv/idle.c | 4 ---- 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S index 903ec2a5c76c..81defb6a9b74 100644 --- a/arch/powerpc/kernel/idle_book3s.S +++ b/arch/powerpc/kernel/idle_book3s.S @@ -442,20 +442,20 @@ _GLOBAL(power9_offline_stop) * r3 contains desired PSSCR register value. 
*/ _GLOBAL(power9_idle_stop) -BEGIN_FTR_SECTION - lwz r5, PACA_DONT_STOP(r13) - cmpwi r5, 0 - bne 1f std r3, PACA_REQ_PSSCR(r13) +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE +BEGIN_FTR_SECTION sync lwz r5, PACA_DONT_STOP(r13) cmpwi r5, 0 bne 1f END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG) +#endif mtspr SPRN_PSSCR,r3 LOAD_REG_ADDR(r4,power_enter_stop) b pnv_powersave_common /* No return */ +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE 1: /* * We get here when TM / thread reconfiguration bug workaround @@ -465,6 +465,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG) li r3, 0 std r3, PACA_REQ_PSSCR(r13) blr /* return 0 for wakeup cause / SRR1 value */ +#endif /* * On waking up from stop 0,1,2 with ESL=1 on POWER9 DD1, diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c index 378fde1f85a8..1f12ab1e6030 100644 --- a/arch/powerpc/platforms/powernv/idle.c +++ b/arch/powerpc/platforms/powernv/idle.c @@ -397,10 +397,6 @@ void power9_idle(void) * all other threads not to stop, and sending a message to any * that are in a stop state. * Must be called with preemption disabled. - * - * DO NOT call this unless cpu_has_feature(CPU_FTR_P9_TM_XER_SO_BUG) is - * true; otherwise this function will hang the system, due to the - * optimization in power9_idle_stop. */ void pnv_power9_force_smt4_catch(void) { -- cgit v1.2.3 From 6bed3237624e3faad1592543952907cd01a42c83 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Sun, 1 Apr 2018 20:36:13 +1000 Subject: powerpc: use NMI IPI for smp_send_stop Use the NMI IPI rather than smp_call_function for smp_send_stop. Have stopped CPUs hard disable interrupts rather than just soft disable. This function is used in crash/panic/shutdown paths to bring other CPUs down as quickly and reliably as possible, and minimizing their potential to cause trouble. Avoiding the Linux smp_call_function infrastructure and (if supported) using true NMI IPIs makes this more robust. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/smp.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index cfc08b099c49..db88660bf6bd 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -565,7 +565,11 @@ void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *)) } #endif +#ifdef CONFIG_NMI_IPI +static void stop_this_cpu(struct pt_regs *regs) +#else static void stop_this_cpu(void *dummy) +#endif { /* Remove this CPU */ set_cpu_online(smp_processor_id(), false); @@ -577,7 +581,11 @@ static void stop_this_cpu(void *dummy) void smp_send_stop(void) { +#ifdef CONFIG_NMI_IPI + smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, stop_this_cpu, 1000000); +#else smp_call_function(stop_this_cpu, NULL, 0); +#endif } struct thread_info *current_set[NR_CPUS]; -- cgit v1.2.3 From 855bfe0de1a05a01f89975ea8ba9f5521fb0f567 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Sun, 1 Apr 2018 20:36:14 +1000 Subject: powerpc: hard disable irqs in smp_send_stop loop The hard lockup watchdog can fire under local_irq_disable on platforms with irq soft masking. 
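Roughly, the distinction relied upon here, as an illustrative sketch (not the exact masking implementation):

    local_irq_disable();    /* soft-disable: interrupts are recorded and
                             * replayed later; the CPU can still be
                             * interrupted at the hardware level */
    hard_irq_disable();     /* also clears MSR[EE], so nothing is
                             * delivered at all, which is what a
                             * terminal spin loop wants */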
Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/smp.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index db88660bf6bd..e16ec7b3b427 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -574,9 +574,10 @@ static void stop_this_cpu(void *dummy) /* Remove this CPU */ set_cpu_online(smp_processor_id(), false); - local_irq_disable(); + hard_irq_disable(); + spin_begin(); while (1) - ; + spin_cpu_relax(); } void smp_send_stop(void) -- cgit v1.2.3 From f2748bdfe157343eb8cf910a1d89ccf2fd20100b Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Sun, 1 Apr 2018 20:36:15 +1000 Subject: powerpc/powernv: Always stop secondaries before reboot/shutdown Currently powernv reboot and shutdown requests just leave secondaries to do their own things. This is undesirable because they can trigger any number of watchdogs while waiting for reboot, but also we don't know what else they might be doing -- they might be causing trouble, trampling memory, etc. The opal scheduled flash update code already ran into watchdog problems due to flashing taking a long time, and it was fixed with 2196c6f1ed ("powerpc/powernv: Return secondary CPUs to firmware before FW update"), which returns secondaries to opal. It's been found that regular reboots can take over 10 seconds, which can result in the hard lockup watchdog firing, reboot: Restarting system [ 360.038896709,5] OPAL: Reboot request... Watchdog CPU:0 Hard LOCKUP Watchdog CPU:44 detected Hard LOCKUP other CPUS:16 Watchdog CPU:16 Hard LOCKUP watchdog: BUG: soft lockup - CPU#16 stuck for 3s! [swapper/16:0] This patch removes the special case for flash update, and calls smp_send_stop in all cases before calling reboot/shutdown. smp_send_stop could return CPUs to OPAL, the main reason not to is that the request could come from a NMI that interrupts OPAL code, so re-entry to OPAL can cause a number of problems. Putting secondaries into simple spin loops improves the chances of a successful reboot. 
Signed-off-by: Nicholas Piggin Reviewed-by: Vasant Hegde Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/opal.h | 2 +- arch/powerpc/platforms/powernv/opal-flash.c | 28 +--------------------------- arch/powerpc/platforms/powernv/setup.c | 15 +++++---------- 3 files changed, 7 insertions(+), 38 deletions(-) diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h index dde60089d0d4..7159e1a6a61a 100644 --- a/arch/powerpc/include/asm/opal.h +++ b/arch/powerpc/include/asm/opal.h @@ -325,7 +325,7 @@ struct rtc_time; extern unsigned long opal_get_boot_time(void); extern void opal_nvram_init(void); extern void opal_flash_update_init(void); -extern void opal_flash_term_callback(void); +extern void opal_flash_update_print_message(void); extern int opal_elog_init(void); extern void opal_platform_dump_init(void); extern void opal_sys_param_init(void); diff --git a/arch/powerpc/platforms/powernv/opal-flash.c b/arch/powerpc/platforms/powernv/opal-flash.c index 1cb0b895a236..b37015101bf6 100644 --- a/arch/powerpc/platforms/powernv/opal-flash.c +++ b/arch/powerpc/platforms/powernv/opal-flash.c @@ -303,26 +303,9 @@ invalid_img: return rc; } -/* Return CPUs to OPAL before starting FW update */ -static void flash_return_cpu(void *info) -{ - int cpu = smp_processor_id(); - - if (!cpu_online(cpu)) - return; - - /* Disable IRQ */ - hard_irq_disable(); - - /* Return the CPU to OPAL */ - opal_return_cpu(); -} - /* This gets called just before system reboots */ -void opal_flash_term_callback(void) +void opal_flash_update_print_message(void) { - struct cpumask mask; - if (update_flash_data.status != FLASH_IMG_READY) return; @@ -333,15 +316,6 @@ void opal_flash_term_callback(void) /* Small delay to help getting the above message out */ msleep(500); - - /* Return secondary CPUs to firmware */ - cpumask_copy(&mask, cpu_online_mask); - cpumask_clear_cpu(smp_processor_id(), &mask); - if (!cpumask_empty(&mask)) - smp_call_function_many(&mask, - flash_return_cpu, NULL, false); - /* Hard disable interrupts */ - hard_irq_disable(); } /* diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index 5f963286232f..ef8c9ce53a61 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c @@ -201,17 +201,12 @@ static void pnv_prepare_going_down(void) */ opal_event_shutdown(); - /* Soft disable interrupts */ - local_irq_disable(); + /* Print flash update message if one is scheduled. */ + opal_flash_update_print_message(); - /* - * Return secondary CPUs to firwmare if a flash update - * is pending otherwise we will get all sort of error - * messages about CPU being stuck etc.. This will also - * have the side effect of hard disabling interrupts so - * past this point, the kernel is effectively dead. - */ - opal_flash_term_callback(); + smp_send_stop(); + + hard_irq_disable(); } static void __noreturn pnv_restart(char *cmd) -- cgit v1.2.3 From d0b791c02994486b21fc48949ba276c72a88938d Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 4 Apr 2018 09:01:08 +1000 Subject: powerpc/64s/idle: Consolidate power9_offline_stop()/power9_idle_stop() Commit 3d4fbffdd703 ("powerpc/64s/idle: POWER9 implement a separate idle stop function for hotplug") that added power9_offline_stop() was written before commit 7672691a08c8 ("powerpc/powernv: Provide a way to force a core into SMT4 mode"). When merging the former I failed to notice that it caused us to skip the force-SMT4 logic for offline CPUs. 
The result is that offlined CPUs will not correctly participate in the force-SMT4 logic, which presumably will result in badness (not tested). Reconcile the two commits by making power9_offline_stop() a pre-cursor to power9_idle_stop(), so that they share the force-SMT4 logic. This is based on an original commit from Nick, all breakage is my own. Fixes: 3d4fbffdd703 ("powerpc/64s/idle: POWER9 implement a separate idle stop function for hotplug") Signed-off-by: Michael Ellerman Signed-off-by: Nicholas Piggin --- arch/powerpc/kernel/idle_book3s.S | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S index 81defb6a9b74..e5cb3eedb564 100644 --- a/arch/powerpc/kernel/idle_book3s.S +++ b/arch/powerpc/kernel/idle_book3s.S @@ -422,25 +422,24 @@ ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66); \ /* * Entered with MSR[EE]=0 and no soft-masked interrupts pending. * r3 contains desired PSSCR register value. + * + * Offline (CPU unplug) case also must notify KVM that the CPU is + * idle. */ _GLOBAL(power9_offline_stop) - std r3, PACA_REQ_PSSCR(r13) - mtspr SPRN_PSSCR,r3 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE - /* Tell KVM we're entering idle */ + /* + * Tell KVM we're entering idle. + * This does not have to be done in real mode because the P9 MMU + * is independent per-thread. Some steppings share radix/hash mode + * between threads, but in that case KVM has a barrier sync in real + * mode before and after switching between radix and hash. + */ li r4,KVM_HWTHREAD_IN_IDLE - /* DO THIS IN REAL MODE! See comment above. */ stb r4,HSTATE_HWTHREAD_STATE(r13) #endif - LOAD_REG_ADDR(r4,power_enter_stop) - b pnv_powersave_common - /* No return */ + /* fall through */ - -/* - * Entered with MSR[EE]=0 and no soft-masked interrupts pending. - * r3 contains desired PSSCR register value. - */ _GLOBAL(power9_idle_stop) std r3, PACA_REQ_PSSCR(r13) #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE -- cgit v1.2.3 From b9ee31e100e73075431faaf7af2ee0fbfd6e624b Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Sun, 1 Apr 2018 15:48:55 +1000 Subject: powerpc/64s/idle: POWER9 ESL=0 stop avoid save/restore overhead When stop is executed with EC=ESL=0, it appears to execute like a normal instruction (resuming from NIP when woken by interrupt). So all the save/restore handling can be avoided completely. In particular NV GPRs do not have to be saved, and MSR does not have to be switched back to kernel MSR. So move the test for EC=ESL=0 sleep states out to power9_idle_stop, and return directly to the caller after stop in that case. This improves performance for ping-pong benchmark with the stop0_lite idle state by 2.54% for 2 threads in the same core, and 2.57% for different cores. Performance increase with HV_POSSIBLE defined will be improved further by avoiding the hwsync. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/idle_book3s.S | 45 ++++++++++++++------------------------- 1 file changed, 16 insertions(+), 29 deletions(-) diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S index e5cb3eedb564..bc4e391d031e 100644 --- a/arch/powerpc/kernel/idle_book3s.S +++ b/arch/powerpc/kernel/idle_book3s.S @@ -324,32 +324,8 @@ enter_winkle: /* * r3 - PSSCR value corresponding to the requested stop state. */ -power_enter_stop: -/* - * Check if we are executing the lite variant with ESL=EC=0 - */ - andis. 
r4,r3,PSSCR_EC_ESL_MASK_SHIFTED +power_enter_stop_esl: clrldi r3,r3,60 /* r3 = Bits[60:63] = Requested Level (RL) */ - bne .Lhandle_esl_ec_set - PPC_STOP - li r3,0 /* Since we didn't lose state, return 0 */ - std r3, PACA_REQ_PSSCR(r13) - - /* - * pnv_wakeup_noloss() expects r12 to contain the SRR1 value so - * it can determine if the wakeup reason is an HMI in - * CHECK_HMI_INTERRUPT. - * - * However, when we wakeup with ESL=0, SRR1 will not contain the wakeup - * reason, so there is no point setting r12 to SRR1. - * - * Further, we clear r12 here, so that we don't accidentally enter the - * HMI in pnv_wakeup_noloss() if the value of r12[42:45] == WAKE_HMI. - */ - li r12, 0 - b pnv_wakeup_noloss - -.Lhandle_esl_ec_set: BEGIN_FTR_SECTION /* * POWER9 DD2.0 or earlier can incorrectly set PMAO when waking up after @@ -441,21 +417,32 @@ _GLOBAL(power9_offline_stop) /* fall through */ _GLOBAL(power9_idle_stop) + mtspr SPRN_PSSCR,r3 + /* + * The ESL=EC=0 case does not wake up at 0x100, and it does not + * allow SMT mode switching, so it does not require PSSCR to be + * saved. + */ + andis. r4,r3,PSSCR_EC_ESL_MASK_SHIFTED + bne 1f + PPC_STOP + li r3,0 /* Since we didn't lose state, return 0 */ + blr +1: std r3, PACA_REQ_PSSCR(r13) #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE BEGIN_FTR_SECTION sync lwz r5, PACA_DONT_STOP(r13) cmpwi r5, 0 - bne 1f + bne 2f END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG) #endif - mtspr SPRN_PSSCR,r3 - LOAD_REG_ADDR(r4,power_enter_stop) + LOAD_REG_ADDR(r4,power_enter_stop_esl) b pnv_powersave_common /* No return */ #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE -1: +2: /* * We get here when TM / thread reconfiguration bug workaround * code wants to get the CPU into SMT4 mode, and therefore -- cgit v1.2.3 From f2ed480fa4d7f95b190279722690df8d4a396b3e Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Wed, 7 Mar 2018 19:06:45 +0530 Subject: powerpc/mm/keys: Update documentation and remove unnecessary check Adds more code comments. We also remove an unnecessary pkey check after we check for pkey error in this patch. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/mm/fault.c | 28 ++++++++++++---------------- arch/powerpc/mm/pkeys.c | 11 ++++------- 2 files changed, 16 insertions(+), 23 deletions(-) diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 866446cf2d9a..c01d627e687a 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -297,7 +297,12 @@ static bool access_error(bool is_write, bool is_exec, if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))) return true; - + /* + * We should ideally do the vma pkey access check here. But in the + * fault path, handle_mm_fault() also does the same check. To avoid + * these multiple checks, we skip it here and handle access error due + * to pkeys later. + */ return false; } @@ -518,25 +523,16 @@ good_area: #ifdef CONFIG_PPC_MEM_KEYS /* - * if the HPTE is not hashed, hardware will not detect - * a key fault. Lets check if we failed because of a - * software detected key fault. + * we skipped checking for access error due to key earlier. + * Check that using handle_mm_fault error return. */ if (unlikely(fault & VM_FAULT_SIGSEGV) && - !arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE, - is_exec, 0)) { - /* - * The PGD-PDT...PMD-PTE tree may not have been fully setup. - * Hence we cannot walk the tree to locate the PTE, to locate - * the key. Hence let's use vma_pkey() to get the key; instead - * of get_mm_addr_key(). 
- */ + !arch_vma_access_permitted(vma, is_write, is_exec, 0)) { + int pkey = vma_pkey(vma); - if (likely(pkey)) { - up_read(&mm->mmap_sem); - return bad_key_fault_exception(regs, address, pkey); - } + up_read(&mm->mmap_sem); + return bad_key_fault_exception(regs, address, pkey); } #endif /* CONFIG_PPC_MEM_KEYS */ diff --git a/arch/powerpc/mm/pkeys.c b/arch/powerpc/mm/pkeys.c index 328737b4d73c..0eafdf01edc7 100644 --- a/arch/powerpc/mm/pkeys.c +++ b/arch/powerpc/mm/pkeys.c @@ -119,18 +119,15 @@ int pkey_initialize(void) #else os_reserved = 0; #endif + initial_allocation_mask = ~0x0; + pkey_amr_uamor_mask = ~0x0ul; + pkey_iamr_mask = ~0x0ul; /* - * Bits are in LE format. NOTE: 1, 0 are reserved. + * key 0, 1 are reserved. * key 0 is the default key, which allows read/write/execute. * key 1 is recommended not to be used. PowerISA(3.0) page 1015, * programming note. */ - initial_allocation_mask = ~0x0; - - /* register mask is in BE format */ - pkey_amr_uamor_mask = ~0x0ul; - pkey_iamr_mask = ~0x0ul; - for (i = 2; i < (pkeys_total - os_reserved); i++) { initial_allocation_mask &= ~(0x1 << i); pkey_amr_uamor_mask &= ~(0x3ul << pkeyshift(i)); -- cgit v1.2.3 From fb4e5dbd44564077fa0267a59b45961a1fd181b6 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Thu, 22 Mar 2018 14:13:50 +0530 Subject: powerpc/mm/radix: Update pte fragment count from 16 to 256 on radix With split PTL (page table lock) config, we allocate the level 4 (leaf) page table using the pte fragment framework instead of a slab cache like other levels. This was done to enable us to have a split page table lock at level 4 of the page table. We use page->ptl backing all the level 4 pte fragments for the lock. Currently with Radix, we use only 16 fragments out of the allocated page. In radix each fragment is 256 bytes, which means we use only 4k out of the allocated 64K page, wasting 60k of the allocated memory. This was done earlier to keep it closer to hash. This patch updates the pte fragment count to 256, thereby using the full 64K page and reducing the memory usage. Performance tests show really low impact even with THP disabled. With THP disabled we will be contending even less on the level 4 ptl and hence the impact should be even lower. 256 threads: without patch (10 runs of ./ebizzy -m -n 1000 -s 131072 -S 100) median = 15678.5 stdev = 42.1209 with patch: median = 15354 stdev = 194.743 This is with THP disabled. With THP enabled the impact of the patch will be less. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/book3s/64/hash-64k.h | 10 ++++------ arch/powerpc/include/asm/book3s/64/radix-4k.h | 5 +++++ arch/powerpc/include/asm/book3s/64/radix-64k.h | 6 ++++++ arch/powerpc/mm/pgtable-radix.c | 8 ++------ 4 files changed, 17 insertions(+), 12 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h index eb393135d054..cc82745355b3 100644 --- a/arch/powerpc/include/asm/book3s/64/hash-64k.h +++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h @@ -37,16 +37,14 @@ /* PTE flags to conserve for HPTE identification */ #define _PAGE_HPTEFLAGS (H_PAGE_BUSY | H_PAGE_HASHPTE | H_PAGE_COMBO) -/* - * we support 16 fragments per PTE page of 64K size. - */ -#define H_PTE_FRAG_NR 16 /* * We use a 2K PTE page fragment and another 2K for storing * real_pte_t hash index + * 8 bytes per each pte entry and another 8 bytes for storing + * slot details.
*/ -#define H_PTE_FRAG_SIZE_SHIFT 12 -#define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT) +#define H_PTE_FRAG_SIZE_SHIFT (H_PTE_INDEX_SIZE + 3 + 1) +#define H_PTE_FRAG_NR (PAGE_SIZE >> H_PTE_FRAG_SIZE_SHIFT) #ifndef __ASSEMBLY__ #include diff --git a/arch/powerpc/include/asm/book3s/64/radix-4k.h b/arch/powerpc/include/asm/book3s/64/radix-4k.h index a61aa9cd63ec..ca366ec86310 100644 --- a/arch/powerpc/include/asm/book3s/64/radix-4k.h +++ b/arch/powerpc/include/asm/book3s/64/radix-4k.h @@ -9,5 +9,10 @@ #define RADIX_PMD_INDEX_SIZE 9 /* 1G huge page */ #define RADIX_PUD_INDEX_SIZE 9 #define RADIX_PGD_INDEX_SIZE 13 +/* + * One fragment per per page + */ +#define RADIX_PTE_FRAG_SIZE_SHIFT (RADIX_PTE_INDEX_SIZE + 3) +#define RADIX_PTE_FRAG_NR (PAGE_SIZE >> RADIX_PTE_FRAG_SIZE_SHIFT) #endif /* _ASM_POWERPC_PGTABLE_RADIX_4K_H */ diff --git a/arch/powerpc/include/asm/book3s/64/radix-64k.h b/arch/powerpc/include/asm/book3s/64/radix-64k.h index c7e71ba29555..830082496876 100644 --- a/arch/powerpc/include/asm/book3s/64/radix-64k.h +++ b/arch/powerpc/include/asm/book3s/64/radix-64k.h @@ -10,4 +10,10 @@ #define RADIX_PUD_INDEX_SIZE 9 #define RADIX_PGD_INDEX_SIZE 13 +/* + * We use a 256 byte PTE page fragment in radix + * 8 bytes per each PTE entry. + */ +#define RADIX_PTE_FRAG_SIZE_SHIFT (RADIX_PTE_INDEX_SIZE + 3) +#define RADIX_PTE_FRAG_NR (PAGE_SIZE >> RADIX_PTE_FRAG_SIZE_SHIFT) #endif /* _ASM_POWERPC_PGTABLE_RADIX_64K_H */ diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c index 7095384344b4..f1891e215e39 100644 --- a/arch/powerpc/mm/pgtable-radix.c +++ b/arch/powerpc/mm/pgtable-radix.c @@ -638,12 +638,8 @@ void __init radix__early_init_mmu(void) #ifdef CONFIG_PCI pci_io_base = ISA_IO_BASE; #endif - - /* - * For now radix also use the same frag size - */ - __pte_frag_nr = H_PTE_FRAG_NR; - __pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT; + __pte_frag_nr = RADIX_PTE_FRAG_NR; + __pte_frag_size_shift = RADIX_PTE_FRAG_SIZE_SHIFT; if (!firmware_has_feature(FW_FEATURE_LPAR)) { radix_init_native(); -- cgit v1.2.3 From 6fa504835d6969144b2bd3699684dd447c789ba2 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Fri, 30 Mar 2018 17:34:08 +0530 Subject: powerpc/mm/hugetlb: initialize the pagetable cache correctly for hugetlb With 64k page size, we have hugetlb pte entries at the pmd and pud level for book3s64. We don't need to create a separate page table cache for that. With 4k we need to make sure hugepd page table cache for 16M is placed at PUD level and 16G at the PGD level. Simplify all these by not using HUGEPD_PD_SHIFT which is confusing for book3s64. Without this patch, with 64k page size we create pagetable caches with shift value 10 and 7 which are not used at all. 
Fixes: 419df06eea5b ("powerpc: Reduce the PTE_INDEX_SIZE") Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/mm/hugetlbpage.c | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index f4153f21d214..99cf86096970 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -122,9 +122,6 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx) #define HUGEPD_PGD_SHIFT PGDIR_SHIFT #define HUGEPD_PUD_SHIFT PUD_SHIFT -#else -#define HUGEPD_PGD_SHIFT PUD_SHIFT -#define HUGEPD_PUD_SHIFT PMD_SHIFT #endif /* @@ -670,15 +667,26 @@ static int __init hugetlbpage_init(void) shift = mmu_psize_to_shift(psize); - if (add_huge_page_size(1ULL << shift) < 0) +#ifdef CONFIG_PPC_BOOK3S_64 + if (shift > PGDIR_SHIFT) continue; - + else if (shift > PUD_SHIFT) + pdshift = PGDIR_SHIFT; + else if (shift > PMD_SHIFT) + pdshift = PUD_SHIFT; + else + pdshift = PMD_SHIFT; +#else if (shift < HUGEPD_PUD_SHIFT) pdshift = PMD_SHIFT; else if (shift < HUGEPD_PGD_SHIFT) pdshift = PUD_SHIFT; else pdshift = PGDIR_SHIFT; +#endif + + if (add_huge_page_size(1ULL << shift) < 0) + continue; /* * if we have pdshift and shift value same, we don't * use pgt cache for hugepd. -- cgit v1.2.3 From cec4e9b28ffbcb9ce04b9e33946c505e5ad7a295 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Fri, 30 Mar 2018 17:39:01 +0530 Subject: powerpc/mm/radix: Parse disable_radix commandline correctly. The kernel parameter disable_radix takes different options disable_radix=yes|no|1|0 or just disable_radix. When using the latter format we get the below error. `Malformed early option 'disable_radix'` Fixes: 1fd6c0220710 ("powerpc/mm: Add a CONFIG option to choose if radix is used by default") Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/mm/init_64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 63470b06c502..51ce091914f9 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -366,7 +366,7 @@ static int __init parse_disable_radix(char *p) { bool val; - if (strlen(p) == 0) + if (!p) val = true; else if (kstrtobool(p, &val)) return -EINVAL; -- cgit v1.2.3 From 7a22d6321c3da61a3778f84caa5b3a398ed019d1 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Fri, 30 Mar 2018 17:39:02 +0530 Subject: powerpc/mm/radix: Update command line parsing for disable_radix The kernel parameter disable_radix takes different options disable_radix=yes|no|1|0 or just disable_radix. prom_init parsing does not support these options.
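The accepted grammar, as a small standalone sketch (strtobool_sketch() is a stand-in written here for illustration; the kernel parser uses kstrtobool()):

    #include <stdbool.h>

    /* stand-in for kstrtobool(): 'y'/'Y'/'1' mean true */
    static bool strtobool_sketch(const char *s)
    {
            return s[0] == 'y' || s[0] == 'Y' || s[0] == '1';
    }

    /* opt points just past "disable_radix" on the command line */
    static bool parse_disable_radix_sketch(const char *opt)
    {
            if (!opt || !*opt)      /* bare "disable_radix" */
                    return true;
            if (*opt == '=')        /* "disable_radix=yes|no|1|0" */
                    return strtobool_sketch(opt + 1);
            return true;
    }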
Fixes: 1fd6c0220710 ("powerpc/mm: Add a CONFIG option to choose if radix is used by default") Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/prom_init.c | 16 +++++++++++++--- arch/powerpc/kernel/prom_init_check.sh | 2 +- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index e181fdea3da9..f9d6befb55a6 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -171,7 +171,7 @@ static unsigned long __initdata prom_tce_alloc_start; static unsigned long __initdata prom_tce_alloc_end; #endif -static bool __initdata prom_radix_disable; +static bool prom_radix_disable __initdata = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT); struct platform_support { bool hash_mmu; @@ -641,9 +641,19 @@ static void __init early_cmdline_parse(void) opt = strstr(prom_cmd_line, "disable_radix"); if (opt) { - prom_debug("Radix disabled from cmdline\n"); - prom_radix_disable = true; + opt += 13; + if (*opt && *opt == '=') { + bool val; + + if (kstrtobool(++opt, &val)) + prom_radix_disable = false; + else + prom_radix_disable = val; + } else + prom_radix_disable = true; } + if (prom_radix_disable) + prom_debug("Radix disabled from cmdline\n"); } #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh index 12640f7e726b..acb6b9226352 100644 --- a/arch/powerpc/kernel/prom_init_check.sh +++ b/arch/powerpc/kernel/prom_init_check.sh @@ -19,7 +19,7 @@ WHITELIST="add_reloc_offset __bss_start __bss_stop copy_and_flush _end enter_prom memcpy memset reloc_offset __secondary_hold __secondary_hold_acknowledge __secondary_hold_spinloop __start -strcmp strcpy strlcpy strlen strncmp strstr logo_linux_clut224 +strcmp strcpy strlcpy strlen strncmp strstr kstrtobool logo_linux_clut224 reloc_got2 kernstart_addr memstart_addr linux_banner _stext __prom_init_toc_start __prom_init_toc_end btext_setup_display TOC." -- cgit v1.2.3 From 5d6a03ebc88f82b0b0adcec24eabb9eb2fcd97db Mon Sep 17 00:00:00 2001 From: "Naveen N. Rao" Date: Wed, 4 Apr 2018 16:11:16 +0530 Subject: powerpc/hw_breakpoint: Only disable hw breakpoint if cpu supports it We get the below warning if we try to use kexec on P9: kexec_core: Starting new kernel WARNING: CPU: 0 PID: 1223 at arch/powerpc/kernel/process.c:826 __set_breakpoint+0xb4/0x140 [snip] NIP __set_breakpoint+0xb4/0x140 LR kexec_prepare_cpus_wait+0x58/0x150 Call Trace: 0xc0000000ee70fb20 (unreliable) 0xc0000000ee70fb20 default_machine_kexec+0x234/0x2c0 machine_kexec+0x84/0x90 kernel_kexec+0xd8/0xe0 SyS_reboot+0x214/0x2c0 system_call+0x58/0x6c This happens since we are trying to clear hw breakpoint on POWER9, though we don't have CPU_FTR_DAWR enabled. Guard __set_breakpoint() within hw_breakpoint_disable() with ppc_breakpoint_available() to address this. Fixes: 9654153158d3 ("powerpc: Disable DAWR in the base POWER9 CPU features") Signed-off-by: Naveen N. 
Rao Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/hw_breakpoint.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h index 90c708e5e7c4..8e7b09703ca4 100644 --- a/arch/powerpc/include/asm/hw_breakpoint.h +++ b/arch/powerpc/include/asm/hw_breakpoint.h @@ -80,7 +80,8 @@ static inline void hw_breakpoint_disable(void) brk.address = 0; brk.type = 0; brk.len = 0; - __set_breakpoint(&brk); + if (ppc_breakpoint_available()) + __set_breakpoint(&brk); } extern void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs); int hw_breakpoint_handler(struct die_args *args); -- cgit v1.2.3 From ad7b4e8022b9864c075fe71e1328b1d25cad82f6 Mon Sep 17 00:00:00 2001 From: Frederic Barrat Date: Tue, 3 Apr 2018 15:54:02 +0200 Subject: cxl: Fix possible deadlock when processing page faults from cxllib cxllib_handle_fault() is called by an external driver when it needs to have the host resolve page faults for a buffer. The buffer can cover several pages and VMAs. The function iterates over all the pages used by the buffer, based on the page size of the VMA. To ensure some stability while processing the faults, the thread T1 grabs the mm->mmap_sem semaphore with read access (R1). However, when processing a page fault for a single page, one of the underlying functions, copro_handle_mm_fault(), also grabs the same semaphore with read access (R2). So the thread T1 takes the semaphore twice. If another thread T2 tries to access the semaphore in write mode W1 (say, because it wants to allocate memory and calls 'brk'), then that thread T2 will have to wait because there's a reader (R1). If the thread T1 is processing a new page at that time, it won't get an automatic grant at R2, because there's now a writer thread waiting (T2). And we have a deadlock. The timeline is: 1. thread T1 owns the semaphore with read access R1 2. thread T2 requests write access W1 and waits 3. thread T1 requests read access R2 and waits The fix is for the thread T1 to release the semaphore R1 once it got the information it needs from the current VMA. The address space/VMAs could evolve while T1 iterates over the full buffer, but in the unlikely case where T1 misses a page, the external driver will raise a new page fault when retrying the memory access. 
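Reduced to a sketch, the pattern that deadlocks (rwsems queue readers arriving after a waiting writer, which is what makes the recursive read fatal):

    down_read(&mm->mmap_sem);        /* T1: R1 granted */
        /* T2: down_write(&mm->mmap_sem) queues W1 and waits */
    down_read(&mm->mmap_sem);        /* T1: R2 queues behind W1,
                                      * while R1 is never released */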
Fixes: 3ced8d730063 ("cxl: Export library to support IBM XSL") Cc: stable@vger.kernel.org # 4.13+ Signed-off-by: Frederic Barrat Signed-off-by: Michael Ellerman --- drivers/misc/cxl/cxllib.c | 85 ++++++++++++++++++++++++++++++----------------- 1 file changed, 55 insertions(+), 30 deletions(-) diff --git a/drivers/misc/cxl/cxllib.c b/drivers/misc/cxl/cxllib.c index bea1eb004b49..0bc7c31cf739 100644 --- a/drivers/misc/cxl/cxllib.c +++ b/drivers/misc/cxl/cxllib.c @@ -208,49 +208,74 @@ int cxllib_get_PE_attributes(struct task_struct *task, } EXPORT_SYMBOL_GPL(cxllib_get_PE_attributes); -int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags) +static int get_vma_info(struct mm_struct *mm, u64 addr, + u64 *vma_start, u64 *vma_end, + unsigned long *page_size) { - int rc; - u64 dar; struct vm_area_struct *vma = NULL; - unsigned long page_size; - - if (mm == NULL) - return -EFAULT; + int rc = 0; down_read(&mm->mmap_sem); vma = find_vma(mm, addr); if (!vma) { - pr_err("Can't find vma for addr %016llx\n", addr); rc = -EFAULT; goto out; } - /* get the size of the pages allocated */ - page_size = vma_kernel_pagesize(vma); - - for (dar = (addr & ~(page_size - 1)); dar < (addr + size); dar += page_size) { - if (dar < vma->vm_start || dar >= vma->vm_end) { - vma = find_vma(mm, addr); - if (!vma) { - pr_err("Can't find vma for addr %016llx\n", addr); - rc = -EFAULT; - goto out; - } - /* get the size of the pages allocated */ - page_size = vma_kernel_pagesize(vma); + *page_size = vma_kernel_pagesize(vma); + *vma_start = vma->vm_start; + *vma_end = vma->vm_end; +out: + up_read(&mm->mmap_sem); + return rc; +} + +int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags) +{ + int rc; + u64 dar, vma_start, vma_end; + unsigned long page_size; + + if (mm == NULL) + return -EFAULT; + + /* + * The buffer we have to process can extend over several pages + * and may also cover several VMAs. + * We iterate over all the pages. The page size could vary + * between VMAs. + */ + rc = get_vma_info(mm, addr, &vma_start, &vma_end, &page_size); + if (rc) + return rc; + + for (dar = (addr & ~(page_size - 1)); dar < (addr + size); + dar += page_size) { + if (dar < vma_start || dar >= vma_end) { + /* + * We don't hold the mm->mmap_sem semaphore + * while iterating, since the semaphore is + * required by one of the lower-level page + * fault processing functions and it could + * create a deadlock. + * + * It means the VMAs can be altered between 2 + * loop iterations and we could theoretically + * miss a page (however unlikely). But that's + * not really a problem, as the driver will + * retry access, get another page fault on the + * missing page and call us again. + */ + rc = get_vma_info(mm, dar, &vma_start, &vma_end, + &page_size); + if (rc) + return rc; } rc = cxl_handle_mm_fault(mm, flags, dar); - if (rc) { - pr_err("cxl_handle_mm_fault failed %d", rc); - rc = -EFAULT; - goto out; - } + if (rc) + return -EFAULT; } - rc = 0; -out: - up_read(&mm->mmap_sem); - return rc; + return 0; } EXPORT_SYMBOL_GPL(cxllib_handle_fault); -- cgit v1.2.3 From ef237039c5c86b6587ee1fd88857a24fa5978474 Mon Sep 17 00:00:00 2001 From: Logan Gunthorpe Date: Tue, 27 Mar 2018 17:08:28 -0600 Subject: powerpc: io.h: move iomap.h include so that it can use readq/writeq defs Subsequent patches in this series make use of the readq and writeq definitions in iomap.h. However, as it stands, they get missed on the powerpc platform because the include comes before those definitions. This patch moves the include down to fix this.
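The mechanism is plain preprocessor ordering: a header can only react to macros that are already defined at the point it is included. A contrived sketch, with hypothetical file and macro contents rather than the real iomap.h logic:

/* mini_iomap.h: provides ioread64() only when readq is already visible */
#ifdef readq
#define ioread64(addr) readq(addr)
#endif

/* io.h before the fix: the include comes first, so ioread64 never appears */
#include "mini_iomap.h"
#define readq(addr) (*(volatile unsigned long long *)(addr))

/* io.h after the fix: readq is defined before the include, ioread64 appears */
#define readq(addr) (*(volatile unsigned long long *)(addr))
#include "mini_iomap.h"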
Signed-off-by: Logan Gunthorpe Reviewed-by: Andy Shevchenko Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/io.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h index 422f99cf9924..af074923d598 100644 --- a/arch/powerpc/include/asm/io.h +++ b/arch/powerpc/include/asm/io.h @@ -33,8 +33,6 @@ extern struct pci_dev *isa_bridge_pcidev; #include #include -#include - #ifdef CONFIG_PPC64 #include #endif @@ -663,6 +661,8 @@ static inline void name at \ #define writel_relaxed(v, addr) writel(v, addr) #define writeq_relaxed(v, addr) writeq(v, addr) +#include + #ifdef CONFIG_PPC32 #define mmiowb() #else -- cgit v1.2.3 From 07c3d9eaa4be3a000e2b9386cf678ee78f7f7abb Mon Sep 17 00:00:00 2001 From: Logan Gunthorpe Date: Tue, 27 Mar 2018 17:08:29 -0600 Subject: powerpc: iomap.c: introduce io{read|write}64_{lo_hi|hi_lo} MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit These functions will be introduced into the generic iomap.c so they can deal with PIO accesses in hi-lo/lo-hi variants. Thus, the powerpc version of iomap.c will need to provide the same functions even though, in this arch, they are identical to the regular io{read|write}64 functions. Signed-off-by: Logan Gunthorpe Tested-by: Horia Geantă Reviewed-by: Andy Shevchenko Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/iomap.c | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/arch/powerpc/kernel/iomap.c b/arch/powerpc/kernel/iomap.c index aab456ed2a00..5ac84efc6ede 100644 --- a/arch/powerpc/kernel/iomap.c +++ b/arch/powerpc/kernel/iomap.c @@ -45,12 +45,32 @@ u64 ioread64(void __iomem *addr) { return readq(addr); } +u64 ioread64_lo_hi(void __iomem *addr) +{ + return readq(addr); +} +u64 ioread64_hi_lo(void __iomem *addr) +{ + return readq(addr); +} u64 ioread64be(void __iomem *addr) { return readq_be(addr); } +u64 ioread64be_lo_hi(void __iomem *addr) +{ + return readq_be(addr); +} +u64 ioread64be_hi_lo(void __iomem *addr) +{ + return readq_be(addr); +} EXPORT_SYMBOL(ioread64); +EXPORT_SYMBOL(ioread64_lo_hi); +EXPORT_SYMBOL(ioread64_hi_lo); EXPORT_SYMBOL(ioread64be); +EXPORT_SYMBOL(ioread64be_lo_hi); +EXPORT_SYMBOL(ioread64be_hi_lo); #endif /* __powerpc64__ */ void iowrite8(u8 val, void __iomem *addr) @@ -83,12 +103,32 @@ void iowrite64(u64 val, void __iomem *addr) { writeq(val, addr); } +void iowrite64_lo_hi(u64 val, void __iomem *addr) +{ + writeq(val, addr); +} +void iowrite64_hi_lo(u64 val, void __iomem *addr) +{ + writeq(val, addr); +} void iowrite64be(u64 val, void __iomem *addr) { writeq_be(val, addr); } +void iowrite64be_lo_hi(u64 val, void __iomem *addr) +{ + writeq_be(val, addr); +} +void iowrite64be_hi_lo(u64 val, void __iomem *addr) +{ + writeq_be(val, addr); +} EXPORT_SYMBOL(iowrite64); +EXPORT_SYMBOL(iowrite64_lo_hi); +EXPORT_SYMBOL(iowrite64_hi_lo); EXPORT_SYMBOL(iowrite64be); +EXPORT_SYMBOL(iowrite64be_lo_hi); +EXPORT_SYMBOL(iowrite64be_hi_lo); #endif /* __powerpc64__ */ /* -- cgit v1.2.3 From a67cc594dffd29cfe33fbee40932c9d04197ab2f Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 5 Apr 2018 16:03:39 +1000 Subject: Revert "powerpc/64s/idle: POWER9 ESL=0 stop avoid save/restore overhead" As described in that commit: When stop is executed with EC=ESL=0, it appears to execute like a normal instruction (resuming from NIP when woken by interrupt). So all the save/restore handling can be avoided completely. 
This is true, except in the case of an NMI interrupt (sreset or machine check) interrupting the instruction. In that case, the NMI gets an "interrupt occurred while the processor was in power-saving mode" indication. The power-save wakeup code uses that bit to decide whether to restore some registers (e.g., LR). Because these are no longer saved, this causes random register corruption. It may be possible to restore this optimisation by detecting the case of no register loss on the wakeup side, and avoid restoring in that case, but that's not a minor fix because the wakeup code itself uses some registers that would be live (e.g., LR). Fixes: b9ee31e100e7 ("powerpc/64s/idle: POWER9 ESL=0 stop avoid save/restore overhead") Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/idle_book3s.S | 45 +++++++++++++++++++++++++-------------- 1 file changed, 29 insertions(+), 16 deletions(-) diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S index bc4e391d031e..e5cb3eedb564 100644 --- a/arch/powerpc/kernel/idle_book3s.S +++ b/arch/powerpc/kernel/idle_book3s.S @@ -324,8 +324,32 @@ enter_winkle: /* * r3 - PSSCR value corresponding to the requested stop state. */ -power_enter_stop_esl: +power_enter_stop: +/* + * Check if we are executing the lite variant with ESL=EC=0 + */ + andis. r4,r3,PSSCR_EC_ESL_MASK_SHIFTED clrldi r3,r3,60 /* r3 = Bits[60:63] = Requested Level (RL) */ + bne .Lhandle_esl_ec_set + PPC_STOP + li r3,0 /* Since we didn't lose state, return 0 */ + std r3, PACA_REQ_PSSCR(r13) + + /* + * pnv_wakeup_noloss() expects r12 to contain the SRR1 value so + * it can determine if the wakeup reason is an HMI in + * CHECK_HMI_INTERRUPT. + * + * However, when we wakeup with ESL=0, SRR1 will not contain the wakeup + * reason, so there is no point setting r12 to SRR1. + * + * Further, we clear r12 here, so that we don't accidentally enter the + * HMI in pnv_wakeup_noloss() if the value of r12[42:45] == WAKE_HMI. + */ + li r12, 0 + b pnv_wakeup_noloss + +.Lhandle_esl_ec_set: BEGIN_FTR_SECTION /* * POWER9 DD2.0 or earlier can incorrectly set PMAO when waking up after @@ -417,32 +441,21 @@ _GLOBAL(power9_offline_stop) /* fall through */ _GLOBAL(power9_idle_stop) - mtspr SPRN_PSSCR,r3 - /* - * The ESL=EC=0 case does not wake up at 0x100, and it does not - * allow SMT mode switching, so it does not require PSSCR to be - * saved. - */ - andis. r4,r3,PSSCR_EC_ESL_MASK_SHIFTED - bne 1f - PPC_STOP - li r3,0 /* Since we didn't lose state, return 0 */ - blr -1: std r3, PACA_REQ_PSSCR(r13) #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE BEGIN_FTR_SECTION sync lwz r5, PACA_DONT_STOP(r13) cmpwi r5, 0 - bne 2f + bne 1f END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG) #endif - LOAD_REG_ADDR(r4,power_enter_stop_esl) + mtspr SPRN_PSSCR,r3 + LOAD_REG_ADDR(r4,power_enter_stop) b pnv_powersave_common /* No return */ #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE -2: +1: /* * We get here when TM / thread reconfiguration bug workaround * code wants to get the CPU into SMT4 mode, and therefore -- cgit v1.2.3 From a57ac411832384eb93df4bfed2bf644c4089720e Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 5 Apr 2018 15:50:49 +1000 Subject: powerpc/64s: Fix dt_cpu_ftrs to have restore_cpu clear unwanted LPCR bits Presently the dt_cpu_ftrs restore_cpu will only add bits to the LPCR for secondaries, but some bits must be removed (e.g., UPRT for HPT). Not clearing these bits on secondaries causes checkstops when booting with disable_radix. 
restore_cpu cannot just set LPCR, because it is also called by the idle wakeup code, which relies on opal_slw_set_reg to restore the value of LPCR, at least on P8, which does not save LPCR to the stack in the idle code. Fix this by including a mask of bits to clear from LPCR as well, which is used by restore_cpu. This is a little messy now, but it's a minimal fix that can be backported. Longer term, the idle SPR save/restore code can be reworked to completely avoid calls to restore_cpu; then restore_cpu would be able to unconditionally set LPCR to match the boot processor environment. Fixes: 5a61ef74f269f ("powerpc/64s: Support new device tree binding for discovering CPU features") Cc: stable@vger.kernel.org # v4.12+ Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/dt_cpu_ftrs.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index 11a3a4fed3fb..ed7605d8fd2d 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -83,6 +83,7 @@ static int hv_mode; static struct { u64 lpcr; + u64 lpcr_clear; u64 hfscr; u64 fscr; } system_registers; @@ -91,6 +92,8 @@ static void (*init_pmu_registers)(void); static void __restore_cpu_cpufeatures(void) { + u64 lpcr; + /* * LPCR is restored by the power on engine already. It can be changed * after early init e.g., by radix enable, and we have no unified API @@ -103,8 +106,10 @@ static void __restore_cpu_cpufeatures(void) * The best we can do to accommodate secondary boot and idle restore * for now is "or" LPCR with existing. */ - - mtspr(SPRN_LPCR, system_registers.lpcr | mfspr(SPRN_LPCR)); + lpcr = mfspr(SPRN_LPCR); + lpcr |= system_registers.lpcr; + lpcr &= ~system_registers.lpcr_clear; + mtspr(SPRN_LPCR, lpcr); if (hv_mode) { mtspr(SPRN_LPID, 0); mtspr(SPRN_HFSCR, system_registers.hfscr); @@ -324,8 +329,9 @@ static int __init feat_enable_mmu_hash_v3(struct dt_cpu_feature *f) { u64 lpcr; + system_registers.lpcr_clear |= (LPCR_ISL | LPCR_UPRT | LPCR_HR); lpcr = mfspr(SPRN_LPCR); - lpcr &= ~LPCR_ISL; + lpcr &= ~(LPCR_ISL | LPCR_UPRT | LPCR_HR); mtspr(SPRN_LPCR, lpcr); cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE; -- cgit v1.2.3 From c130153e453cba0f37ad10fa18a1aa9c9a598a59 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 5 Apr 2018 15:57:54 +1000 Subject: powerpc/64s: Fix pkey support in dt_cpu_ftrs, add CPU_FTR_PKEY bit The pkey code added a CPU_FTR_PKEY bit, but did not add it to the dt_cpu_ftrs feature set. Although the capability is supported by all processors in the base dt_cpu_ftrs set for 64s, it's a significant and sufficiently well defined feature to make it optional. So add it as a quirk for now, which can be versioned out and then controlled by the firmware (once dt_cpu_ftrs gains versioning support).
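The consequence of the missing bit is silent: code that consumes protection keys is gated on the feature bit, so a dt_cpu_ftrs boot without this quirk would simply come up with the pkey subsystem disabled. A standalone model of that gating follows; the bit value and the init function are made up for illustration, and only cpu_has_feature() mirrors the name of the real kernel helper:

#include <stdbool.h>
#include <stdio.h>

/* toy model of the kernel CPU feature mask; the bit value is made up */
#define CPU_FTR_PKEY (1UL << 7)

static unsigned long cpu_features;	/* filled by cputable or dt_cpu_ftrs */

static bool cpu_has_feature(unsigned long feature)
{
	return (cpu_features & feature) != 0;
}

static void pkey_init(void)
{
	/* a dt_cpu_ftrs boot before this fix never sets CPU_FTR_PKEY,
	   so initialisation bails out and keys stay disabled */
	if (!cpu_has_feature(CPU_FTR_PKEY)) {
		puts("protection keys disabled");
		return;
	}
	puts("protection keys enabled");
}

int main(void)
{
	pkey_init();			/* prints "disabled": bit never set */
	cpu_features |= CPU_FTR_PKEY;	/* what the quirk does */
	pkey_init();			/* prints "enabled" */
	return 0;
}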
Fixes: cf43d3b26452 ("powerpc: Enable pkey subsystem") Cc: stable@vger.kernel.org # v4.16+ Cc: Ram Pai Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/dt_cpu_ftrs.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index ed7605d8fd2d..e88fbb1fdb8f 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -729,6 +729,13 @@ static __init void cpufeatures_cpu_quirks(void) cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR); cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_BUG; } + + /* + * PKEY was not in the initial base or feature node + * specification, but it should become optional in the next + * cpu feature version sequence. + */ + cur_cpu_spec->cpu_features |= CPU_FTR_PKEY; } static void __init cpufeatures_setup_finished(void) -- cgit v1.2.3 From 3a52f6014d367a6c8d91b1df8a658903345908e3 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 5 Apr 2018 15:57:55 +1000 Subject: powerpc/64s: Fix POWER9 DD2.2 and above in cputable features The CPU_FTR_POWER9_DD2_1 flag is intended to be set for DD2.1 and above (which is what the dt_cpu_ftrs setup does). Fix cputable for DD2.2 to match. This came about due to patches b5af4f279323 ("powerpc: Add CPU feature bits for TM bug workarounds on POWER9 v2.2"), and 9e9626ed3a4a ("powerpc/64s: Fix POWER9 DD2.2 and above in DT CPU features") being in-flight at once. The latter patch fixed dt_cpu_ftrs like this one does. The former changed cputable to match dt_cpu_ftrs. Fixes: b5af4f279323 ("powerpc: Add CPU feature bits for TM bug workarounds on POWER9 v2.2") Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/cputable.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index 4e332f3531c5..931dda8be87c 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -467,7 +467,8 @@ static inline void cpu_feature_keys_init(void) { } (~CPU_FTR_SAO)) #define CPU_FTRS_POWER9_DD2_0 CPU_FTRS_POWER9 #define CPU_FTRS_POWER9_DD2_1 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1) -#define CPU_FTRS_POWER9_DD2_2 (CPU_FTRS_POWER9 | CPU_FTR_P9_TM_HV_ASSIST | \ +#define CPU_FTRS_POWER9_DD2_2 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1 | \ + CPU_FTR_P9_TM_HV_ASSIST | \ CPU_FTR_P9_TM_XER_SO_BUG) #define CPU_FTRS_CELL (CPU_FTR_LWSYNC | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ -- cgit v1.2.3 From c1b25a17d24925b0961c319cfc3fd7e1dc778914 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 5 Apr 2018 16:10:00 +1000 Subject: powerpc/64s/idle: Fix restore of AMOR on POWER9 after deep sleep POWER8 restores AMOR when waking from deep sleep, but POWER9 does not, because it does not go through the subcore restore. Have POWER9 restore it in core restore. Fixes: ee97b6b99f42 ("powerpc/mm/radix: Setup AMOR in HV mode to allow key 0") Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/idle_book3s.S | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S index e5cb3eedb564..79d005445c6c 100644 --- a/arch/powerpc/kernel/idle_book3s.S +++ b/arch/powerpc/kernel/idle_book3s.S @@ -870,6 +870,8 @@ BEGIN_FTR_SECTION mtspr SPRN_PTCR,r4 ld r4,_RPR(r1) mtspr SPRN_RPR,r4 + ld r4,_AMOR(r1) + mtspr SPRN_AMOR,r4 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) ld r4,_TSCR(r1) -- cgit v1.2.3
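On the AMOR fix above, the failure mode deserves one more sentence: AMOR masks which AMR bits a privileged (non-hypervisor) write may change, so if deep sleep quietly resets it, later AMR updates for protection keys are dropped. Below is a standalone sketch of that masking rule; the rule follows the ISA's description of AMOR, but the register values are purely illustrative:

#include <stdio.h>

typedef unsigned long long u64;

static u64 amr;		/* per-key permission bits */
static u64 amor;	/* which AMR bits a privileged write may change */

/* privileged (non-HV) writes to AMR are filtered through AMOR:
   only bits set in AMOR take the new value */
static void mtspr_amr(u64 val)
{
	amr = (val & amor) | (amr & ~amor);
}

int main(void)
{
	amor = ~0ULL;			/* boot-time setup: allow all keys */
	mtspr_amr(0x3ULL << 62);
	printf("AMR after boot write:       0x%016llx\n", amr);

	amor = 0;			/* stale value after deep sleep, pre-fix */
	mtspr_amr(0);			/* the update is silently dropped */
	printf("AMR after post-sleep write: 0x%016llx\n", amr);
	return 0;
}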