From 5fa5b16be5b319184378870467352eab6700b1de Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Mon, 4 Dec 2017 07:49:10 +0530 Subject: powerpc/mm/hugetlb: Use pte_access_permitted for hugetlb access check No functional change in this patch. This updates gup_hugepte to use the helper. This will help later when we add memory keys. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/mm/hugetlbpage.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'arch/powerpc/mm/hugetlbpage.c') diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index a9b9083c5e49..c7e5afe5e118 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -855,9 +855,7 @@ int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr, pte = READ_ONCE(*ptep); - if (!pte_present(pte) || !pte_read(pte)) - return 0; - if (write && !pte_write(pte)) + if (!pte_access_permitted(pte, write)) return 0; /* hugepages are never "special" */ -- cgit v1.2.3 From de0f93873937e999fadaba011d368bc042af37b2 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Fri, 12 Jan 2018 13:45:31 +0100 Subject: powerpc/8xx: Remove _PAGE_USER and handle user access at PMD level As the Linux kernel separates the KERNEL and USER address spaces, there is no need to flag USER access at the page level. Today, the 8xx TLB handlers already handle user access in the L1 entry through Access Protection Groups, so it is natural to move user access handling to the PMD level, now that _PAGE_NA allows PAGE_NONE protection to be handled without _PAGE_USER. At the same time, as we free up one bit in the PTE, we can use it to include SPS (the page size flag) in the PTE and avoid handling it at every TLB miss, hence removing the special handling based on the compiled page size. For _PAGE_EXEC, we rework it to use the PP PTE bits, avoiding the copy of the _PAGE_EXEC bit into the L1 entry. Unfortunately we are not able to put it at the correct location as it conflicts with the NA/RO/RW bits for data entries. The upper bits of the APG in the L1 entry overlap with the PMD base address. In order to avoid having to filter that out, we set up all groups so that the upper bits can have any value. 
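To make the new APG scheme concrete, here is a minimal standalone C sketch (an editorial illustration, not part of the patch) that derives the 0x44444444 value used for MI_APG_INIT and MD_APG_INIT below: sixteen 2-bit groups, where the lsb of the group number is _PMD_USER, kernel groups get 01 (access per page definition) and user groups get 00 (supervisor rights per page definition):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t apg_init = 0;

		for (int g = 0; g < 16; g++) {
			/* lsb of the group number is _PMD_USER:
			 * kernel (lsb 0) => 01, user (lsb 1) => 00 */
			uint32_t grp = (g & 1) ? 0x0 : 0x1;

			/* group g occupies bits (31 - 2g) .. (30 - 2g) */
			apg_init |= grp << (30 - 2 * g);
		}
		printf("0x%08x\n", (unsigned)apg_init);	/* prints 0x44444444 */
		return 0;
	}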
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/hugetlb.h | 3 +- arch/powerpc/include/asm/mmu-8xx.h | 34 ++++++++++---------- arch/powerpc/include/asm/nohash/32/pgalloc.h | 3 +- arch/powerpc/include/asm/nohash/32/pte-8xx.h | 14 ++++++--- arch/powerpc/include/asm/nohash/pgtable.h | 2 +- arch/powerpc/include/asm/pte-common.h | 6 ++++ arch/powerpc/kernel/head_8xx.S | 46 ++++++---------------------- arch/powerpc/mm/hugetlbpage.c | 2 +- 8 files changed, 48 insertions(+), 62 deletions(-) (limited to 'arch/powerpc/mm/hugetlbpage.c') diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h index 14c9d44f355b..1a4847f67ea8 100644 --- a/arch/powerpc/include/asm/hugetlb.h +++ b/arch/powerpc/include/asm/hugetlb.h @@ -47,8 +47,7 @@ static inline pte_t *hugepd_page(hugepd_t hpd) { BUG_ON(!hugepd_ok(hpd)); #ifdef CONFIG_PPC_8xx - return (pte_t *)__va(hpd_val(hpd) & - ~(_PMD_PAGE_MASK | _PMD_PRESENT_MASK)); + return (pte_t *)__va(hpd_val(hpd) & ~HUGEPD_SHIFT_MASK); #else return (pte_t *)((hpd_val(hpd) & ~HUGEPD_SHIFT_MASK) | PD_HUGE); diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h index 5bb3dbede41a..a568ff5c5fb8 100644 --- a/arch/powerpc/include/asm/mmu-8xx.h +++ b/arch/powerpc/include/asm/mmu-8xx.h @@ -29,17 +29,17 @@ #define MI_Kp 0x40000000 /* Should always be set */ /* - * All pages' PP exec bits are set to 000, which means Execute for Supervisor - * and no Execute for User. - * Then we use the APG to say whether accesses are according to Page rules, - * "all Supervisor" rules (Exec for all) and "all User" rules (Exec for noone) - * Therefore, we define 4 APG groups. msb is _PAGE_EXEC, lsb is _PAGE_USER - * 0 (00) => Not User, no exec => 11 (all accesses performed as user) - * 1 (01) => User but no exec => 11 (all accesses performed as user) - * 2 (10) => Not User, exec => 01 (rights according to page definition) - * 3 (11) => User, exec => 00 (all accesses performed as supervisor) - */ -#define MI_APG_INIT 0xf4ffffff + * All pages' PP data bits are set to either 001 or 011 by copying _PAGE_EXEC + * into bit 21 in the ITLBmiss handler (bit 21 is the middle bit), which means + * respectively NA for All or X for Supervisor and no access for User. + * Then we use the APG to say whether accesses are according to Page rules or + * "all Supervisor" rules (Access to all) + * Therefore, we define 2 APG groups. lsb is _PMD_USER + * 0 => No user => 01 (all accesses performed according to page definition) + * 1 => User => 00 (all accesses performed as supervisor iaw page definition) + * We define all 16 groups so that all other bits of APG can take any value + */ +#define MI_APG_INIT 0x44444444 /* The effective page number register. When read, contains the information * about the last instruction TLB miss. When MI_RPN is written, bits in @@ -102,17 +102,17 @@ #define MD_Kp 0x40000000 /* Should always be set */ /* - * All pages' PP data bits are set to either 000 or 011, which means + * All pages' PP data bits are set to either 000 or 011 or 001, which means * respectively RW for Supervisor and no access for User, or RO for - * Supervisor and no access for user. + * Supervisor and no access for user and NA for ALL. * Then we use the APG to say whether accesses are according to Page rules or * "all Supervisor" rules (Access to all) - * Therefore, we define 2 APG groups. lsb is _PAGE_USER + * Therefore, we define 2 APG groups. 
lsb is _PMD_USER * 0 => No user => 01 (all accesses performed according to page definition) - * 1 => User => 00 (all accesses performed as supervisor - * according to page definition) + * 1 => User => 00 (all accesses performed as supervisor iaw page definition) + * We define all 16 groups so that all other bits of APG can take any value */ -#define MD_APG_INIT 0x4fffffff +#define MD_APG_INIT 0x44444444 /* The effective page number register. When read, contains the information * about the last instruction TLB miss. When MD_RPN is written, bits in diff --git a/arch/powerpc/include/asm/nohash/32/pgalloc.h b/arch/powerpc/include/asm/nohash/32/pgalloc.h index d072139ff2e5..29d37bd1f3b3 100644 --- a/arch/powerpc/include/asm/nohash/32/pgalloc.h +++ b/arch/powerpc/include/asm/nohash/32/pgalloc.h @@ -61,7 +61,8 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t pte_page) { - *pmdp = __pmd((page_to_pfn(pte_page) << PAGE_SHIFT) | _PMD_PRESENT); + *pmdp = __pmd((page_to_pfn(pte_page) << PAGE_SHIFT) | _PMD_USER | + _PMD_PRESENT); } #define pmd_pgtable(pmd) pmd_page(pmd) diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h index 7c7040f015e2..f04cb46ae8a1 100644 --- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h +++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h @@ -32,27 +32,33 @@ #define _PAGE_PRESENT 0x0001 /* Page is valid */ #define _PAGE_NO_CACHE 0x0002 /* I: cache inhibit */ #define _PAGE_PRIVILEGED 0x0004 /* No ASID (context) compare */ -#define _PAGE_SPECIAL 0x0008 /* SW entry, forced to 0 by the TLB miss */ +#define _PAGE_HUGE 0x0008 /* SPS: Small Page Size (1 if 16k, 512k or 8M)*/ #define _PAGE_DIRTY 0x0100 /* C: page changed */ /* These 4 software bits must be masked out when the L2 entry is loaded * into the TLB. 
*/ #define _PAGE_GUARDED 0x0010 /* Copied to L1 G entry in DTLB */ -#define _PAGE_USER 0x0020 /* Copied to L1 APG lsb */ -#define _PAGE_EXEC 0x0040 /* Copied to L1 APG */ +#define _PAGE_SPECIAL 0x0020 /* SW entry */ +#define _PAGE_EXEC 0x0040 /* Copied to PP (bit 21) in ITLB */ #define _PAGE_ACCESSED 0x0080 /* software: page referenced */ +#define _PAGE_NA 0x0200 /* Supervisor NA, User no access */ #define _PAGE_RO 0x0600 /* Supervisor RO, User no access */ #define _PMD_PRESENT 0x0001 -#define _PMD_BAD 0x0ff0 +#define _PMD_BAD 0x0fd0 #define _PMD_PAGE_MASK 0x000c #define _PMD_PAGE_8M 0x000c #define _PMD_PAGE_512K 0x0004 +#define _PMD_USER 0x0020 /* APG 1 */ /* Until my rework is finished, 8xx still needs atomic PTE updates */ #define PTE_ATOMIC_UPDATES 1 +#ifdef CONFIG_PPC_16K_PAGES +#define _PAGE_PSIZE _PAGE_HUGE +#endif + #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_NOHASH_32_PTE_8xx_H */ diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h index e0864e683cf9..c56de1e8026f 100644 --- a/arch/powerpc/include/asm/nohash/pgtable.h +++ b/arch/powerpc/include/asm/nohash/pgtable.h @@ -126,7 +126,7 @@ static inline pte_t pte_mkspecial(pte_t pte) static inline pte_t pte_mkhuge(pte_t pte) { - return pte; + return __pte(pte_val(pte) | _PAGE_HUGE); } static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h index 426a902816c5..c4a72c7a8c83 100644 --- a/arch/powerpc/include/asm/pte-common.h +++ b/arch/powerpc/include/asm/pte-common.h @@ -53,6 +53,9 @@ #ifndef _PAGE_NA #define _PAGE_NA 0 #endif +#ifndef _PAGE_HUGE +#define _PAGE_HUGE 0 +#endif #ifndef _PMD_PRESENT_MASK #define _PMD_PRESENT_MASK _PMD_PRESENT @@ -61,6 +64,9 @@ #define _PMD_SIZE 0 #define PMD_PAGE_SIZE(pmd) bad_call_to_PMD_PAGE_SIZE() #endif +#ifndef _PMD_USER +#define _PMD_USER 0 +#endif #ifndef _PAGE_KERNEL_RO #define _PAGE_KERNEL_RO (_PAGE_PRIVILEGED | _PAGE_RO) #endif diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 642680389b7e..c3b831bb8bad 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -52,11 +52,7 @@ * Value for the bits that have fixed value in RPN entries. * Also used for tagging DAR for DTLBerror. */ -#ifdef CONFIG_PPC_16K_PAGES -#define RPN_PATTERN (0x00f0 | MD_SPS16K) -#else #define RPN_PATTERN 0x00f0 -#endif #define PAGE_SHIFT_512K 19 #define PAGE_SHIFT_8M 23 @@ -358,31 +354,23 @@ _ENTRY(ITLBMiss_cmp) #if defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE) mtcr r12 #endif - /* Insert the APG into the TWC from the Linux PTE. */ - rlwimi r11, r10, 0, 25, 26 /* Load the MI_TWC with the attributes for this "segment." */ mtspr SPRN_MI_TWC, r11 /* Set segment attributes */ -#if defined (CONFIG_HUGETLB_PAGE) && defined (CONFIG_PPC_4K_PAGES) - rlwimi r10, r11, 1, MI_SPS16K -#endif #ifdef CONFIG_SWAP rlwinm r11, r10, 32-5, _PAGE_PRESENT and r11, r11, r10 rlwimi r10, r11, 0, _PAGE_PRESENT #endif - li r11, RPN_PATTERN + li r11, RPN_PATTERN | 0x200 /* The Linux PTE won't go exactly into the MMU TLB. - * Software indicator bits 20-23 and 28 must be clear. - * Software indicator bits 24, 25, 26, and 27 must be + * Software indicator bits 20 and 23 must be clear. + * Software indicator bits 22, 24, 25, 26, and 27 must be * set. All other Linux PTE bits control the behavior * of the MMU. 
*/ -#if defined (CONFIG_HUGETLB_PAGE) && defined (CONFIG_PPC_4K_PAGES) - rlwimi r10, r11, 0, 0x0ff0 /* Set 24-27, clear 20-23 */ -#else - rlwimi r10, r11, 0, 0x0ff8 /* Set 24-27, clear 20-23,28 */ -#endif + rlwimi r11, r10, 4, 0x0400 /* Copy _PAGE_EXEC into bit 21 */ + rlwimi r10, r11, 0, 0x0ff0 /* Set 22, 24-27, clear 20,23 */ mtspr SPRN_MI_RPN, r10 /* Update TLB entry */ /* Restore registers */ @@ -419,7 +407,6 @@ _ENTRY(itlb_miss_perf) rlwinm r10, r11, 0, ~HUGEPD_SHIFT_MASK #endif lwz r10, 0(r10) /* Get the pte */ - rlwinm r11, r11, 0, 0xf b 4b 20: /* 512k pages */ @@ -428,7 +415,6 @@ _ENTRY(itlb_miss_perf) /* Add level 2 base */ rlwimi r10, r11, 0, 0, 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1) - 1 lwz r10, 0(r10) /* Get the pte */ - rlwinm r11, r11, 0, 0xf b 4b #endif @@ -479,20 +465,15 @@ _ENTRY(DTLBMiss_jmp) 4: mtcr r12 - /* Insert the Guarded flag and APG into the TWC from the Linux PTE. - * It is bit 26-27 of both the Linux PTE and the TWC (at least + /* Insert the Guarded flag into the TWC from the Linux PTE. + * It is bit 27 of both the Linux PTE and the TWC (at least * I got that right :-). It will be better when we can put * this into the Linux pgd/pmd and load it in the operation * above. */ - rlwimi r11, r10, 0, 26, 27 + rlwimi r11, r10, 0, _PAGE_GUARDED mtspr SPRN_MD_TWC, r11 - /* In 4k pages mode, SPS (bit 28) in RPN must match PS[1] (bit 29) - * In 16k pages mode, SPS is always 1 */ -#if defined (CONFIG_HUGETLB_PAGE) && defined (CONFIG_PPC_4K_PAGES) - rlwimi r10, r11, 1, MD_SPS16K -#endif /* Both _PAGE_ACCESSED and _PAGE_PRESENT has to be set. * We also need to know if the insn is a load/store, so: * Clear _PAGE_PRESENT and load that which will @@ -508,17 +489,12 @@ _ENTRY(DTLBMiss_jmp) rlwimi r10, r11, 0, _PAGE_PRESENT #endif /* The Linux PTE won't go exactly into the MMU TLB. - * Software indicator bits 22 and 28 must be clear. * Software indicator bits 24, 25, 26, and 27 must be * set. All other Linux PTE bits control the behavior * of the MMU. 
*/ li r11, RPN_PATTERN -#if defined (CONFIG_HUGETLB_PAGE) && defined (CONFIG_PPC_4K_PAGES) rlwimi r10, r11, 0, 24, 27 /* Set 24-27 */ -#else - rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */ -#endif mtspr SPRN_MD_RPN, r10 /* Update TLB entry */ /* Restore registers */ @@ -552,7 +528,6 @@ _ENTRY(dtlb_miss_perf) rlwinm r10, r11, 0, ~HUGEPD_SHIFT_MASK #endif lwz r10, 0(r10) /* Get the pte */ - rlwinm r11, r11, 0, 0xf b 4b 20: /* 512k pages */ @@ -561,7 +536,6 @@ /* Add level 2 base */ rlwimi r10, r11, 0, 0, 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1) - 1 lwz r10, 0(r10) /* Get the pte */ - rlwinm r11, r11, 0, 0xf b 4b #endif @@ -712,7 +686,7 @@ _ENTRY(dtlb_miss_exit_3) ITLBMissLinear: mtcr r12 /* Set 8M byte page and mark it valid */ - li r11, MI_PS8MEG | MI_SVALID | _PAGE_EXEC + li r11, MI_PS8MEG | MI_SVALID mtspr SPRN_MI_TWC, r11 rlwinm r10, r10, 0, 0x0f800000 /* 8xx supports max 256Mb RAM */ ori r10, r10, 0xf0 | MI_SPS16K | _PAGE_PRIVILEGED | _PAGE_DIRTY | \ @@ -994,7 +968,7 @@ initial_mmu: lis r8, KERNELBASE@h /* Create vaddr for TLB */ ori r8, r8, MI_EVALID /* Mark it valid */ mtspr SPRN_MI_EPN, r8 - li r8, MI_PS8MEG | (2 << 5) /* Set 8M byte page, APG 2 */ + li r8, MI_PS8MEG /* Set 8M byte page */ ori r8, r8, MI_SVALID /* Make it valid */ mtspr SPRN_MI_TWC, r8 li r8, MI_BOOTINIT /* Create RPN for address 0 */ diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index c7e5afe5e118..f5d1008f8574 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -96,7 +96,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, *hpdp = __hugepd(__pa(new) | (shift_to_mmu_psize(pshift) << 2)); #elif defined(CONFIG_PPC_8xx) - *hpdp = __hugepd(__pa(new) | + *hpdp = __hugepd(__pa(new) | _PMD_USER | (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M : _PMD_PAGE_512K) | _PMD_PRESENT); #else -- cgit v1.2.3 From c2e480ba822718190e58849b79a76db13c3dac18 Mon Sep 17 00:00:00 2001 From: Madhavan Srinivasan Date: Wed, 20 Dec 2017 09:25:42 +0530 Subject: powerpc/64: Add #defines for paca->soft_enabled flags Two #defines, IRQS_ENABLED and IRQS_DISABLED, are added to be used when updating paca->soft_enabled. Replace the hardcoded values used when updating paca->soft_enabled with the IRQS_(EN|DIS)ABLED #defines. No logic change. 
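The named values simply replace the literal 1/0 writes to paca->soft_enabled; a brief C sketch of the equivalence (the standalone helper is illustrative only, not a kernel API):

	/* Values added by this patch in asm/hw_irq.h */
	#define IRQS_ENABLED	1	/* soft interrupts enabled */
	#define IRQS_DISABLED	0	/* soft interrupts disabled */

	/* Illustrative helper mirroring arch_irqs_disabled_flags():
	 * before this patch: "return flags == 0;" -- same test, named value. */
	static inline int soft_irqs_disabled(unsigned long soft_enabled)
	{
		return soft_enabled == IRQS_DISABLED;
	}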
Reviewed-by: Nicholas Piggin Signed-off-by: Madhavan Srinivasan Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/exception-64s.h | 2 +- arch/powerpc/include/asm/hw_irq.h | 21 ++++++++++++++------- arch/powerpc/include/asm/irqflags.h | 6 +++--- arch/powerpc/include/asm/kvm_ppc.h | 2 +- arch/powerpc/kernel/entry_64.S | 16 ++++++++-------- arch/powerpc/kernel/exceptions-64e.S | 6 +++--- arch/powerpc/kernel/head_64.S | 5 +++-- arch/powerpc/kernel/idle_book3e.S | 3 ++- arch/powerpc/kernel/idle_power4.S | 3 ++- arch/powerpc/kernel/irq.c | 9 +++++---- arch/powerpc/kernel/process.c | 3 ++- arch/powerpc/kernel/setup_64.c | 3 +++ arch/powerpc/kernel/time.c | 2 +- arch/powerpc/mm/hugetlbpage.c | 2 +- arch/powerpc/perf/core-book3s.c | 2 +- 15 files changed, 50 insertions(+), 35 deletions(-) (limited to 'arch/powerpc/mm/hugetlbpage.c') diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index b27205297e1d..69c4e3d35e02 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h @@ -499,7 +499,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943) #define __SOFTEN_TEST(h, vec) \ lbz r10,PACASOFTIRQEN(r13); \ - cmpwi r10,0; \ + cmpwi r10,IRQS_DISABLED; \ li r10,SOFTEN_VALUE_##vec; \ beq masked_##h##interrupt diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index 3818fa0164f0..7c2717dfd89a 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h @@ -28,6 +28,12 @@ #define PACA_IRQ_EE_EDGE 0x10 /* BookE only */ #define PACA_IRQ_HMI 0x20 +/* + * flags for paca->soft_enabled + */ +#define IRQS_ENABLED 1 +#define IRQS_DISABLED 0 + #endif /* CONFIG_PPC64 */ #ifndef __ASSEMBLY__ @@ -60,9 +66,10 @@ static inline unsigned long arch_local_irq_disable(void) unsigned long flags, zero; asm volatile( - "li %1,0; lbz %0,%2(13); stb %1,%2(13)" + "li %1,%3; lbz %0,%2(13); stb %1,%2(13)" : "=r" (flags), "=&r" (zero) - : "i" (offsetof(struct paca_struct, soft_enabled)) + : "i" (offsetof(struct paca_struct, soft_enabled)),\ + "i" (IRQS_DISABLED) : "memory"); return flags; @@ -72,7 +79,7 @@ extern void arch_local_irq_restore(unsigned long); static inline void arch_local_irq_enable(void) { - arch_local_irq_restore(1); + arch_local_irq_restore(IRQS_ENABLED); } static inline unsigned long arch_local_irq_save(void) @@ -82,7 +89,7 @@ static inline unsigned long arch_local_irq_save(void) static inline bool arch_irqs_disabled_flags(unsigned long flags) { - return flags == 0; + return flags == IRQS_DISABLED; } static inline bool arch_irqs_disabled(void) @@ -102,9 +109,9 @@ static inline bool arch_irqs_disabled(void) u8 _was_enabled; \ __hard_irq_disable(); \ _was_enabled = local_paca->soft_enabled; \ - local_paca->soft_enabled = 0; \ + local_paca->soft_enabled = IRQS_DISABLED;\ local_paca->irq_happened |= PACA_IRQ_HARD_DIS; \ - if (_was_enabled) \ + if (_was_enabled == IRQS_ENABLED) \ trace_hardirqs_off(); \ } while(0) @@ -127,7 +134,7 @@ static inline void may_hard_irq_enable(void) static inline bool arch_irq_disabled_regs(struct pt_regs *regs) { - return !regs->softe; + return (regs->softe == IRQS_DISABLED); } extern bool prep_irq_for_idle(void); diff --git a/arch/powerpc/include/asm/irqflags.h b/arch/powerpc/include/asm/irqflags.h index 1aeb5f13b8c4..8d9fdc84828d 100644 --- a/arch/powerpc/include/asm/irqflags.h +++ b/arch/powerpc/include/asm/irqflags.h @@ -49,8 +49,8 @@ #define RECONCILE_IRQ_STATE(__rA, __rB) \ lbz __rA,PACASOFTIRQEN(r13); \ lbz __rB,PACAIRQHAPPENED(r13); \ - cmpwi 
cr0,__rA,0; \ - li __rA,0; \ + cmpwi cr0,__rA,IRQS_DISABLED;\ + li __rA,IRQS_DISABLED; \ ori __rB,__rB,PACA_IRQ_HARD_DIS; \ stb __rB,PACAIRQHAPPENED(r13); \ beq 44f; \ @@ -64,7 +64,7 @@ #define RECONCILE_IRQ_STATE(__rA, __rB) \ lbz __rA,PACAIRQHAPPENED(r13); \ - li __rB,0; \ + li __rB,IRQS_DISABLED; \ ori __rA,__rA,PACA_IRQ_HARD_DIS; \ stb __rB,PACASOFTIRQEN(r13); \ stb __rA,PACAIRQHAPPENED(r13) diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 941c2a3f231b..68484d77a3cb 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -873,7 +873,7 @@ static inline void kvmppc_fix_ee_before_entry(void) /* Only need to enable IRQs by hard enabling them after this */ local_paca->irq_happened = 0; - local_paca->soft_enabled = 1; + local_paca->soft_enabled = IRQS_ENABLED; #endif } diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 371c05fe250a..f8b32224dc11 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -130,7 +130,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) */ #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG) lbz r10,PACASOFTIRQEN(r13) - xori r10,r10,1 + xori r10,r10,IRQS_ENABLED 1: tdnei r10,0 EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING #endif @@ -147,7 +147,7 @@ system_call: /* label this so stack traces look sane */ /* We do need to set SOFTE in the stack frame or the return * from interrupt will be painful */ - li r10,1 + li r10,IRQS_ENABLED std r10,SOFTE(r1) CURRENT_THREAD_INFO(r11, r1) @@ -743,7 +743,7 @@ resume_kernel: lwz r8,TI_PREEMPT(r9) cmpwi cr1,r8,0 ld r0,SOFTE(r1) - cmpdi r0,0 + cmpdi r0,IRQS_DISABLED crandc eq,cr1*4+eq,eq bne restore @@ -783,11 +783,11 @@ restore: */ ld r5,SOFTE(r1) lbz r6,PACASOFTIRQEN(r13) - cmpwi cr0,r5,0 + cmpwi cr0,r5,IRQS_DISABLED beq .Lrestore_irq_off /* We are enabling, were we already enabled ? Yes, just return */ - cmpwi cr0,r6,1 + cmpwi cr0,r6,IRQS_ENABLED beq cr0,.Ldo_restore /* @@ -806,7 +806,7 @@ restore: */ .Lrestore_no_replay: TRACE_ENABLE_INTS - li r0,1 + li r0,IRQS_ENABLED stb r0,PACASOFTIRQEN(r13); /* @@ -915,7 +915,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG) /* The interrupt should not have soft enabled. */ lbz r7,PACASOFTIRQEN(r13) -1: tdnei r7,0 +1: tdnei r7,IRQS_DISABLED EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING #endif b .Ldo_restore @@ -1036,7 +1036,7 @@ _GLOBAL(enter_rtas) * check it with the asm equivalent of WARN_ON */ lbz r0,PACASOFTIRQEN(r13) -1: tdnei r0,0 +1: tdnei r0,IRQS_DISABLED EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING #endif diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index acd8ca76233e..80cc91bbcf1a 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -210,9 +210,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) ld r5,SOFTE(r1) /* Interrupts had better not already be enabled... */ - twnei r6,0 + twnei r6,IRQS_DISABLED - cmpwi cr0,r5,0 + cmpwi cr0,r5,IRQS_DISABLED beq 1f TRACE_ENABLE_INTS @@ -352,7 +352,7 @@ ret_from_mc_except: #define PROLOG_ADDITION_MASKABLE_GEN(n) \ lbz r10,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? 
*/ \ - cmpwi cr0,r10,0; /* yes -> go out of line */ \ + cmpwi cr0,r10,IRQS_DISABLED; /* yes -> go out of line */ \ beq masked_interrupt_book3e_##n #define PROLOG_ADDITION_2REGS_GEN(n) \ diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index aa71a90f5222..0deef350004f 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -765,7 +765,7 @@ _GLOBAL(pmac_secondary_start) /* Mark interrupts soft and hard disabled (they might be enabled * in the PACA when doing hotplug) */ - li r0,0 + li r0,IRQS_DISABLED stb r0,PACASOFTIRQEN(r13) li r0,PACA_IRQ_HARD_DIS stb r0,PACAIRQHAPPENED(r13) @@ -822,6 +822,7 @@ __secondary_start: /* Mark interrupts soft and hard disabled (they might be enabled * in the PACA when doing hotplug) */ + li r7,IRQS_DISABLED stb r7,PACASOFTIRQEN(r13) li r0,PACA_IRQ_HARD_DIS stb r0,PACAIRQHAPPENED(r13) @@ -988,7 +989,7 @@ start_here_common: /* Mark interrupts soft and hard disabled (they might be enabled * in the PACA when doing hotplug) */ - li r0,0 + li r0,IRQS_DISABLED stb r0,PACASOFTIRQEN(r13) li r0,PACA_IRQ_HARD_DIS stb r0,PACAIRQHAPPENED(r13) diff --git a/arch/powerpc/kernel/idle_book3e.S b/arch/powerpc/kernel/idle_book3e.S index 48c21acef915..1bb1d152aa46 100644 --- a/arch/powerpc/kernel/idle_book3e.S +++ b/arch/powerpc/kernel/idle_book3e.S @@ -17,6 +17,7 @@ #include #include #include +#include /* 64-bit version only for now */ #ifdef CONFIG_PPC64 @@ -46,7 +47,7 @@ _GLOBAL(\name) bl trace_hardirqs_on addi r1,r1,128 #endif - li r0,1 + li r0,IRQS_ENABLED stb r0,PACASOFTIRQEN(r13) /* Interrupts will make use return to LR, so get something we want diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S index f57a19348bdd..8b4702d93701 100644 --- a/arch/powerpc/kernel/idle_power4.S +++ b/arch/powerpc/kernel/idle_power4.S @@ -15,6 +15,7 @@ #include #include #include +#include #undef DEBUG @@ -53,7 +54,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP) mfmsr r7 #endif /* CONFIG_TRACE_IRQFLAGS */ - li r0,1 + li r0,IRQS_ENABLED stb r0,PACASOFTIRQEN(r13) /* we'll hard-enable shortly */ BEGIN_FTR_SECTION DSSALL diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index b7a84522e652..483a9206554f 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -67,6 +67,7 @@ #include #include #include +#include #ifdef CONFIG_PPC64 #include @@ -231,7 +232,7 @@ notrace void arch_local_irq_restore(unsigned long en) /* Write the new soft-enabled value */ set_soft_enabled(en); - if (!en) + if (en == IRQS_DISABLED) return; /* * From this point onward, we can take interrupts, preempt, @@ -276,7 +277,7 @@ notrace void arch_local_irq_restore(unsigned long en) } #endif /* CONFIG_TRACE_IRQFLAGS */ - set_soft_enabled(0); + set_soft_enabled(IRQS_DISABLED); trace_hardirqs_off(); /* @@ -288,7 +289,7 @@ notrace void arch_local_irq_restore(unsigned long en) /* We can soft-enable now */ trace_hardirqs_on(); - set_soft_enabled(1); + set_soft_enabled(IRQS_ENABLED); /* * And replay if we have to. This will return with interrupts @@ -363,7 +364,7 @@ bool prep_irq_for_idle(void) * of entering the low power state. 
*/ local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS; - local_paca->soft_enabled = 1; + local_paca->soft_enabled = IRQS_ENABLED; /* Tell the caller to enter the low power state */ return true; diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index bcd4441304a5..1563d1190da3 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -57,6 +57,7 @@ #include #ifdef CONFIG_PPC64 #include +#include #endif #include #include @@ -1674,7 +1675,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, childregs->gpr[14] = ppc_function_entry((void *)usp); #ifdef CONFIG_PPC64 clear_tsk_thread_flag(p, TIF_32BIT); - childregs->softe = 1; + childregs->softe = IRQS_ENABLED; #endif childregs->gpr[15] = kthread_arg; p->thread.regs = NULL; /* no user register state */ diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 8f285d6a3db1..2fd4e167ef09 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -66,6 +66,7 @@ #include #include #include +#include #include "setup.h" @@ -187,6 +188,8 @@ static void __init fixup_boot_paca(void) get_paca()->cpu_start = 1; /* Allow percpu accesses to work until we setup percpu data */ get_paca()->data_offset = 0; + /* Mark interrupts disabled in PACA */ + get_paca()->soft_enabled = IRQS_DISABLED; } static void __init configure_exceptions(void) diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index fe6f3a285455..070092b1ba8a 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -253,7 +253,7 @@ void accumulate_stolen_time(void) * needs to reflect that so various debug stuff doesn't * complain */ - local_paca->soft_enabled = 0; + local_paca->soft_enabled = IRQS_DISABLED; sst = scan_dispatch_log(acct->starttime_user); ust = scan_dispatch_log(acct->starttime); diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index f5d1008f8574..760bb5a2b8bd 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -752,7 +752,7 @@ void flush_dcache_icache_hugepage(struct page *page) * So long as we atomically load page table pointers we are safe against teardown, * we can follow the address down to the the page and take a ref on it. * This function need to be called with interrupts disabled. We use this variant - * when we have MSR[EE] = 0 but the paca->soft_enabled = 1 + * when we have MSR[EE] = 0 but the paca->soft_enabled = IRQS_ENABLED */ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea, bool *is_thp, unsigned *hpage_shift) diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 9e3da168d54c..594261e308b3 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -322,7 +322,7 @@ static inline void perf_read_regs(struct pt_regs *regs) */ static inline int perf_intr_is_nmi(struct pt_regs *regs) { - return !regs->softe; + return (regs->softe == IRQS_DISABLED); } /* -- cgit v1.2.3 From 4e26bc4a4ed683c42ba45f09050575a671c6f1f4 Mon Sep 17 00:00:00 2001 From: Madhavan Srinivasan Date: Wed, 20 Dec 2017 09:25:50 +0530 Subject: powerpc/64: Rename soft_enabled to irq_soft_mask Rename the paca->soft_enabled to paca->irq_soft_mask as it is no longer used as a flag for interrupt state, but a mask. 
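A short editorial sketch of why the rename matters: a mask is tested with a bitwise AND (the andi. instructions in the diff below) rather than equality, which leaves room for additional mask bits later. Note that by this point in the series the values have been inverted by intervening patches not shown in this hugetlbpage.c-limited log, so IRQS_DISABLED is a set bit (see the hw_irq.h context lines below); the extra PMI bit here is a hypothetical example:

	#include <stdbool.h>

	#define IRQS_ENABLED		0x00	/* empty mask: nothing masked */
	#define IRQS_DISABLED		0x01	/* standard interrupts masked */
	#define IRQS_PMI_DISABLED	0x02	/* hypothetical extra mask bit */

	/* Illustrative only: mask semantics use AND, not equality,
	 * so extra bits can coexist with IRQS_DISABLED. */
	static inline bool std_irqs_masked(unsigned char irq_soft_mask)
	{
		return (irq_soft_mask & IRQS_DISABLED) != 0;
	}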
Signed-off-by: Madhavan Srinivasan Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/exception-64s.h | 4 +-- arch/powerpc/include/asm/hw_irq.h | 56 ++++++++++++++++++-------------- arch/powerpc/include/asm/irqflags.h | 12 +++---- arch/powerpc/include/asm/kvm_ppc.h | 2 +- arch/powerpc/include/asm/paca.h | 2 +- arch/powerpc/kernel/asm-offsets.c | 2 +- arch/powerpc/kernel/entry_64.S | 12 +++---- arch/powerpc/kernel/exceptions-64e.S | 10 +++--- arch/powerpc/kernel/head_64.S | 6 ++-- arch/powerpc/kernel/idle_book3e.S | 2 +- arch/powerpc/kernel/idle_power4.S | 2 +- arch/powerpc/kernel/irq.c | 23 +++---------- arch/powerpc/kernel/optprobes_head.S | 2 +- arch/powerpc/kernel/ptrace.c | 2 +- arch/powerpc/kernel/setup_64.c | 4 +-- arch/powerpc/kernel/time.c | 6 ++-- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 2 +- arch/powerpc/mm/hugetlbpage.c | 2 +- arch/powerpc/xmon/xmon.c | 4 +-- 19 files changed, 74 insertions(+), 81 deletions(-) (limited to 'arch/powerpc/mm/hugetlbpage.c') diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index a21adcb75fc6..b090db4ca37e 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h @@ -432,7 +432,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943) mflr r9; /* Get LR, later save to stack */ \ ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \ std r9,_LINK(r1); \ - lbz r10,PACASOFTIRQEN(r13); \ + lbz r10,PACAIRQSOFTMASK(r13); \ mfspr r11,SPRN_XER; /* save XER in stackframe */ \ std r10,SOFTE(r1); \ std r11,_XER(r1); \ @@ -498,7 +498,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943) #define SOFTEN_VALUE_0xea0 PACA_IRQ_EE #define __SOFTEN_TEST(h, vec) \ - lbz r10,PACASOFTIRQEN(r13); \ + lbz r10,PACAIRQSOFTMASK(r13); \ andi. r10,r10,IRQS_DISABLED; \ li r10,SOFTEN_VALUE_##vec; \ bne masked_##h##interrupt diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index f96280ee9540..5c82bb116832 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h @@ -29,7 +29,7 @@ #define PACA_IRQ_HMI 0x20 /* - * flags for paca->soft_enabled + * flags for paca->irq_soft_mask */ #define IRQS_ENABLED 0 #define IRQS_DISABLED 1 @@ -49,14 +49,14 @@ extern void unknown_exception(struct pt_regs *regs); #ifdef CONFIG_PPC64 #include -static inline notrace unsigned long soft_enabled_return(void) +static inline notrace unsigned long irq_soft_mask_return(void) { unsigned long flags; asm volatile( "lbz %0,%1(13)" : "=r" (flags) - : "i" (offsetof(struct paca_struct, soft_enabled))); + : "i" (offsetof(struct paca_struct, irq_soft_mask))); return flags; } @@ -64,18 +64,24 @@ static inline notrace unsigned long soft_enabled_return(void) /* * The "memory" clobber acts as both a compiler barrier * for the critical section and as a clobber because - * we changed paca->soft_enabled + * we changed paca->irq_soft_mask */ -static inline notrace void soft_enabled_set(unsigned long enable) +static inline notrace void irq_soft_mask_set(unsigned long mask) { #ifdef CONFIG_TRACE_IRQFLAGS /* - * mask must always include LINUX bit if any are set, and - * interrupts don't get replayed until the Linux interrupt is - * unmasked. This could be changed to replay partial unmasks - * in future, which would allow Linux masks to nest inside - * other masks, among other things. For now, be very dumb and - * simple. + * The irq mask must always include the STD bit if any are set. + * + * and interrupts don't get replayed until the standard + * interrupt (local_irq_disable()) is unmasked. 
+ * + * Other masks must only provide additional masking beyond + * the standard, and they are also not replayed until the + * standard interrupt becomes unmasked. + * + * This could be changed, but it will require partial + * unmasks to be replayed, among other things. For now, take + * the simple approach. */ WARN_ON(mask && !(mask & IRQS_DISABLED)); #endif @@ -83,12 +89,12 @@ static inline notrace void soft_enabled_set(unsigned long enable) asm volatile( "stb %0,%1(13)" : - : "r" (enable), - "i" (offsetof(struct paca_struct, soft_enabled)) + : "r" (mask), + "i" (offsetof(struct paca_struct, irq_soft_mask)) : "memory"); } -static inline notrace unsigned long soft_enabled_set_return(unsigned long mask) +static inline notrace unsigned long irq_soft_mask_set_return(unsigned long mask) { unsigned long flags; @@ -99,7 +105,7 @@ static inline notrace unsigned long soft_enabled_set_return(unsigned long mask) asm volatile( "lbz %0,%1(13); stb %2,%1(13)" : "=&r" (flags) - : "i" (offsetof(struct paca_struct, soft_enabled)), + : "i" (offsetof(struct paca_struct, irq_soft_mask)), "r" (mask) : "memory"); @@ -108,12 +114,12 @@ static inline notrace unsigned long soft_enabled_set_return(unsigned long mask) static inline unsigned long arch_local_save_flags(void) { - return soft_enabled_return(); + return irq_soft_mask_return(); } static inline void arch_local_irq_disable(void) { - soft_enabled_set(IRQS_DISABLED); + irq_soft_mask_set(IRQS_DISABLED); } extern void arch_local_irq_restore(unsigned long); @@ -125,7 +131,7 @@ static inline void arch_local_irq_enable(void) static inline unsigned long arch_local_irq_save(void) { - return soft_enabled_set_return(IRQS_DISABLED); + return irq_soft_mask_set_return(IRQS_DISABLED); } static inline bool arch_irqs_disabled_flags(unsigned long flags) @@ -146,13 +152,13 @@ static inline bool arch_irqs_disabled(void) #define __hard_irq_disable() __mtmsrd(local_paca->kernel_msr, 1) #endif -#define hard_irq_disable() do { \ - unsigned long flags; \ - __hard_irq_disable(); \ - flags = soft_enabled_set_return(IRQS_DISABLED);\ - local_paca->irq_happened |= PACA_IRQ_HARD_DIS; \ - if (!arch_irqs_disabled_flags(flags)) \ - trace_hardirqs_off(); \ +#define hard_irq_disable() do { \ + unsigned long flags; \ + __hard_irq_disable(); \ + flags = irq_soft_mask_set_return(IRQS_DISABLED); \ + local_paca->irq_happened |= PACA_IRQ_HARD_DIS; \ + if (!arch_irqs_disabled_flags(flags)) \ + trace_hardirqs_off(); \ } while(0) static inline bool lazy_irq_pending(void) diff --git a/arch/powerpc/include/asm/irqflags.h b/arch/powerpc/include/asm/irqflags.h index f1ec012232d9..1a6c1ce17735 100644 --- a/arch/powerpc/include/asm/irqflags.h +++ b/arch/powerpc/include/asm/irqflags.h @@ -47,14 +47,14 @@ * be clobbered. */ #define RECONCILE_IRQ_STATE(__rA, __rB) \ - lbz __rA,PACASOFTIRQEN(r13); \ + lbz __rA,PACAIRQSOFTMASK(r13); \ lbz __rB,PACAIRQHAPPENED(r13); \ - andi. __rA,__rA,IRQS_DISABLED;\ - li __rA,IRQS_DISABLED; \ + andi. 
__rA,__rA,IRQS_DISABLED; \ + li __rA,IRQS_DISABLED; \ ori __rB,__rB,PACA_IRQ_HARD_DIS; \ stb __rB,PACAIRQHAPPENED(r13); \ bne 44f; \ - stb __rA,PACASOFTIRQEN(r13); \ + stb __rA,PACAIRQSOFTMASK(r13); \ TRACE_DISABLE_INTS; \ 44: @@ -64,9 +64,9 @@ #define RECONCILE_IRQ_STATE(__rA, __rB) \ lbz __rA,PACAIRQHAPPENED(r13); \ - li __rB,IRQS_DISABLED; \ + li __rB,IRQS_DISABLED; \ ori __rA,__rA,PACA_IRQ_HARD_DIS; \ - stb __rB,PACASOFTIRQEN(r13); \ + stb __rB,PACAIRQSOFTMASK(r13); \ stb __rA,PACAIRQHAPPENED(r13) #endif #endif diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 369f0640826c..9db18287b5f4 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -873,7 +873,7 @@ static inline void kvmppc_fix_ee_before_entry(void) /* Only need to enable IRQs by hard enabling them after this */ local_paca->irq_happened = 0; - soft_enabled_set(IRQS_ENABLED); + irq_soft_mask_set(IRQS_ENABLED); #endif } diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index 3892db93b837..e2ee193eb24d 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -159,7 +159,7 @@ struct paca_struct { u64 saved_r1; /* r1 save for RTAS calls or PM */ u64 saved_msr; /* MSR saved here by enter_rtas */ u16 trap_save; /* Used when bad stack is encountered */ - u8 soft_enabled; /* irq soft-enable flag */ + u8 irq_soft_mask; /* mask for irq soft masking */ u8 irq_happened; /* irq happened while soft-disabled */ u8 io_sync; /* writel() needs spin_unlock sync */ u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disable */ diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 3f6316bcde4e..43a6967d48ad 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -178,7 +178,7 @@ int main(void) OFFSET(PACATOC, paca_struct, kernel_toc); OFFSET(PACAKBASE, paca_struct, kernelbase); OFFSET(PACAKMSR, paca_struct, kernel_msr); - OFFSET(PACASOFTIRQEN, paca_struct, soft_enabled); + OFFSET(PACAIRQSOFTMASK, paca_struct, irq_soft_mask); OFFSET(PACAIRQHAPPENED, paca_struct, irq_happened); #ifdef CONFIG_PPC_BOOK3S OFFSET(PACACONTEXTID, paca_struct, mm_ctx_id); diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 51e4cc4dba4a..7c51f022d0de 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -129,7 +129,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) * is correct */ #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG) - lbz r10,PACASOFTIRQEN(r13) + lbz r10,PACAIRQSOFTMASK(r13) 1: tdnei r10,IRQS_ENABLED EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING #endif @@ -781,7 +781,7 @@ restore: * are about to re-enable interrupts */ ld r5,SOFTE(r1) - lbz r6,PACASOFTIRQEN(r13) + lbz r6,PACAIRQSOFTMASK(r13) andi. r5,r5,IRQS_DISABLED bne .Lrestore_irq_off @@ -806,7 +806,7 @@ restore: .Lrestore_no_replay: TRACE_ENABLE_INTS li r0,IRQS_ENABLED - stb r0,PACASOFTIRQEN(r13); + stb r0,PACAIRQSOFTMASK(r13); /* * Final return path. BookE is handled in a different file @@ -913,8 +913,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 1: #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG) /* The interrupt should not have soft enabled. 
*/ - lbz r7,PACASOFTIRQEN(r13) -1: tdnei r7,IRQS_DISABLED + lbz r7,PACAIRQSOFTMASK(r13) +1: tdeqi r7,IRQS_ENABLED EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING #endif b .Ldo_restore @@ -1034,7 +1034,7 @@ _GLOBAL(enter_rtas) /* There is no way it is acceptable to get here with interrupts enabled, * check it with the asm equivalent of WARN_ON */ - lbz r0,PACASOFTIRQEN(r13) + lbz r0,PACAIRQSOFTMASK(r13) 1: tdeqi r0,IRQS_ENABLED EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING #endif diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index 9216ece7672c..ee832d344a5a 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -139,7 +139,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) mfspr r10,SPRN_ESR SPECIAL_EXC_STORE(r10,ESR) - lbz r10,PACASOFTIRQEN(r13) + lbz r10,PACAIRQSOFTMASK(r13) SPECIAL_EXC_STORE(r10,SOFTE) ld r10,_NIP(r1) SPECIAL_EXC_STORE(r10,CSRR0) @@ -206,7 +206,7 @@ BEGIN_FTR_SECTION mtspr SPRN_MAS8,r10 END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) - lbz r6,PACASOFTIRQEN(r13) + lbz r6,PACAIRQSOFTMASK(r13) ld r5,SOFTE(r1) /* Interrupts had better not already be enabled... */ @@ -216,7 +216,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) bne 1f TRACE_ENABLE_INTS - stb r5,PACASOFTIRQEN(r13) + stb r5,PACAIRQSOFTMASK(r13) 1: /* * Restore PACAIRQHAPPENED rather than setting it based on @@ -351,7 +351,7 @@ ret_from_mc_except: #define PROLOG_ADDITION_NONE_MC(n) #define PROLOG_ADDITION_MASKABLE_GEN(n) \ - lbz r10,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? */ \ + lbz r10,PACAIRQSOFTMASK(r13); /* are irqs soft-masked? */ \ andi. r10,r10,IRQS_DISABLED; /* yes -> go out of line */ \ bne masked_interrupt_book3e_##n @@ -397,7 +397,7 @@ exc_##n##_common: \ mfspr r8,SPRN_XER; /* save XER in stackframe */ \ ld r9,excf+EX_R1(r13); /* load orig r1 back from PACA */ \ lwz r10,excf+EX_CR(r13); /* load orig CR back from PACA */ \ - lbz r11,PACASOFTIRQEN(r13); /* get current IRQ softe */ \ + lbz r11,PACAIRQSOFTMASK(r13); /* get current IRQ softe */ \ ld r12,exception_marker@toc(r2); \ li r0,0; \ std r3,GPR10(r1); /* save r10 to stackframe */ \ diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 0deef350004f..a61151a6ea5e 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -766,7 +766,7 @@ _GLOBAL(pmac_secondary_start) * in the PACA when doing hotplug) */ li r0,IRQS_DISABLED - stb r0,PACASOFTIRQEN(r13) + stb r0,PACAIRQSOFTMASK(r13) li r0,PACA_IRQ_HARD_DIS stb r0,PACAIRQHAPPENED(r13) @@ -823,7 +823,7 @@ __secondary_start: * in the PACA when doing hotplug) */ li r7,IRQS_DISABLED - stb r7,PACASOFTIRQEN(r13) + stb r7,PACAIRQSOFTMASK(r13) li r0,PACA_IRQ_HARD_DIS stb r0,PACAIRQHAPPENED(r13) @@ -990,7 +990,7 @@ start_here_common: * in the PACA when doing hotplug) */ li r0,IRQS_DISABLED - stb r0,PACASOFTIRQEN(r13) + stb r0,PACAIRQSOFTMASK(r13) li r0,PACA_IRQ_HARD_DIS stb r0,PACAIRQHAPPENED(r13) diff --git a/arch/powerpc/kernel/idle_book3e.S b/arch/powerpc/kernel/idle_book3e.S index 1bb1d152aa46..2b269315d377 100644 --- a/arch/powerpc/kernel/idle_book3e.S +++ b/arch/powerpc/kernel/idle_book3e.S @@ -48,7 +48,7 @@ _GLOBAL(\name) addi r1,r1,128 #endif li r0,IRQS_ENABLED - stb r0,PACASOFTIRQEN(r13) + stb r0,PACAIRQSOFTMASK(r13) /* Interrupts will make use return to LR, so get something we want * in there diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S index 8b4702d93701..08faa93755f9 100644 --- a/arch/powerpc/kernel/idle_power4.S +++ 
b/arch/powerpc/kernel/idle_power4.S @@ -55,7 +55,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP) #endif /* CONFIG_TRACE_IRQFLAGS */ li r0,IRQS_ENABLED - stb r0,PACASOFTIRQEN(r13) /* we'll hard-enable shortly */ + stb r0,PACAIRQSOFTMASK(r13) /* we'll hard-enable shortly */ BEGIN_FTR_SECTION DSSALL sync diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 406c4329b535..e137b98dc5f8 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -225,22 +225,9 @@ notrace void arch_local_irq_restore(unsigned long mask) unsigned int replay; /* Write the new soft-enabled value */ - soft_enabled_set(mask); - if (mask) { -#ifdef CONFIG_TRACE_IRQFLAGS - /* - * mask must always include LINUX bit if any - * are set, and interrupts don't get replayed until - * the Linux interrupt is unmasked. This could be - * changed to replay partial unmasks in future, - * which would allow Linux masks to nest inside - * other masks, among other things. For now, be very - * dumb and simple. - */ - WARN_ON(!(mask & IRQS_DISABLED)); -#endif + irq_soft_mask_set(mask); + if (mask) return; - } /* * From this point onward, we can take interrupts, preempt, @@ -285,7 +272,7 @@ notrace void arch_local_irq_restore(unsigned long mask) } #endif /* CONFIG_TRACE_IRQFLAGS */ - soft_enabled_set(IRQS_DISABLED); + irq_soft_mask_set(IRQS_DISABLED); trace_hardirqs_off(); /* @@ -297,7 +284,7 @@ notrace void arch_local_irq_restore(unsigned long mask) /* We can soft-enable now */ trace_hardirqs_on(); - soft_enabled_set(IRQS_ENABLED); + irq_soft_mask_set(IRQS_ENABLED); /* * And replay if we have to. This will return with interrupts @@ -372,7 +359,7 @@ bool prep_irq_for_idle(void) * of entering the low power state. */ local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS; - soft_enabled_set(IRQS_ENABLED); + irq_soft_mask_set(IRQS_ENABLED); /* Tell the caller to enter the low power state */ return true; diff --git a/arch/powerpc/kernel/optprobes_head.S b/arch/powerpc/kernel/optprobes_head.S index 52fc864cdec4..98a3aeeb3c8c 100644 --- a/arch/powerpc/kernel/optprobes_head.S +++ b/arch/powerpc/kernel/optprobes_head.S @@ -58,7 +58,7 @@ optprobe_template_entry: std r5,_XER(r1) mfcr r5 std r5,_CCR(r1) - lbz r5,PACASOFTIRQEN(r13) + lbz r5,PACAIRQSOFTMASK(r13) std r5,SOFTE(r1) /* diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index bd2c49475473..aef08e579946 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -285,7 +285,7 @@ int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data) #ifdef CONFIG_PPC64 /* - * softe copies paca->soft_enabled variable state. Since soft_enabled is + * softe copies paca->irq_soft_mask variable state. Since irq_soft_mask is * no more used as a flag, lets force usr to alway see the softe value as 1 * which means interrupts are not soft disabled. 
*/ diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index f6bedebda90a..896dacef2f2d 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -189,7 +189,7 @@ static void __init fixup_boot_paca(void) /* Allow percpu accesses to work until we setup percpu data */ get_paca()->data_offset = 0; /* Mark interrupts disabled in PACA */ - soft_enabled_set(IRQS_DISABLED); + irq_soft_mask_set(IRQS_DISABLED); } static void __init configure_exceptions(void) @@ -352,7 +352,7 @@ void __init early_setup(unsigned long dt_ptr) void early_setup_secondary(void) { /* Mark interrupts disabled in PACA */ - soft_enabled_set(IRQS_DISABLED); + irq_soft_mask_set(IRQS_DISABLED); /* Initialize the hash table or TLB handling */ early_init_mmu_secondary(); diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index daa6e9e35ab9..a32823dcd9a4 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -244,7 +244,7 @@ static u64 scan_dispatch_log(u64 stop_tb) void accumulate_stolen_time(void) { u64 sst, ust; - unsigned long save_soft_enabled = soft_enabled_return(); + unsigned long save_irq_soft_mask = irq_soft_mask_return(); struct cpu_accounting_data *acct = &local_paca->accounting; /* We are called early in the exception entry, before @@ -253,7 +253,7 @@ void accumulate_stolen_time(void) * needs to reflect that so various debug stuff doesn't * complain */ - soft_enabled_set(IRQS_DISABLED); + irq_soft_mask_set(IRQS_DISABLED); sst = scan_dispatch_log(acct->starttime_user); ust = scan_dispatch_log(acct->starttime); @@ -261,7 +261,7 @@ void accumulate_stolen_time(void) acct->utime -= ust; acct->steal_time += ust + sst; - soft_enabled_set(save_soft_enabled); + irq_soft_mask_set(save_irq_soft_mask); } static inline u64 calculate_stolen_time(u64 stop_tb) diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 2659844784b8..a92ad8500917 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -3249,7 +3249,7 @@ kvmppc_bad_host_intr: mfctr r4 #endif mfxer r5 - lbz r6, PACASOFTIRQEN(r13) + lbz r6, PACAIRQSOFTMASK(r13) std r3, _LINK(r1) std r4, _CTR(r1) std r5, _XER(r1) diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 760bb5a2b8bd..876da2bc1796 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -752,7 +752,7 @@ void flush_dcache_icache_hugepage(struct page *page) * So long as we atomically load page table pointers we are safe against teardown, * we can follow the address down to the the page and take a ref on it. * This function need to be called with interrupts disabled. 
We use this variant - * when we have MSR[EE] = 0 but the paca->soft_enabled = IRQS_ENABLED + * when we have MSR[EE] = 0 but the paca->irq_soft_mask = IRQS_ENABLED */ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea, bool *is_thp, unsigned *hpage_shift) diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 1b2d8cb49abb..2695f8dd2359 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -1623,7 +1623,7 @@ static void excprint(struct pt_regs *fp) printf(" current = 0x%lx\n", current); #ifdef CONFIG_PPC64 printf(" paca = 0x%lx\t softe: %d\t irq_happened: 0x%02x\n", - local_paca, local_paca->soft_enabled, local_paca->irq_happened); + local_paca, local_paca->irq_soft_mask, local_paca->irq_happened); #endif if (current) { printf(" pid = %ld, comm = %s\n", @@ -2391,7 +2391,7 @@ static void dump_one_paca(int cpu) DUMP(p, stab_rr, "lx"); DUMP(p, saved_r1, "lx"); DUMP(p, trap_save, "x"); - DUMP(p, soft_enabled, "x"); + DUMP(p, irq_soft_mask, "x"); DUMP(p, irq_happened, "x"); DUMP(p, io_sync, "x"); DUMP(p, irq_work_pending, "x"); -- cgit v1.2.3