From f79f7a2d96769d2a3e663a3e673066be77c30cc3 Mon Sep 17 00:00:00 2001 From: Bhaskar Chowdhury <unixbhaskar@gmail.com> Date: Mon, 22 Mar 2021 17:58:19 +0530 Subject: arc: Fix typos/spellos s/commiting/committing/ s/defintion/definition/ s/gaurantees/guarantees/ s/interrpted/interrupted/ s/interrutps/interrupts/ s/succeded/succeeded/ s/unconditonally/unconditionally/ Reviewed-by: Christian Brauner <christian.brauner@ubuntu.com> Acked-by: Randy Dunlap <rdunlap@infradead.org> Signed-off-by: Bhaskar Chowdhury <unixbhaskar@gmail.com> Signed-off-by: Vineet Gupta <vgupta@synopsys.com> --- arch/arc/Makefile | 2 +- arch/arc/include/asm/cmpxchg.h | 4 ++-- arch/arc/kernel/process.c | 8 ++++---- arch/arc/kernel/signal.c | 4 ++-- 4 files changed, 9 insertions(+), 9 deletions(-) (limited to 'arch/arc') diff --git a/arch/arc/Makefile b/arch/arc/Makefile index 4392c9c189c4..e47adc97a89b 100644 --- a/arch/arc/Makefile +++ b/arch/arc/Makefile @@ -31,7 +31,7 @@ endif ifdef CONFIG_ARC_CURR_IN_REG -# For a global register defintion, make sure it gets passed to every file +# For a global register definition, make sure it gets passed to every file # We had a customer reported bug where some code built in kernel was NOT using # any kernel headers, and missing the r25 global register # Can't do unconditionally because of recursive include issues diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h index 9b87e162e539..dfeffa25499b 100644 --- a/arch/arc/include/asm/cmpxchg.h +++ b/arch/arc/include/asm/cmpxchg.h @@ -116,7 +116,7 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr, * * Technically the lock is also needed for UP (boils down to irq save/restore) * but we can cheat a bit since cmpxchg() atomic_ops_lock() would cause irqs to - * be disabled thus can't possibly be interrpted/preempted/clobbered by xchg() + * be disabled thus can't possibly be interrupted/preempted/clobbered by xchg() * Other way around, xchg is one instruction anyways, so can't be interrupted * as such */ @@ -143,7 +143,7 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr, /* * "atomic" variant of xchg() * REQ: It needs to follow the same serialization rules as other atomic_xxx() - * Since xchg() doesn't always do that, it would seem that following defintion + * Since xchg() doesn't always do that, it would seem that following definition * is incorrect. But here's the rationale: * SMP : Even xchg() takes the atomic_ops_lock, so OK. * LLSC: atomic_ops_lock are not relevant at all (even if SMP, since LLSC diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c index d838d0d57696..3793876f42d9 100644 --- a/arch/arc/kernel/process.c +++ b/arch/arc/kernel/process.c @@ -50,14 +50,14 @@ SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new) int ret; /* - * This is only for old cores lacking LLOCK/SCOND, which by defintion + * This is only for old cores lacking LLOCK/SCOND, which by definition * can't possibly be SMP. Thus doesn't need to be SMP safe. 
* And this also helps reduce the overhead for serializing in * the UP case */ WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP)); - /* Z indicates to userspace if operation succeded */ + /* Z indicates to userspace if operation succeeded */ regs->status32 &= ~STATUS_Z_MASK; ret = access_ok(uaddr, sizeof(*uaddr)); @@ -107,7 +107,7 @@ fail: void arch_cpu_idle(void) { - /* Re-enable interrupts <= default irq priority before commiting SLEEP */ + /* Re-enable interrupts <= default irq priority before committing SLEEP */ const unsigned int arg = 0x10 | ARCV2_IRQ_DEF_PRIO; __asm__ __volatile__( @@ -120,7 +120,7 @@ void arch_cpu_idle(void) void arch_cpu_idle(void) { - /* sleep, but enable both set E1/E2 (levels of interrutps) before committing */ + /* sleep, but enable both set E1/E2 (levels of interrupts) before committing */ __asm__ __volatile__("sleep 0x3 \n"); } diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c index fdbe06c98895..b3ccb9e5ffe4 100644 --- a/arch/arc/kernel/signal.c +++ b/arch/arc/kernel/signal.c @@ -259,7 +259,7 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) regs->r2 = (unsigned long)&sf->uc; /* - * small optim to avoid unconditonally calling do_sigaltstack + * small optim to avoid unconditionally calling do_sigaltstack * in sigreturn path, now that we only have rt_sigreturn */ magic = MAGIC_SIGALTSTK; @@ -391,7 +391,7 @@ void do_signal(struct pt_regs *regs) void do_notify_resume(struct pt_regs *regs) { /* - * ASM glue gaurantees that this is only called when returning to + * ASM glue guarantees that this is only called when returning to * user mode */ if (test_thread_flag(TIF_NOTIFY_RESUME)) -- cgit v1.2.3 From 8e97bf39fa0361af3e64739b3766992b9dafa11d Mon Sep 17 00:00:00 2001 From: Randy Dunlap <rdunlap@infradead.org> Date: Wed, 21 Apr 2021 22:16:53 -0700 Subject: ARC: kgdb: add 'fallthrough' to prevent a warning Use the 'fallthrough' macro to document that this switch case does indeed fall through to the next case. ../arch/arc/kernel/kgdb.c: In function 'kgdb_arch_handle_exception': ../arch/arc/kernel/kgdb.c:141:6: warning: this statement may fall through [-Wimplicit-fallthrough=] 141 | if (kgdb_hex2long(&ptr, &addr)) | ^ ../arch/arc/kernel/kgdb.c:144:2: note: here 144 | case 'D': | ^~~~ Cc: linux-snps-arc@lists.infradead.org Signed-off-by: Randy Dunlap <rdunlap@infradead.org> Signed-off-by: Vineet Gupta <vgupta@synopsys.com> --- arch/arc/kernel/kgdb.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arc') diff --git a/arch/arc/kernel/kgdb.c b/arch/arc/kernel/kgdb.c index ecfbc42d3a40..345a0000554c 100644 --- a/arch/arc/kernel/kgdb.c +++ b/arch/arc/kernel/kgdb.c @@ -140,6 +140,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code, ptr = &remcomInBuffer[1]; if (kgdb_hex2long(&ptr, &addr)) regs->ret = addr; + fallthrough; case 'D': case 'k': -- cgit v1.2.3 From 3433adc8bd09fc9f29b8baddf33b4ecd1ecd2cdc Mon Sep 17 00:00:00 2001 From: Vineet Gupta <vgupta@synopsys.com> Date: Fri, 23 Apr 2021 12:16:25 -0700 Subject: ARC: entry: fix off-by-one error in syscall number validation We have NR_syscall syscalls from [0 .. NR_syscall-1]. However the check for invalid syscall number is "> NR_syscall" as opposed to >=. This off-by-one error erronesously allows "NR_syscall" to be treated as valid syscall causeing out-of-bounds access into syscall-call table ensuing a crash (holes within syscall table have a invalid-entry handler but this is beyond the array implementing the table). 
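For illustration, here is the bound check written out as a minimal C sketch (made-up names syscall_nr_valid and NR_SYSCALLS, not the actual entry.S dispatch code); the real change is the cmp/bhi pair in the hunks further down:

  #include <errno.h>

  #define NR_SYSCALLS 439                /* valid numbers are 0 .. 438 */

  /* Hypothetical helper, not kernel code: returns 0 if nr may safely
   * index a table of NR_SYSCALLS entries, -ENOSYS otherwise. */
  static int syscall_nr_valid(unsigned int nr)
  {
          /* The buggy form was "nr > NR_SYSCALLS", which lets
           * nr == NR_SYSCALLS through and indexes one slot past
           * the end of the table. */
          if (nr >= NR_SYSCALLS)  /* matches "cmp r8, NR_syscalls - 1; bhi" */
                  return -ENOSYS;
          return 0;
  }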
This problem showed up on v5.6 kernel when testing glibc 2.33 (v5.10 kernel capable, includng faccessat2 syscall 439). The v5.6 kernel has NR_syscalls=439 (0 to 438). Due to the bug, 439 passed by glibc was not handled as -ENOSYS but processed leading to a crash. Link: https://github.com/foss-for-synopsys-dwc-arc-processors/linux/issues/48 Reported-by: Shahab Vahedi <shahab@synopsys.com> Cc: <stable@vger.kernel.org> Signed-off-by: Vineet Gupta <vgupta@synopsys.com> --- arch/arc/kernel/entry.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/arc') diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S index 1743506081da..2cb8dfe866b6 100644 --- a/arch/arc/kernel/entry.S +++ b/arch/arc/kernel/entry.S @@ -177,7 +177,7 @@ tracesys: ; Do the Sys Call as we normally would. ; Validate the Sys Call number - cmp r8, NR_syscalls + cmp r8, NR_syscalls - 1 mov.hi r0, -ENOSYS bhi tracesys_exit @@ -255,7 +255,7 @@ ENTRY(EV_Trap) ;============ Normal syscall case ; syscall num shd not exceed the total system calls avail - cmp r8, NR_syscalls + cmp r8, NR_syscalls - 1 mov.hi r0, -ENOSYS bhi .Lret_from_system_call -- cgit v1.2.3 From c5f756d8c6265ebb1736a7787231f010a3b782e5 Mon Sep 17 00:00:00 2001 From: Vladimir Isaev <isaev@synopsys.com> Date: Tue, 27 Apr 2021 15:12:37 +0300 Subject: ARC: mm: PAE: use 40-bit physical page mask 32-bit PAGE_MASK can not be used as a mask for physical addresses when PAE is enabled. PAGE_MASK_PHYS must be used for physical addresses instead of PAGE_MASK. Without this, init gets SIGSEGV if pte_modify was called: | potentially unexpected fatal signal 11. | Path: /bin/busybox | CPU: 0 PID: 1 Comm: init Not tainted 5.12.0-rc5-00003-g1e43c377a79f-dirty | Insn could not be fetched | @No matching VMA found | ECR: 0x00040000 EFA: 0x00000000 ERET: 0x00000000 | STAT: 0x80080082 [IE U ] BTA: 0x00000000 | SP: 0x5f9ffe44 FP: 0x00000000 BLK: 0xaf3d4 | LPS: 0x000d093e LPE: 0x000d0950 LPC: 0x00000000 | r00: 0x00000002 r01: 0x5f9fff14 r02: 0x5f9fff20 | ... | Kernel panic - not syncing: Attempted to kill init! 
exitcode=0x0000000b Signed-off-by: Vladimir Isaev <isaev@synopsys.com> Reported-by: kernel test robot <lkp@intel.com> Cc: Vineet Gupta <vgupta@synopsys.com> Cc: stable@vger.kernel.org Signed-off-by: Vineet Gupta <vgupta@synopsys.com> --- arch/arc/include/asm/page.h | 12 ++++++++++++ arch/arc/include/asm/pgtable.h | 12 +++--------- arch/arc/include/uapi/asm/page.h | 1 - arch/arc/mm/ioremap.c | 5 +++-- arch/arc/mm/tlb.c | 2 +- 5 files changed, 19 insertions(+), 13 deletions(-) (limited to 'arch/arc') diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h index ad9b7fe4dba3..4a9d33372fe2 100644 --- a/arch/arc/include/asm/page.h +++ b/arch/arc/include/asm/page.h @@ -7,6 +7,18 @@ #include <uapi/asm/page.h> +#ifdef CONFIG_ARC_HAS_PAE40 + +#define MAX_POSSIBLE_PHYSMEM_BITS 40 +#define PAGE_MASK_PHYS (0xff00000000ull | PAGE_MASK) + +#else /* CONFIG_ARC_HAS_PAE40 */ + +#define MAX_POSSIBLE_PHYSMEM_BITS 32 +#define PAGE_MASK_PHYS PAGE_MASK + +#endif /* CONFIG_ARC_HAS_PAE40 */ + #ifndef __ASSEMBLY__ #define clear_page(paddr) memset((paddr), 0, PAGE_SIZE) diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h index 163641726a2b..5878846f00cf 100644 --- a/arch/arc/include/asm/pgtable.h +++ b/arch/arc/include/asm/pgtable.h @@ -107,8 +107,8 @@ #define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE) /* Set of bits not changed in pte_modify */ -#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL) - +#define _PAGE_CHG_MASK (PAGE_MASK_PHYS | _PAGE_ACCESSED | _PAGE_DIRTY | \ + _PAGE_SPECIAL) /* More Abbrevaited helpers */ #define PAGE_U_NONE __pgprot(___DEF) #define PAGE_U_R __pgprot(___DEF | _PAGE_READ) @@ -132,13 +132,7 @@ #define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ) #define PTE_BITS_RWX (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ) -#ifdef CONFIG_ARC_HAS_PAE40 -#define PTE_BITS_NON_RWX_IN_PD1 (0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE) -#define MAX_POSSIBLE_PHYSMEM_BITS 40 -#else -#define PTE_BITS_NON_RWX_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE) -#define MAX_POSSIBLE_PHYSMEM_BITS 32 -#endif +#define PTE_BITS_NON_RWX_IN_PD1 (PAGE_MASK_PHYS | _PAGE_CACHEABLE) /************************************************************************** * Mapping of vm_flags (Generic VM) to PTE flags (arch specific) diff --git a/arch/arc/include/uapi/asm/page.h b/arch/arc/include/uapi/asm/page.h index 2a97e2718a21..2a4ad619abfb 100644 --- a/arch/arc/include/uapi/asm/page.h +++ b/arch/arc/include/uapi/asm/page.h @@ -33,5 +33,4 @@ #define PAGE_MASK (~(PAGE_SIZE-1)) - #endif /* _UAPI__ASM_ARC_PAGE_H */ diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c index fac4adc90204..95c649fbc95a 100644 --- a/arch/arc/mm/ioremap.c +++ b/arch/arc/mm/ioremap.c @@ -53,9 +53,10 @@ EXPORT_SYMBOL(ioremap); void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size, unsigned long flags) { + unsigned int off; unsigned long vaddr; struct vm_struct *area; - phys_addr_t off, end; + phys_addr_t end; pgprot_t prot = __pgprot(flags); /* Don't allow wraparound, zero size */ @@ -72,7 +73,7 @@ void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size, /* Mappings have to be page-aligned */ off = paddr & ~PAGE_MASK; - paddr &= PAGE_MASK; + paddr &= PAGE_MASK_PHYS; size = PAGE_ALIGN(end + 1) - paddr; /* diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c index 9bb3c24f3677..9c7c68247289 100644 --- a/arch/arc/mm/tlb.c +++ b/arch/arc/mm/tlb.c @@ -576,7 +576,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned, pte_t *ptep) { unsigned 
long vaddr = vaddr_unaligned & PAGE_MASK; - phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK; + phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK_PHYS; struct page *page = pfn_to_page(pte_pfn(*ptep)); create_tlb(vma, vaddr, ptep); -- cgit v1.2.3 From 1d5e4640e5df15252398c1b621f6bd432f2d7f17 Mon Sep 17 00:00:00 2001 From: Vladimir Isaev <isaev@synopsys.com> Date: Tue, 27 Apr 2021 15:13:54 +0300 Subject: ARC: mm: Use max_high_pfn as a HIGHMEM zone border Commit 4af22ded0ecf ("arc: fix memory initialization for systems with two memory banks") fixed highmem, but for the PAE case it causes bug messages: | BUG: Bad page state in process swapper pfn:80000 | page:(ptrval) refcount:0 mapcount:1 mapping:00000000 index:0x0 pfn:0x80000 flags: 0x0() | raw: 00000000 00000100 00000122 00000000 00000000 00000000 00000000 00000000 | raw: 00000000 | page dumped because: nonzero mapcount | Modules linked in: | CPU: 0 PID: 0 Comm: swapper Not tainted 5.12.0-rc5-00003-g1e43c377a79f #1 This is because the fix expects highmem to be always less than lowmem and uses min_low_pfn as an upper zone border for highmem. max_high_pfn should be ok for both highmem and highmem+PAE cases. Fixes: 4af22ded0ecf ("arc: fix memory initialization for systems with two memory banks") Signed-off-by: Vladimir Isaev <isaev@synopsys.com> Cc: Mike Rapoport <rppt@linux.ibm.com> Cc: stable@vger.kernel.org #5.8 onwards Signed-off-by: Vineet Gupta <vgupta@synopsys.com> --- arch/arc/mm/init.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'arch/arc') diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c index 33832e36bdb7..e2ed355438c9 100644 --- a/arch/arc/mm/init.c +++ b/arch/arc/mm/init.c @@ -157,7 +157,16 @@ void __init setup_arch_memory(void) min_high_pfn = PFN_DOWN(high_mem_start); max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz); - max_zone_pfn[ZONE_HIGHMEM] = min_low_pfn; + /* + * max_high_pfn should be ok here for both HIGHMEM and HIGHMEM+PAE. + * For HIGHMEM without PAE max_high_pfn should be less than + * min_low_pfn to guarantee that these two regions don't overlap. + * For PAE case highmem is greater than lowmem, so it is natural + * to use max_high_pfn. + * + * In both cases, holes should be handled by pfn_valid(). + */ + max_zone_pfn[ZONE_HIGHMEM] = max_high_pfn; high_memory = (void *)(min_high_pfn << PAGE_SHIFT); -- cgit v1.2.3
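Regarding the PAGE_MASK_PHYS change above: a stand-alone C sketch, with a made-up address and the default 8 KiB ARC page assumed, of why a 32-bit PAGE_MASK silently drops bits 39..32 of a PAE40 physical address while PAGE_MASK_PHYS keeps them:

  #include <stdint.h>
  #include <stdio.h>

  /* Illustration only, not kernel code.  uint32_t stands in for the
   * 32-bit kernel's unsigned long; 8 KiB pages assumed. */
  #define PAGE_SIZE       ((uint32_t)1 << 13)
  #define PAGE_MASK       (~(PAGE_SIZE - 1))             /* 0xffffe000 */
  #define PAGE_MASK_PHYS  (0xff00000000ull | PAGE_MASK)  /* keeps bits 39..32 */

  int main(void)
  {
          uint64_t paddr = 0x1c0001234ull;  /* made-up 40-bit physical address */

          /* The 32-bit mask is zero-extended for the AND, so the high
           * physical bits are cleared. */
          printf("paddr & PAGE_MASK      = %#llx\n",
                 (unsigned long long)(paddr & PAGE_MASK));
          printf("paddr & PAGE_MASK_PHYS = %#llx\n",
                 (unsigned long long)(paddr & PAGE_MASK_PHYS));
          return 0;
  }

This prints 0xc0000000 followed by 0x1c0000000, i.e. the truncation the patch avoids by masking physical addresses with PAGE_MASK_PHYS.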