46 files changed, 1034 insertions, 602 deletions
diff --git a/Documentation/devicetree/bindings/riscv/cpus.yaml b/Documentation/devicetree/bindings/riscv/cpus.yaml index 001931d526ec..f24cf9601c6e 100644 --- a/Documentation/devicetree/bindings/riscv/cpus.yaml +++ b/Documentation/devicetree/bindings/riscv/cpus.yaml @@ -72,6 +72,11 @@ properties: description: The blocksize in bytes for the Zicbom cache operations. + riscv,cboz-block-size: + $ref: /schemas/types.yaml#/definitions/uint32 + description: + The blocksize in bytes for the Zicboz cache operations. + riscv,isa: description: Identifies the specific RISC-V instruction set architecture diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index d1f661425790..d7c467670be8 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -44,7 +44,7 @@ config RISCV select ARCH_USE_QUEUED_RWLOCKS select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU select ARCH_WANT_FRAME_POINTERS - select ARCH_WANT_GENERAL_HUGETLB + select ARCH_WANT_GENERAL_HUGETLB if !RISCV_ISA_SVNAPOT select ARCH_WANT_HUGETLB_PAGE_OPTIMIZE_VMEMMAP select ARCH_WANT_HUGE_PMD_SHARE if 64BIT select ARCH_WANT_LD_ORPHAN_WARN if !XIP_KERNEL @@ -60,6 +60,7 @@ config RISCV select GENERIC_ATOMIC64 if !64BIT select GENERIC_CLOCKEVENTS_BROADCAST if SMP select GENERIC_EARLY_IOREMAP + select GENERIC_ENTRY select GENERIC_GETTIMEOFDAY if HAVE_GENERIC_VDSO select GENERIC_IDLE_POLL_SETUP select GENERIC_IOREMAP if MMU @@ -245,7 +246,7 @@ config AS_HAS_INSN def_bool $(as-instr,.insn r 51$(comma) 0$(comma) 0$(comma) t0$(comma) t0$(comma) zero) source "arch/riscv/Kconfig.socs" -source "arch/riscv/Kconfig.erratas" +source "arch/riscv/Kconfig.errata" menu "Platform type" @@ -377,9 +378,9 @@ config RISCV_ALTERNATIVE depends on !XIP_KERNEL help This Kconfig allows the kernel to automatically patch the - errata required by the execution platform at run time. The - code patching is performed once in the boot stages. It means - that the overhead from this mechanism is just taken once. + erratum or cpufeature required by the execution platform at run + time. The code patching overhead is minimal, as it's only done + once at boot and once on each module load. config RISCV_ALTERNATIVE_EARLY bool @@ -397,6 +398,25 @@ config RISCV_ISA_C If you don't know what to do here, say Y. +config RISCV_ISA_SVNAPOT + bool "SVNAPOT extension support" + depends on 64BIT && MMU + default y + select RISCV_ALTERNATIVE + help + Allow kernel to detect the SVNAPOT ISA-extension dynamically at boot + time and enable its usage. + + The SVNAPOT extension is used to mark contiguous PTEs as a range + of contiguous virtual-to-physical translations for a naturally + aligned power-of-2 (NAPOT) granularity larger than the base 4KB page + size. When HUGETLBFS is also selected this option unconditionally + allocates some memory for each NAPOT page size supported by the kernel. + When optimizing for low memory consumption and for platforms without + the SVNAPOT extension, it may be better to say N here. + + If you don't know what to do here, say Y. + config RISCV_ISA_SVPBMT bool "SVPBMT extension support" depends on 64BIT && MMU @@ -456,6 +476,19 @@ config RISCV_ISA_ZICBOM If you don't know what to do here, say Y. +config RISCV_ISA_ZICBOZ + bool "Zicboz extension support for faster zeroing of memory" + depends on !XIP_KERNEL && MMU + select RISCV_ALTERNATIVE + default y + help + Enable the use of the ZICBOZ extension (cbo.zero instruction) + when available. + + The Zicboz extension is used for faster zeroing of memory. + + If you don't know what to do here, say Y. 
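Zicboz's cbo.zero instruction zeroes one cache block (riscv_cboz_block_size bytes, probed from the riscv,cboz-block-size devicetree property documented above) per execution. As a rough sketch only, assuming a Zicboz-capable hart and a block size that divides PAGE_SIZE; the series itself routes clear_page() through the alternatives mechanism rather than a runtime loop, and the helper name below is made up for illustration:

	/* Hedged sketch, not this patch's actual clear_page() implementation */
	#include <asm/insn-def.h>	/* CBO_zero(), added by this series */
	#include <asm/page.h>		/* PAGE_SIZE */

	extern unsigned int riscv_cboz_block_size;

	static void zicboz_zero_page(void *page)
	{
		/* cbo.zero takes its base address in a named register (a0 here) */
		register unsigned long a0 asm("a0") = (unsigned long)page;
		unsigned long end = a0 + PAGE_SIZE;

		do {
			/* zero one riscv_cboz_block_size-byte block at a0 */
			asm volatile(CBO_zero(a0) : : "r" (a0) : "memory");
			a0 += riscv_cboz_block_size;
		} while (a0 < end);
	}
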
+ config TOOLCHAIN_HAS_ZIHINTPAUSE bool default y diff --git a/arch/riscv/Kconfig.erratas b/arch/riscv/Kconfig.errata index 69621ae6d647..69621ae6d647 100644 --- a/arch/riscv/Kconfig.erratas +++ b/arch/riscv/Kconfig.errata diff --git a/arch/riscv/errata/sifive/errata.c b/arch/riscv/errata/sifive/errata.c index da55cb247e89..7fa7b8b6a811 100644 --- a/arch/riscv/errata/sifive/errata.c +++ b/arch/riscv/errata/sifive/errata.c @@ -14,7 +14,7 @@ #include <asm/errata_list.h> struct errata_info_t { - char name[ERRATA_STRING_LENGTH_MAX]; + char name[32]; bool (*check_func)(unsigned long arch_id, unsigned long impid); }; @@ -101,12 +101,12 @@ void __init_or_module sifive_errata_patch_func(struct alt_entry *begin, for (alt = begin; alt < end; alt++) { if (alt->vendor_id != SIFIVE_VENDOR_ID) continue; - if (alt->errata_id >= ERRATA_SIFIVE_NUMBER) { - WARN(1, "This errata id:%d is not in kernel errata list", alt->errata_id); + if (alt->patch_id >= ERRATA_SIFIVE_NUMBER) { + WARN(1, "This errata id:%d is not in kernel errata list", alt->patch_id); continue; } - tmp = (1U << alt->errata_id); + tmp = (1U << alt->patch_id); if (cpu_req_errata & tmp) { mutex_lock(&text_mutex); patch_text_nosync(ALT_OLD_PTR(alt), ALT_ALT_PTR(alt), diff --git a/arch/riscv/errata/thead/errata.c b/arch/riscv/errata/thead/errata.c index 3b96a06d3c54..7e8d50ebb71a 100644 --- a/arch/riscv/errata/thead/errata.c +++ b/arch/riscv/errata/thead/errata.c @@ -93,10 +93,10 @@ void __init_or_module thead_errata_patch_func(struct alt_entry *begin, struct al for (alt = begin; alt < end; alt++) { if (alt->vendor_id != THEAD_VENDOR_ID) continue; - if (alt->errata_id >= ERRATA_THEAD_NUMBER) + if (alt->patch_id >= ERRATA_THEAD_NUMBER) continue; - tmp = (1U << alt->errata_id); + tmp = (1U << alt->patch_id); if (cpu_req_errata & tmp) { oldptr = ALT_OLD_PTR(alt); altptr = ALT_ALT_PTR(alt); diff --git a/arch/riscv/include/asm/alternative-macros.h b/arch/riscv/include/asm/alternative-macros.h index 51c6867e02f3..b8c55fb3ab2c 100644 --- a/arch/riscv/include/asm/alternative-macros.h +++ b/arch/riscv/include/asm/alternative-macros.h @@ -6,18 +6,18 @@ #ifdef __ASSEMBLY__ -.macro ALT_ENTRY oldptr newptr vendor_id errata_id new_len +.macro ALT_ENTRY oldptr newptr vendor_id patch_id new_len .4byte \oldptr - . .4byte \newptr - . 
.2byte \vendor_id .2byte \new_len - .4byte \errata_id + .4byte \patch_id .endm -.macro ALT_NEW_CONTENT vendor_id, errata_id, enable = 1, new_c : vararg +.macro ALT_NEW_CONTENT vendor_id, patch_id, enable = 1, new_c .if \enable .pushsection .alternative, "a" - ALT_ENTRY 886b, 888f, \vendor_id, \errata_id, 889f - 888f + ALT_ENTRY 886b, 888f, \vendor_id, \patch_id, 889f - 888f .popsection .subsection 1 888 : @@ -33,7 +33,7 @@ .endif .endm -.macro ALTERNATIVE_CFG old_c, new_c, vendor_id, errata_id, enable +.macro ALTERNATIVE_CFG old_c, new_c, vendor_id, patch_id, enable 886 : .option push .option norvc @@ -41,13 +41,13 @@ \old_c .option pop 887 : - ALT_NEW_CONTENT \vendor_id, \errata_id, \enable, \new_c + ALT_NEW_CONTENT \vendor_id, \patch_id, \enable, "\new_c" .endm -.macro ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, enable_1, \ - new_c_2, vendor_id_2, errata_id_2, enable_2 - ALTERNATIVE_CFG "\old_c", "\new_c_1", \vendor_id_1, \errata_id_1, \enable_1 - ALT_NEW_CONTENT \vendor_id_2, \errata_id_2, \enable_2, \new_c_2 +.macro ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, patch_id_1, enable_1, \ + new_c_2, vendor_id_2, patch_id_2, enable_2 + ALTERNATIVE_CFG "\old_c", "\new_c_1", \vendor_id_1, \patch_id_1, \enable_1 + ALT_NEW_CONTENT \vendor_id_2, \patch_id_2, \enable_2, "\new_c_2" .endm #define __ALTERNATIVE_CFG(...) ALTERNATIVE_CFG __VA_ARGS__ @@ -58,17 +58,17 @@ #include <asm/asm.h> #include <linux/stringify.h> -#define ALT_ENTRY(oldptr, newptr, vendor_id, errata_id, newlen) \ +#define ALT_ENTRY(oldptr, newptr, vendor_id, patch_id, newlen) \ ".4byte ((" oldptr ") - .) \n" \ ".4byte ((" newptr ") - .) \n" \ ".2byte " vendor_id "\n" \ ".2byte " newlen "\n" \ - ".4byte " errata_id "\n" + ".4byte " patch_id "\n" -#define ALT_NEW_CONTENT(vendor_id, errata_id, enable, new_c) \ +#define ALT_NEW_CONTENT(vendor_id, patch_id, enable, new_c) \ ".if " __stringify(enable) " == 1\n" \ ".pushsection .alternative, \"a\"\n" \ - ALT_ENTRY("886b", "888f", __stringify(vendor_id), __stringify(errata_id), "889f - 888f") \ + ALT_ENTRY("886b", "888f", __stringify(vendor_id), __stringify(patch_id), "889f - 888f") \ ".popsection\n" \ ".subsection 1\n" \ "888 :\n" \ @@ -83,7 +83,7 @@ ".previous\n" \ ".endif\n" -#define __ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, enable) \ +#define __ALTERNATIVE_CFG(old_c, new_c, vendor_id, patch_id, enable) \ "886 :\n" \ ".option push\n" \ ".option norvc\n" \ @@ -91,22 +91,22 @@ old_c "\n" \ ".option pop\n" \ "887 :\n" \ - ALT_NEW_CONTENT(vendor_id, errata_id, enable, new_c) + ALT_NEW_CONTENT(vendor_id, patch_id, enable, new_c) -#define __ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, enable_1, \ - new_c_2, vendor_id_2, errata_id_2, enable_2) \ - __ALTERNATIVE_CFG(old_c, new_c_1, vendor_id_1, errata_id_1, enable_1) \ - ALT_NEW_CONTENT(vendor_id_2, errata_id_2, enable_2, new_c_2) +#define __ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, patch_id_1, enable_1, \ + new_c_2, vendor_id_2, patch_id_2, enable_2) \ + __ALTERNATIVE_CFG(old_c, new_c_1, vendor_id_1, patch_id_1, enable_1) \ + ALT_NEW_CONTENT(vendor_id_2, patch_id_2, enable_2, new_c_2) #endif /* __ASSEMBLY__ */ -#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \ - __ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, IS_ENABLED(CONFIG_k)) +#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, patch_id, CONFIG_k) \ + __ALTERNATIVE_CFG(old_c, new_c, vendor_id, patch_id, IS_ENABLED(CONFIG_k)) -#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, CONFIG_k_1, \ - 
new_c_2, vendor_id_2, errata_id_2, CONFIG_k_2) \ - __ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, IS_ENABLED(CONFIG_k_1), \ - new_c_2, vendor_id_2, errata_id_2, IS_ENABLED(CONFIG_k_2)) +#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, patch_id_1, CONFIG_k_1, \ + new_c_2, vendor_id_2, patch_id_2, CONFIG_k_2) \ + __ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, patch_id_1, IS_ENABLED(CONFIG_k_1), \ + new_c_2, vendor_id_2, patch_id_2, IS_ENABLED(CONFIG_k_2)) #else /* CONFIG_RISCV_ALTERNATIVE */ #ifdef __ASSEMBLY__ @@ -137,19 +137,19 @@ /* * Usage: - * ALTERNATIVE(old_content, new_content, vendor_id, errata_id, CONFIG_k) + * ALTERNATIVE(old_content, new_content, vendor_id, patch_id, CONFIG_k) * in the assembly code. Otherwise, - * asm(ALTERNATIVE(old_content, new_content, vendor_id, errata_id, CONFIG_k)); + * asm(ALTERNATIVE(old_content, new_content, vendor_id, patch_id, CONFIG_k)); * * old_content: The old content which is probably replaced with new content. * new_content: The new content. * vendor_id: The CPU vendor ID. - * errata_id: The errata ID. - * CONFIG_k: The Kconfig of this errata. When Kconfig is disabled, the old + * patch_id: The patch ID (erratum ID or cpufeature ID). + * CONFIG_k: The Kconfig of this patch ID. When Kconfig is disabled, the old * content will always be executed. */ -#define ALTERNATIVE(old_content, new_content, vendor_id, errata_id, CONFIG_k) \ - _ALTERNATIVE_CFG(old_content, new_content, vendor_id, errata_id, CONFIG_k) +#define ALTERNATIVE(old_content, new_content, vendor_id, patch_id, CONFIG_k) \ + _ALTERNATIVE_CFG(old_content, new_content, vendor_id, patch_id, CONFIG_k) /* * A vendor wants to replace an old_content, but another vendor has used * on the following sample code and then replace ALTERNATIVE() with * ALTERNATIVE_2() to append its customized content.
*/ -#define ALTERNATIVE_2(old_content, new_content_1, vendor_id_1, errata_id_1, CONFIG_k_1, \ - new_content_2, vendor_id_2, errata_id_2, CONFIG_k_2) \ - _ALTERNATIVE_CFG_2(old_content, new_content_1, vendor_id_1, errata_id_1, CONFIG_k_1, \ - new_content_2, vendor_id_2, errata_id_2, CONFIG_k_2) +#define ALTERNATIVE_2(old_content, new_content_1, vendor_id_1, patch_id_1, CONFIG_k_1, \ + new_content_2, vendor_id_2, patch_id_2, CONFIG_k_2) \ + _ALTERNATIVE_CFG_2(old_content, new_content_1, vendor_id_1, patch_id_1, CONFIG_k_1, \ + new_content_2, vendor_id_2, patch_id_2, CONFIG_k_2) #endif diff --git a/arch/riscv/include/asm/alternative.h b/arch/riscv/include/asm/alternative.h index b8648d4f2ac1..58ccd2f8cab7 100644 --- a/arch/riscv/include/asm/alternative.h +++ b/arch/riscv/include/asm/alternative.h @@ -6,8 +6,6 @@ #ifndef __ASM_ALTERNATIVE_H #define __ASM_ALTERNATIVE_H -#define ERRATA_STRING_LENGTH_MAX 32 - #include <asm/alternative-macros.h> #ifndef __ASSEMBLY__ @@ -15,10 +13,14 @@ #ifdef CONFIG_RISCV_ALTERNATIVE #include <linux/init.h> +#include <linux/kernel.h> #include <linux/types.h> #include <linux/stddef.h> #include <asm/hwcap.h> +#define PATCH_ID_CPUFEATURE_ID(p) lower_16_bits(p) +#define PATCH_ID_CPUFEATURE_VALUE(p) upper_16_bits(p) + #define RISCV_ALTERNATIVES_BOOT 0 /* alternatives applied during regular boot */ #define RISCV_ALTERNATIVES_MODULE 1 /* alternatives applied during module-init */ #define RISCV_ALTERNATIVES_EARLY_BOOT 2 /* alternatives applied before mmu start */ @@ -38,14 +40,9 @@ void riscv_alternative_fix_offsets(void *alt_ptr, unsigned int len, struct alt_entry { s32 old_offset; /* offset relative to original instruction or data */ s32 alt_offset; /* offset relative to replacement instruction or data */ - u16 vendor_id; /* cpu vendor id */ + u16 vendor_id; /* CPU vendor ID */ u16 alt_len; /* The replacement size */ - u32 errata_id; /* The errata id */ -}; - -struct errata_checkfunc_id { - unsigned long vendor_id; - bool (*func)(struct alt_entry *alt); + u32 patch_id; /* The patch ID (erratum ID or cpufeature ID) */ }; void sifive_errata_patch_func(struct alt_entry *begin, struct alt_entry *end, diff --git a/arch/riscv/include/asm/asm-prototypes.h b/arch/riscv/include/asm/asm-prototypes.h index ef386fcf3939..61ba8ed43d8f 100644 --- a/arch/riscv/include/asm/asm-prototypes.h +++ b/arch/riscv/include/asm/asm-prototypes.h @@ -27,5 +27,7 @@ DECLARE_DO_ERROR_INFO(do_trap_break); asmlinkage unsigned long get_overflow_stack(void); asmlinkage void handle_bad_stack(struct pt_regs *regs); +asmlinkage void do_page_fault(struct pt_regs *regs); +asmlinkage void do_irq(struct pt_regs *regs); #endif /* _ASM_RISCV_PROTOTYPES_H */ diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h index 816e753de636..114bbadaef41 100644 --- a/arch/riscv/include/asm/asm.h +++ b/arch/riscv/include/asm/asm.h @@ -69,6 +69,7 @@ #endif #ifdef __ASSEMBLY__ +#include <asm/asm-offsets.h> /* Common assembly source macros */ @@ -81,6 +82,66 @@ .endr .endm + /* save all GPs except x1 ~ x5 */ + .macro save_from_x6_to_x31 + REG_S x6, PT_T1(sp) + REG_S x7, PT_T2(sp) + REG_S x8, PT_S0(sp) + REG_S x9, PT_S1(sp) + REG_S x10, PT_A0(sp) + REG_S x11, PT_A1(sp) + REG_S x12, PT_A2(sp) + REG_S x13, PT_A3(sp) + REG_S x14, PT_A4(sp) + REG_S x15, PT_A5(sp) + REG_S x16, PT_A6(sp) + REG_S x17, PT_A7(sp) + REG_S x18, PT_S2(sp) + REG_S x19, PT_S3(sp) + REG_S x20, PT_S4(sp) + REG_S x21, PT_S5(sp) + REG_S x22, PT_S6(sp) + REG_S x23, PT_S7(sp) + REG_S x24, PT_S8(sp) + REG_S x25, PT_S9(sp) + REG_S x26, PT_S10(sp) + 
REG_S x27, PT_S11(sp) + REG_S x28, PT_T3(sp) + REG_S x29, PT_T4(sp) + REG_S x30, PT_T5(sp) + REG_S x31, PT_T6(sp) + .endm + + /* restore all GPs except x1 ~ x5 */ + .macro restore_from_x6_to_x31 + REG_L x6, PT_T1(sp) + REG_L x7, PT_T2(sp) + REG_L x8, PT_S0(sp) + REG_L x9, PT_S1(sp) + REG_L x10, PT_A0(sp) + REG_L x11, PT_A1(sp) + REG_L x12, PT_A2(sp) + REG_L x13, PT_A3(sp) + REG_L x14, PT_A4(sp) + REG_L x15, PT_A5(sp) + REG_L x16, PT_A6(sp) + REG_L x17, PT_A7(sp) + REG_L x18, PT_S2(sp) + REG_L x19, PT_S3(sp) + REG_L x20, PT_S4(sp) + REG_L x21, PT_S5(sp) + REG_L x22, PT_S6(sp) + REG_L x23, PT_S7(sp) + REG_L x24, PT_S8(sp) + REG_L x25, PT_S9(sp) + REG_L x26, PT_S10(sp) + REG_L x27, PT_S11(sp) + REG_L x28, PT_T3(sp) + REG_L x29, PT_T4(sp) + REG_L x30, PT_T5(sp) + REG_L x31, PT_T6(sp) + .endm + #endif /* __ASSEMBLY__ */ #endif /* _ASM_RISCV_ASM_H */ diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h index 03e3b95ae6da..8091b8bf4883 100644 --- a/arch/riscv/include/asm/cacheflush.h +++ b/arch/riscv/include/asm/cacheflush.h @@ -50,7 +50,8 @@ void flush_icache_mm(struct mm_struct *mm, bool local); #endif /* CONFIG_SMP */ extern unsigned int riscv_cbom_block_size; -void riscv_init_cbom_blocksize(void); +extern unsigned int riscv_cboz_block_size; +void riscv_init_cbo_blocksizes(void); #ifdef CONFIG_RISCV_DMA_NONCOHERENT void riscv_noncoherent_supported(void); diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h index 0e571f6483d9..7c2b8cdb7b77 100644 --- a/arch/riscv/include/asm/csr.h +++ b/arch/riscv/include/asm/csr.h @@ -40,7 +40,6 @@ #define SR_UXL _AC(0x300000000, UL) /* XLEN mask for U-mode */ #define SR_UXL_32 _AC(0x100000000, UL) /* XLEN = 32 for U-mode */ #define SR_UXL_64 _AC(0x200000000, UL) /* XLEN = 64 for U-mode */ -#define SR_UXL_SHIFT 32 #endif /* SATP flags */ diff --git a/arch/riscv/include/asm/entry-common.h b/arch/riscv/include/asm/entry-common.h new file mode 100644 index 000000000000..6e4dee49d84b --- /dev/null +++ b/arch/riscv/include/asm/entry-common.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_RISCV_ENTRY_COMMON_H +#define _ASM_RISCV_ENTRY_COMMON_H + +#include <asm/stacktrace.h> + +void handle_page_fault(struct pt_regs *regs); +void handle_break(struct pt_regs *regs); + +#endif /* _ASM_RISCV_ENTRY_COMMON_H */ diff --git a/arch/riscv/include/asm/hugetlb.h b/arch/riscv/include/asm/hugetlb.h index ec19d6afc896..fe6f23006641 100644 --- a/arch/riscv/include/asm/hugetlb.h +++ b/arch/riscv/include/asm/hugetlb.h @@ -2,7 +2,6 @@ #ifndef _ASM_RISCV_HUGETLB_H #define _ASM_RISCV_HUGETLB_H -#include <asm-generic/hugetlb.h> #include <asm/page.h> static inline void arch_clear_hugepage_flags(struct page *page) @@ -11,4 +10,37 @@ static inline void arch_clear_hugepage_flags(struct page *page) } #define arch_clear_hugepage_flags arch_clear_hugepage_flags +#ifdef CONFIG_RISCV_ISA_SVNAPOT +#define __HAVE_ARCH_HUGE_PTE_CLEAR +void huge_pte_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned long sz); + +#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT +void set_huge_pte_at(struct mm_struct *mm, + unsigned long addr, pte_t *ptep, pte_t pte); + +#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR +pte_t huge_ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep); + +#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH +pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep); + +#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT +void huge_ptep_set_wrprotect(struct mm_struct 
*mm, + unsigned long addr, pte_t *ptep); + +#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS +int huge_ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, + pte_t pte, int dirty); + +pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags); +#define arch_make_huge_pte arch_make_huge_pte + +#endif /*CONFIG_RISCV_ISA_SVNAPOT*/ + +#include <asm-generic/hugetlb.h> + #endif /* _ASM_RISCV_HUGETLB_H */ diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h index e3021b2590de..bbde5aafa957 100644 --- a/arch/riscv/include/asm/hwcap.h +++ b/arch/riscv/include/asm/hwcap.h @@ -42,6 +42,8 @@ #define RISCV_ISA_EXT_ZBB 30 #define RISCV_ISA_EXT_ZICBOM 31 #define RISCV_ISA_EXT_ZIHINTPAUSE 32 +#define RISCV_ISA_EXT_SVNAPOT 33 +#define RISCV_ISA_EXT_ZICBOZ 34 #define RISCV_ISA_EXT_MAX 64 #define RISCV_ISA_EXT_NAME_LEN_MAX 32 diff --git a/arch/riscv/include/asm/insn-def.h b/arch/riscv/include/asm/insn-def.h index e01ab51f50d2..6960beb75f32 100644 --- a/arch/riscv/include/asm/insn-def.h +++ b/arch/riscv/include/asm/insn-def.h @@ -192,4 +192,8 @@ INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0), \ RS1(base), SIMM12(2)) +#define CBO_zero(base) \ + INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0), \ + RS1(base), SIMM12(4)) + #endif /* __ASM_INSN_DEF_H */ diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h index 7fed7c431928..8666960d0113 100644 --- a/arch/riscv/include/asm/page.h +++ b/arch/riscv/include/asm/page.h @@ -16,11 +16,6 @@ #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) #define PAGE_MASK (~(PAGE_SIZE - 1)) -#ifdef CONFIG_64BIT -#define HUGE_MAX_HSTATE 2 -#else -#define HUGE_MAX_HSTATE 1 -#endif #define HPAGE_SHIFT PMD_SHIFT #define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT) #define HPAGE_MASK (~(HPAGE_SIZE - 1)) @@ -49,10 +44,14 @@ #ifndef __ASSEMBLY__ +#ifdef CONFIG_RISCV_ISA_ZICBOZ +void clear_page(void *page); +#else #define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE) +#endif #define copy_page(to, from) memcpy((to), (from), PAGE_SIZE) -#define clear_user_page(pgaddr, vaddr, page) memset((pgaddr), 0, PAGE_SIZE) +#define clear_user_page(pgaddr, vaddr, page) clear_page(pgaddr) #define copy_user_page(vto, vfrom, vaddr, topg) \ memcpy((vto), (vfrom), PAGE_SIZE) diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h index 42a042c0e13e..7a5097202e15 100644 --- a/arch/riscv/include/asm/pgtable-64.h +++ b/arch/riscv/include/asm/pgtable-64.h @@ -79,6 +79,40 @@ typedef struct { #define _PAGE_PFN_MASK GENMASK(53, 10) /* + * [63] Svnapot definitions: + * 0 Svnapot disabled + * 1 Svnapot enabled + */ +#define _PAGE_NAPOT_SHIFT 63 +#define _PAGE_NAPOT BIT(_PAGE_NAPOT_SHIFT) +/* + * Only 64KB (order 4) napot ptes supported. 
+ */ +#define NAPOT_CONT_ORDER_BASE 4 +enum napot_cont_order { + NAPOT_CONT64KB_ORDER = NAPOT_CONT_ORDER_BASE, + NAPOT_ORDER_MAX, +}; + +#define for_each_napot_order(order) \ + for (order = NAPOT_CONT_ORDER_BASE; order < NAPOT_ORDER_MAX; order++) +#define for_each_napot_order_rev(order) \ + for (order = NAPOT_ORDER_MAX - 1; \ + order >= NAPOT_CONT_ORDER_BASE; order--) +#define napot_cont_order(val) (__builtin_ctzl((val.pte >> _PAGE_PFN_SHIFT) << 1)) + +#define napot_cont_shift(order) ((order) + PAGE_SHIFT) +#define napot_cont_size(order) BIT(napot_cont_shift(order)) +#define napot_cont_mask(order) (~(napot_cont_size(order) - 1UL)) +#define napot_pte_num(order) BIT(order) + +#ifdef CONFIG_RISCV_ISA_SVNAPOT +#define HUGE_MAX_HSTATE (2 + (NAPOT_ORDER_MAX - NAPOT_CONT_ORDER_BASE)) +#else +#define HUGE_MAX_HSTATE 2 +#endif + +/* * [62:61] Svpbmt Memory Type definitions: * * 00 - PMA Normal Cacheable, No change to implied PMA memory type diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index ab05f892d317..3303fc03d724 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -264,10 +264,47 @@ static inline pte_t pud_pte(pud_t pud) return __pte(pud_val(pud)); } +#ifdef CONFIG_RISCV_ISA_SVNAPOT + +static __always_inline bool has_svnapot(void) +{ + return riscv_has_extension_likely(RISCV_ISA_EXT_SVNAPOT); +} + +static inline unsigned long pte_napot(pte_t pte) +{ + return pte_val(pte) & _PAGE_NAPOT; +} + +static inline pte_t pte_mknapot(pte_t pte, unsigned int order) +{ + int pos = order - 1 + _PAGE_PFN_SHIFT; + unsigned long napot_bit = BIT(pos); + unsigned long napot_mask = ~GENMASK(pos, _PAGE_PFN_SHIFT); + + return __pte((pte_val(pte) & napot_mask) | napot_bit | _PAGE_NAPOT); +} + +#else + +static __always_inline bool has_svnapot(void) { return false; } + +static inline unsigned long pte_napot(pte_t pte) +{ + return 0; +} + +#endif /* CONFIG_RISCV_ISA_SVNAPOT */ + /* Yields the page frame number (PFN) of a page table entry */ static inline unsigned long pte_pfn(pte_t pte) { - return __page_val_to_pfn(pte_val(pte)); + unsigned long res = __page_val_to_pfn(pte_val(pte)); + + if (has_svnapot() && pte_napot(pte)) + res = res & (res - 1UL); + + return res; } #define pte_page(x) pfn_to_page(pte_pfn(x)) diff --git a/arch/riscv/include/asm/ptrace.h b/arch/riscv/include/asm/ptrace.h index 6ecd461129d2..b5b0adcc85c1 100644 --- a/arch/riscv/include/asm/ptrace.h +++ b/arch/riscv/include/asm/ptrace.h @@ -53,6 +53,9 @@ struct pt_regs { unsigned long orig_a0; }; +#define PTRACE_SYSEMU 0x1f +#define PTRACE_SYSEMU_SINGLESTEP 0x20 + #ifdef CONFIG_64BIT #define REG_FMT "%016lx" #else @@ -121,8 +124,6 @@ extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, unsigned long frame_pointer); -int do_syscall_trace_enter(struct pt_regs *regs); -void do_syscall_trace_exit(struct pt_regs *regs); /** * regs_get_register() - get register value from its offset @@ -172,6 +173,11 @@ static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs, return 0; } +static inline int regs_irqs_disabled(struct pt_regs *regs) +{ + return !(regs->status & SR_PIE); +} + #endif /* __ASSEMBLY__ */ #endif /* _ASM_RISCV_PTRACE_H */ diff --git a/arch/riscv/include/asm/stacktrace.h b/arch/riscv/include/asm/stacktrace.h index 3450c1912afd..f7e8ef2418b9 100644 --- a/arch/riscv/include/asm/stacktrace.h +++ b/arch/riscv/include/asm/stacktrace.h @@ -16,4 +16,9 @@ extern void notrace 
walk_stackframe(struct task_struct *task, struct pt_regs *re extern void dump_backtrace(struct pt_regs *regs, struct task_struct *task, const char *loglvl); +static inline bool on_thread_stack(void) +{ + return !(((unsigned long)(current->stack) ^ current_stack_pointer) & ~(THREAD_SIZE - 1)); +} + #endif /* _ASM_RISCV_STACKTRACE_H */ diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h index 384a63b86420..736110e1fd78 100644 --- a/arch/riscv/include/asm/syscall.h +++ b/arch/riscv/include/asm/syscall.h @@ -74,5 +74,26 @@ static inline int syscall_get_arch(struct task_struct *task) #endif } +typedef long (*syscall_t)(ulong, ulong, ulong, ulong, ulong, ulong, ulong); +static inline void syscall_handler(struct pt_regs *regs, ulong syscall) +{ + syscall_t fn; + +#ifdef CONFIG_COMPAT + if ((regs->status & SR_UXL) == SR_UXL_32) + fn = compat_sys_call_table[syscall]; + else +#endif + fn = sys_call_table[syscall]; + + regs->a0 = fn(regs->orig_a0, regs->a1, regs->a2, + regs->a3, regs->a4, regs->a5, regs->a6); +} + +static inline bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs) +{ + return false; +} + asmlinkage long sys_riscv_flush_icache(uintptr_t, uintptr_t, uintptr_t); #endif /* _ASM_RISCV_SYSCALL_H */ diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h index f704c8dd57e0..e0d202134b44 100644 --- a/arch/riscv/include/asm/thread_info.h +++ b/arch/riscv/include/asm/thread_info.h @@ -67,6 +67,7 @@ struct thread_info { long kernel_sp; /* Kernel stack pointer */ long user_sp; /* User stack pointer */ int cpu; + unsigned long syscall_work; /* SYSCALL_WORK_ flags */ }; /* @@ -89,26 +90,18 @@ struct thread_info { * - pending work-to-be-done flags are in lowest half-word * - other flags in upper half-word(s) */ -#define TIF_SYSCALL_TRACE 0 /* syscall trace active */ #define TIF_NOTIFY_RESUME 1 /* callback before returning to user */ #define TIF_SIGPENDING 2 /* signal pending */ #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ #define TIF_RESTORE_SIGMASK 4 /* restore signal mask in do_signal() */ #define TIF_MEMDIE 5 /* is terminating due to OOM killer */ -#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */ -#define TIF_SYSCALL_AUDIT 7 /* syscall auditing */ -#define TIF_SECCOMP 8 /* syscall secure computing */ #define TIF_NOTIFY_SIGNAL 9 /* signal notifications exist */ #define TIF_UPROBE 10 /* uprobe breakpoint or singlestep */ #define TIF_32BIT 11 /* compat-mode 32bit process */ -#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) -#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) -#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) -#define _TIF_SECCOMP (1 << TIF_SECCOMP) #define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) #define _TIF_UPROBE (1 << TIF_UPROBE) @@ -116,8 +109,4 @@ struct thread_info { (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | \ _TIF_NOTIFY_SIGNAL | _TIF_UPROBE) -#define _TIF_SYSCALL_WORK \ - (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT | \ - _TIF_SECCOMP) - #endif /* _ASM_RISCV_THREAD_INFO_H */ diff --git a/arch/riscv/include/asm/vmalloc.h b/arch/riscv/include/asm/vmalloc.h index 48da5371f1e9..58d3e447f191 100644 --- a/arch/riscv/include/asm/vmalloc.h +++ b/arch/riscv/include/asm/vmalloc.h @@ -17,6 +17,65 @@ static inline bool arch_vmap_pmd_supported(pgprot_t prot) return true; } -#endif 
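Before the Svnapot vmalloc hooks that follow, it helps to make the NAPOT encoding from pgtable-64.h and pgtable.h above concrete: a 64KB mapping is written as 16 consecutive base PTEs that all set _PAGE_NAPOT (bit 63) and whose low PFN bits are rewritten to encode the size; for NAPOT_CONT64KB_ORDER the low four PFN bits become 0b1000, which napot_cont_order() later recovers with __builtin_ctzl(). A hypothetical helper, shown only to make that encoding concrete:

	/* Sketch: the PTE value each of the napot_pte_num(4) == 16 slots of a
	 * 64KB NAPOT mapping would hold; pfn must be aligned to 16 pages. */
	static pte_t make_64k_napot_pte(unsigned long pfn, pgprot_t prot)
	{
		pte_t pte = pfn_pte(pfn, prot);

		/* set _PAGE_NAPOT and fold order 4 into the low PFN bits */
		return pte_mknapot(pte, NAPOT_CONT64KB_ORDER);
	}

set_huge_pte_at() in this series stores that same value into each of the 16 entries covering the range, and pte_pfn() above strips the size bit back out with res & (res - 1).
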
+#ifdef CONFIG_RISCV_ISA_SVNAPOT +#include <linux/pgtable.h> +#define arch_vmap_pte_range_map_size arch_vmap_pte_range_map_size +static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end, + u64 pfn, unsigned int max_page_shift) +{ + unsigned long map_size = PAGE_SIZE; + unsigned long size, order; + + if (!has_svnapot()) + return map_size; + + for_each_napot_order_rev(order) { + if (napot_cont_shift(order) > max_page_shift) + continue; + + size = napot_cont_size(order); + if (end - addr < size) + continue; + + if (!IS_ALIGNED(addr, size)) + continue; + + if (!IS_ALIGNED(PFN_PHYS(pfn), size)) + continue; + + map_size = size; + break; + } + + return map_size; +} + +#define arch_vmap_pte_supported_shift arch_vmap_pte_supported_shift +static inline int arch_vmap_pte_supported_shift(unsigned long size) +{ + int shift = PAGE_SHIFT; + unsigned long order; + + if (!has_svnapot()) + return shift; + + WARN_ON_ONCE(size >= PMD_SIZE); + + for_each_napot_order_rev(order) { + if (napot_cont_size(order) > size) + continue; + + if (!IS_ALIGNED(size, napot_cont_size(order))) + continue; + + shift = napot_cont_shift(order); + break; + } + + return shift; +} + +#endif /* CONFIG_RISCV_ISA_SVNAPOT */ +#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ #endif /* _ASM_RISCV_VMALLOC_H */ diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h index 92af6f3f057c..e44c1e90eaa7 100644 --- a/arch/riscv/include/uapi/asm/kvm.h +++ b/arch/riscv/include/uapi/asm/kvm.h @@ -52,6 +52,7 @@ struct kvm_riscv_config { unsigned long mvendorid; unsigned long marchid; unsigned long mimpid; + unsigned long zicboz_block_size; }; /* CORE registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */ @@ -105,6 +106,7 @@ enum KVM_RISCV_ISA_EXT_ID { KVM_RISCV_ISA_EXT_SVINVAL, KVM_RISCV_ISA_EXT_ZIHINTPAUSE, KVM_RISCV_ISA_EXT_ZICBOM, + KVM_RISCV_ISA_EXT_ZICBOZ, KVM_RISCV_ISA_EXT_MAX, }; diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index 4cf303a779ab..392fa6e35d4a 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -68,8 +68,6 @@ obj-$(CONFIG_CPU_PM) += suspend_entry.o suspend.o obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o obj-$(CONFIG_DYNAMIC_FTRACE) += mcount-dyn.o -obj-$(CONFIG_TRACE_IRQFLAGS) += trace_irq.o - obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o obj-$(CONFIG_HAVE_PERF_REGS) += perf_regs.o obj-$(CONFIG_RISCV_SBI) += sbi.o diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c index 8400f0cc9704..9203e18320f9 100644 --- a/arch/riscv/kernel/cpu.c +++ b/arch/riscv/kernel/cpu.c @@ -186,11 +186,13 @@ arch_initcall(riscv_cpuinfo_init); */ static struct riscv_isa_ext_data isa_ext_arr[] = { __RISCV_ISA_EXT_DATA(zicbom, RISCV_ISA_EXT_ZICBOM), + __RISCV_ISA_EXT_DATA(zicboz, RISCV_ISA_EXT_ZICBOZ), __RISCV_ISA_EXT_DATA(zihintpause, RISCV_ISA_EXT_ZIHINTPAUSE), __RISCV_ISA_EXT_DATA(zbb, RISCV_ISA_EXT_ZBB), __RISCV_ISA_EXT_DATA(sscofpmf, RISCV_ISA_EXT_SSCOFPMF), __RISCV_ISA_EXT_DATA(sstc, RISCV_ISA_EXT_SSTC), __RISCV_ISA_EXT_DATA(svinval, RISCV_ISA_EXT_SVINVAL), + __RISCV_ISA_EXT_DATA(svnapot, RISCV_ISA_EXT_SVNAPOT), __RISCV_ISA_EXT_DATA(svpbmt, RISCV_ISA_EXT_SVPBMT), __RISCV_ISA_EXT_DATA("", RISCV_ISA_EXT_MAX), }; diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c index 59d58ee0f68d..00d7cd2c9043 100644 --- a/arch/riscv/kernel/cpufeature.c +++ b/arch/riscv/kernel/cpufeature.c @@ -8,20 +8,15 @@ #include <linux/bitmap.h> #include <linux/ctype.h> -#include <linux/libfdt.h> #include <linux/log2.h> #include 
<linux/memory.h> #include <linux/module.h> #include <linux/of.h> #include <asm/alternative.h> #include <asm/cacheflush.h> -#include <asm/errata_list.h> #include <asm/hwcap.h> #include <asm/patch.h> -#include <asm/pgtable.h> #include <asm/processor.h> -#include <asm/smp.h> -#include <asm/switch_to.h> #define NUM_ALPHA_EXTS ('z' - 'a' + 1) @@ -79,6 +74,15 @@ static bool riscv_isa_extension_check(int id) return false; } return true; + case RISCV_ISA_EXT_ZICBOZ: + if (!riscv_cboz_block_size) { + pr_err("Zicboz detected in ISA string, but no cboz-block-size found\n"); + return false; + } else if (!is_power_of_2(riscv_cboz_block_size)) { + pr_err("cboz-block-size present, but is not a power-of-2\n"); + return false; + } + return true; } return true; @@ -224,9 +228,11 @@ void __init riscv_fill_hwcap(void) SET_ISA_EXT_MAP("sscofpmf", RISCV_ISA_EXT_SSCOFPMF); SET_ISA_EXT_MAP("sstc", RISCV_ISA_EXT_SSTC); SET_ISA_EXT_MAP("svinval", RISCV_ISA_EXT_SVINVAL); + SET_ISA_EXT_MAP("svnapot", RISCV_ISA_EXT_SVNAPOT); SET_ISA_EXT_MAP("svpbmt", RISCV_ISA_EXT_SVPBMT); SET_ISA_EXT_MAP("zbb", RISCV_ISA_EXT_ZBB); SET_ISA_EXT_MAP("zicbom", RISCV_ISA_EXT_ZICBOM); + SET_ISA_EXT_MAP("zicboz", RISCV_ISA_EXT_ZICBOZ); SET_ISA_EXT_MAP("zihintpause", RISCV_ISA_EXT_ZIHINTPAUSE); } #undef SET_ISA_EXT_MAP @@ -269,12 +275,46 @@ void __init riscv_fill_hwcap(void) } #ifdef CONFIG_RISCV_ALTERNATIVE +/* + * Alternative patch sites consider 48 bits when determining when to patch + * the old instruction sequence with the new. These bits are broken into a + * 16-bit vendor ID and a 32-bit patch ID. A non-zero vendor ID means the + * patch site is for an erratum, identified by the 32-bit patch ID. When + * the vendor ID is zero, the patch site is for a cpufeature. cpufeatures + * further break down patch ID into two 16-bit numbers. The lower 16 bits + * are the cpufeature ID and the upper 16 bits are used for a value specific + * to the cpufeature and patch site. If the upper 16 bits are zero, then it + * implies no specific value is specified. cpufeatures that want to control + * patching on a per-site basis will provide non-zero values and implement + * checks here. The checks return true when patching should be done, and + * false otherwise. + */ +static bool riscv_cpufeature_patch_check(u16 id, u16 value) +{ + if (!value) + return true; + + switch (id) { + case RISCV_ISA_EXT_ZICBOZ: + /* + * Zicboz alternative applications provide the maximum + * supported block size order, or zero when it doesn't + * matter. If the current block size exceeds the maximum, + * then the alternative cannot be applied. 
+ */ + return riscv_cboz_block_size <= (1U << value); + } + + return false; +} + void __init_or_module riscv_cpufeature_patch_func(struct alt_entry *begin, struct alt_entry *end, unsigned int stage) { struct alt_entry *alt; void *oldptr, *altptr; + u16 id, value; if (stage == RISCV_ALTERNATIVES_EARLY_BOOT) return; @@ -282,13 +322,19 @@ void __init_or_module riscv_cpufeature_patch_func(struct alt_entry *begin, for (alt = begin; alt < end; alt++) { if (alt->vendor_id != 0) continue; - if (alt->errata_id >= RISCV_ISA_EXT_MAX) { - WARN(1, "This extension id:%d is not in ISA extension list", - alt->errata_id); + + id = PATCH_ID_CPUFEATURE_ID(alt->patch_id); + + if (id >= RISCV_ISA_EXT_MAX) { + WARN(1, "This extension id:%d is not in ISA extension list", id); continue; } - if (!__riscv_isa_extension_available(NULL, alt->errata_id)) + if (!__riscv_isa_extension_available(NULL, id)) + continue; + + value = PATCH_ID_CPUFEATURE_VALUE(alt->patch_id); + if (!riscv_cpufeature_patch_check(id, value)) continue; oldptr = ALT_OLD_PTR(alt); diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S index 99d38fdf8b18..3fbb100bc9e4 100644 --- a/arch/riscv/kernel/entry.S +++ b/arch/riscv/kernel/entry.S @@ -14,11 +14,7 @@ #include <asm/asm-offsets.h> #include <asm/errata_list.h> -#if !IS_ENABLED(CONFIG_PREEMPTION) -.set resume_kernel, restore_all -#endif - -ENTRY(handle_exception) +SYM_CODE_START(handle_exception) /* * If coming from userspace, preserve the user thread pointer and load * the kernel thread pointer. If we came from the kernel, the scratch @@ -46,32 +42,7 @@ _save_context: REG_S x1, PT_RA(sp) REG_S x3, PT_GP(sp) REG_S x5, PT_T0(sp) - REG_S x6, PT_T1(sp) - REG_S x7, PT_T2(sp) - REG_S x8, PT_S0(sp) - REG_S x9, PT_S1(sp) - REG_S x10, PT_A0(sp) - REG_S x11, PT_A1(sp) - REG_S x12, PT_A2(sp) - REG_S x13, PT_A3(sp) - REG_S x14, PT_A4(sp) - REG_S x15, PT_A5(sp) - REG_S x16, PT_A6(sp) - REG_S x17, PT_A7(sp) - REG_S x18, PT_S2(sp) - REG_S x19, PT_S3(sp) - REG_S x20, PT_S4(sp) - REG_S x21, PT_S5(sp) - REG_S x22, PT_S6(sp) - REG_S x23, PT_S7(sp) - REG_S x24, PT_S8(sp) - REG_S x25, PT_S9(sp) - REG_S x26, PT_S10(sp) - REG_S x27, PT_S11(sp) - REG_S x28, PT_T3(sp) - REG_S x29, PT_T4(sp) - REG_S x30, PT_T5(sp) - REG_S x31, PT_T6(sp) + save_from_x6_to_x31 /* * Disable user-mode memory access as it should only be set in the @@ -106,19 +77,8 @@ _save_context: .option norelax la gp, __global_pointer$ .option pop - -#ifdef CONFIG_TRACE_IRQFLAGS - call __trace_hardirqs_off -#endif - -#ifdef CONFIG_CONTEXT_TRACKING_USER - /* If previous state is in user mode, call user_exit_callable(). */ - li a0, SR_PP - and a0, s1, a0 - bnez a0, skip_context_tracking - call user_exit_callable -skip_context_tracking: -#endif + move a0, sp /* pt_regs */ + la ra, ret_from_exception /* * MSB of cause differentiates between @@ -126,38 +86,13 @@ skip_context_tracking: */ bge s4, zero, 1f - la ra, ret_from_exception - /* Handle interrupts */ - move a0, sp /* pt_regs */ - la a1, generic_handle_arch_irq - jr a1 + tail do_irq 1: - /* - * Exceptions run with interrupts enabled or disabled depending on the - * state of SR_PIE in m/sstatus. - */ - andi t0, s1, SR_PIE - beqz t0, 1f - /* kprobes, entered via ebreak, must have interrupts disabled. 
*/ - li t0, EXC_BREAKPOINT - beq s4, t0, 1f -#ifdef CONFIG_TRACE_IRQFLAGS - call __trace_hardirqs_on -#endif - csrs CSR_STATUS, SR_IE - -1: - la ra, ret_from_exception - /* Handle syscalls */ - li t0, EXC_SYSCALL - beq s4, t0, handle_syscall - /* Handle other exceptions */ slli t0, s4, RISCV_LGPTR la t1, excp_vect_table la t2, excp_vect_table_end - move a0, sp /* pt_regs */ add t0, t1, t0 /* Check if exception code lies within bounds */ bgeu t0, t2, 1f @@ -165,95 +100,16 @@ skip_context_tracking: jr t0 1: tail do_trap_unknown +SYM_CODE_END(handle_exception) -handle_syscall: -#ifdef CONFIG_RISCV_M_MODE - /* - * When running is M-Mode (no MMU config), MPIE does not get set. - * As a result, we need to force enable interrupts here because - * handle_exception did not do set SR_IE as it always sees SR_PIE - * being cleared. - */ - csrs CSR_STATUS, SR_IE -#endif -#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING_USER) - /* Recover a0 - a7 for system calls */ - REG_L a0, PT_A0(sp) - REG_L a1, PT_A1(sp) - REG_L a2, PT_A2(sp) - REG_L a3, PT_A3(sp) - REG_L a4, PT_A4(sp) - REG_L a5, PT_A5(sp) - REG_L a6, PT_A6(sp) - REG_L a7, PT_A7(sp) -#endif - /* save the initial A0 value (needed in signal handlers) */ - REG_S a0, PT_ORIG_A0(sp) - /* - * Advance SEPC to avoid executing the original - * scall instruction on sret - */ - addi s2, s2, 0x4 - REG_S s2, PT_EPC(sp) - /* Trace syscalls, but only if requested by the user. */ - REG_L t0, TASK_TI_FLAGS(tp) - andi t0, t0, _TIF_SYSCALL_WORK - bnez t0, handle_syscall_trace_enter -check_syscall_nr: - /* Check to make sure we don't jump to a bogus syscall number. */ - li t0, __NR_syscalls - la s0, sys_ni_syscall - /* - * Syscall number held in a7. - * If syscall number is above allowed value, redirect to ni_syscall. - */ - bgeu a7, t0, 3f -#ifdef CONFIG_COMPAT - REG_L s0, PT_STATUS(sp) - srli s0, s0, SR_UXL_SHIFT - andi s0, s0, (SR_UXL >> SR_UXL_SHIFT) - li t0, (SR_UXL_32 >> SR_UXL_SHIFT) - sub t0, s0, t0 - bnez t0, 1f - - /* Call compat_syscall */ - la s0, compat_sys_call_table - j 2f -1: -#endif - /* Call syscall */ - la s0, sys_call_table -2: - slli t0, a7, RISCV_LGPTR - add s0, s0, t0 - REG_L s0, 0(s0) -3: - jalr s0 - -ret_from_syscall: - /* Set user a0 to kernel a0 */ - REG_S a0, PT_A0(sp) - /* - * We didn't execute the actual syscall. - * Seccomp already set return value for the current task pt_regs. - * (If it was configured with SECCOMP_RET_ERRNO/TRACE) - */ -ret_from_syscall_rejected: -#ifdef CONFIG_DEBUG_RSEQ - move a0, sp - call rseq_syscall -#endif - /* Trace syscalls, but only if requested by the user. */ - REG_L t0, TASK_TI_FLAGS(tp) - andi t0, t0, _TIF_SYSCALL_WORK - bnez t0, handle_syscall_trace_exit - +/* + * The ret_from_exception must be called with interrupt disabled. 
Here is the + * caller list: + * - handle_exception + * - ret_from_fork + */ SYM_CODE_START_NOALIGN(ret_from_exception) REG_L s0, PT_STATUS(sp) - csrc CSR_STATUS, SR_IE -#ifdef CONFIG_TRACE_IRQFLAGS - call __trace_hardirqs_off -#endif #ifdef CONFIG_RISCV_M_MODE /* the MPP value is too large to be used as an immediate arg for addi */ li t0, SR_MPP @@ -261,17 +117,7 @@ SYM_CODE_START_NOALIGN(ret_from_exception) #else andi s0, s0, SR_SPP #endif - bnez s0, resume_kernel -SYM_CODE_END(ret_from_exception) - - /* Interrupts must be disabled here so flags are checked atomically */ - REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */ - andi s1, s0, _TIF_WORK_MASK - bnez s1, resume_userspace_slow -resume_userspace: -#ifdef CONFIG_CONTEXT_TRACKING_USER - call user_enter_callable -#endif + bnez s0, 1f /* Save unwound kernel stack pointer in thread_info */ addi s0, sp, PT_SIZE_ON_STACK @@ -282,18 +128,7 @@ resume_userspace: * structures again. */ csrw CSR_SCRATCH, tp - -restore_all: -#ifdef CONFIG_TRACE_IRQFLAGS - REG_L s1, PT_STATUS(sp) - andi t0, s1, SR_PIE - beqz t0, 1f - call __trace_hardirqs_on - j 2f 1: - call __trace_hardirqs_off -2: -#endif REG_L a0, PT_STATUS(sp) /* * The current load reservation is effectively part of the processor's @@ -322,32 +157,7 @@ restore_all: REG_L x3, PT_GP(sp) REG_L x4, PT_TP(sp) REG_L x5, PT_T0(sp) - REG_L x6, PT_T1(sp) - REG_L x7, PT_T2(sp) - REG_L x8, PT_S0(sp) - REG_L x9, PT_S1(sp) - REG_L x10, PT_A0(sp) - REG_L x11, PT_A1(sp) - REG_L x12, PT_A2(sp) - REG_L x13, PT_A3(sp) - REG_L x14, PT_A4(sp) - REG_L x15, PT_A5(sp) - REG_L x16, PT_A6(sp) - REG_L x17, PT_A7(sp) - REG_L x18, PT_S2(sp) - REG_L x19, PT_S3(sp) - REG_L x20, PT_S4(sp) - REG_L x21, PT_S5(sp) - REG_L x22, PT_S6(sp) - REG_L x23, PT_S7(sp) - REG_L x24, PT_S8(sp) - REG_L x25, PT_S9(sp) - REG_L x26, PT_S10(sp) - REG_L x27, PT_S11(sp) - REG_L x28, PT_T3(sp) - REG_L x29, PT_T4(sp) - REG_L x30, PT_T5(sp) - REG_L x31, PT_T6(sp) + restore_from_x6_to_x31 REG_L x2, PT_SP(sp) @@ -356,47 +166,10 @@ restore_all: #else sret #endif - -#if IS_ENABLED(CONFIG_PREEMPTION) -resume_kernel: - REG_L s0, TASK_TI_PREEMPT_COUNT(tp) - bnez s0, restore_all - REG_L s0, TASK_TI_FLAGS(tp) - andi s0, s0, _TIF_NEED_RESCHED - beqz s0, restore_all - call preempt_schedule_irq - j restore_all -#endif - -resume_userspace_slow: - /* Enter slow path for supplementary processing */ - move a0, sp /* pt_regs */ - move a1, s0 /* current_thread_info->flags */ - call do_work_pending - j resume_userspace - -/* Slow paths for ptrace. */ -handle_syscall_trace_enter: - move a0, sp - call do_syscall_trace_enter - move t0, a0 - REG_L a0, PT_A0(sp) - REG_L a1, PT_A1(sp) - REG_L a2, PT_A2(sp) - REG_L a3, PT_A3(sp) - REG_L a4, PT_A4(sp) - REG_L a5, PT_A5(sp) - REG_L a6, PT_A6(sp) - REG_L a7, PT_A7(sp) - bnez t0, ret_from_syscall_rejected - j check_syscall_nr -handle_syscall_trace_exit: - move a0, sp - call do_syscall_trace_exit - j ret_from_exception +SYM_CODE_END(ret_from_exception) #ifdef CONFIG_VMAP_STACK -handle_kernel_stack_overflow: +SYM_CODE_START_LOCAL(handle_kernel_stack_overflow) /* * Takes the psuedo-spinlock for the shadow stack, in case multiple * harts are concurrently overflowing their kernel stacks. 
We could @@ -464,32 +237,7 @@ restore_caller_reg: REG_S x1, PT_RA(sp) REG_S x3, PT_GP(sp) REG_S x5, PT_T0(sp) - REG_S x6, PT_T1(sp) - REG_S x7, PT_T2(sp) - REG_S x8, PT_S0(sp) - REG_S x9, PT_S1(sp) - REG_S x10, PT_A0(sp) - REG_S x11, PT_A1(sp) - REG_S x12, PT_A2(sp) - REG_S x13, PT_A3(sp) - REG_S x14, PT_A4(sp) - REG_S x15, PT_A5(sp) - REG_S x16, PT_A6(sp) - REG_S x17, PT_A7(sp) - REG_S x18, PT_S2(sp) - REG_S x19, PT_S3(sp) - REG_S x20, PT_S4(sp) - REG_S x21, PT_S5(sp) - REG_S x22, PT_S6(sp) - REG_S x23, PT_S7(sp) - REG_S x24, PT_S8(sp) - REG_S x25, PT_S9(sp) - REG_S x26, PT_S10(sp) - REG_S x27, PT_S11(sp) - REG_S x28, PT_T3(sp) - REG_S x29, PT_T4(sp) - REG_S x30, PT_T5(sp) - REG_S x31, PT_T6(sp) + save_from_x6_to_x31 REG_L s0, TASK_TI_KERNEL_SP(tp) csrr s1, CSR_STATUS @@ -505,23 +253,20 @@ restore_caller_reg: REG_S s5, PT_TP(sp) move a0, sp tail handle_bad_stack +SYM_CODE_END(handle_kernel_stack_overflow) #endif -END(handle_exception) - -ENTRY(ret_from_fork) - la ra, ret_from_exception - tail schedule_tail -ENDPROC(ret_from_fork) - -ENTRY(ret_from_kernel_thread) +SYM_CODE_START(ret_from_fork) call schedule_tail + beqz s0, 1f /* not from kernel thread */ /* Call fn(arg) */ - la ra, ret_from_exception move a0, s1 - jr s0 -ENDPROC(ret_from_kernel_thread) - + jalr s0 +1: + move a0, sp /* pt_regs */ + la ra, ret_from_exception + tail syscall_exit_to_user_mode +SYM_CODE_END(ret_from_fork) /* * Integer register context switch @@ -533,7 +278,7 @@ ENDPROC(ret_from_kernel_thread) * The value of a0 and a1 must be preserved by this function, as that's how * arguments are passed to schedule_tail. */ -ENTRY(__switch_to) +SYM_FUNC_START(__switch_to) /* Save context into prev->thread */ li a4, TASK_THREAD_RA add a3, a0, a4 @@ -570,7 +315,7 @@ ENTRY(__switch_to) /* The offset of thread_info in task_struct is zero. 
*/ move tp, a1 ret -ENDPROC(__switch_to) +SYM_FUNC_END(__switch_to) #ifndef CONFIG_MMU #define do_page_fault do_trap_unknown @@ -579,7 +324,7 @@ ENDPROC(__switch_to) .section ".rodata" .align LGREG /* Exception vector table */ -ENTRY(excp_vect_table) +SYM_CODE_START(excp_vect_table) RISCV_PTR do_trap_insn_misaligned ALT_INSN_FAULT(RISCV_PTR do_trap_insn_fault) RISCV_PTR do_trap_insn_illegal @@ -588,7 +333,7 @@ ENTRY(excp_vect_table) RISCV_PTR do_trap_load_fault RISCV_PTR do_trap_store_misaligned RISCV_PTR do_trap_store_fault - RISCV_PTR do_trap_ecall_u /* system call, gets intercepted */ + RISCV_PTR do_trap_ecall_u /* system call */ RISCV_PTR do_trap_ecall_s RISCV_PTR do_trap_unknown RISCV_PTR do_trap_ecall_m @@ -598,11 +343,11 @@ ENTRY(excp_vect_table) RISCV_PTR do_trap_unknown RISCV_PTR do_page_fault /* store page fault */ excp_vect_table_end: -END(excp_vect_table) +SYM_CODE_END(excp_vect_table) #ifndef CONFIG_MMU -ENTRY(__user_rt_sigreturn) +SYM_CODE_START(__user_rt_sigreturn) li a7, __NR_rt_sigreturn scall -END(__user_rt_sigreturn) +SYM_CODE_END(__user_rt_sigreturn) #endif diff --git a/arch/riscv/kernel/head.h b/arch/riscv/kernel/head.h index 726731ada534..a556fdaafed9 100644 --- a/arch/riscv/kernel/head.h +++ b/arch/riscv/kernel/head.h @@ -10,7 +10,6 @@ extern atomic_t hart_lottery; -asmlinkage void do_page_fault(struct pt_regs *regs); asmlinkage void __init setup_vm(uintptr_t dtb_pa); #ifdef CONFIG_XIP_KERNEL asmlinkage void __init __copy_data(void); diff --git a/arch/riscv/kernel/mcount-dyn.S b/arch/riscv/kernel/mcount-dyn.S index 125de818d1ba..669b8697aa38 100644 --- a/arch/riscv/kernel/mcount-dyn.S +++ b/arch/riscv/kernel/mcount-dyn.S @@ -66,66 +66,17 @@ REG_S x3, PT_GP(sp) REG_S x4, PT_TP(sp) REG_S x5, PT_T0(sp) - REG_S x6, PT_T1(sp) - REG_S x7, PT_T2(sp) - REG_S x8, PT_S0(sp) - REG_S x9, PT_S1(sp) - REG_S x10, PT_A0(sp) - REG_S x11, PT_A1(sp) - REG_S x12, PT_A2(sp) - REG_S x13, PT_A3(sp) - REG_S x14, PT_A4(sp) - REG_S x15, PT_A5(sp) - REG_S x16, PT_A6(sp) - REG_S x17, PT_A7(sp) - REG_S x18, PT_S2(sp) - REG_S x19, PT_S3(sp) - REG_S x20, PT_S4(sp) - REG_S x21, PT_S5(sp) - REG_S x22, PT_S6(sp) - REG_S x23, PT_S7(sp) - REG_S x24, PT_S8(sp) - REG_S x25, PT_S9(sp) - REG_S x26, PT_S10(sp) - REG_S x27, PT_S11(sp) - REG_S x28, PT_T3(sp) - REG_S x29, PT_T4(sp) - REG_S x30, PT_T5(sp) - REG_S x31, PT_T6(sp) + save_from_x6_to_x31 .endm .macro RESTORE_ALL - REG_L t0, PT_EPC(sp) REG_L x1, PT_RA(sp) REG_L x2, PT_SP(sp) REG_L x3, PT_GP(sp) REG_L x4, PT_TP(sp) - REG_L x6, PT_T1(sp) - REG_L x7, PT_T2(sp) - REG_L x8, PT_S0(sp) - REG_L x9, PT_S1(sp) - REG_L x10, PT_A0(sp) - REG_L x11, PT_A1(sp) - REG_L x12, PT_A2(sp) - REG_L x13, PT_A3(sp) - REG_L x14, PT_A4(sp) - REG_L x15, PT_A5(sp) - REG_L x16, PT_A6(sp) - REG_L x17, PT_A7(sp) - REG_L x18, PT_S2(sp) - REG_L x19, PT_S3(sp) - REG_L x20, PT_S4(sp) - REG_L x21, PT_S5(sp) - REG_L x22, PT_S6(sp) - REG_L x23, PT_S7(sp) - REG_L x24, PT_S8(sp) - REG_L x25, PT_S9(sp) - REG_L x26, PT_S10(sp) - REG_L x27, PT_S11(sp) - REG_L x28, PT_T3(sp) - REG_L x29, PT_T4(sp) - REG_L x30, PT_T5(sp) - REG_L x31, PT_T6(sp) + /* Restore t0 with PT_EPC */ + REG_L x5, PT_EPC(sp) + restore_from_x6_to_x31 addi sp, sp, PT_SIZE_ON_STACK .endm diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c index 774ffde386ab..e2a060066730 100644 --- a/arch/riscv/kernel/process.c +++ b/arch/riscv/kernel/process.c @@ -34,7 +34,6 @@ EXPORT_SYMBOL(__stack_chk_guard); #endif extern asmlinkage void ret_from_fork(void); -extern asmlinkage void ret_from_kernel_thread(void); void 
arch_cpu_idle(void) { @@ -173,7 +172,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) /* Supervisor/Machine, irqs on: */ childregs->status = SR_PP | SR_PIE; - p->thread.ra = (unsigned long)ret_from_kernel_thread; p->thread.s[0] = (unsigned long)args->fn; p->thread.s[1] = (unsigned long)args->fn_arg; } else { @@ -183,8 +181,9 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) if (clone_flags & CLONE_SETTLS) childregs->tp = tls; childregs->a0 = 0; /* Return value of fork() */ - p->thread.ra = (unsigned long)ret_from_fork; + p->thread.s[0] = 0; } + p->thread.ra = (unsigned long)ret_from_fork; p->thread.sp = (unsigned long)childregs; /* kernel sp */ return 0; } diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c index 2ae8280ae475..23c48b14a0e7 100644 --- a/arch/riscv/kernel/ptrace.c +++ b/arch/riscv/kernel/ptrace.c @@ -19,9 +19,6 @@ #include <linux/sched.h> #include <linux/sched/task_stack.h> -#define CREATE_TRACE_POINTS -#include <trace/events/syscalls.h> - enum riscv_regset { REGSET_X, #ifdef CONFIG_FPU @@ -212,7 +209,6 @@ unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) void ptrace_disable(struct task_struct *child) { - clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); } long arch_ptrace(struct task_struct *child, long request, @@ -229,46 +225,6 @@ long arch_ptrace(struct task_struct *child, long request, return ret; } -/* - * Allows PTRACE_SYSCALL to work. These are called from entry.S in - * {handle,ret_from}_syscall. - */ -__visible int do_syscall_trace_enter(struct pt_regs *regs) -{ - if (test_thread_flag(TIF_SYSCALL_TRACE)) - if (ptrace_report_syscall_entry(regs)) - return -1; - - /* - * Do the secure computing after ptrace; failures should be fast. - * If this fails we might have return value in a0 from seccomp - * (via SECCOMP_RET_ERRNO/TRACE). 
- */ - if (secure_computing() == -1) - return -1; - -#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS - if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) - trace_sys_enter(regs, syscall_get_nr(current, regs)); -#endif - - audit_syscall_entry(regs->a7, regs->a0, regs->a1, regs->a2, regs->a3); - return 0; -} - -__visible void do_syscall_trace_exit(struct pt_regs *regs) -{ - audit_syscall_exit(regs); - - if (test_thread_flag(TIF_SYSCALL_TRACE)) - ptrace_report_syscall_exit(regs, 0); - -#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS - if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) - trace_sys_exit(regs, regs_return_value(regs)); -#endif -} - #ifdef CONFIG_COMPAT static int compat_riscv_gpr_get(struct task_struct *target, const struct user_regset *regset, diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index 376d2827e736..5d3184cbf518 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -297,7 +297,7 @@ void __init setup_arch(char **cmdline_p) setup_smp(); #endif - riscv_init_cbom_blocksize(); + riscv_init_cbo_blocksizes(); riscv_fill_hwcap(); apply_boot_alternatives(); if (IS_ENABLED(CONFIG_RISCV_ISA_ZICBOM) && diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c index bfb2afa4135f..2e365084417e 100644 --- a/arch/riscv/kernel/signal.c +++ b/arch/riscv/kernel/signal.c @@ -12,6 +12,7 @@ #include <linux/syscalls.h> #include <linux/resume_user_mode.h> #include <linux/linkage.h> +#include <linux/entry-common.h> #include <asm/ucontext.h> #include <asm/vdso.h> @@ -274,7 +275,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) signal_setup_done(ret, ksig, 0); } -static void do_signal(struct pt_regs *regs) +void arch_do_signal_or_restart(struct pt_regs *regs) { struct ksignal ksig; @@ -311,29 +312,3 @@ static void do_signal(struct pt_regs *regs) */ restore_saved_sigmask(); } - -/* - * Handle any pending work on the resume-to-userspace path, as indicated by - * _TIF_WORK_MASK. Entered from assembly with IRQs off. - */ -asmlinkage __visible void do_work_pending(struct pt_regs *regs, - unsigned long thread_info_flags) -{ - do { - if (thread_info_flags & _TIF_NEED_RESCHED) { - schedule(); - } else { - local_irq_enable(); - if (thread_info_flags & _TIF_UPROBE) - uprobe_notify_resume(regs); - /* Handle pending signal delivery */ - if (thread_info_flags & (_TIF_SIGPENDING | - _TIF_NOTIFY_SIGNAL)) - do_signal(regs); - if (thread_info_flags & _TIF_NOTIFY_RESUME) - resume_user_mode_work(regs); - } - local_irq_disable(); - thread_info_flags = read_thread_flags(); - } while (thread_info_flags & _TIF_WORK_MASK); -} diff --git a/arch/riscv/kernel/trace_irq.c b/arch/riscv/kernel/trace_irq.c deleted file mode 100644 index 095ac976d7da..000000000000 --- a/arch/riscv/kernel/trace_irq.c +++ /dev/null @@ -1,27 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2022 Changbin Du <changbin.du@gmail.com> - */ - -#include <linux/irqflags.h> -#include <linux/kprobes.h> -#include "trace_irq.h" - -/* - * trace_hardirqs_on/off require the caller to setup frame pointer properly. - * Otherwise, CALLER_ADDR1 might trigger an pagging exception in kernel. - * Here we add one extra level so they can be safely called by low - * level entry code which $fp is used for other purpose. 
- */ - -void __trace_hardirqs_on(void) -{ - trace_hardirqs_on(); -} -NOKPROBE_SYMBOL(__trace_hardirqs_on); - -void __trace_hardirqs_off(void) -{ - trace_hardirqs_off(); -} -NOKPROBE_SYMBOL(__trace_hardirqs_off); diff --git a/arch/riscv/kernel/trace_irq.h b/arch/riscv/kernel/trace_irq.h deleted file mode 100644 index 99fe67377e5e..000000000000 --- a/arch/riscv/kernel/trace_irq.h +++ /dev/null @@ -1,11 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2022 Changbin Du <changbin.du@gmail.com> - */ -#ifndef __TRACE_IRQ_H -#define __TRACE_IRQ_H - -void __trace_hardirqs_on(void); -void __trace_hardirqs_off(void); - -#endif /* __TRACE_IRQ_H */ diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c index f6fda94e8e59..1f4e37be7eb3 100644 --- a/arch/riscv/kernel/traps.c +++ b/arch/riscv/kernel/traps.c @@ -17,12 +17,14 @@ #include <linux/module.h> #include <linux/irq.h> #include <linux/kexec.h> +#include <linux/entry-common.h> #include <asm/asm-prototypes.h> #include <asm/bug.h> #include <asm/csr.h> #include <asm/processor.h> #include <asm/ptrace.h> +#include <asm/syscall.h> #include <asm/thread_info.h> int show_unhandled_signals = 1; @@ -119,14 +121,22 @@ static void do_trap_error(struct pt_regs *regs, int signo, int code, } #if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_RISCV_ALTERNATIVE) -#define __trap_section __section(".xip.traps") +#define __trap_section __noinstr_section(".xip.traps") #else -#define __trap_section +#define __trap_section noinstr #endif -#define DO_ERROR_INFO(name, signo, code, str) \ -asmlinkage __visible __trap_section void name(struct pt_regs *regs) \ -{ \ - do_trap_error(regs, signo, code, regs->epc, "Oops - " str); \ +#define DO_ERROR_INFO(name, signo, code, str) \ +asmlinkage __visible __trap_section void name(struct pt_regs *regs) \ +{ \ + if (user_mode(regs)) { \ + irqentry_enter_from_user_mode(regs); \ + do_trap_error(regs, signo, code, regs->epc, "Oops - " str); \ + irqentry_exit_to_user_mode(regs); \ + } else { \ + irqentry_state_t state = irqentry_nmi_enter(regs); \ + do_trap_error(regs, signo, code, regs->epc, "Oops - " str); \ + irqentry_nmi_exit(regs, state); \ + } \ } DO_ERROR_INFO(do_trap_unknown, @@ -148,26 +158,50 @@ DO_ERROR_INFO(do_trap_store_misaligned, int handle_misaligned_load(struct pt_regs *regs); int handle_misaligned_store(struct pt_regs *regs); -asmlinkage void __trap_section do_trap_load_misaligned(struct pt_regs *regs) +asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt_regs *regs) { - if (!handle_misaligned_load(regs)) - return; - do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc, - "Oops - load address misaligned"); + if (user_mode(regs)) { + irqentry_enter_from_user_mode(regs); + + if (handle_misaligned_load(regs)) + do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc, + "Oops - load address misaligned"); + + irqentry_exit_to_user_mode(regs); + } else { + irqentry_state_t state = irqentry_nmi_enter(regs); + + if (handle_misaligned_load(regs)) + do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc, + "Oops - load address misaligned"); + + irqentry_nmi_exit(regs, state); + } } -asmlinkage void __trap_section do_trap_store_misaligned(struct pt_regs *regs) +asmlinkage __visible __trap_section void do_trap_store_misaligned(struct pt_regs *regs) { - if (!handle_misaligned_store(regs)) - return; - do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc, - "Oops - store (or AMO) address misaligned"); + if (user_mode(regs)) { + irqentry_enter_from_user_mode(regs); + + if (handle_misaligned_store(regs)) + 
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index 7d010b0be54e..6adb1b6112a1 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -63,6 +63,7 @@ static const unsigned long kvm_isa_ext_arr[] = {
 	KVM_ISA_EXT_ARR(SVPBMT),
 	KVM_ISA_EXT_ARR(ZIHINTPAUSE),
 	KVM_ISA_EXT_ARR(ZICBOM),
+	KVM_ISA_EXT_ARR(ZICBOZ),
 };
 
 static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
@@ -283,6 +284,11 @@ static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
 			return -EINVAL;
 		reg_val = riscv_cbom_block_size;
 		break;
+	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
+		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
+			return -EINVAL;
+		reg_val = riscv_cboz_block_size;
+		break;
 	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
 		reg_val = vcpu->arch.mvendorid;
 		break;
@@ -354,6 +360,8 @@ static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
 		break;
 	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
 		return -EOPNOTSUPP;
+	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
+		return -EOPNOTSUPP;
 	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
 		if (!vcpu->arch.ran_atleast_once)
 			vcpu->arch.mvendorid = reg_val;
@@ -865,6 +873,9 @@ static void kvm_riscv_vcpu_update_config(const unsigned long *isa)
 	if (riscv_isa_extension_available(isa, ZICBOM))
 		henvcfg |= (ENVCFG_CBIE | ENVCFG_CBCFE);
 
+	if (riscv_isa_extension_available(isa, ZICBOZ))
+		henvcfg |= ENVCFG_CBZE;
+
 	csr_write(CSR_HENVCFG, henvcfg);
 #ifdef CONFIG_32BIT
 	csr_write(CSR_HENVCFGH, henvcfg >> 32);
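For a VMM, the new zicboz_block_size config register is read through the usual one-reg interface. A minimal sketch, assuming a valid vcpu_fd and the uapi headers that define the RISC-V one-reg encoding (error handling trimmed):

	#include <sys/ioctl.h>
	#include <linux/kvm.h>	/* struct kvm_one_reg, KVM_GET_ONE_REG */
	#include <asm/kvm.h>	/* KVM_REG_RISCV*, struct kvm_riscv_config */

	static unsigned long get_zicboz_block_size(int vcpu_fd)
	{
		unsigned long val = 0;
		struct kvm_one_reg reg = {
			.id = KVM_REG_RISCV | KVM_REG_SIZE_ULONG |
			      KVM_REG_RISCV_CONFIG |
			      KVM_REG_RISCV_CONFIG_REG(zicboz_block_size),
			.addr = (unsigned long)&val,
		};

		/* fails with EINVAL when the vCPU lacks Zicboz */
		if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
			return 0;
		return val;
	}

Writes to the register return -EOPNOTSUPP, matching zicbom_block_size: the guest-visible value always mirrors the host's probed block size, and setting ENVCFG_CBZE in henvcfg is what actually permits cbo.zero from VS/VU mode.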
diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile
index 6c74b0bedd60..26cb2502ecf8 100644
--- a/arch/riscv/lib/Makefile
+++ b/arch/riscv/lib/Makefile
@@ -8,5 +8,6 @@ lib-y			+= strlen.o
 lib-y			+= strncmp.o
 lib-$(CONFIG_MMU)	+= uaccess.o
 lib-$(CONFIG_64BIT)	+= tishift.o
+lib-$(CONFIG_RISCV_ISA_ZICBOZ)	+= clear_page.o
 
 obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
diff --git a/arch/riscv/lib/clear_page.S b/arch/riscv/lib/clear_page.S
new file mode 100644
index 000000000000..d7a256eb53f4
--- /dev/null
+++ b/arch/riscv/lib/clear_page.S
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023 Ventana Micro Systems Inc.
+ */
+
+#include <linux/linkage.h>
+#include <asm/asm.h>
+#include <asm/alternative-macros.h>
+#include <asm-generic/export.h>
+#include <asm/hwcap.h>
+#include <asm/insn-def.h>
+#include <asm/page.h>
+
+#define CBOZ_ALT(order, old, new)				\
+	ALTERNATIVE(old, new, 0,				\
+		    ((order) << 16) | RISCV_ISA_EXT_ZICBOZ,	\
+		    CONFIG_RISCV_ISA_ZICBOZ)
+
+/* void clear_page(void *page) */
+SYM_FUNC_START(clear_page)
+	li	a2, PAGE_SIZE
+
+	/*
+	 * If Zicboz isn't present, or somehow has a block
+	 * size larger than 4K, then fall back to memset.
+	 */
+	CBOZ_ALT(12, "j .Lno_zicboz", "nop")
+
+	lw	a1, riscv_cboz_block_size
+	add	a2, a0, a2
+.Lzero_loop:
+	CBO_zero(a0)
+	add	a0, a0, a1
+	CBOZ_ALT(11, "bltu a0, a2, .Lzero_loop; ret", "nop; nop")
+	CBO_zero(a0)
+	add	a0, a0, a1
+	CBOZ_ALT(10, "bltu a0, a2, .Lzero_loop; ret", "nop; nop")
+	CBO_zero(a0)
+	add	a0, a0, a1
+	CBO_zero(a0)
+	add	a0, a0, a1
+	CBOZ_ALT(9, "bltu a0, a2, .Lzero_loop; ret", "nop; nop")
+	CBO_zero(a0)
+	add	a0, a0, a1
+	CBO_zero(a0)
+	add	a0, a0, a1
+	CBO_zero(a0)
+	add	a0, a0, a1
+	CBO_zero(a0)
+	add	a0, a0, a1
+	CBOZ_ALT(8, "bltu a0, a2, .Lzero_loop; ret", "nop; nop")
+	CBO_zero(a0)
+	add	a0, a0, a1
+	CBO_zero(a0)
+	add	a0, a0, a1
+	CBO_zero(a0)
+	add	a0, a0, a1
+	CBO_zero(a0)
+	add	a0, a0, a1
+	CBO_zero(a0)
+	add	a0, a0, a1
+	CBO_zero(a0)
+	add	a0, a0, a1
+	CBO_zero(a0)
+	add	a0, a0, a1
+	CBO_zero(a0)
+	add	a0, a0, a1
+	bltu	a0, a2, .Lzero_loop
+	ret
+.Lno_zicboz:
+	li	a1, 0
+	tail	__memset
SYM_FUNC_END(clear_page)
EXPORT_SYMBOL(clear_page)
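The loop body above can zero up to sixteen blocks per pass. Each CBOZ_ALT() site encodes a block-size order in its patch ID, so the boot-time alternatives keep, in effect, only the early bltu/ret exit that matches the probed block size and turn the others into NOPs; with common 64-byte blocks every early exit is NOPped and the sixteen cbo.zero instructions run back to back. If Zicboz is absent (or the block size exceeds 4K), the first alternative keeps the jump to .Lno_zicboz, which tail-calls __memset(page, 0, PAGE_SIZE). A C-level equivalent of the fast path, for illustration only (cbo_zero() stands in for the CBO_zero() instruction macro and is not a real kernel helper):

	static void clear_page_zicboz(void *page)
	{
		unsigned int step = riscv_cboz_block_size; /* probed from the DT */
		void *end = page + PAGE_SIZE;

		do {
			cbo_zero(page);	/* zero one cache block */
			page += step;
		} while (page < end);
	}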
diff --git a/arch/riscv/lib/strcmp.S b/arch/riscv/lib/strcmp.S
index c42a8412547f..687b2bea5c43 100644
--- a/arch/riscv/lib/strcmp.S
+++ b/arch/riscv/lib/strcmp.S
@@ -2,9 +2,8 @@
 
 #include <linux/linkage.h>
 #include <asm/asm.h>
-#include <asm-generic/export.h>
 #include <asm/alternative-macros.h>
-#include <asm/errata_list.h>
+#include <asm/hwcap.h>
 
 /* int strcmp(const char *cs, const char *ct) */
 SYM_FUNC_START(strcmp)
diff --git a/arch/riscv/lib/strlen.S b/arch/riscv/lib/strlen.S
index 15bb8f3aa959..db3d42d99b78 100644
--- a/arch/riscv/lib/strlen.S
+++ b/arch/riscv/lib/strlen.S
@@ -2,9 +2,8 @@
 
 #include <linux/linkage.h>
 #include <asm/asm.h>
-#include <asm-generic/export.h>
 #include <asm/alternative-macros.h>
-#include <asm/errata_list.h>
+#include <asm/hwcap.h>
 
 /* int strlen(const char *s) */
 SYM_FUNC_START(strlen)
diff --git a/arch/riscv/lib/strncmp.S b/arch/riscv/lib/strncmp.S
index 7ac2f667285a..aba5b3148621 100644
--- a/arch/riscv/lib/strncmp.S
+++ b/arch/riscv/lib/strncmp.S
@@ -2,9 +2,8 @@
 
 #include <linux/linkage.h>
 #include <asm/asm.h>
-#include <asm-generic/export.h>
 #include <asm/alternative-macros.h>
-#include <asm/errata_list.h>
+#include <asm/hwcap.h>
 
 /* int strncmp(const char *cs, const char *ct, size_t count) */
 SYM_FUNC_START(strncmp)
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index fcd6145fbead..632d6d06148a 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -100,36 +100,48 @@ void flush_icache_pte(pte_t pte)
 unsigned int riscv_cbom_block_size;
 EXPORT_SYMBOL_GPL(riscv_cbom_block_size);
 
-void riscv_init_cbom_blocksize(void)
+unsigned int riscv_cboz_block_size;
+EXPORT_SYMBOL_GPL(riscv_cboz_block_size);
+
+static void cbo_get_block_size(struct device_node *node,
+			       const char *name, u32 *block_size,
+			       unsigned long *first_hartid)
 {
+	unsigned long hartid;
+	u32 val;
+
+	if (riscv_of_processor_hartid(node, &hartid))
+		return;
+
+	if (of_property_read_u32(node, name, &val))
+		return;
+
+	if (!*block_size) {
+		*block_size = val;
+		*first_hartid = hartid;
+	} else if (*block_size != val) {
+		pr_warn("%s mismatched between harts %lu and %lu\n",
+			name, *first_hartid, hartid);
+	}
+}
+
+void riscv_init_cbo_blocksizes(void)
+{
+	unsigned long cbom_hartid, cboz_hartid;
+	u32 cbom_block_size = 0, cboz_block_size = 0;
 	struct device_node *node;
-	unsigned long cbom_hartid;
-	u32 val, probed_block_size;
-	int ret;
 
-	probed_block_size = 0;
 	for_each_of_cpu_node(node) {
-		unsigned long hartid;
-
-		ret = riscv_of_processor_hartid(node, &hartid);
-		if (ret)
-			continue;
-
-		/* set block-size for cbom extension if available */
-		ret = of_property_read_u32(node, "riscv,cbom-block-size", &val);
-		if (ret)
-			continue;
-
-		if (!probed_block_size) {
-			probed_block_size = val;
-			cbom_hartid = hartid;
-		} else {
-			if (probed_block_size != val)
-				pr_warn("cbom-block-size mismatched between harts %lu and %lu\n",
-					cbom_hartid, hartid);
-		}
+		/* set block-size for cbom and/or cboz extension if available */
+		cbo_get_block_size(node, "riscv,cbom-block-size",
+				   &cbom_block_size, &cbom_hartid);
+		cbo_get_block_size(node, "riscv,cboz-block-size",
+				   &cboz_block_size, &cboz_hartid);
 	}
 
-	if (probed_block_size)
-		riscv_cbom_block_size = probed_block_size;
+	if (cbom_block_size)
+		riscv_cbom_block_size = cbom_block_size;
+
+	if (cboz_block_size)
+		riscv_cboz_block_size = cboz_block_size;
 }
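riscv_init_cbo_blocksizes() now probes both DT properties in a single pass over the CPU nodes and warns when harts disagree; the first hart that reported a value wins. Since both variables are exported with EXPORT_SYMBOL_GPL, modules can consume them. A hypothetical example, assuming the extern declarations are visible via <asm/cacheflush.h> (where riscv_cbom_block_size's already lives):

	#include <linux/module.h>
	#include <asm/cacheflush.h>

	static int __init cbo_sizes_init(void)
	{
		/* either value stays 0 if its DT property was absent */
		pr_info("cbom: %u bytes, cboz: %u bytes\n",
			riscv_cbom_block_size, riscv_cboz_block_size);
		return 0;
	}
	module_init(cbo_sizes_init);

	MODULE_DESCRIPTION("CBO block-size probe example");
	MODULE_LICENSE("GPL");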
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 460f785f6e09..3aba72ec1742 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -15,6 +15,7 @@
 #include <linux/uaccess.h>
 #include <linux/kprobes.h>
 #include <linux/kfence.h>
+#include <linux/entry-common.h>
 
 #include <asm/ptrace.h>
 #include <asm/tlbflush.h>
@@ -204,7 +205,7 @@ static inline bool access_error(unsigned long cause, struct vm_area_struct *vma)
  * This routine handles page faults. It determines the address and the
  * problem, and then passes it off to one of the appropriate routines.
  */
-asmlinkage void do_page_fault(struct pt_regs *regs)
+void handle_page_fault(struct pt_regs *regs)
 {
 	struct task_struct *tsk;
 	struct vm_area_struct *vma;
@@ -251,7 +252,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
 	}
 #endif
 	/* Enable interrupts if they were enabled in the parent context. */
-	if (likely(regs->status & SR_PIE))
+	if (!regs_irqs_disabled(regs))
 		local_irq_enable();
 
 	/*
@@ -356,4 +357,3 @@ good_area:
 	}
 	return;
 }
-NOKPROBE_SYMBOL(do_page_fault);
diff --git a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c
index 932dadfdca54..a163a3e0f0d4 100644
--- a/arch/riscv/mm/hugetlbpage.c
+++ b/arch/riscv/mm/hugetlbpage.c
@@ -2,6 +2,305 @@
 #include <linux/hugetlb.h>
 #include <linux/err.h>
 
+#ifdef CONFIG_RISCV_ISA_SVNAPOT
+pte_t *huge_pte_alloc(struct mm_struct *mm,
+		      struct vm_area_struct *vma,
+		      unsigned long addr,
+		      unsigned long sz)
+{
+	unsigned long order;
+	pte_t *pte = NULL;
+	pgd_t *pgd;
+	p4d_t *p4d;
+	pud_t *pud;
+	pmd_t *pmd;
+
+	pgd = pgd_offset(mm, addr);
+	p4d = p4d_alloc(mm, pgd, addr);
+	if (!p4d)
+		return NULL;
+
+	pud = pud_alloc(mm, p4d, addr);
+	if (!pud)
+		return NULL;
+
+	if (sz == PUD_SIZE) {
+		pte = (pte_t *)pud;
+		goto out;
+	}
+
+	if (sz == PMD_SIZE) {
+		if (want_pmd_share(vma, addr) && pud_none(*pud))
+			pte = huge_pmd_share(mm, vma, addr, pud);
+		else
+			pte = (pte_t *)pmd_alloc(mm, pud, addr);
+		goto out;
+	}
+
+	pmd = pmd_alloc(mm, pud, addr);
+	if (!pmd)
+		return NULL;
+
+	for_each_napot_order(order) {
+		if (napot_cont_size(order) == sz) {
+			pte = pte_alloc_map(mm, pmd, addr & napot_cont_mask(order));
+			break;
+		}
+	}
+
+out:
+	WARN_ON_ONCE(pte && pte_present(*pte) && !pte_huge(*pte));
+	return pte;
+}
+
+pte_t *huge_pte_offset(struct mm_struct *mm,
+		       unsigned long addr,
+		       unsigned long sz)
+{
+	unsigned long order;
+	pte_t *pte = NULL;
+	pgd_t *pgd;
+	p4d_t *p4d;
+	pud_t *pud;
+	pmd_t *pmd;
+
+	pgd = pgd_offset(mm, addr);
+	if (!pgd_present(*pgd))
+		return NULL;
+
+	p4d = p4d_offset(pgd, addr);
+	if (!p4d_present(*p4d))
+		return NULL;
+
+	pud = pud_offset(p4d, addr);
+	if (sz == PUD_SIZE)
+		/* must be pud huge, non-present or none */
+		return (pte_t *)pud;
+
+	if (!pud_present(*pud))
+		return NULL;
+
+	pmd = pmd_offset(pud, addr);
+	if (sz == PMD_SIZE)
+		/* must be pmd huge, non-present or none */
+		return (pte_t *)pmd;
+
+	if (!pmd_present(*pmd))
+		return NULL;
+
+	for_each_napot_order(order) {
+		if (napot_cont_size(order) == sz) {
+			pte = pte_offset_kernel(pmd, addr & napot_cont_mask(order));
+			break;
+		}
+	}
+	return pte;
+}
+
+static pte_t get_clear_contig(struct mm_struct *mm,
+			      unsigned long addr,
+			      pte_t *ptep,
+			      unsigned long pte_num)
+{
+	pte_t orig_pte = ptep_get(ptep);
+	unsigned long i;
+
+	for (i = 0; i < pte_num; i++, addr += PAGE_SIZE, ptep++) {
+		pte_t pte = ptep_get_and_clear(mm, addr, ptep);
+
+		if (pte_dirty(pte))
+			orig_pte = pte_mkdirty(orig_pte);
+
+		if (pte_young(pte))
+			orig_pte = pte_mkyoung(orig_pte);
+	}
+
+	return orig_pte;
+}
+
+static pte_t get_clear_contig_flush(struct mm_struct *mm,
+				    unsigned long addr,
+				    pte_t *ptep,
+				    unsigned long pte_num)
+{
+	pte_t orig_pte = get_clear_contig(mm, addr, ptep, pte_num);
+	struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
+	bool valid = !pte_none(orig_pte);
+
+	if (valid)
+		flush_tlb_range(&vma, addr, addr + (PAGE_SIZE * pte_num));
+
+	return orig_pte;
+}
+
+pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
+{
+	unsigned long order;
+
+	for_each_napot_order(order) {
+		if (shift == napot_cont_shift(order)) {
+			entry = pte_mknapot(entry, order);
+			break;
+		}
+	}
+	if (order == NAPOT_ORDER_MAX)
+		entry = pte_mkhuge(entry);
+
+	return entry;
+}
+
+void set_huge_pte_at(struct mm_struct *mm,
+		     unsigned long addr,
+		     pte_t *ptep,
+		     pte_t pte)
+{
+	int i, pte_num;
+
+	if (!pte_napot(pte)) {
+		set_pte_at(mm, addr, ptep, pte);
+		return;
+	}
+
+	pte_num = napot_pte_num(napot_cont_order(pte));
+	for (i = 0; i < pte_num; i++, ptep++, addr += PAGE_SIZE)
+		set_pte_at(mm, addr, ptep, pte);
+}
+
+int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+			       unsigned long addr,
+			       pte_t *ptep,
+			       pte_t pte,
+			       int dirty)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	unsigned long order;
+	pte_t orig_pte;
+	int i, pte_num;
+
+	if (!pte_napot(pte))
+		return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+
+	order = napot_cont_order(pte);
+	pte_num = napot_pte_num(order);
+	ptep = huge_pte_offset(mm, addr, napot_cont_size(order));
+	orig_pte = get_clear_contig_flush(mm, addr, ptep, pte_num);
+
+	if (pte_dirty(orig_pte))
+		pte = pte_mkdirty(pte);
+
+	if (pte_young(orig_pte))
+		pte = pte_mkyoung(pte);
+
+	for (i = 0; i < pte_num; i++, addr += PAGE_SIZE, ptep++)
+		set_pte_at(mm, addr, ptep, pte);
+
+	return true;
+}
+
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+			      unsigned long addr,
+			      pte_t *ptep)
+{
+	pte_t orig_pte = ptep_get(ptep);
+	int pte_num;
+
+	if (!pte_napot(orig_pte))
+		return ptep_get_and_clear(mm, addr, ptep);
+
+	pte_num = napot_pte_num(napot_cont_order(orig_pte));
+
+	return get_clear_contig(mm, addr, ptep, pte_num);
+}
+
+void huge_ptep_set_wrprotect(struct mm_struct *mm,
+			     unsigned long addr,
+			     pte_t *ptep)
+{
+	pte_t pte = ptep_get(ptep);
+	unsigned long order;
+	int i, pte_num;
+
+	if (!pte_napot(pte)) {
+		ptep_set_wrprotect(mm, addr, ptep);
+		return;
+	}
+
+	order = napot_cont_order(pte);
+	pte_num = napot_pte_num(order);
+	ptep = huge_pte_offset(mm, addr, napot_cont_size(order));
+
+	for (i = 0; i < pte_num; i++, addr += PAGE_SIZE, ptep++)
+		ptep_set_wrprotect(mm, addr, ptep);
+}
+
+pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
+			    unsigned long addr,
+			    pte_t *ptep)
+{
+	pte_t pte = ptep_get(ptep);
+	int pte_num;
+
+	if (!pte_napot(pte))
+		return ptep_clear_flush(vma, addr, ptep);
+
+	pte_num = napot_pte_num(napot_cont_order(pte));
+
+	return get_clear_contig_flush(vma->vm_mm, addr, ptep, pte_num);
+}
+
+void huge_pte_clear(struct mm_struct *mm,
+		    unsigned long addr,
+		    pte_t *ptep,
+		    unsigned long sz)
+{
+	pte_t pte = READ_ONCE(*ptep);
+	int i, pte_num;
+
+	if (!pte_napot(pte)) {
+		pte_clear(mm, addr, ptep);
+		return;
+	}
+
+	pte_num = napot_pte_num(napot_cont_order(pte));
+	for (i = 0; i < pte_num; i++, addr += PAGE_SIZE, ptep++)
+		pte_clear(mm, addr, ptep);
+}
+
+static __init bool is_napot_size(unsigned long size)
+{
+	unsigned long order;
+
+	if (!has_svnapot())
+		return false;
+
+	for_each_napot_order(order) {
+		if (size == napot_cont_size(order))
+			return true;
+	}
+	return false;
+}
+
+static __init int napot_hugetlbpages_init(void)
+{
+	if (has_svnapot()) {
+		unsigned long order;
+
+		for_each_napot_order(order)
+			hugetlb_add_hstate(order);
+	}
+	return 0;
+}
+arch_initcall(napot_hugetlbpages_init);
+
+#else
+
+static __init bool is_napot_size(unsigned long size)
+{
+	return false;
+}
+
+#endif /* CONFIG_RISCV_ISA_SVNAPOT */
+
 int pud_huge(pud_t pud)
 {
 	return pud_leaf(pud);
@@ -18,6 +317,8 @@ bool __init arch_hugetlb_valid_size(unsigned long size)
 		return true;
 	else if (IS_ENABLED(CONFIG_64BIT) && size == PUD_SIZE)
 		return true;
+	else if (is_napot_size(size))
+		return true;
 	else
 		return false;
 }
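With these helpers in place, SVNAPOT machines gain hugetlb sizes between PAGE_SIZE and PMD_SIZE. At NAPOT order 4, for instance, napot_cont_size(4) = 16 * 4K = 64K: the mapping is represented by sixteen identical NAPOT-marked PTEs, set_huge_pte_at() fills all sixteen slots, and get_clear_contig() folds the dirty/young bits of the whole group back into a single PTE on teardown. From userspace the new sizes behave like any other hugetlb size; a hedged sketch (MAP_HUGE_64KB comes from <linux/mman.h>, and the 64K hstate registered by napot_hugetlbpages_init() must have pages reserved by the administrator):

	#include <sys/mman.h>
	#include <linux/mman.h>

	static void *map_64k_hugepage(void)
	{
		/* one 64K NAPOT hugepage backs the whole mapping */
		return mmap(NULL, 64 * 1024, PROT_READ | PROT_WRITE,
			    MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_64KB,
			    -1, 0);
	}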