Diffstat (limited to 'arch/riscv')
-rw-r--r--  arch/riscv/boot/dts/sophgo/cv18xx.dtsi      |  2
-rw-r--r--  arch/riscv/crypto/Kconfig                   |  1
-rw-r--r--  arch/riscv/include/asm/alternative-macros.h | 19
-rw-r--r--  arch/riscv/include/asm/cacheflush.h         | 15
-rw-r--r--  arch/riscv/include/asm/cmpxchg.h            |  2
-rw-r--r--  arch/riscv/include/asm/kgdb.h               |  9
-rw-r--r--  arch/riscv/include/asm/page.h               | 12
-rw-r--r--  arch/riscv/include/asm/pgtable.h            |  3
-rw-r--r--  arch/riscv/include/asm/syscall.h            |  7
-rw-r--r--  arch/riscv/kernel/Makefile                  |  4
-rw-r--r--  arch/riscv/kernel/cpu_ops_sbi.c             |  6
-rw-r--r--  arch/riscv/kernel/kgdb.c                    |  6
-rw-r--r--  arch/riscv/kernel/module-sections.c         | 13
-rw-r--r--  arch/riscv/kernel/module.c                  | 11
-rw-r--r--  arch/riscv/kernel/probes/uprobes.c          | 10
-rw-r--r--  arch/riscv/kernel/setup.c                   | 36
-rw-r--r--  arch/riscv/kernel/traps.c                   | 74
-rw-r--r--  arch/riscv/kernel/traps_misaligned.c        | 19
-rw-r--r--  arch/riscv/kernel/vdso/vdso.lds.S           |  2
-rw-r--r--  arch/riscv/kvm/vcpu_sbi.c                   |  4
-rw-r--r--  arch/riscv/kvm/vcpu_sbi_replace.c           |  8
-rw-r--r--  arch/riscv/mm/cacheflush.c                  | 15
-rw-r--r--  arch/riscv/mm/tlbflush.c                    | 37
23 files changed, 195 insertions(+), 120 deletions(-)
diff --git a/arch/riscv/boot/dts/sophgo/cv18xx.dtsi b/arch/riscv/boot/dts/sophgo/cv18xx.dtsi
index b724fb6d9689..b8063ba6d6d7 100644
--- a/arch/riscv/boot/dts/sophgo/cv18xx.dtsi
+++ b/arch/riscv/boot/dts/sophgo/cv18xx.dtsi
@@ -309,7 +309,7 @@
1024 1024 1024 1024>;
snps,priority = <0 1 2 3 4 5 6 7>;
snps,dma-masters = <2>;
- snps,data-width = <4>;
+ snps,data-width = <2>;
status = "disabled";
};
diff --git a/arch/riscv/crypto/Kconfig b/arch/riscv/crypto/Kconfig
index ad58dad9a580..c67095a3d669 100644
--- a/arch/riscv/crypto/Kconfig
+++ b/arch/riscv/crypto/Kconfig
@@ -22,7 +22,6 @@ config CRYPTO_CHACHA_RISCV64
tristate "Ciphers: ChaCha"
depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
select CRYPTO_SKCIPHER
- select CRYPTO_LIB_CHACHA_GENERIC
help
Length-preserving ciphers: ChaCha20 stream cipher algorithm
diff --git a/arch/riscv/include/asm/alternative-macros.h b/arch/riscv/include/asm/alternative-macros.h
index 721ec275ce57..231d777d936c 100644
--- a/arch/riscv/include/asm/alternative-macros.h
+++ b/arch/riscv/include/asm/alternative-macros.h
@@ -115,24 +115,19 @@
\old_c
.endm
-#define _ALTERNATIVE_CFG(old_c, ...) \
- ALTERNATIVE_CFG old_c
-
-#define _ALTERNATIVE_CFG_2(old_c, ...) \
- ALTERNATIVE_CFG old_c
+#define __ALTERNATIVE_CFG(old_c, ...) ALTERNATIVE_CFG old_c
+#define __ALTERNATIVE_CFG_2(old_c, ...) ALTERNATIVE_CFG old_c
#else /* !__ASSEMBLY__ */
-#define __ALTERNATIVE_CFG(old_c) \
- old_c "\n"
+#define __ALTERNATIVE_CFG(old_c, ...) old_c "\n"
+#define __ALTERNATIVE_CFG_2(old_c, ...) old_c "\n"
-#define _ALTERNATIVE_CFG(old_c, ...) \
- __ALTERNATIVE_CFG(old_c)
+#endif /* __ASSEMBLY__ */
-#define _ALTERNATIVE_CFG_2(old_c, ...) \
- __ALTERNATIVE_CFG(old_c)
+#define _ALTERNATIVE_CFG(old_c, ...) __ALTERNATIVE_CFG(old_c)
+#define _ALTERNATIVE_CFG_2(old_c, ...) __ALTERNATIVE_CFG_2(old_c)
-#endif /* __ASSEMBLY__ */
#endif /* CONFIG_RISCV_ALTERNATIVE */
/*
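
For context, the macro reshuffle above gives the !__ASSEMBLY__ side its own
__ALTERNATIVE_CFG_2 so that _ALTERNATIVE_CFG and _ALTERNATIVE_CFG_2 can be
defined once for both the assembly and C paths, outside the #ifdef split. A
minimal sketch of the disabled-alternatives C expansion (names mirror the
header; the caller and its arguments are hypothetical):

    #define __ALTERNATIVE_CFG(old_c, ...)   old_c "\n"
    #define _ALTERNATIVE_CFG(old_c, ...)    __ALTERNATIVE_CFG(old_c)

    /* _ALTERNATIVE_CFG("nop", "fence.i", vendor, patch_id, CONFIG_FOO)
     * expands to just: "nop" "\n" -- the replacement text and the
     * vendor/errata arguments are dropped when CONFIG_RISCV_ALTERNATIVE
     * is off. */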
diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
index 8de73f91bfa3..b59ffeb668d6 100644
--- a/arch/riscv/include/asm/cacheflush.h
+++ b/arch/riscv/include/asm/cacheflush.h
@@ -34,11 +34,6 @@ static inline void flush_dcache_page(struct page *page)
flush_dcache_folio(page_folio(page));
}
-/*
- * RISC-V doesn't have an instruction to flush parts of the instruction cache,
- * so instead we just flush the whole thing.
- */
-#define flush_icache_range(start, end) flush_icache_all()
#define flush_icache_user_page(vma, pg, addr, len) \
do { \
if (vma->vm_flags & VM_EXEC) \
@@ -78,6 +73,16 @@ void flush_icache_mm(struct mm_struct *mm, bool local);
#endif /* CONFIG_SMP */
+/*
+ * RISC-V doesn't have an instruction to flush parts of the instruction cache,
+ * so instead we just flush the whole thing.
+ */
+#define flush_icache_range flush_icache_range
+static inline void flush_icache_range(unsigned long start, unsigned long end)
+{
+ flush_icache_all();
+}
+
extern unsigned int riscv_cbom_block_size;
extern unsigned int riscv_cboz_block_size;
void riscv_init_cbo_blocksizes(void);
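
The flush_icache_range() change swaps a function-like macro for the kernel's
usual self-#define override pattern: defining the name to itself lets a
generic header detect the arch override with #ifndef, while the static inline
keeps the arguments type-checked and "used". A sketch of the convention,
assuming a generic fallback header along the lines of asm-generic/cacheflush.h
(the two snippets belong to different headers, not one compilation unit):

    /* arch header: mark the override, then provide a real function */
    #define flush_icache_range flush_icache_range
    static inline void flush_icache_range(unsigned long start, unsigned long end)
    {
            flush_icache_all();     /* RISC-V can only flush the whole I$ */
    }

    /* generic header: define the fallback only if no arch override exists */
    #ifndef flush_icache_range
    static inline void flush_icache_range(unsigned long start, unsigned long end)
    {
            /* no-op fallback */
    }
    #endif

One practical payoff shows up in the uprobes hunk below: the inline evaluates
its arguments, so locals passed to it no longer look set-but-unused.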
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index ebbce134917c..6efa95ad033a 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -169,7 +169,7 @@
break; \
case 4: \
__arch_cmpxchg(".w", ".w" sc_sfx, prepend, append, \
- __ret, __ptr, (long), __old, __new); \
+ __ret, __ptr, (long)(int)(long), __old, __new); \
break; \
case 8: \
__arch_cmpxchg(".d", ".d" sc_sfx, prepend, append, \
diff --git a/arch/riscv/include/asm/kgdb.h b/arch/riscv/include/asm/kgdb.h
index 46677daf708b..cc11c4544cff 100644
--- a/arch/riscv/include/asm/kgdb.h
+++ b/arch/riscv/include/asm/kgdb.h
@@ -19,16 +19,9 @@
#ifndef __ASSEMBLY__
+void arch_kgdb_breakpoint(void);
extern unsigned long kgdb_compiled_break;
-static inline void arch_kgdb_breakpoint(void)
-{
- asm(".global kgdb_compiled_break\n"
- ".option norvc\n"
- "kgdb_compiled_break: ebreak\n"
- ".option rvc\n");
-}
-
#endif /* !__ASSEMBLY__ */
#define DBG_REG_ZERO "zero"
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index febf820d5058..e8beadc2bffd 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -26,12 +26,9 @@
* When not using MMU this corresponds to the first free page in
* physical memory (aligned on a page boundary).
*/
-#ifdef CONFIG_64BIT
#ifdef CONFIG_MMU
+#ifdef CONFIG_64BIT
#define PAGE_OFFSET kernel_map.page_offset
-#else
-#define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
-#endif
/*
* By default, CONFIG_PAGE_OFFSET value corresponds to SV57 address space so
* define the PAGE_OFFSET value for SV48 and SV39.
@@ -41,6 +38,9 @@
#else
#define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
#endif /* CONFIG_64BIT */
+#else
+#define PAGE_OFFSET ((unsigned long)phys_ram_base)
+#endif /* CONFIG_MMU */
#ifndef __ASSEMBLY__
@@ -97,11 +97,7 @@ typedef struct page *pgtable_t;
#define MIN_MEMBLOCK_ADDR 0
#endif
-#ifdef CONFIG_MMU
#define ARCH_PFN_OFFSET (PFN_DOWN((unsigned long)phys_ram_base))
-#else
-#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
-#endif /* CONFIG_MMU */
struct kernel_mapping {
unsigned long page_offset;
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index c0866ada5bbc..03881122506a 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -12,7 +12,7 @@
#include <asm/pgtable-bits.h>
#ifndef CONFIG_MMU
-#define KERNEL_LINK_ADDR PAGE_OFFSET
+#define KERNEL_LINK_ADDR _AC(CONFIG_PAGE_OFFSET, UL)
#define KERN_VIRT_SIZE (UL(-1))
#else
@@ -916,7 +916,6 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
*/
#ifdef CONFIG_64BIT
#define TASK_SIZE_64 (PGDIR_SIZE * PTRS_PER_PGD / 2)
-#define TASK_SIZE_MAX LONG_MAX
#ifdef CONFIG_COMPAT
#define TASK_SIZE_32 (_AC(0x80000000, UL) - PAGE_SIZE)
diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h
index 121fff429dce..eceabf59ae48 100644
--- a/arch/riscv/include/asm/syscall.h
+++ b/arch/riscv/include/asm/syscall.h
@@ -62,8 +62,11 @@ static inline void syscall_get_arguments(struct task_struct *task,
unsigned long *args)
{
args[0] = regs->orig_a0;
- args++;
- memcpy(args, &regs->a1, 5 * sizeof(args[0]));
+ args[1] = regs->a1;
+ args[2] = regs->a2;
+ args[3] = regs->a3;
+ args[4] = regs->a4;
+ args[5] = regs->a5;
}
static inline int syscall_get_arch(struct task_struct *task)
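
The memcpy() here copied five separate pt_regs members in one call starting
at &regs->a1; member-granular bounds checking (FORTIFY_SOURCE and friends)
flags that as a read past a single field even though a1..a5 are laid out
contiguously. Per-member assignments express the same copy without the
apparent overread. A self-contained sketch (the struct is illustrative, not
the real pt_regs):

    struct regs_sketch { unsigned long orig_a0, a0, a1, a2, a3, a4, a5; };

    static void get_args(unsigned long args[6], struct regs_sketch *regs)
    {
            args[0] = regs->orig_a0;
            args[1] = regs->a1;     /* one member per access: nothing to flag */
            args[2] = regs->a2;
            args[3] = regs->a3;
            args[4] = regs->a4;
            args[5] = regs->a5;
    }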
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index 69dc8aaab3fb..6b3b5255c889 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -9,8 +9,8 @@ CFLAGS_REMOVE_patch.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_sbi.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_return_address.o = $(CC_FLAGS_FTRACE)
endif
-CFLAGS_syscall_table.o += $(call cc-option,-Wno-override-init,)
-CFLAGS_compat_syscall_table.o += $(call cc-option,-Wno-override-init,)
+CFLAGS_syscall_table.o += $(call cc-disable-warning, override-init)
+CFLAGS_compat_syscall_table.o += $(call cc-disable-warning, override-init)
ifdef CONFIG_KEXEC_CORE
AFLAGS_kexec_relocate.o := -mcmodel=medany $(call cc-option,-mno-relax)
diff --git a/arch/riscv/kernel/cpu_ops_sbi.c b/arch/riscv/kernel/cpu_ops_sbi.c
index e6fbaaf54956..87d655944803 100644
--- a/arch/riscv/kernel/cpu_ops_sbi.c
+++ b/arch/riscv/kernel/cpu_ops_sbi.c
@@ -18,10 +18,10 @@ const struct cpu_operations cpu_ops_sbi;
/*
* Ordered booting via HSM brings one cpu at a time. However, cpu hotplug can
- * be invoked from multiple threads in parallel. Define a per cpu data
+ * be invoked from multiple threads in parallel. Define an array of boot data
* to handle that.
*/
-static DEFINE_PER_CPU(struct sbi_hart_boot_data, boot_data);
+static struct sbi_hart_boot_data boot_data[NR_CPUS];
static int sbi_hsm_hart_start(unsigned long hartid, unsigned long saddr,
unsigned long priv)
@@ -67,7 +67,7 @@ static int sbi_cpu_start(unsigned int cpuid, struct task_struct *tidle)
unsigned long boot_addr = __pa_symbol(secondary_start_sbi);
unsigned long hartid = cpuid_to_hartid_map(cpuid);
unsigned long hsm_data;
- struct sbi_hart_boot_data *bdata = &per_cpu(boot_data, cpuid);
+ struct sbi_hart_boot_data *bdata = &boot_data[cpuid];
/* Make sure tidle is updated */
smp_mb();
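
Moving boot_data from DEFINE_PER_CPU to a plain static array looks motivated
by the __pa() conversion a few lines below: the opaque pointer handed to SBI
HSM must be a physical address, and per-cpu storage can land in vmalloc space
(with the page-based first-chunk allocator), where __pa() is not valid. A
sketch of the hazard, with hypothetical names:

    static struct sbi_hart_boot_data boot_data[NR_CPUS];   /* linear map */

    unsigned long ok = __pa(&boot_data[cpuid]);   /* well-defined */
    /* unsigned long bad = __pa(per_cpu_ptr(&pcpu_boot_data, cpuid));
     *   -> may be a vmalloc address, for which __pa() returns garbage */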
diff --git a/arch/riscv/kernel/kgdb.c b/arch/riscv/kernel/kgdb.c
index 2e0266ae6bd7..9f3db3503dab 100644
--- a/arch/riscv/kernel/kgdb.c
+++ b/arch/riscv/kernel/kgdb.c
@@ -254,6 +254,12 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
regs->epc = pc;
}
+noinline void arch_kgdb_breakpoint(void)
+{
+ asm(".global kgdb_compiled_break\n"
+ "kgdb_compiled_break: ebreak\n");
+}
+
void kgdb_arch_handle_qxfer_pkt(char *remcom_in_buffer,
char *remcom_out_buffer)
{
diff --git a/arch/riscv/kernel/module-sections.c b/arch/riscv/kernel/module-sections.c
index e264e59e596e..91d0b355ceef 100644
--- a/arch/riscv/kernel/module-sections.c
+++ b/arch/riscv/kernel/module-sections.c
@@ -73,16 +73,17 @@ static bool duplicate_rela(const Elf_Rela *rela, int idx)
static void count_max_entries(Elf_Rela *relas, int num,
unsigned int *plts, unsigned int *gots)
{
- unsigned int type, i;
-
- for (i = 0; i < num; i++) {
- type = ELF_RISCV_R_TYPE(relas[i].r_info);
- if (type == R_RISCV_CALL_PLT) {
+ for (int i = 0; i < num; i++) {
+ switch (ELF_R_TYPE(relas[i].r_info)) {
+ case R_RISCV_CALL_PLT:
+ case R_RISCV_PLT32:
if (!duplicate_rela(relas, i))
(*plts)++;
- } else if (type == R_RISCV_GOT_HI20) {
+ break;
+ case R_RISCV_GOT_HI20:
if (!duplicate_rela(relas, i))
(*gots)++;
+ break;
}
}
}
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
index 47d0ebeec93c..7f6147c18033 100644
--- a/arch/riscv/kernel/module.c
+++ b/arch/riscv/kernel/module.c
@@ -648,7 +648,7 @@ process_accumulated_relocations(struct module *me,
kfree(bucket_iter);
}
- kfree(*relocation_hashtable);
+ kvfree(*relocation_hashtable);
}
static int add_relocation_to_accumulate(struct module *me, int type,
@@ -752,9 +752,10 @@ initialize_relocation_hashtable(unsigned int num_relocations,
hashtable_size <<= should_double_size;
- *relocation_hashtable = kmalloc_array(hashtable_size,
- sizeof(**relocation_hashtable),
- GFP_KERNEL);
+ /* Number of relocations may be large, so kvmalloc it */
+ *relocation_hashtable = kvmalloc_array(hashtable_size,
+ sizeof(**relocation_hashtable),
+ GFP_KERNEL);
if (!*relocation_hashtable)
return 0;
@@ -859,7 +860,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
}
j++;
- if (j > sechdrs[relsec].sh_size / sizeof(*rel))
+ if (j == num_relocations)
j = 0;
} while (j_idx != j);
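
Two details worth noting in this file: kvmalloc_array() falls back to vmalloc
when a contiguous kmalloc of the hashtable would fail, which suits a table
sized by the relocation count, and such memory must be released with kvfree()
(the first hunk). The loop-exit fix compares against the element count rather
than re-deriving it from sh_size. A sketch of the allocation pairing, with
hypothetical names:

    struct reloc_entry { unsigned long offset; };       /* illustrative */

    static struct reloc_entry *alloc_table(size_t n)
    {
            /* retries with vmalloc for large n; returns NULL on failure */
            return kvmalloc_array(n, sizeof(struct reloc_entry), GFP_KERNEL);
    }

    /* ... later: kvfree(table); -- correct for either backing store */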
diff --git a/arch/riscv/kernel/probes/uprobes.c b/arch/riscv/kernel/probes/uprobes.c
index 4b3dc8beaf77..cc15f7ca6cc1 100644
--- a/arch/riscv/kernel/probes/uprobes.c
+++ b/arch/riscv/kernel/probes/uprobes.c
@@ -167,6 +167,7 @@ void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
/* Initialize the slot */
void *kaddr = kmap_atomic(page);
void *dst = kaddr + (vaddr & ~PAGE_MASK);
+ unsigned long start = (unsigned long)dst;
memcpy(dst, src, len);
@@ -176,13 +177,6 @@ void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
*(uprobe_opcode_t *)dst = __BUG_INSN_32;
}
+ flush_icache_range(start, start + len);
kunmap_atomic(kaddr);
-
- /*
- * We probably need flush_icache_user_page() but it needs vma.
- * This should work on most of architectures by default. If
- * architecture needs to do something different it can define
- * its own version of the function.
- */
- flush_dcache_page(page);
}
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 7934613a98c8..194bda6d74ce 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -66,6 +66,9 @@ static struct resource bss_res = { .name = "Kernel bss", };
static struct resource elfcorehdr_res = { .name = "ELF Core hdr", };
#endif
+static int num_standard_resources;
+static struct resource *standard_resources;
+
static int __init add_resource(struct resource *parent,
struct resource *res)
{
@@ -139,7 +142,7 @@ static void __init init_resources(void)
struct resource *res = NULL;
struct resource *mem_res = NULL;
size_t mem_res_sz = 0;
- int num_resources = 0, res_idx = 0;
+ int num_resources = 0, res_idx = 0, non_resv_res = 0;
int ret = 0;
/* + 1 as memblock_alloc() might increase memblock.reserved.cnt */
@@ -195,6 +198,7 @@ static void __init init_resources(void)
/* Add /memory regions to the resource tree */
for_each_mem_region(region) {
res = &mem_res[res_idx--];
+ non_resv_res++;
if (unlikely(memblock_is_nomap(region))) {
res->name = "Reserved";
@@ -212,6 +216,9 @@ static void __init init_resources(void)
goto error;
}
+ num_standard_resources = non_resv_res;
+ standard_resources = &mem_res[res_idx + 1];
+
/* Clean-up any unused pre-allocated resources */
if (res_idx >= 0)
memblock_free(mem_res, (res_idx + 1) * sizeof(*mem_res));
@@ -223,6 +230,33 @@ static void __init init_resources(void)
memblock_free(mem_res, mem_res_sz);
}
+static int __init reserve_memblock_reserved_regions(void)
+{
+ u64 i, j;
+
+ for (i = 0; i < num_standard_resources; i++) {
+ struct resource *mem = &standard_resources[i];
+ phys_addr_t r_start, r_end, mem_size = resource_size(mem);
+
+ if (!memblock_is_region_reserved(mem->start, mem_size))
+ continue;
+
+ for_each_reserved_mem_range(j, &r_start, &r_end) {
+ resource_size_t start, end;
+
+ start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start);
+ end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end);
+
+ if (start > mem->end || end < mem->start)
+ continue;
+
+ reserve_region_with_split(mem, start, end, "Reserved");
+ }
+ }
+
+ return 0;
+}
+arch_initcall(reserve_memblock_reserved_regions);
static void __init parse_dtb(void)
{
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 8ff8e8b36524..80230de167de 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -6,6 +6,7 @@
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/irqflags.h>
#include <linux/randomize_kstack.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
@@ -151,7 +152,9 @@ asmlinkage __visible __trap_section void name(struct pt_regs *regs) \
{ \
if (user_mode(regs)) { \
irqentry_enter_from_user_mode(regs); \
+ local_irq_enable(); \
do_trap_error(regs, signo, code, regs->epc, "Oops - " str); \
+ local_irq_disable(); \
irqentry_exit_to_user_mode(regs); \
} else { \
irqentry_state_t state = irqentry_nmi_enter(regs); \
@@ -173,17 +176,14 @@ asmlinkage __visible __trap_section void do_trap_insn_illegal(struct pt_regs *re
if (user_mode(regs)) {
irqentry_enter_from_user_mode(regs);
-
local_irq_enable();
handled = riscv_v_first_use_handler(regs);
-
- local_irq_disable();
-
if (!handled)
do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->epc,
"Oops - illegal instruction");
+ local_irq_disable();
irqentry_exit_to_user_mode(regs);
} else {
irqentry_state_t state = irqentry_nmi_enter(regs);
@@ -198,47 +198,57 @@ asmlinkage __visible __trap_section void do_trap_insn_illegal(struct pt_regs *re
DO_ERROR_INFO(do_trap_load_fault,
SIGSEGV, SEGV_ACCERR, "load access fault");
-asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt_regs *regs)
+enum misaligned_access_type {
+ MISALIGNED_STORE,
+ MISALIGNED_LOAD,
+};
+static const struct {
+ const char *type_str;
+ int (*handler)(struct pt_regs *regs);
+} misaligned_handler[] = {
+ [MISALIGNED_STORE] = {
+ .type_str = "Oops - store (or AMO) address misaligned",
+ .handler = handle_misaligned_store,
+ },
+ [MISALIGNED_LOAD] = {
+ .type_str = "Oops - load address misaligned",
+ .handler = handle_misaligned_load,
+ },
+};
+
+static void do_trap_misaligned(struct pt_regs *regs, enum misaligned_access_type type)
{
+ irqentry_state_t state;
+
if (user_mode(regs)) {
irqentry_enter_from_user_mode(regs);
+ local_irq_enable();
+ } else {
+ state = irqentry_nmi_enter(regs);
+ }
- if (handle_misaligned_load(regs))
- do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
- "Oops - load address misaligned");
+ if (misaligned_handler[type].handler(regs))
+ do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
+ misaligned_handler[type].type_str);
+ if (user_mode(regs)) {
+ local_irq_disable();
irqentry_exit_to_user_mode(regs);
} else {
- irqentry_state_t state = irqentry_nmi_enter(regs);
-
- if (handle_misaligned_load(regs))
- do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
- "Oops - load address misaligned");
-
irqentry_nmi_exit(regs, state);
}
}
-asmlinkage __visible __trap_section void do_trap_store_misaligned(struct pt_regs *regs)
+asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt_regs *regs)
{
- if (user_mode(regs)) {
- irqentry_enter_from_user_mode(regs);
-
- if (handle_misaligned_store(regs))
- do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
- "Oops - store (or AMO) address misaligned");
-
- irqentry_exit_to_user_mode(regs);
- } else {
- irqentry_state_t state = irqentry_nmi_enter(regs);
-
- if (handle_misaligned_store(regs))
- do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
- "Oops - store (or AMO) address misaligned");
+ do_trap_misaligned(regs, MISALIGNED_LOAD);
+}
- irqentry_nmi_exit(regs, state);
- }
+asmlinkage __visible __trap_section void do_trap_store_misaligned(struct pt_regs *regs)
+{
+ do_trap_misaligned(regs, MISALIGNED_STORE);
}
+
DO_ERROR_INFO(do_trap_store_fault,
SIGSEGV, SEGV_ACCERR, "store (or AMO) access fault");
DO_ERROR_INFO(do_trap_ecall_s,
@@ -298,9 +308,11 @@ asmlinkage __visible __trap_section void do_trap_break(struct pt_regs *regs)
{
if (user_mode(regs)) {
irqentry_enter_from_user_mode(regs);
+ local_irq_enable();
handle_break(regs);
+ local_irq_disable();
irqentry_exit_to_user_mode(regs);
} else {
irqentry_state_t state = irqentry_nmi_enter(regs);
diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
index 9a80a12f6b48..4128aa5e0c76 100644
--- a/arch/riscv/kernel/traps_misaligned.c
+++ b/arch/riscv/kernel/traps_misaligned.c
@@ -87,6 +87,13 @@
#define INSN_MATCH_C_FSWSP 0xe002
#define INSN_MASK_C_FSWSP 0xe003
+#define INSN_MATCH_C_LHU 0x8400
+#define INSN_MASK_C_LHU 0xfc43
+#define INSN_MATCH_C_LH 0x8440
+#define INSN_MASK_C_LH 0xfc43
+#define INSN_MATCH_C_SH 0x8c00
+#define INSN_MASK_C_SH 0xfc43
+
#define INSN_LEN(insn) ((((insn) & 0x3) < 0x3) ? 2 : 4)
#if defined(CONFIG_64BIT)
@@ -405,6 +412,13 @@ int handle_misaligned_load(struct pt_regs *regs)
fp = 1;
len = 4;
#endif
+ } else if ((insn & INSN_MASK_C_LHU) == INSN_MATCH_C_LHU) {
+ len = 2;
+ insn = RVC_RS2S(insn) << SH_RD;
+ } else if ((insn & INSN_MASK_C_LH) == INSN_MATCH_C_LH) {
+ len = 2;
+ shift = 8 * (sizeof(ulong) - len);
+ insn = RVC_RS2S(insn) << SH_RD;
} else {
regs->epc = epc;
return -1;
@@ -422,7 +436,7 @@ int handle_misaligned_load(struct pt_regs *regs)
}
if (!fp)
- SET_RD(insn, regs, val.data_ulong << shift >> shift);
+ SET_RD(insn, regs, (long)(val.data_ulong << shift) >> shift);
else if (len == 8)
set_f64_rd(insn, regs, val.data_u64);
else
@@ -504,6 +518,9 @@ int handle_misaligned_store(struct pt_regs *regs)
len = 4;
val.data_ulong = GET_F32_RS2C(insn, regs);
#endif
+ } else if ((insn & INSN_MASK_C_SH) == INSN_MATCH_C_SH) {
+ len = 2;
+ val.data_ulong = GET_RS2S(insn, regs);
} else {
regs->epc = epc;
return -1;
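
The new C_LH path relies on the shift pair in the load epilogue, and the
related one-line fix casts through (long) so the right shift is arithmetic;
on an unsigned long the shift would zero-fill and drop the halfword's sign.
A worked sketch for a 2-byte load on RV64:

    unsigned long raw = 0x8001;     /* halfword assembled byte-by-byte */
    unsigned long shift = 8 * (sizeof(unsigned long) - 2);      /* 48 */

    long sext = (long)(raw << shift) >> shift;  /* 0xffffffffffff8001 */
    /* without the cast: (raw << 48) >> 48 == 0x0000000000008001 */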
diff --git a/arch/riscv/kernel/vdso/vdso.lds.S b/arch/riscv/kernel/vdso/vdso.lds.S
index cbe2a179331d..99e51f775539 100644
--- a/arch/riscv/kernel/vdso/vdso.lds.S
+++ b/arch/riscv/kernel/vdso/vdso.lds.S
@@ -31,7 +31,7 @@ SECTIONS
*(.data .data.* .gnu.linkonce.d.*)
*(.dynbss)
*(.bss .bss.* .gnu.linkonce.b.*)
- }
+ } :text
.note : { *(.note.*) } :text :note
diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c
index 6e704ed86a83..635c67ed3665 100644
--- a/arch/riscv/kvm/vcpu_sbi.c
+++ b/arch/riscv/kvm/vcpu_sbi.c
@@ -139,9 +139,9 @@ void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
struct kvm_vcpu *tmp;
kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
- spin_lock(&vcpu->arch.mp_state_lock);
+ spin_lock(&tmp->arch.mp_state_lock);
WRITE_ONCE(tmp->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
- spin_unlock(&vcpu->arch.mp_state_lock);
+ spin_unlock(&tmp->arch.mp_state_lock);
}
kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);
diff --git a/arch/riscv/kvm/vcpu_sbi_replace.c b/arch/riscv/kvm/vcpu_sbi_replace.c
index 5fbf3f94f1e8..b17fad091bab 100644
--- a/arch/riscv/kvm/vcpu_sbi_replace.c
+++ b/arch/riscv/kvm/vcpu_sbi_replace.c
@@ -103,7 +103,7 @@ static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run
kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_FENCE_I_SENT);
break;
case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
- if (cp->a2 == 0 && cp->a3 == 0)
+ if ((cp->a2 == 0 && cp->a3 == 0) || cp->a3 == -1UL)
kvm_riscv_hfence_vvma_all(vcpu->kvm, hbase, hmask);
else
kvm_riscv_hfence_vvma_gva(vcpu->kvm, hbase, hmask,
@@ -111,7 +111,7 @@ static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run
kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_SENT);
break;
case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
- if (cp->a2 == 0 && cp->a3 == 0)
+ if ((cp->a2 == 0 && cp->a3 == 0) || cp->a3 == -1UL)
kvm_riscv_hfence_vvma_asid_all(vcpu->kvm,
hbase, hmask, cp->a4);
else
@@ -127,9 +127,9 @@ static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run
case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID:
/*
* Until nested virtualization is implemented, the
- * SBI HFENCE calls should be treated as NOPs
+ * SBI HFENCE calls should return not supported,
+ * hence the fallthrough.
*/
- break;
default:
retdata->err_val = SBI_ERR_NOT_SUPPORTED;
}
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index b81672729887..b2e4b81763f8 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -24,7 +24,20 @@ void flush_icache_all(void)
if (num_online_cpus() < 2)
return;
- else if (riscv_use_sbi_for_rfence())
+
+ /*
+ * Make sure all previous writes to the D$ are ordered before sending
+ * the IPI. The RISC-V spec states that a hart must execute a data fence
+ * before triggering a remote fence.i in order to make the modification
+ * visible to remote harts.
+ *
+ * IPIs on RISC-V are triggered by MMIO writes to either CLINT or
+ * S-IMSIC, so the fence ensures previous data writes "happen before"
+ * the MMIO.
+ */
+ RISCV_FENCE(w, o);
+
+ if (riscv_use_sbi_for_rfence())
sbi_remote_fence_i(NULL);
else
on_each_cpu(ipi_remote_fence_i, NULL, 1);
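
The "w,o" predecessor/successor sets order earlier normal stores before later
device output, i.e. before the MMIO store that raises the IPI. A sketch of
what such a fence macro expands to (hypothetical name; see asm/fence.h for
the kernel's actual definition):

    #define FENCE_SKETCH(pred, succ) \
            __asm__ __volatile__ ("fence " #pred ", " #succ ::: "memory")

    /* FENCE_SKETCH(w, o): all prior stores to memory become visible before
     * any subsequent store to I/O space -- the CLINT/IMSIC doorbell
     * included. */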
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 9b6e86ce3867..bb77607c87aa 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -4,6 +4,7 @@
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
+#include <linux/mmu_notifier.h>
#include <asm/sbi.h>
#include <asm/mmu_context.h>
@@ -78,10 +79,17 @@ static void __ipi_flush_tlb_range_asid(void *info)
local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
}
-static void __flush_tlb_range(const struct cpumask *cmask, unsigned long asid,
+static inline unsigned long get_mm_asid(struct mm_struct *mm)
+{
+ return mm ? cntx2asid(atomic_long_read(&mm->context.id)) : FLUSH_TLB_NO_ASID;
+}
+
+static void __flush_tlb_range(struct mm_struct *mm,
+ const struct cpumask *cmask,
unsigned long start, unsigned long size,
unsigned long stride)
{
+ unsigned long asid = get_mm_asid(mm);
unsigned int cpu;
if (cpumask_empty(cmask))
@@ -105,30 +113,26 @@ static void __flush_tlb_range(const struct cpumask *cmask, unsigned long asid,
}
put_cpu();
-}
-static inline unsigned long get_mm_asid(struct mm_struct *mm)
-{
- return cntx2asid(atomic_long_read(&mm->context.id));
+ if (mm)
+ mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, start + size);
}
void flush_tlb_mm(struct mm_struct *mm)
{
- __flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm),
- 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
+ __flush_tlb_range(mm, mm_cpumask(mm), 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
}
void flush_tlb_mm_range(struct mm_struct *mm,
unsigned long start, unsigned long end,
unsigned int page_size)
{
- __flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm),
- start, end - start, page_size);
+ __flush_tlb_range(mm, mm_cpumask(mm), start, end - start, page_size);
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
- __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
+ __flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm),
addr, PAGE_SIZE, PAGE_SIZE);
}
@@ -161,13 +165,13 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
}
}
- __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
+ __flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm),
start, end - start, stride_size);
}
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
- __flush_tlb_range(cpu_online_mask, FLUSH_TLB_NO_ASID,
+ __flush_tlb_range(NULL, cpu_online_mask,
start, end - start, PAGE_SIZE);
}
@@ -175,7 +179,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
- __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
+ __flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm),
start, end - start, PMD_SIZE);
}
#endif
@@ -189,7 +193,10 @@ void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
struct mm_struct *mm,
unsigned long uaddr)
{
+ unsigned long start = uaddr & PAGE_MASK;
+
cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
+ mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, start + PAGE_SIZE);
}
void arch_flush_tlb_batched_pending(struct mm_struct *mm)
@@ -199,7 +206,7 @@ void arch_flush_tlb_batched_pending(struct mm_struct *mm)
void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
- __flush_tlb_range(&batch->cpumask, FLUSH_TLB_NO_ASID, 0,
- FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
+ __flush_tlb_range(NULL, &batch->cpumask,
+ 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
cpumask_clear(&batch->cpumask);
}
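
The mm argument threaded through __flush_tlb_range() does double duty: a
non-NULL mm supplies the ASID and tells the helper to notify secondary TLBs
(IOMMU SVA and similar mmu_notifier users), while NULL selects
FLUSH_TLB_NO_ASID for kernel and batched flushes and skips the notifier. A
condensed sketch of the two call shapes:

    /* user flush: ASID derived from mm, secondary TLBs notified */
    __flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm),
                      addr, PAGE_SIZE, PAGE_SIZE);

    /* kernel flush: no mm, no ASID, no notifier */
    __flush_tlb_range(NULL, cpu_online_mask, start, end - start, PAGE_SIZE);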