author		Guo Ren <ren_guo@c-sky.com>	2019-06-18 15:33:32 +0300
committer	Guo Ren <ren_guo@c-sky.com>	2019-07-19 09:21:36 +0300
commit		22d55f02b8922a097cd4be1e2f131dfa7ef65901
tree		e8e0c0f8c5071343902090aa225fb92bd5747536
parent		a231b8839cd4259de1d37a78165739a4d5d08e72
download	linux-22d55f02b8922a097cd4be1e2f131dfa7ef65901.tar.xz
csky: Use generic asid algorithm to implement switch_mm
Use the Linux generic ASID/VMID allocation algorithm to implement the
csky switch_mm() function. The algorithm comes from arm and works on
SMP systems. It helps reduce TLB flushes in switch_mm() during
task/vm switches.
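In short, each mm now carries a 64-bit context.asid whose low
CONFIG_CPU_ASID_BITS bits are the hardware ASID and whose upper bits act
as a generation counter: switching to an mm whose ASID still belongs to
the current generation reuses it with no TLB flush, and only a generation
rollover forces a single local flush. The userspace sketch below is
illustrative only; every name in it is made up and it is not the kernel
implementation (the real allocator lives in arch/csky/mm/asid.c).

/*
 * Illustrative userspace model of generation-tagged ASIDs (hypothetical
 * names throughout; not the kernel's generic allocator).
 */
#include <stdint.h>
#include <stdio.h>

#define ASID_BITS	8
#define ASID_MASK	((1u << ASID_BITS) - 1)

static uint64_t generation = 1u << ASID_BITS;	/* upper bits: generation */
static uint64_t next_asid = 1;			/* ASID 0 stays reserved */

struct mm_ctx {
	uint64_t asid;	/* generation in the upper bits, hw ASID in the low bits */
};

/* Returns the hardware ASID to program into the MMU for this mm. */
static unsigned int ctx_switch(struct mm_ctx *mm)
{
	/* Fast path: the mm's ASID is from the current generation. */
	if (mm->asid && (mm->asid >> ASID_BITS) == (generation >> ASID_BITS))
		return mm->asid & ASID_MASK;

	/* Slow path: hand out a new ASID; flush once per generation rollover. */
	if ((next_asid & ASID_MASK) == 0) {
		generation += 1u << ASID_BITS;
		next_asid = 1;
		printf("rollover: flush the local TLB once\n");
	}
	mm->asid = generation | next_asid++;
	return mm->asid & ASID_MASK;
}

int main(void)
{
	struct mm_ctx a = { 0 }, b = { 0 };

	printf("a -> %u\n", ctx_switch(&a));
	printf("b -> %u\n", ctx_switch(&b));
	printf("a -> %u (fast path, no flush)\n", ctx_switch(&a));
	return 0;
}

On real SMP hardware the allocator additionally tracks per-CPU active and
reserved ASIDs so that rollover stays safe across CPUs, which is what the
active_asids/reserved_asids per-CPU variables in the new context.c are for.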
Signed-off-by: Guo Ren <ren_guo@c-sky.com>
Cc: Arnd Bergmann <arnd@arndb.de>
-rw-r--r--  arch/csky/abiv1/inc/abi/ckmmu.h      |  6
-rw-r--r--  arch/csky/abiv2/inc/abi/ckmmu.h      |  10
-rw-r--r--  arch/csky/include/asm/mmu.h          |  1
-rw-r--r--  arch/csky/include/asm/mmu_context.h  |  12
-rw-r--r--  arch/csky/mm/Makefile                |  1
-rw-r--r--  arch/csky/mm/context.c               |  46
6 files changed, 74 insertions, 2 deletions
diff --git a/arch/csky/abiv1/inc/abi/ckmmu.h b/arch/csky/abiv1/inc/abi/ckmmu.h
index 81f37715c0d2..ba8eb5870835 100644
--- a/arch/csky/abiv1/inc/abi/ckmmu.h
+++ b/arch/csky/abiv1/inc/abi/ckmmu.h
@@ -78,6 +78,12 @@ static inline void tlb_invalid_all(void)
 	cpwcr("cpcr8", 0x04000000);
 }
 
+
+static inline void local_tlb_invalid_all(void)
+{
+	tlb_invalid_all();
+}
+
 static inline void tlb_invalid_indexed(void)
 {
 	cpwcr("cpcr8", 0x02000000);
diff --git a/arch/csky/abiv2/inc/abi/ckmmu.h b/arch/csky/abiv2/inc/abi/ckmmu.h
index e4480e6bc3b3..73ded7c72482 100644
--- a/arch/csky/abiv2/inc/abi/ckmmu.h
+++ b/arch/csky/abiv2/inc/abi/ckmmu.h
@@ -85,6 +85,16 @@ static inline void tlb_invalid_all(void)
 #endif
 }
 
+static inline void local_tlb_invalid_all(void)
+{
+#ifdef CONFIG_CPU_HAS_TLBI
+	asm volatile("tlbi.all\n":::"memory");
+	sync_is();
+#else
+	tlb_invalid_all();
+#endif
+}
+
 static inline void tlb_invalid_indexed(void)
 {
 	mtcr("cr<8, 15>", 0x02000000);
diff --git a/arch/csky/include/asm/mmu.h b/arch/csky/include/asm/mmu.h
index 06f509ae09b0..b382a14ea4ec 100644
--- a/arch/csky/include/asm/mmu.h
+++ b/arch/csky/include/asm/mmu.h
@@ -5,6 +5,7 @@
 #define __ASM_CSKY_MMU_H
 
 typedef struct {
+	atomic64_t asid;
 	void *vdso;
 } mm_context_t;
 
diff --git a/arch/csky/include/asm/mmu_context.h b/arch/csky/include/asm/mmu_context.h
index 86dde481df76..0285b0ad18b6 100644
--- a/arch/csky/include/asm/mmu_context.h
+++ b/arch/csky/include/asm/mmu_context.h
@@ -20,20 +20,28 @@ #define TLBMISS_HANDLER_SETUP_PGD_KERNEL(pgd) \
 	setup_pgd(__pa(pgd), true)
 
-#define init_new_context(tsk,mm)	0
+#define ASID_MASK		((1 << CONFIG_CPU_ASID_BITS) - 1)
+#define cpu_asid(mm)		(atomic64_read(&mm->context.asid) & ASID_MASK)
+
+#define init_new_context(tsk,mm)	({ atomic64_set(&(mm)->context.asid, 0); 0; })
 #define activate_mm(prev,next)		switch_mm(prev, next, current)
 
 #define destroy_context(mm)		do {} while (0)
 #define enter_lazy_tlb(mm, tsk)		do {} while (0)
 #define deactivate_mm(tsk, mm)		do {} while (0)
 
+void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);
+
 static inline void
 switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	  struct task_struct *tsk)
 {
+	unsigned int cpu = smp_processor_id();
+
 	if (prev != next)
-		tlb_invalid_all();
+		check_and_switch_context(next, cpu);
 
 	TLBMISS_HANDLER_SETUP_PGD(next->pgd);
+	write_mmu_entryhi(next->context.asid.counter);
 }
 
 #endif /* __ASM_CSKY_MMU_CONTEXT_H */
diff --git a/arch/csky/mm/Makefile b/arch/csky/mm/Makefile
index d3d564e5da61..c94ef6481098 100644
--- a/arch/csky/mm/Makefile
+++ b/arch/csky/mm/Makefile
@@ -13,3 +13,4 @@ obj-y += ioremap.o
 obj-y += syscache.o
 obj-y += tlb.o
 obj-y += asid.o
+obj-y += context.o
diff --git a/arch/csky/mm/context.c b/arch/csky/mm/context.c
new file mode 100644
index 000000000000..0d95bdd93846
--- /dev/null
+++ b/arch/csky/mm/context.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/bitops.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+
+#include <asm/asid.h>
+#include <asm/mmu_context.h>
+#include <asm/smp.h>
+#include <asm/tlbflush.h>
+
+static DEFINE_PER_CPU(atomic64_t, active_asids);
+static DEFINE_PER_CPU(u64, reserved_asids);
+
+struct asid_info asid_info;
+
+void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
+{
+	asid_check_context(&asid_info, &mm->context.asid, cpu, mm);
+}
+
+static void asid_flush_cpu_ctxt(void)
+{
+	local_tlb_invalid_all();
+}
+
+static int asids_init(void)
+{
+	BUG_ON(((1 << CONFIG_CPU_ASID_BITS) - 1) <= num_possible_cpus());
+
+	if (asid_allocator_init(&asid_info, CONFIG_CPU_ASID_BITS, 1,
+				asid_flush_cpu_ctxt))
+		panic("Unable to initialize ASID allocator for %lu ASIDs\n",
+		      NUM_ASIDS(&asid_info));
+
+	asid_info.active = &active_asids;
+	asid_info.reserved = &reserved_asids;
+
+	pr_info("ASID allocator initialised with %lu entries\n",
+		NUM_CTXT_ASIDS(&asid_info));
+
+	return 0;
+}
+early_initcall(asids_init);
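How the pieces fit together: switch_mm() now calls check_and_switch_context(),
which forwards to asid_check_context() from the generic allocator (asid.o is
already built, as the Makefile hunk above shows), handing it the per-mm
context.asid, the current CPU and the asid_info instance configured in
asids_init(). The snippet below is a paraphrased sketch of what that generic
helper does; it is explanatory only, and names such as active_asid(),
info->generation, info->bits and asid_new_context() follow the arm-derived
allocator rather than being quoted from asm/asid.h.

/*
 * Paraphrased sketch of asid_check_context() (explanatory only; the real
 * helper lives in the generic allocator and may differ in detail).
 */
static inline void asid_check_context(struct asid_info *info, atomic64_t *pasid,
				       unsigned int cpu, struct mm_struct *mm)
{
	u64 asid = atomic64_read(pasid);
	u64 old_active = atomic64_read(&active_asid(info, cpu));

	/*
	 * Fast path: the mm's ASID is from the current generation and the
	 * per-CPU active slot can be claimed without taking the lock, so the
	 * hardware ASID is reused with no TLB flush at all.
	 */
	if (old_active &&
	    !((asid ^ atomic64_read(&info->generation)) >> info->bits) &&
	    atomic64_cmpxchg_relaxed(&active_asid(info, cpu), old_active, asid))
		return;

	/*
	 * Slow path: take the allocator lock, hand out a fresh ASID and, on a
	 * generation rollover, invoke the flush callback registered by
	 * asids_init() -- which is where local_tlb_invalid_all() comes in.
	 */
	asid_new_context(info, pasid, cpu, mm);
}

The practical effect is that a task switch between different mms no longer
flushes the TLB unconditionally, as the old tlb_invalid_all() call did; a
flush now happens only when the 1 << CONFIG_CPU_ASID_BITS ASID space is
exhausted and a generation rollover occurs.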