From 00a9730e1007c6cc87a7c78af2f24a4105d616ee Mon Sep 17 00:00:00 2001
From: Guo Ren <ren_guo@c-sky.com>
Date: Wed, 5 Sep 2018 14:25:10 +0800
Subject: csky: Cache and TLB routines

This patch adds cache and TLB sync code for abiv1 & abiv2.

Signed-off-by: Guo Ren <ren_guo@c-sky.com>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
---
 arch/csky/abiv1/cacheflush.c         | 52 ++++++++++++++++++++++++++++++++++
 arch/csky/abiv1/inc/abi/cacheflush.h | 49 +++++++++++++++++++++++++++++++++
 2 files changed, 101 insertions(+)
 create mode 100644 arch/csky/abiv1/cacheflush.c
 create mode 100644 arch/csky/abiv1/inc/abi/cacheflush.h

diff --git a/arch/csky/abiv1/cacheflush.c b/arch/csky/abiv1/cacheflush.c
new file mode 100644
index 000000000000..10af8b6fe322
--- /dev/null
+++ b/arch/csky/abiv1/cacheflush.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/syscalls.h>
+#include <linux/spinlock.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/cacheflush.h>
+#include <asm/cachectl.h>
+
+void flush_dcache_page(struct page *page)
+{
+	struct address_space *mapping = page_mapping(page);
+	unsigned long addr;
+
+	if (mapping && !mapping_mapped(mapping)) {
+		set_bit(PG_arch_1, &(page)->flags);
+		return;
+	}
+
+	/*
+	 * We could delay the flush for the !page_mapping case too. But that
+	 * case is for exec env/arg pages and those are 99% certain to get
+	 * faulted into the tlb (and thus flushed) anyway.
+	 */
+	addr = (unsigned long) page_address(page);
+	dcache_wb_range(addr, addr + PAGE_SIZE);
+}
+
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
+		      pte_t *pte)
+{
+	unsigned long addr;
+	struct page *page;
+	unsigned long pfn;
+
+	pfn = pte_pfn(*pte);
+	if (unlikely(!pfn_valid(pfn)))
+		return;
+
+	page = pfn_to_page(pfn);
+	addr = (unsigned long) page_address(page);
+
+	if (vma->vm_flags & VM_EXEC ||
+	    pages_do_alias(addr, address & PAGE_MASK))
+		cache_wbinv_all();
+
+	clear_bit(PG_arch_1, &(page)->flags);
+}
diff --git a/arch/csky/abiv1/inc/abi/cacheflush.h b/arch/csky/abiv1/inc/abi/cacheflush.h
new file mode 100644
index 000000000000..5f663aef9b1b
--- /dev/null
+++ b/arch/csky/abiv1/inc/abi/cacheflush.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ABI_CSKY_CACHEFLUSH_H
+#define __ABI_CSKY_CACHEFLUSH_H
+
+#include <linux/compiler.h>
+#include <asm/string.h>
+#include <asm/cache.h>
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+extern void flush_dcache_page(struct page *);
+
+#define flush_cache_mm(mm)			cache_wbinv_all()
+#define flush_cache_page(vma, page, pfn)	cache_wbinv_all()
+#define flush_cache_dup_mm(mm)			cache_wbinv_all()
+
+/*
+ * If current_mm != vma->mm, cache_wbinv_range(start, end) would be broken,
+ * so use cache_wbinv_all() here; this needs to be improved in the future.
+ */
+#define flush_cache_range(vma, start, end)	cache_wbinv_all()
+#define flush_cache_vmap(start, end)		cache_wbinv_range(start, end)
+#define flush_cache_vunmap(start, end)		cache_wbinv_range(start, end)
+
+#define flush_icache_page(vma, page)		cache_wbinv_all()
+#define flush_icache_range(start, end)		cache_wbinv_range(start, end)
+
+#define flush_icache_user_range(vma, pg, adr, len) \
+	cache_wbinv_range(adr, adr + len)
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+do { \
+	cache_wbinv_all(); \
+	memcpy(dst, src, len); \
+	cache_wbinv_all(); \
+} while (0)
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+do { \
+	cache_wbinv_all(); \
+	memcpy(dst, src, len); \
+	cache_wbinv_all(); \
+} while (0)
+
+#define flush_dcache_mmap_lock(mapping)		do { } while (0)
+#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
+
+#endif /* __ABI_CSKY_CACHEFLUSH_H */
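
For illustration, here is a minimal user-space sketch of the aliasing test
behind pages_do_alias() as used in update_mmu_cache() above. This is not
kernel code: PAGE_SHIFT, the 16 KiB way size, and the two sample addresses
are made-up values standing in for the real csky abiv1 cache geometry.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* Assumed cache geometry: one 16 KiB direct-mapped way, i.e. two
 * virtual index bits sit above the page offset. */
#define CACHE_WAY_SIZE	(16 * 1024UL)
#define ALIAS_MASK	((CACHE_WAY_SIZE - 1) & PAGE_MASK)

/* Two virtual mappings of the same physical page alias (i.e. can be
 * cached in two different sets, leaving stale copies) when their index
 * bits above the page offset differ. */
static bool pages_do_alias(uintptr_t addr1, uintptr_t addr2)
{
	return ((addr1 ^ addr2) & ALIAS_MASK) != 0;
}

int main(void)
{
	uintptr_t kaddr = 0x80002000;	/* kernel linear mapping (made up) */
	uintptr_t uaddr = 0x00401000;	/* user mapping of the same page */

	printf("pages_do_alias: %d\n",
	       pages_do_alias(kaddr, uaddr & PAGE_MASK));
	return 0;
}

With a 16 KiB way and 4 KiB pages, bits 12-13 index the cache, so two
mappings of one physical page that differ in those bits can leave two
incoherent cached copies; that is why update_mmu_cache() falls back to
cache_wbinv_all() when the test (or VM_EXEC) fires.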