Diffstat (limited to 'arch')
127 files changed, 10562 insertions, 0 deletions
diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig new file mode 100644 index 000000000000..0a0558567eaa --- /dev/null +++ b/arch/csky/Kconfig @@ -0,0 +1,205 @@ +config CSKY + def_bool y + select ARCH_HAS_SYNC_DMA_FOR_CPU + select ARCH_HAS_SYNC_DMA_FOR_DEVICE + select ARCH_USE_BUILTIN_BSWAP + select ARCH_USE_QUEUED_RWLOCKS if NR_CPUS>2 + select COMMON_CLK + select CLKSRC_MMIO + select CLKSRC_OF + select DMA_DIRECT_OPS + select DMA_NONCOHERENT_OPS + select IRQ_DOMAIN + select HANDLE_DOMAIN_IRQ + select DW_APB_TIMER_OF + select GENERIC_LIB_ASHLDI3 + select GENERIC_LIB_ASHRDI3 + select GENERIC_LIB_LSHRDI3 + select GENERIC_LIB_MULDI3 + select GENERIC_LIB_CMPDI2 + select GENERIC_LIB_UCMPDI2 + select GENERIC_ALLOCATOR + select GENERIC_ATOMIC64 + select GENERIC_CLOCKEVENTS + select GENERIC_CPU_DEVICES + select GENERIC_IRQ_CHIP + select GENERIC_IRQ_PROBE + select GENERIC_IRQ_SHOW + select GENERIC_IRQ_MULTI_HANDLER + select GENERIC_SCHED_CLOCK + select GENERIC_SMP_IDLE_THREAD + select HAVE_ARCH_TRACEHOOK + select HAVE_GENERIC_DMA_COHERENT + select HAVE_KERNEL_GZIP + select HAVE_KERNEL_LZO + select HAVE_KERNEL_LZMA + select HAVE_C_RECORDMCOUNT + select HAVE_DMA_API_DEBUG + select HAVE_DMA_CONTIGUOUS + select HAVE_MEMBLOCK + select MAY_HAVE_SPARSE_IRQ + select MODULES_USE_ELF_RELA if MODULES + select NO_BOOTMEM + select OF + select OF_EARLY_FLATTREE + select OF_RESERVED_MEM + select PERF_USE_VMALLOC + select RTC_LIB + select TIMER_OF + select USB_ARCH_HAS_EHCI + select USB_ARCH_HAS_OHCI + +config CPU_HAS_CACHEV2 + bool + +config CPU_HAS_FPUV2 + bool + +config CPU_HAS_HILO + bool + +config CPU_HAS_TLBI + bool + +config CPU_HAS_LDSTEX + bool + help + For SMP, the CPU needs the "ldex&stex" instructions for atomic operations. + +config CPU_NEED_TLBSYNC + bool + +config CPU_NEED_SOFTALIGN + bool + +config CPU_NO_USER_BKPT + bool + help + For abiv2, "trap 1" cannot be used as the user-space breakpoint in gdbserver, + because abiv2 is a 16/32-bit instruction set and "trap 1" is a 32-bit + instruction. So a 16-bit instruction is needed as the user-space breakpoint, + and it triggers an illegal instruction exception. + In the kernel, the instruction at *regs->pc is parsed to determine whether to + send SIGTRAP or not.
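The mechanism described in the CPU_NO_USER_BKPT help text can be sketched in C as follows. This is an illustration, not code from this patch, and the USR_BKPT encoding below is an assumed placeholder: on an illegal-instruction exception, the handler peeks at the halfword at regs->pc and reports SIGTRAP rather than SIGILL when it finds the agreed 16-bit breakpoint pattern.

/* Kernel-context sketch; assumes <linux/uaccess.h> and <linux/ptrace.h>.
 * The USR_BKPT value is an assumption, not taken from this patch. */
#define USR_BKPT 0x1464	/* hypothetical 16-bit breakpoint encoding */

static int is_user_bkpt(struct pt_regs *regs)
{
	u16 insn;

	/* read the faulting halfword back from user space */
	if (get_user(insn, (u16 __user *)instruction_pointer(regs)))
		return 0;
	return insn == USR_BKPT;
}

/* in the illegal-instruction handler:
 *	send SIGTRAP if is_user_bkpt(regs) is true, otherwise SIGILL
 */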
+ +config GENERIC_CALIBRATE_DELAY + def_bool y + +config GENERIC_CSUM + def_bool y + +config GENERIC_HWEIGHT + def_bool y + +config MMU + def_bool y + +config RWSEM_GENERIC_SPINLOCK + def_bool y + +config TIME_LOW_RES + def_bool y + +config TRACE_IRQFLAGS_SUPPORT + def_bool y + +config CPU_TLB_SIZE + int + default "128" if (CPU_CK610 || CPU_CK807 || CPU_CK810) + default "1024" if (CPU_CK860) + +config CPU_ASID_BITS + int + default "8" if (CPU_CK610 || CPU_CK807 || CPU_CK810) + default "12" if (CPU_CK860) + +config L1_CACHE_SHIFT + int + default "4" if (CPU_CK610) + default "5" if (CPU_CK807 || CPU_CK810) + default "6" if (CPU_CK860) + +menu "Processor type and features" + +choice + prompt "CPU MODEL" + default CPU_CK807 + +config CPU_CK610 + bool "CSKY CPU ck610" + select CPU_NEED_TLBSYNC + select CPU_NEED_SOFTALIGN + select CPU_NO_USER_BKPT + +config CPU_CK810 + bool "CSKY CPU ck810" + select CPU_HAS_HILO + select CPU_NEED_TLBSYNC + +config CPU_CK807 + bool "CSKY CPU ck807" + select CPU_HAS_HILO + +config CPU_CK860 + bool "CSKY CPU ck860" + select CPU_HAS_TLBI + select CPU_HAS_CACHEV2 + select CPU_HAS_LDSTEX + select CPU_HAS_FPUV2 +endchoice + +choice + prompt "Power Manager Instruction (wait/doze/stop)" + default CPU_PM_NONE + +config CPU_PM_NONE + bool "None" + +config CPU_PM_WAIT + bool "wait" + +config CPU_PM_DOZE + bool "doze" + +config CPU_PM_STOP + bool "stop" +endchoice + +config CPU_HAS_VDSP + bool "CPU has VDSP coprocessor" + depends on CPU_HAS_FPU && CPU_HAS_FPUV2 + +config CPU_HAS_FPU + bool "CPU has FPU coprocessor" + depends on CPU_CK807 || CPU_CK810 || CPU_CK860 + +config CPU_HAS_TEE + bool "CPU has Trusted Execution Environment" + depends on CPU_CK810 + +config SMP + bool "Symmetric Multi-Processing (SMP) support for C-SKY" + depends on CPU_CK860 + default n + +config NR_CPUS + int "Maximum number of CPUs (2-32)" + range 2 32 + depends on SMP + default "2" + +config HIGHMEM + bool "High Memory Support" + depends on !CPU_CK610 + default y + +config FORCE_MAX_ZONEORDER + int "Maximum zone order" + default "11" + +config RAM_BASE + hex "DRAM start addr (must match the memory node in the dts)" + default 0x0 + +endmenu + +source "kernel/Kconfig.hz" diff --git a/arch/csky/Kconfig.debug b/arch/csky/Kconfig.debug new file mode 100644 index 000000000000..48cf6ff9df4a --- /dev/null +++ b/arch/csky/Kconfig.debug @@ -0,0 +1,9 @@ +menu "C-SKY Debug Options" +config CSKY_BUILTIN_DTB + string "Use kernel builtin dtb" + help + The user can specify a dtb to be built into the kernel, used instead + of the one passed from the bootloader. + This is sometimes useful for debugging: with a built-in dtb there is + no need to modify the bootloader at all.
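The Makefile hunk that follows only links arch/csky/boot/dts/ in when CONFIG_CSKY_BUILTIN_DTB is set; the setup-side consumer of the option is not part of this section. As a rough illustration of the usual pattern (the symbol __dtb_start and the setup_dtb() helper are assumptions; only early_init_dt_scan() is a real kernel API, from <linux/of_fdt.h>):

/* Kernel-context sketch, not code from this patch. */
extern char __dtb_start[];	/* assumed symbol for the linked-in blob */

static void __init setup_dtb(void *bootloader_dtb)
{
#ifdef CONFIG_CSKY_BUILTIN_DTB
	early_init_dt_scan(__dtb_start);	/* fixed, known dtb for debug */
#else
	early_init_dt_scan(bootloader_dtb);	/* normal path */
#endif
}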
+endmenu diff --git a/arch/csky/Makefile b/arch/csky/Makefile new file mode 100644 index 000000000000..67a4ae1fba2b --- /dev/null +++ b/arch/csky/Makefile @@ -0,0 +1,93 @@ +OBJCOPYFLAGS :=-O binary +GZFLAGS :=-9 +KBUILD_DEFCONFIG := defconfig + +ifdef CONFIG_CPU_HAS_FPU +FPUEXT = f +endif + +ifdef CONFIG_CPU_HAS_VDSP +VDSPEXT = v +endif + +ifdef CONFIG_CPU_HAS_TEE +TEEEXT = t +endif + +ifdef CONFIG_CPU_CK610 +CPUTYPE = ck610 +CSKYABI = abiv1 +endif + +ifdef CONFIG_CPU_CK810 +CPUTYPE = ck810 +CSKYABI = abiv2 +endif + +ifdef CONFIG_CPU_CK807 +CPUTYPE = ck807 +CSKYABI = abiv2 +endif + +ifdef CONFIG_CPU_CK860 +CPUTYPE = ck860 +CSKYABI = abiv2 +endif + +ifneq ($(CSKYABI),) +MCPU_STR = $(CPUTYPE)$(FPUEXT)$(VDSPEXT)$(TEEEXT) +KBUILD_CFLAGS += -mcpu=$(MCPU_STR) +KBUILD_CFLAGS += -DCSKYCPU_DEF_NAME=\"$(MCPU_STR)\" +KBUILD_CFLAGS += -msoft-float -mdiv +KBUILD_CFLAGS += -fno-tree-vectorize +endif + +KBUILD_CFLAGS += -pipe +ifeq ($(CSKYABI),abiv2) +KBUILD_CFLAGS += -mno-stack-size +endif + +abidirs := $(patsubst %,arch/csky/%/,$(CSKYABI)) +KBUILD_CFLAGS += $(patsubst %,-I$(srctree)/%inc,$(abidirs)) + +KBUILD_CPPFLAGS += -mlittle-endian +LDFLAGS += -EL + +KBUILD_AFLAGS += $(KBUILD_CFLAGS) + +head-y := arch/csky/kernel/head.o + +core-y += arch/csky/kernel/ +core-y += arch/csky/mm/ +core-y += arch/csky/$(CSKYABI)/ + +libs-y += arch/csky/lib/ \ + $(shell $(CC) $(KBUILD_CFLAGS) $(KCFLAGS) -print-libgcc-file-name) + +boot := arch/csky/boot +ifneq '$(CONFIG_CSKY_BUILTIN_DTB)' '""' +core-y += $(boot)/dts/ +endif + +all: zImage + + +dtbs: scripts + $(Q)$(MAKE) $(build)=$(boot)/dts + +%.dtb %.dtb.S %.dtb.o: scripts + $(Q)$(MAKE) $(build)=$(boot)/dts $(boot)/dts/$@ + +zImage Image uImage: vmlinux dtbs + $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ + +archclean: + $(Q)$(MAKE) $(clean)=$(boot) + $(Q)$(MAKE) $(clean)=$(boot)/dts + rm -rf arch/csky/include/generated + +define archhelp + echo '* zImage - Compressed kernel image (arch/$(ARCH)/boot/zImage)' + echo ' Image - Uncompressed kernel image (arch/$(ARCH)/boot/Image)' + echo ' uImage - U-Boot wrapped zImage' +endef diff --git a/arch/csky/abiv1/Makefile b/arch/csky/abiv1/Makefile new file mode 100644 index 000000000000..7c062768d44d --- /dev/null +++ b/arch/csky/abiv1/Makefile @@ -0,0 +1,8 @@ +obj-$(CONFIG_CPU_NEED_SOFTALIGN) += alignment.o +obj-y += bswapdi.o +obj-y += bswapsi.o +obj-y += cacheflush.o +obj-y += mmap.o +obj-y += memcpy.o +obj-y += memset.o +obj-y += strksyms.o diff --git a/arch/csky/abiv1/alignment.c b/arch/csky/abiv1/alignment.c new file mode 100644 index 000000000000..60205e98fb87 --- /dev/null +++ b/arch/csky/abiv1/alignment.c @@ -0,0 +1,326 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/kernel.h> +#include <linux/uaccess.h> +#include <linux/ptrace.h> + +static int align_enable = 1; +static int align_count; + +static inline uint32_t get_ptreg(struct pt_regs *regs, uint32_t rx) +{ + return rx == 15 ? regs->lr : *((uint32_t *)&(regs->a0) - 2 + rx); +} + +static inline void put_ptreg(struct pt_regs *regs, uint32_t rx, uint32_t val) +{ + if (rx == 15) + regs->lr = val; + else + *((uint32_t *)&(regs->a0) - 2 + rx) = val; +} + +/* + * Get byte-value from addr and set it to *valp. 
+ * + * Success: return 0 + * Failure: return 1 + */ +static int ldb_asm(uint32_t addr, uint32_t *valp) +{ + uint32_t val; + int err; + + if (!access_ok(VERIFY_READ, (void *)addr, 1)) + return 1; + + asm volatile ( + "movi %0, 0\n" + "1:\n" + "ldb %1, (%2)\n" + "br 3f\n" + "2:\n" + "movi %0, 1\n" + "br 3f\n" + ".section __ex_table,\"a\"\n" + ".align 2\n" + ".long 1b, 2b\n" + ".previous\n" + "3:\n" + : "=&r"(err), "=r"(val) + : "r" (addr) + ); + + *valp = val; + + return err; +} + +/* + * Put byte-value to addr. + * + * Success: return 0 + * Failure: return 1 + */ +static int stb_asm(uint32_t addr, uint32_t val) +{ + int err; + + if (!access_ok(VERIFY_WRITE, (void *)addr, 1)) + return 1; + + asm volatile ( + "movi %0, 0\n" + "1:\n" + "stb %1, (%2)\n" + "br 3f\n" + "2:\n" + "movi %0, 1\n" + "br 3f\n" + ".section __ex_table,\"a\"\n" + ".align 2\n" + ".long 1b, 2b\n" + ".previous\n" + "3:\n" + : "=&r"(err) + : "r"(val), "r" (addr) + ); + + return err; +} + +/* + * Get half-word from [rx + imm] + * + * Success: return 0 + * Failure: return 1 + */ +static int ldh_c(struct pt_regs *regs, uint32_t rz, uint32_t addr) +{ + uint32_t byte0, byte1; + + if (ldb_asm(addr, &byte0)) + return 1; + addr += 1; + if (ldb_asm(addr, &byte1)) + return 1; + + byte0 |= byte1 << 8; + put_ptreg(regs, rz, byte0); + + return 0; +} + +/* + * Store half-word to [rx + imm] + * + * Success: return 0 + * Failure: return 1 + */ +static int sth_c(struct pt_regs *regs, uint32_t rz, uint32_t addr) +{ + uint32_t byte0, byte1; + + byte0 = byte1 = get_ptreg(regs, rz); + + byte0 &= 0xff; + + if (stb_asm(addr, byte0)) + return 1; + + addr += 1; + byte1 = (byte1 >> 8) & 0xff; + if (stb_asm(addr, byte1)) + return 1; + + return 0; +} + +/* + * Get word from [rx + imm] + * + * Success: return 0 + * Failure: return 1 + */ +static int ldw_c(struct pt_regs *regs, uint32_t rz, uint32_t addr) +{ + uint32_t byte0, byte1, byte2, byte3; + + if (ldb_asm(addr, &byte0)) + return 1; + + addr += 1; + if (ldb_asm(addr, &byte1)) + return 1; + + addr += 1; + if (ldb_asm(addr, &byte2)) + return 1; + + addr += 1; + if (ldb_asm(addr, &byte3)) + return 1; + + byte0 |= byte1 << 8; + byte0 |= byte2 << 16; + byte0 |= byte3 << 24; + + put_ptreg(regs, rz, byte0); + + return 0; +} + +/* + * Store word to [rx + imm] + * + * Success: return 0 + * Failure: return 1 + */ +static int stw_c(struct pt_regs *regs, uint32_t rz, uint32_t addr) +{ + uint32_t byte0, byte1, byte2, byte3; + + byte0 = byte1 = byte2 = byte3 = get_ptreg(regs, rz); + + byte0 &= 0xff; + + if (stb_asm(addr, byte0)) + return 1; + + addr += 1; + byte1 = (byte1 >> 8) & 0xff; + if (stb_asm(addr, byte1)) + return 1; + + addr += 1; + byte2 = (byte2 >> 16) & 0xff; + if (stb_asm(addr, byte2)) + return 1; + + addr += 1; + byte3 = (byte3 >> 24) & 0xff; + if (stb_asm(addr, byte3)) + return 1; + + align_count++; + + return 0; +} + +extern int fixup_exception(struct pt_regs *regs); + +#define OP_LDH 0xc000 +#define OP_STH 0xd000 +#define OP_LDW 0x8000 +#define OP_STW 0x9000 + +void csky_alignment(struct pt_regs *regs) +{ + int ret; + uint16_t tmp; + uint32_t opcode = 0; + uint32_t rx = 0; + uint32_t rz = 0; + uint32_t imm = 0; + uint32_t addr = 0; + + if (!user_mode(regs)) + goto bad_area; + + ret = get_user(tmp, (uint16_t *)instruction_pointer(regs)); + if (ret) { + pr_err("%s get_user failed.\n", __func__); + goto bad_area; + } + + opcode = (uint32_t)tmp; + + rx = opcode & 0xf; + imm = (opcode >> 4) & 0xf; + rz = (opcode >> 8) & 0xf; + opcode &= 0xf000; + + if (rx == 0 || rx == 1 || rz == 0 || rz == 1) + goto 
bad_area; + switch (opcode) { + case OP_LDH: + addr = get_ptreg(regs, rx) + (imm << 1); + ret = ldh_c(regs, rz, addr); + break; + case OP_LDW: + addr = get_ptreg(regs, rx) + (imm << 2); + ret = ldw_c(regs, rz, addr); + break; + case OP_STH: + addr = get_ptreg(regs, rx) + (imm << 1); + ret = sth_c(regs, rz, addr); + break; + case OP_STW: + addr = get_ptreg(regs, rx) + (imm << 2); + ret = stw_c(regs, rz, addr); + break; + } + + if (ret) + goto bad_area; + + regs->pc += 2; + + return; + +bad_area: + if (!user_mode(regs)) { + if (fixup_exception(regs)) + return; + + bust_spinlocks(1); + pr_alert("%s opcode: %x, rz: %d, rx: %d, imm: %d, addr: %x.\n", + __func__, opcode, rz, rx, imm, addr); + show_regs(regs); + bust_spinlocks(0); + do_exit(SIGKILL); + } + + force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)addr, current); +} + +static struct ctl_table alignment_tbl[4] = { + { + .procname = "enable", + .data = &align_enable, + .maxlen = sizeof(align_enable), + .mode = 0666, + .proc_handler = &proc_dointvec + }, + { + .procname = "count", + .data = &align_count, + .maxlen = sizeof(align_count), + .mode = 0666, + .proc_handler = &proc_dointvec + }, + {} +}; + +static struct ctl_table sysctl_table[2] = { + { + .procname = "csky_alignment", + .mode = 0555, + .child = alignment_tbl}, + {} +}; + +static struct ctl_path sysctl_path[2] = { + {.procname = "csky"}, + {} +}; + +static int __init csky_alignment_init(void) +{ + register_sysctl_paths(sysctl_path, sysctl_table); + return 0; +} + +arch_initcall(csky_alignment_init); diff --git a/arch/csky/abiv1/bswapdi.c b/arch/csky/abiv1/bswapdi.c new file mode 100644 index 000000000000..f50a1d6e337a --- /dev/null +++ b/arch/csky/abiv1/bswapdi.c @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/export.h> +#include <linux/compiler.h> +#include <uapi/linux/swab.h> + +unsigned long long notrace __bswapdi2(unsigned long long u) +{ + return ___constant_swab64(u); +} +EXPORT_SYMBOL(__bswapdi2); diff --git a/arch/csky/abiv1/bswapsi.c b/arch/csky/abiv1/bswapsi.c new file mode 100644 index 000000000000..0f79182e8a5b --- /dev/null +++ b/arch/csky/abiv1/bswapsi.c @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/export.h> +#include <linux/compiler.h> +#include <uapi/linux/swab.h> + +unsigned int notrace __bswapsi2(unsigned int u) +{ + return ___constant_swab32(u); +} +EXPORT_SYMBOL(__bswapsi2); diff --git a/arch/csky/abiv1/cacheflush.c b/arch/csky/abiv1/cacheflush.c new file mode 100644 index 000000000000..10af8b6fe322 --- /dev/null +++ b/arch/csky/abiv1/cacheflush.c @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/fs.h> +#include <linux/syscalls.h> +#include <linux/spinlock.h> +#include <asm/page.h> +#include <asm/cache.h> +#include <asm/cacheflush.h> +#include <asm/cachectl.h> + +void flush_dcache_page(struct page *page) +{ + struct address_space *mapping = page_mapping(page); + unsigned long addr; + + if (mapping && !mapping_mapped(mapping)) { + set_bit(PG_arch_1, &(page)->flags); + return; + } + + /* + * We could delay the flush for the !page_mapping case too. But that + * case is for exec env/arg pages and those are 99% certain to + * get faulted into the tlb (and thus flushed) anyway.
+ */ + addr = (unsigned long) page_address(page); + dcache_wb_range(addr, addr + PAGE_SIZE); +} + +void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, + pte_t *pte) +{ + unsigned long addr; + struct page *page; + unsigned long pfn; + + pfn = pte_pfn(*pte); + if (unlikely(!pfn_valid(pfn))) + return; + + page = pfn_to_page(pfn); + addr = (unsigned long) page_address(page); + + if (vma->vm_flags & VM_EXEC || + pages_do_alias(addr, address & PAGE_MASK)) + cache_wbinv_all(); + + clear_bit(PG_arch_1, &(page)->flags); +} diff --git a/arch/csky/abiv1/inc/abi/cacheflush.h b/arch/csky/abiv1/inc/abi/cacheflush.h new file mode 100644 index 000000000000..5f663aef9b1b --- /dev/null +++ b/arch/csky/abiv1/inc/abi/cacheflush.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ABI_CSKY_CACHEFLUSH_H +#define __ABI_CSKY_CACHEFLUSH_H + +#include <linux/compiler.h> +#include <asm/string.h> +#include <asm/cache.h> + +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 +extern void flush_dcache_page(struct page *); + +#define flush_cache_mm(mm) cache_wbinv_all() +#define flush_cache_page(vma, page, pfn) cache_wbinv_all() +#define flush_cache_dup_mm(mm) cache_wbinv_all() + +/* + * if (current_mm != vma->mm) cache_wbinv_range(start, end) will be broken. + * Use cache_wbinv_all() here and need to be improved in future. + */ +#define flush_cache_range(vma, start, end) cache_wbinv_all() +#define flush_cache_vmap(start, end) cache_wbinv_range(start, end) +#define flush_cache_vunmap(start, end) cache_wbinv_range(start, end) + +#define flush_icache_page(vma, page) cache_wbinv_all() +#define flush_icache_range(start, end) cache_wbinv_range(start, end) + +#define flush_icache_user_range(vma, pg, adr, len) \ + cache_wbinv_range(adr, adr + len) + +#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ +do { \ + cache_wbinv_all(); \ + memcpy(dst, src, len); \ + cache_wbinv_all(); \ +} while (0) + +#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ +do { \ + cache_wbinv_all(); \ + memcpy(dst, src, len); \ + cache_wbinv_all(); \ +} while (0) + +#define flush_dcache_mmap_lock(mapping) do {} while (0) +#define flush_dcache_mmap_unlock(mapping) do {} while (0) + +#endif /* __ABI_CSKY_CACHEFLUSH_H */ diff --git a/arch/csky/abiv1/inc/abi/ckmmu.h b/arch/csky/abiv1/inc/abi/ckmmu.h new file mode 100644 index 000000000000..3a002017bebe --- /dev/null +++ b/arch/csky/abiv1/inc/abi/ckmmu.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_CSKY_CKMMUV1_H +#define __ASM_CSKY_CKMMUV1_H +#include <abi/reg_ops.h> + +static inline int read_mmu_index(void) +{ + return cprcr("cpcr0"); +} + +static inline void write_mmu_index(int value) +{ + cpwcr("cpcr0", value); +} + +static inline int read_mmu_entrylo0(void) +{ + return cprcr("cpcr2") << 6; +} + +static inline int read_mmu_entrylo1(void) +{ + return cprcr("cpcr3") << 6; +} + +static inline void write_mmu_pagemask(int value) +{ + cpwcr("cpcr6", value); +} + +static inline int read_mmu_entryhi(void) +{ + return cprcr("cpcr4"); +} + +static inline void write_mmu_entryhi(int value) +{ + cpwcr("cpcr4", value); +} + +/* + * TLB operations. 
+ */ +static inline void tlb_probe(void) +{ + cpwcr("cpcr8", 0x80000000); +} + +static inline void tlb_read(void) +{ + cpwcr("cpcr8", 0x40000000); +} + +static inline void tlb_invalid_all(void) +{ + cpwcr("cpcr8", 0x04000000); +} + +static inline void tlb_invalid_indexed(void) +{ + cpwcr("cpcr8", 0x02000000); +} + +static inline void setup_pgd(unsigned long pgd, bool kernel) +{ + cpwcr("cpcr29", pgd); +} + +static inline unsigned long get_pgd(void) +{ + return cprcr("cpcr29"); +} +#endif /* __ASM_CSKY_CKMMUV1_H */ diff --git a/arch/csky/abiv1/inc/abi/elf.h b/arch/csky/abiv1/inc/abi/elf.h new file mode 100644 index 000000000000..3058cc06b104 --- /dev/null +++ b/arch/csky/abiv1/inc/abi/elf.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ABI_CSKY_ELF_H +#define __ABI_CSKY_ELF_H + +#define ELF_CORE_COPY_REGS(pr_reg, regs) do { \ + pr_reg[0] = regs->pc; \ + pr_reg[1] = regs->regs[9]; \ + pr_reg[2] = regs->usp; \ + pr_reg[3] = regs->sr; \ + pr_reg[4] = regs->a0; \ + pr_reg[5] = regs->a1; \ + pr_reg[6] = regs->a2; \ + pr_reg[7] = regs->a3; \ + pr_reg[8] = regs->regs[0]; \ + pr_reg[9] = regs->regs[1]; \ + pr_reg[10] = regs->regs[2]; \ + pr_reg[11] = regs->regs[3]; \ + pr_reg[12] = regs->regs[4]; \ + pr_reg[13] = regs->regs[5]; \ + pr_reg[14] = regs->regs[6]; \ + pr_reg[15] = regs->regs[7]; \ + pr_reg[16] = regs->regs[8]; \ + pr_reg[17] = regs->lr; \ +} while (0); +#endif /* __ABI_CSKY_ELF_H */ diff --git a/arch/csky/abiv1/inc/abi/entry.h b/arch/csky/abiv1/inc/abi/entry.h new file mode 100644 index 000000000000..3f3faab3d747 --- /dev/null +++ b/arch/csky/abiv1/inc/abi/entry.h @@ -0,0 +1,160 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_CSKY_ENTRY_H +#define __ASM_CSKY_ENTRY_H + +#include <asm/setup.h> +#include <abi/regdef.h> + +#define LSAVE_PC 8 +#define LSAVE_PSR 12 +#define LSAVE_A0 24 +#define LSAVE_A1 28 +#define LSAVE_A2 32 +#define LSAVE_A3 36 +#define LSAVE_A4 40 +#define LSAVE_A5 44 + +#define EPC_INCREASE 2 +#define EPC_KEEP 0 + +.macro USPTOKSP + mtcr sp, ss1 + mfcr sp, ss0 +.endm + +.macro KSPTOUSP + mtcr sp, ss0 + mfcr sp, ss1 +.endm + +.macro INCTRAP rx + addi \rx, EPC_INCREASE +.endm + +.macro SAVE_ALL epc_inc + mtcr r13, ss2 + mfcr r13, epsr + btsti r13, 31 + bt 1f + USPTOKSP +1: + subi sp, 32 + subi sp, 32 + subi sp, 16 + stw r13, (sp, 12) + + stw lr, (sp, 4) + + mfcr lr, epc + movi r13, \epc_inc + add lr, r13 + stw lr, (sp, 8) + + mfcr lr, ss1 + stw lr, (sp, 16) + + stw a0, (sp, 20) + stw a0, (sp, 24) + stw a1, (sp, 28) + stw a2, (sp, 32) + stw a3, (sp, 36) + + addi sp, 32 + addi sp, 8 + mfcr r13, ss2 + stw r6, (sp) + stw r7, (sp, 4) + stw r8, (sp, 8) + stw r9, (sp, 12) + stw r10, (sp, 16) + stw r11, (sp, 20) + stw r12, (sp, 24) + stw r13, (sp, 28) + stw r14, (sp, 32) + stw r1, (sp, 36) + subi sp, 32 + subi sp, 8 +.endm + +.macro RESTORE_ALL + psrclr ie + ldw lr, (sp, 4) + ldw a0, (sp, 8) + mtcr a0, epc + ldw a0, (sp, 12) + mtcr a0, epsr + btsti a0, 31 + ldw a0, (sp, 16) + mtcr a0, ss1 + + ldw a0, (sp, 24) + ldw a1, (sp, 28) + ldw a2, (sp, 32) + ldw a3, (sp, 36) + + addi sp, 32 + addi sp, 8 + ldw r6, (sp) + ldw r7, (sp, 4) + ldw r8, (sp, 8) + ldw r9, (sp, 12) + ldw r10, (sp, 16) + ldw r11, (sp, 20) + ldw r12, (sp, 24) + ldw r13, (sp, 28) + ldw r14, (sp, 32) + ldw r1, (sp, 36) + addi sp, 32 + addi sp, 8 + + bt 1f + KSPTOUSP +1: + rte +.endm + +.macro SAVE_SWITCH_STACK + subi sp, 32 + stm r8-r15, (sp) +.endm + +.macro RESTORE_SWITCH_STACK + ldm r8-r15, (sp) + addi sp, 32 +.endm + +/* 
MMU registers operators. */ +.macro RD_MIR rx + cprcr \rx, cpcr0 +.endm + +.macro RD_MEH rx + cprcr \rx, cpcr4 +.endm + +.macro RD_MCIR rx + cprcr \rx, cpcr8 +.endm + +.macro RD_PGDR rx + cprcr \rx, cpcr29 +.endm + +.macro WR_MEH rx + cpwcr \rx, cpcr4 +.endm + +.macro WR_MCIR rx + cpwcr \rx, cpcr8 +.endm + +.macro SETUP_MMU rx + lrw \rx, PHYS_OFFSET | 0xe + cpwcr \rx, cpcr30 + lrw \rx, (PHYS_OFFSET + 0x20000000) | 0xe + cpwcr \rx, cpcr31 +.endm + +#endif /* __ASM_CSKY_ENTRY_H */ diff --git a/arch/csky/abiv1/inc/abi/page.h b/arch/csky/abiv1/inc/abi/page.h new file mode 100644 index 000000000000..6336e92a103a --- /dev/null +++ b/arch/csky/abiv1/inc/abi/page.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +extern unsigned long shm_align_mask; +extern void flush_dcache_page(struct page *page); + +static inline unsigned long pages_do_alias(unsigned long addr1, + unsigned long addr2) +{ + return (addr1 ^ addr2) & shm_align_mask; +} + +static inline void clear_user_page(void *addr, unsigned long vaddr, + struct page *page) +{ + clear_page(addr); + if (pages_do_alias((unsigned long) addr, vaddr & PAGE_MASK)) + flush_dcache_page(page); +} + +static inline void copy_user_page(void *to, void *from, unsigned long vaddr, + struct page *page) +{ + copy_page(to, from); + if (pages_do_alias((unsigned long) to, vaddr & PAGE_MASK)) + flush_dcache_page(page); +} diff --git a/arch/csky/abiv1/inc/abi/pgtable-bits.h b/arch/csky/abiv1/inc/abi/pgtable-bits.h new file mode 100644 index 000000000000..455075b5db0d --- /dev/null +++ b/arch/csky/abiv1/inc/abi/pgtable-bits.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_CSKY_PGTABLE_BITS_H +#define __ASM_CSKY_PGTABLE_BITS_H + +/* implemented in software */ +#define _PAGE_ACCESSED (1<<3) +#define PAGE_ACCESSED_BIT (3) + +#define _PAGE_READ (1<<1) +#define _PAGE_WRITE (1<<2) +#define _PAGE_PRESENT (1<<0) + +#define _PAGE_MODIFIED (1<<4) +#define PAGE_MODIFIED_BIT (4) + +/* implemented in hardware */ +#define _PAGE_GLOBAL (1<<6) + +#define _PAGE_VALID (1<<7) +#define PAGE_VALID_BIT (7) + +#define _PAGE_DIRTY (1<<8) +#define PAGE_DIRTY_BIT (8) + +#define _PAGE_CACHE (3<<9) +#define _PAGE_UNCACHE (2<<9) + +#define _CACHE_MASK (7<<9) + +#define _CACHE_CACHED (_PAGE_VALID | _PAGE_CACHE) +#define _CACHE_UNCACHED (_PAGE_VALID | _PAGE_UNCACHE) + +#define HAVE_ARCH_UNMAPPED_AREA + +#endif /* __ASM_CSKY_PGTABLE_BITS_H */ diff --git a/arch/csky/abiv1/inc/abi/reg_ops.h b/arch/csky/abiv1/inc/abi/reg_ops.h new file mode 100644 index 000000000000..a153bd3918f7 --- /dev/null +++ b/arch/csky/abiv1/inc/abi/reg_ops.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
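As a standalone illustration of the aliasing test in abiv1/inc/abi/page.h above (the shm_align_mask value is taken from abiv1/mmap.c later in this patch): clear_user_page() and copy_user_page() only need to flush when the kernel and user addresses fall on different cache colours within the 8KB alias span.

#include <stdio.h>

static unsigned long shm_align_mask = (0x4000 >> 1) - 1;	/* 0x1fff */

static unsigned long pages_do_alias(unsigned long a, unsigned long b)
{
	return (a ^ b) & shm_align_mask;
}

int main(void)
{
	/* different colour within the 8KB alias span -> flush needed */
	printf("%#lx\n", pages_do_alias(0x0, 0x1000));	/* prints 0x1000 */
	/* same colour -> the two mappings alias cleanly, no flush */
	printf("%#lx\n", pages_do_alias(0x0, 0x2000));	/* prints 0 */
	return 0;
}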
+ +#ifndef __ABI_REG_OPS_H +#define __ABI_REG_OPS_H +#include <asm/reg_ops.h> + +#define cprcr(reg) \ +({ \ + unsigned int tmp; \ + asm volatile("cprcr %0, "reg"\n":"=b"(tmp)); \ + tmp; \ +}) + +#define cpwcr(reg, val) \ +({ \ + asm volatile("cpwcr %0, "reg"\n"::"b"(val)); \ +}) + +static inline unsigned int mfcr_hint(void) +{ + return mfcr("cr30"); +} + +static inline unsigned int mfcr_ccr2(void) { return 0; } + +#endif /* __ABI_REG_OPS_H */ diff --git a/arch/csky/abiv1/inc/abi/regdef.h b/arch/csky/abiv1/inc/abi/regdef.h new file mode 100644 index 000000000000..876689291b71 --- /dev/null +++ b/arch/csky/abiv1/inc/abi/regdef.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_CSKY_REGDEF_H +#define __ASM_CSKY_REGDEF_H + +#define syscallid r1 +#define r11_sig r11 + +#define regs_syscallid(regs) regs->regs[9] + +/* + * PSR format: + * | 31 | 30-24 | 23-16 | 15 14 | 13-0 | + * S CPID VEC TM + * + * S: Super Mode + * CPID: Coprocessor id, only 15 for MMU + * VEC: Exception Number + * TM: Trace Mode + */ +#define DEFAULT_PSR_VALUE 0x8f000000 + +#define SYSTRACE_SAVENUM 2 + +#endif /* __ASM_CSKY_REGDEF_H */ diff --git a/arch/csky/abiv1/inc/abi/string.h b/arch/csky/abiv1/inc/abi/string.h new file mode 100644 index 000000000000..5abe80be044d --- /dev/null +++ b/arch/csky/abiv1/inc/abi/string.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ABI_CSKY_STRING_H +#define __ABI_CSKY_STRING_H + +#define __HAVE_ARCH_MEMCPY +extern void *memcpy(void *, const void *, __kernel_size_t); + +#define __HAVE_ARCH_MEMSET +extern void *memset(void *, int, __kernel_size_t); + +#endif /* __ABI_CSKY_STRING_H */ diff --git a/arch/csky/abiv1/inc/abi/vdso.h b/arch/csky/abiv1/inc/abi/vdso.h new file mode 100644 index 000000000000..14352f524f1d --- /dev/null +++ b/arch/csky/abiv1/inc/abi/vdso.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include <linux/uaccess.h> + +static inline int setup_vdso_page(unsigned short *ptr) +{ + int err = 0; + + /* movi r1, 127 */ + err |= __put_user(0x67f1, ptr + 0); + /* addi r1, (139 - 127) */ + err |= __put_user(0x20b1, ptr + 1); + /* trap 0 */ + err |= __put_user(0x0008, ptr + 2); + + return err; +} diff --git a/arch/csky/abiv1/memcpy.S b/arch/csky/abiv1/memcpy.S new file mode 100644 index 000000000000..5078eb5169fa --- /dev/null +++ b/arch/csky/abiv1/memcpy.S @@ -0,0 +1,347 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
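The PSR layout documented in abiv1 regdef.h above can be sanity-checked by decoding DEFAULT_PSR_VALUE. This is a standalone user-space snippet, not part of the patch; the expected output matches the comment (super mode set, coprocessor id 15 for the MMU):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t psr = 0x8f000000;		/* DEFAULT_PSR_VALUE */

	printf("S=%u CPID=%u VEC=%u TM=%u\n",
	       (psr >> 31) & 0x1,		/* S: super mode -> 1  */
	       (psr >> 24) & 0x7f,		/* CPID          -> 15 */
	       (psr >> 16) & 0xff,		/* VEC           -> 0  */
	       (psr >> 14) & 0x3);		/* TM            -> 0  */
	return 0;
}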
+ +#include <linux/linkage.h> + +.macro GET_FRONT_BITS rx y +#ifdef __cskyLE__ + lsri \rx, \y +#else + lsli \rx, \y +#endif +.endm + +.macro GET_AFTER_BITS rx y +#ifdef __cskyLE__ + lsli \rx, \y +#else + lsri \rx, \y +#endif +.endm + +/* void *memcpy(void *dest, const void *src, size_t n); */ +ENTRY(memcpy) + mov r7, r2 + cmplti r4, 4 + bt .L_copy_by_byte + mov r6, r2 + andi r6, 3 + cmpnei r6, 0 + jbt .L_dest_not_aligned + mov r6, r3 + andi r6, 3 + cmpnei r6, 0 + jbt .L_dest_aligned_but_src_not_aligned +.L0: + cmplti r4, 16 + jbt .L_aligned_and_len_less_16bytes + subi sp, 8 + stw r8, (sp, 0) +.L_aligned_and_len_larger_16bytes: + ldw r1, (r3, 0) + ldw r5, (r3, 4) + ldw r8, (r3, 8) + stw r1, (r7, 0) + ldw r1, (r3, 12) + stw r5, (r7, 4) + stw r8, (r7, 8) + stw r1, (r7, 12) + subi r4, 16 + addi r3, 16 + addi r7, 16 + cmplti r4, 16 + jbf .L_aligned_and_len_larger_16bytes + ldw r8, (sp, 0) + addi sp, 8 + cmpnei r4, 0 + jbf .L_return + +.L_aligned_and_len_less_16bytes: + cmplti r4, 4 + bt .L_copy_by_byte +.L1: + ldw r1, (r3, 0) + stw r1, (r7, 0) + subi r4, 4 + addi r3, 4 + addi r7, 4 + cmplti r4, 4 + jbf .L1 + br .L_copy_by_byte + +.L_return: + rts + +.L_copy_by_byte: /* len less than 4 bytes */ + cmpnei r4, 0 + jbf .L_return +.L4: + ldb r1, (r3, 0) + stb r1, (r7, 0) + addi r3, 1 + addi r7, 1 + decne r4 + jbt .L4 + rts + +/* + * If dest is not aligned, just copying some bytes makes the dest align. + * Afther that, we judge whether the src is aligned. + */ +.L_dest_not_aligned: + mov r5, r3 + rsub r5, r5, r7 + abs r5, r5 + cmplt r5, r4 + bt .L_copy_by_byte + mov r5, r7 + sub r5, r3 + cmphs r5, r4 + bf .L_copy_by_byte + mov r5, r6 +.L5: + ldb r1, (r3, 0) /* makes the dest align. */ + stb r1, (r7, 0) + addi r5, 1 + subi r4, 1 + addi r3, 1 + addi r7, 1 + cmpnei r5, 4 + jbt .L5 + cmplti r4, 4 + jbt .L_copy_by_byte + mov r6, r3 /* judge whether the src is aligned. */ + andi r6, 3 + cmpnei r6, 0 + jbf .L0 + +/* Judge the number of misaligned, 1, 2, 3? */ +.L_dest_aligned_but_src_not_aligned: + mov r5, r3 + rsub r5, r5, r7 + abs r5, r5 + cmplt r5, r4 + bt .L_copy_by_byte + bclri r3, 0 + bclri r3, 1 + ldw r1, (r3, 0) + addi r3, 4 + cmpnei r6, 2 + bf .L_dest_aligned_but_src_not_aligned_2bytes + cmpnei r6, 3 + bf .L_dest_aligned_but_src_not_aligned_3bytes + +.L_dest_aligned_but_src_not_aligned_1byte: + mov r5, r7 + sub r5, r3 + cmphs r5, r4 + bf .L_copy_by_byte + cmplti r4, 16 + bf .L11 +.L10: /* If the len is less than 16 bytes */ + GET_FRONT_BITS r1 8 + mov r5, r1 + ldw r6, (r3, 0) + mov r1, r6 + GET_AFTER_BITS r6 24 + or r5, r6 + stw r5, (r7, 0) + subi r4, 4 + addi r3, 4 + addi r7, 4 + cmplti r4, 4 + bf .L10 + subi r3, 3 + br .L_copy_by_byte +.L11: + subi sp, 16 + stw r8, (sp, 0) + stw r9, (sp, 4) + stw r10, (sp, 8) + stw r11, (sp, 12) +.L12: + ldw r5, (r3, 0) + ldw r11, (r3, 4) + ldw r8, (r3, 8) + ldw r9, (r3, 12) + + GET_FRONT_BITS r1 8 /* little or big endian? 
*/ + mov r10, r5 + GET_AFTER_BITS r5 24 + or r5, r1 + + GET_FRONT_BITS r10 8 + mov r1, r11 + GET_AFTER_BITS r11 24 + or r11, r10 + + GET_FRONT_BITS r1 8 + mov r10, r8 + GET_AFTER_BITS r8 24 + or r8, r1 + + GET_FRONT_BITS r10 8 + mov r1, r9 + GET_AFTER_BITS r9 24 + or r9, r10 + + stw r5, (r7, 0) + stw r11, (r7, 4) + stw r8, (r7, 8) + stw r9, (r7, 12) + subi r4, 16 + addi r3, 16 + addi r7, 16 + cmplti r4, 16 + jbf .L12 + ldw r8, (sp, 0) + ldw r9, (sp, 4) + ldw r10, (sp, 8) + ldw r11, (sp, 12) + addi sp , 16 + cmplti r4, 4 + bf .L10 + subi r3, 3 + br .L_copy_by_byte + +.L_dest_aligned_but_src_not_aligned_2bytes: + cmplti r4, 16 + bf .L21 +.L20: + GET_FRONT_BITS r1 16 + mov r5, r1 + ldw r6, (r3, 0) + mov r1, r6 + GET_AFTER_BITS r6 16 + or r5, r6 + stw r5, (r7, 0) + subi r4, 4 + addi r3, 4 + addi r7, 4 + cmplti r4, 4 + bf .L20 + subi r3, 2 + br .L_copy_by_byte + rts + +.L21: /* n > 16 */ + subi sp, 16 + stw r8, (sp, 0) + stw r9, (sp, 4) + stw r10, (sp, 8) + stw r11, (sp, 12) + +.L22: + ldw r5, (r3, 0) + ldw r11, (r3, 4) + ldw r8, (r3, 8) + ldw r9, (r3, 12) + + GET_FRONT_BITS r1 16 + mov r10, r5 + GET_AFTER_BITS r5 16 + or r5, r1 + + GET_FRONT_BITS r10 16 + mov r1, r11 + GET_AFTER_BITS r11 16 + or r11, r10 + + GET_FRONT_BITS r1 16 + mov r10, r8 + GET_AFTER_BITS r8 16 + or r8, r1 + + GET_FRONT_BITS r10 16 + mov r1, r9 + GET_AFTER_BITS r9 16 + or r9, r10 + + stw r5, (r7, 0) + stw r11, (r7, 4) + stw r8, (r7, 8) + stw r9, (r7, 12) + subi r4, 16 + addi r3, 16 + addi r7, 16 + cmplti r4, 16 + jbf .L22 + ldw r8, (sp, 0) + ldw r9, (sp, 4) + ldw r10, (sp, 8) + ldw r11, (sp, 12) + addi sp, 16 + cmplti r4, 4 + bf .L20 + subi r3, 2 + br .L_copy_by_byte + + +.L_dest_aligned_but_src_not_aligned_3bytes: + cmplti r4, 16 + bf .L31 +.L30: + GET_FRONT_BITS r1 24 + mov r5, r1 + ldw r6, (r3, 0) + mov r1, r6 + GET_AFTER_BITS r6 8 + or r5, r6 + stw r5, (r7, 0) + subi r4, 4 + addi r3, 4 + addi r7, 4 + cmplti r4, 4 + bf .L30 + subi r3, 1 + br .L_copy_by_byte +.L31: + subi sp, 16 + stw r8, (sp, 0) + stw r9, (sp, 4) + stw r10, (sp, 8) + stw r11, (sp, 12) +.L32: + ldw r5, (r3, 0) + ldw r11, (r3, 4) + ldw r8, (r3, 8) + ldw r9, (r3, 12) + + GET_FRONT_BITS r1 24 + mov r10, r5 + GET_AFTER_BITS r5 8 + or r5, r1 + + GET_FRONT_BITS r10 24 + mov r1, r11 + GET_AFTER_BITS r11 8 + or r11, r10 + + GET_FRONT_BITS r1 24 + mov r10, r8 + GET_AFTER_BITS r8 8 + or r8, r1 + + GET_FRONT_BITS r10 24 + mov r1, r9 + GET_AFTER_BITS r9 8 + or r9, r10 + + stw r5, (r7, 0) + stw r11, (r7, 4) + stw r8, (r7, 8) + stw r9, (r7, 12) + subi r4, 16 + addi r3, 16 + addi r7, 16 + cmplti r4, 16 + jbf .L32 + ldw r8, (sp, 0) + ldw r9, (sp, 4) + ldw r10, (sp, 8) + ldw r11, (sp, 12) + addi sp, 16 + cmplti r4, 4 + bf .L30 + subi r3, 1 + br .L_copy_by_byte diff --git a/arch/csky/abiv1/memset.c b/arch/csky/abiv1/memset.c new file mode 100644 index 000000000000..b4aa75b99c5d --- /dev/null +++ b/arch/csky/abiv1/memset.c @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
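The word-stitching trick the abiv1 memcpy above uses for a source 1 byte past word alignment translates to C roughly as follows (little-endian case, where GET_FRONT_BITS is lsri 8 and GET_AFTER_BITS is lsli 24). An illustration of the technique, not the kernel code:

#include <stddef.h>
#include <stdint.h>

static void copy_src_off_by_1(uint32_t *dst, const uint8_t *src, size_t words)
{
	/* round src down to a word boundary, as the bclri pair does */
	const uint32_t *s = (const uint32_t *)((uintptr_t)src & ~3UL);
	uint32_t cur = *s++;

	while (words--) {
		uint32_t next = *s++;
		/* front bits of the current word + after bits of the next */
		*dst++ = (cur >> 8) | (next << 24);
		cur = next;
	}
}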
+ +#include <linux/types.h> + +void *memset(void *dest, int c, size_t l) +{ + char *d = dest; + int ch = c & 0xff; + int tmp = (ch | ch << 8 | ch << 16 | ch << 24); + + while (((uintptr_t)d & 0x3) && l--) + *d++ = ch; + + while (l >= 16) { + *(((u32 *)d)) = tmp; + *(((u32 *)d)+1) = tmp; + *(((u32 *)d)+2) = tmp; + *(((u32 *)d)+3) = tmp; + l -= 16; + d += 16; + } + + while (l > 3) { + *(((u32 *)d)) = tmp; + l -= 4; + d += 4; + } + + while (l) { + *d = ch; + l--; + d++; + } + + return dest; +} diff --git a/arch/csky/abiv1/mmap.c b/arch/csky/abiv1/mmap.c new file mode 100644 index 000000000000..b462fd50b23a --- /dev/null +++ b/arch/csky/abiv1/mmap.c @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/fs.h> +#include <linux/mm.h> +#include <linux/mman.h> +#include <linux/shm.h> +#include <linux/sched.h> +#include <linux/random.h> +#include <linux/io.h> + +unsigned long shm_align_mask = (0x4000 >> 1) - 1; /* Sane caches */ + +#define COLOUR_ALIGN(addr, pgoff) \ + ((((addr) + shm_align_mask) & ~shm_align_mask) + \ + (((pgoff) << PAGE_SHIFT) & shm_align_mask)) + +unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags) +{ + struct vm_area_struct *vmm; + int do_color_align; + + if (flags & MAP_FIXED) { + /* + * We do not accept a shared mapping if it would violate + * cache aliasing constraints. + */ + if ((flags & MAP_SHARED) && + ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask)) + return -EINVAL; + return addr; + } + + if (len > TASK_SIZE) + return -ENOMEM; + do_color_align = 0; + if (filp || (flags & MAP_SHARED)) + do_color_align = 1; + if (addr) { + if (do_color_align) + addr = COLOUR_ALIGN(addr, pgoff); + else + addr = PAGE_ALIGN(addr); + vmm = find_vma(current->mm, addr); + if (TASK_SIZE - len >= addr && + (!vmm || addr + len <= vmm->vm_start)) + return addr; + } + addr = TASK_UNMAPPED_BASE; + if (do_color_align) + addr = COLOUR_ALIGN(addr, pgoff); + else + addr = PAGE_ALIGN(addr); + + for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) { + /* At this point: (!vmm || addr < vmm->vm_end). */ + if (TASK_SIZE - len < addr) + return -ENOMEM; + if (!vmm || addr + len <= vmm->vm_start) + return addr; + addr = vmm->vm_end; + if (do_color_align) + addr = COLOUR_ALIGN(addr, pgoff); + } +} diff --git a/arch/csky/abiv1/strksyms.c b/arch/csky/abiv1/strksyms.c new file mode 100644 index 000000000000..436995c9b75c --- /dev/null +++ b/arch/csky/abiv1/strksyms.c @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/module.h> + +EXPORT_SYMBOL(memcpy); +EXPORT_SYMBOL(memset); diff --git a/arch/csky/abiv2/Makefile b/arch/csky/abiv2/Makefile new file mode 100644 index 000000000000..069ca7276b99 --- /dev/null +++ b/arch/csky/abiv2/Makefile @@ -0,0 +1,10 @@ +obj-y += cacheflush.o +obj-$(CONFIG_CPU_HAS_FPU) += fpu.o +obj-y += memcmp.o +obj-y += memcpy.o +obj-y += memmove.o +obj-y += memset.o +obj-y += strcmp.o +obj-y += strcpy.o +obj-y += strlen.o +obj-y += strksyms.o diff --git a/arch/csky/abiv2/cacheflush.c b/arch/csky/abiv2/cacheflush.c new file mode 100644 index 000000000000..d22c95ffc74d --- /dev/null +++ b/arch/csky/abiv2/cacheflush.c @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
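COLOUR_ALIGN() from abiv1/mmap.c above, worked through with concrete numbers in a standalone snippet (PAGE_SHIFT = 12 is an assumption here): the address is rounded up to the alias span and then carries the same colour bits as the file offset, so shared mappings land on compatible cache colours.

#include <stdio.h>

#define PAGE_SHIFT	12
static unsigned long shm_align_mask = (0x4000 >> 1) - 1;	/* 0x1fff */

#define COLOUR_ALIGN(addr, pgoff) \
	((((addr) + shm_align_mask) & ~shm_align_mask) + \
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))

int main(void)
{
	/* 0x12345 rounds up to 0x14000, then colour 0x1000 is added */
	printf("%#lx\n", COLOUR_ALIGN(0x12345UL, 1UL));	/* prints 0x15000 */
	return 0;
}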
+ +#include <linux/cache.h> +#include <linux/highmem.h> +#include <linux/mm.h> +#include <asm/cache.h> + +void flush_icache_page(struct vm_area_struct *vma, struct page *page) +{ + unsigned long start; + + start = (unsigned long) kmap_atomic(page); + + cache_wbinv_range(start, start + PAGE_SIZE); + + kunmap_atomic((void *)start); +} + +void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, + unsigned long vaddr, int len) +{ + unsigned long kaddr; + + kaddr = (unsigned long) kmap_atomic(page) + (vaddr & ~PAGE_MASK); + + cache_wbinv_range(kaddr, kaddr + len); + + kunmap_atomic((void *)kaddr); +} + +void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, + pte_t *pte) +{ + unsigned long addr, pfn; + struct page *page; + void *va; + + if (!(vma->vm_flags & VM_EXEC)) + return; + + pfn = pte_pfn(*pte); + if (unlikely(!pfn_valid(pfn))) + return; + + page = pfn_to_page(pfn); + if (page == ZERO_PAGE(0)) + return; + + va = page_address(page); + addr = (unsigned long) va; + + if (va == NULL && PageHighMem(page)) + addr = (unsigned long) kmap_atomic(page); + + cache_wbinv_range(addr, addr + PAGE_SIZE); + + if (va == NULL && PageHighMem(page)) + kunmap_atomic((void *) addr); +} diff --git a/arch/csky/abiv2/fpu.c b/arch/csky/abiv2/fpu.c new file mode 100644 index 000000000000..e7e11344005a --- /dev/null +++ b/arch/csky/abiv2/fpu.c @@ -0,0 +1,275 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/ptrace.h> +#include <linux/uaccess.h> +#include <abi/reg_ops.h> + +#define MTCR_MASK 0xFC00FFE0 +#define MFCR_MASK 0xFC00FFE0 +#define MTCR_DIST 0xC0006420 +#define MFCR_DIST 0xC0006020 + +void __init init_fpu(void) +{ + mtcr("cr<1, 2>", 0); +} + +/* + * fpu_libc_helper() is to help libc to excute: + * - mfcr %a, cr<1, 2> + * - mfcr %a, cr<2, 2> + * - mtcr %a, cr<1, 2> + * - mtcr %a, cr<2, 2> + */ +int fpu_libc_helper(struct pt_regs *regs) +{ + int fault; + unsigned long instrptr, regx = 0; + unsigned long index = 0, tmp = 0; + unsigned long tinstr = 0; + u16 instr_hi, instr_low; + + instrptr = instruction_pointer(regs); + if (instrptr & 1) + return 0; + + fault = __get_user(instr_low, (u16 *)instrptr); + if (fault) + return 0; + + fault = __get_user(instr_hi, (u16 *)(instrptr + 2)); + if (fault) + return 0; + + tinstr = instr_hi | ((unsigned long)instr_low << 16); + + if (((tinstr >> 21) & 0x1F) != 2) + return 0; + + if ((tinstr & MTCR_MASK) == MTCR_DIST) { + index = (tinstr >> 16) & 0x1F; + if (index > 13) + return 0; + + tmp = tinstr & 0x1F; + if (tmp > 2) + return 0; + + regx = *(®s->a0 + index); + + if (tmp == 1) + mtcr("cr<1, 2>", regx); + else if (tmp == 2) + mtcr("cr<2, 2>", regx); + else + return 0; + + regs->pc += 4; + return 1; + } + + if ((tinstr & MFCR_MASK) == MFCR_DIST) { + index = tinstr & 0x1F; + if (index > 13) + return 0; + + tmp = ((tinstr >> 16) & 0x1F); + if (tmp > 2) + return 0; + + if (tmp == 1) + regx = mfcr("cr<1, 2>"); + else if (tmp == 2) + regx = mfcr("cr<2, 2>"); + else + return 0; + + *(®s->a0 + index) = regx; + + regs->pc += 4; + return 1; + } + + return 0; +} + +void fpu_fpe(struct pt_regs *regs) +{ + int sig, code; + unsigned int fesr; + + fesr = mfcr("cr<2, 2>"); + + sig = SIGFPE; + code = FPE_FLTUNK; + + if (fesr & FPE_ILLE) { + sig = SIGILL; + code = ILL_ILLOPC; + } else if (fesr & FPE_IDC) { + sig = SIGILL; + code = ILL_ILLOPN; + } else if (fesr & FPE_FEC) { + sig = SIGFPE; + if (fesr & FPE_IOC) + code = FPE_FLTINV; + else if (fesr & FPE_DZC) + code = FPE_FLTDIV; + else 
if (fesr & FPE_UFC) + code = FPE_FLTUND; + else if (fesr & FPE_OFC) + code = FPE_FLTOVF; + else if (fesr & FPE_IXC) + code = FPE_FLTRES; + } + + force_sig_fault(sig, code, (void __user *)regs->pc, current); +} + +#define FMFVR_FPU_REGS(vrx, vry) \ + "fmfvrl %0, "#vrx"\n" \ + "fmfvrh %1, "#vrx"\n" \ + "fmfvrl %2, "#vry"\n" \ + "fmfvrh %3, "#vry"\n" + +#define FMTVR_FPU_REGS(vrx, vry) \ + "fmtvrl "#vrx", %0\n" \ + "fmtvrh "#vrx", %1\n" \ + "fmtvrl "#vry", %2\n" \ + "fmtvrh "#vry", %3\n" + +#define STW_FPU_REGS(a, b, c, d) \ + "stw %0, (%4, "#a")\n" \ + "stw %1, (%4, "#b")\n" \ + "stw %2, (%4, "#c")\n" \ + "stw %3, (%4, "#d")\n" + +#define LDW_FPU_REGS(a, b, c, d) \ + "ldw %0, (%4, "#a")\n" \ + "ldw %1, (%4, "#b")\n" \ + "ldw %2, (%4, "#c")\n" \ + "ldw %3, (%4, "#d")\n" + +void save_to_user_fp(struct user_fp *user_fp) +{ + unsigned long flg; + unsigned long tmp1, tmp2; + unsigned long *fpregs; + + local_irq_save(flg); + + tmp1 = mfcr("cr<1, 2>"); + tmp2 = mfcr("cr<2, 2>"); + + user_fp->fcr = tmp1; + user_fp->fesr = tmp2; + + fpregs = &user_fp->vr[0]; +#ifdef CONFIG_CPU_HAS_FPUV2 +#ifdef CONFIG_CPU_HAS_VDSP + asm volatile( + "vstmu.32 vr0-vr3, (%0)\n" + "vstmu.32 vr4-vr7, (%0)\n" + "vstmu.32 vr8-vr11, (%0)\n" + "vstmu.32 vr12-vr15, (%0)\n" + "fstmu.64 vr16-vr31, (%0)\n" + : "+a"(fpregs) + ::"memory"); +#else + asm volatile( + "fstmu.64 vr0-vr31, (%0)\n" + : "+a"(fpregs) + ::"memory"); +#endif +#else + { + unsigned long tmp3, tmp4; + + asm volatile( + FMFVR_FPU_REGS(vr0, vr1) + STW_FPU_REGS(0, 4, 16, 20) + FMFVR_FPU_REGS(vr2, vr3) + STW_FPU_REGS(32, 36, 48, 52) + FMFVR_FPU_REGS(vr4, vr5) + STW_FPU_REGS(64, 68, 80, 84) + FMFVR_FPU_REGS(vr6, vr7) + STW_FPU_REGS(96, 100, 112, 116) + "addi %4, 128\n" + FMFVR_FPU_REGS(vr8, vr9) + STW_FPU_REGS(0, 4, 16, 20) + FMFVR_FPU_REGS(vr10, vr11) + STW_FPU_REGS(32, 36, 48, 52) + FMFVR_FPU_REGS(vr12, vr13) + STW_FPU_REGS(64, 68, 80, 84) + FMFVR_FPU_REGS(vr14, vr15) + STW_FPU_REGS(96, 100, 112, 116) + : "=a"(tmp1), "=a"(tmp2), "=a"(tmp3), + "=a"(tmp4), "+a"(fpregs) + ::"memory"); + } +#endif + + local_irq_restore(flg); +} + +void restore_from_user_fp(struct user_fp *user_fp) +{ + unsigned long flg; + unsigned long tmp1, tmp2; + unsigned long *fpregs; + + local_irq_save(flg); + + tmp1 = user_fp->fcr; + tmp2 = user_fp->fesr; + + mtcr("cr<1, 2>", tmp1); + mtcr("cr<2, 2>", tmp2); + + fpregs = &user_fp->vr[0]; +#ifdef CONFIG_CPU_HAS_FPUV2 +#ifdef CONFIG_CPU_HAS_VDSP + asm volatile( + "vldmu.32 vr0-vr3, (%0)\n" + "vldmu.32 vr4-vr7, (%0)\n" + "vldmu.32 vr8-vr11, (%0)\n" + "vldmu.32 vr12-vr15, (%0)\n" + "fldmu.64 vr16-vr31, (%0)\n" + : "+a"(fpregs) + ::"memory"); +#else + asm volatile( + "fldmu.64 vr0-vr31, (%0)\n" + : "+a"(fpregs) + ::"memory"); +#endif +#else + { + unsigned long tmp3, tmp4; + + asm volatile( + LDW_FPU_REGS(0, 4, 16, 20) + FMTVR_FPU_REGS(vr0, vr1) + LDW_FPU_REGS(32, 36, 48, 52) + FMTVR_FPU_REGS(vr2, vr3) + LDW_FPU_REGS(64, 68, 80, 84) + FMTVR_FPU_REGS(vr4, vr5) + LDW_FPU_REGS(96, 100, 112, 116) + FMTVR_FPU_REGS(vr6, vr7) + "addi %4, 128\n" + LDW_FPU_REGS(0, 4, 16, 20) + FMTVR_FPU_REGS(vr8, vr9) + LDW_FPU_REGS(32, 36, 48, 52) + FMTVR_FPU_REGS(vr10, vr11) + LDW_FPU_REGS(64, 68, 80, 84) + FMTVR_FPU_REGS(vr12, vr13) + LDW_FPU_REGS(96, 100, 112, 116) + FMTVR_FPU_REGS(vr14, vr15) + : "=a"(tmp1), "=a"(tmp2), "=a"(tmp3), + "=a"(tmp4), "+a"(fpregs) + ::"memory"); + } +#endif + local_irq_restore(flg); +} diff --git a/arch/csky/abiv2/inc/abi/cacheflush.h b/arch/csky/abiv2/inc/abi/cacheflush.h new file mode 100644 index 000000000000..b8db5e0b2fe3 --- /dev/null +++ 
b/arch/csky/abiv2/inc/abi/cacheflush.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ABI_CSKY_CACHEFLUSH_H +#define __ABI_CSKY_CACHEFLUSH_H + +/* Keep includes the same across arches. */ +#include <linux/mm.h> + +/* + * The cache doesn't need to be flushed when TLB entries change when + * the cache is mapped to physical memory, not virtual memory + */ +#define flush_cache_all() do { } while (0) +#define flush_cache_mm(mm) do { } while (0) +#define flush_cache_dup_mm(mm) do { } while (0) + +#define flush_cache_range(vma, start, end) \ + do { \ + if (vma->vm_flags & VM_EXEC) \ + icache_inv_all(); \ + } while (0) + +#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 +#define flush_dcache_page(page) do { } while (0) +#define flush_dcache_mmap_lock(mapping) do { } while (0) +#define flush_dcache_mmap_unlock(mapping) do { } while (0) + +#define flush_icache_range(start, end) cache_wbinv_range(start, end) + +void flush_icache_page(struct vm_area_struct *vma, struct page *page); +void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, + unsigned long vaddr, int len); + +#define flush_cache_vmap(start, end) do { } while (0) +#define flush_cache_vunmap(start, end) do { } while (0) + +#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ +do { \ + memcpy(dst, src, len); \ + cache_wbinv_range((unsigned long)dst, (unsigned long)dst + len); \ +} while (0) +#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ + memcpy(dst, src, len) + +#endif /* __ABI_CSKY_CACHEFLUSH_H */ diff --git a/arch/csky/abiv2/inc/abi/ckmmu.h b/arch/csky/abiv2/inc/abi/ckmmu.h new file mode 100644 index 000000000000..97230ad9427c --- /dev/null +++ b/arch/csky/abiv2/inc/abi/ckmmu.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_CSKY_CKMMUV2_H +#define __ASM_CSKY_CKMMUV2_H + +#include <abi/reg_ops.h> +#include <asm/barrier.h> + +static inline int read_mmu_index(void) +{ + return mfcr("cr<0, 15>"); +} + +static inline void write_mmu_index(int value) +{ + mtcr("cr<0, 15>", value); +} + +static inline int read_mmu_entrylo0(void) +{ + return mfcr("cr<2, 15>"); +} + +static inline int read_mmu_entrylo1(void) +{ + return mfcr("cr<3, 15>"); +} + +static inline void write_mmu_pagemask(int value) +{ + mtcr("cr<6, 15>", value); +} + +static inline int read_mmu_entryhi(void) +{ + return mfcr("cr<4, 15>"); +} + +static inline void write_mmu_entryhi(int value) +{ + mtcr("cr<4, 15>", value); +} + +/* + * TLB operations. 
+ */ +static inline void tlb_probe(void) +{ + mtcr("cr<8, 15>", 0x80000000); +} + +static inline void tlb_read(void) +{ + mtcr("cr<8, 15>", 0x40000000); +} + +static inline void tlb_invalid_all(void) +{ +#ifdef CONFIG_CPU_HAS_TLBI + asm volatile("tlbi.alls\n":::"memory"); + sync_is(); +#else + mtcr("cr<8, 15>", 0x04000000); +#endif +} + +static inline void tlb_invalid_indexed(void) +{ + mtcr("cr<8, 15>", 0x02000000); +} + +/* setup hardrefil pgd */ +static inline unsigned long get_pgd(void) +{ + return mfcr("cr<29, 15>"); +} + +static inline void setup_pgd(unsigned long pgd, bool kernel) +{ + if (kernel) + mtcr("cr<28, 15>", pgd); + else + mtcr("cr<29, 15>", pgd); +} + +#endif /* __ASM_CSKY_CKMMUV2_H */ diff --git a/arch/csky/abiv2/inc/abi/elf.h b/arch/csky/abiv2/inc/abi/elf.h new file mode 100644 index 000000000000..290f49ef4c48 --- /dev/null +++ b/arch/csky/abiv2/inc/abi/elf.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ABI_CSKY_ELF_H +#define __ABI_CSKY_ELF_H + +/* The member sort in array pr_reg[x] is defined by GDB. */ +#define ELF_CORE_COPY_REGS(pr_reg, regs) do { \ + pr_reg[0] = regs->pc; \ + pr_reg[1] = regs->a1; \ + pr_reg[2] = regs->a0; \ + pr_reg[3] = regs->sr; \ + pr_reg[4] = regs->a2; \ + pr_reg[5] = regs->a3; \ + pr_reg[6] = regs->regs[0]; \ + pr_reg[7] = regs->regs[1]; \ + pr_reg[8] = regs->regs[2]; \ + pr_reg[9] = regs->regs[3]; \ + pr_reg[10] = regs->regs[4]; \ + pr_reg[11] = regs->regs[5]; \ + pr_reg[12] = regs->regs[6]; \ + pr_reg[13] = regs->regs[7]; \ + pr_reg[14] = regs->regs[8]; \ + pr_reg[15] = regs->regs[9]; \ + pr_reg[16] = regs->usp; \ + pr_reg[17] = regs->lr; \ + pr_reg[18] = regs->exregs[0]; \ + pr_reg[19] = regs->exregs[1]; \ + pr_reg[20] = regs->exregs[2]; \ + pr_reg[21] = regs->exregs[3]; \ + pr_reg[22] = regs->exregs[4]; \ + pr_reg[23] = regs->exregs[5]; \ + pr_reg[24] = regs->exregs[6]; \ + pr_reg[25] = regs->exregs[7]; \ + pr_reg[26] = regs->exregs[8]; \ + pr_reg[27] = regs->exregs[9]; \ + pr_reg[28] = regs->exregs[10]; \ + pr_reg[29] = regs->exregs[11]; \ + pr_reg[30] = regs->exregs[12]; \ + pr_reg[31] = regs->exregs[13]; \ + pr_reg[32] = regs->exregs[14]; \ + pr_reg[33] = regs->tls; \ +} while (0); +#endif /* __ABI_CSKY_ELF_H */ diff --git a/arch/csky/abiv2/inc/abi/entry.h b/arch/csky/abiv2/inc/abi/entry.h new file mode 100644 index 000000000000..acd05214d4e3 --- /dev/null +++ b/arch/csky/abiv2/inc/abi/entry.h @@ -0,0 +1,156 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
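A hedged sketch of how the split kernel/user PGD bases in the abiv2 ckmmu.h above would typically be used: the kernel base (cr<28, 15>) is programmed once at boot and left alone, while only the user base (cr<29, 15>) is rewritten per mm. switch_mm_sketch() is an illustrative name, not code from this patch:

/* Kernel-context sketch; assumes <linux/mm_types.h> and <asm/page.h>. */
static inline void switch_mm_sketch(struct mm_struct *next)
{
	/* kernel base (cr<28, 15>) stays put across the switch;
	 * only the user base (cr<29, 15>) changes here */
	setup_pgd(__pa(next->pgd), false);
}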
+ +#ifndef __ASM_CSKY_ENTRY_H +#define __ASM_CSKY_ENTRY_H + +#include <asm/setup.h> +#include <abi/regdef.h> + +#define LSAVE_PC 8 +#define LSAVE_PSR 12 +#define LSAVE_A0 24 +#define LSAVE_A1 28 +#define LSAVE_A2 32 +#define LSAVE_A3 36 + +#define EPC_INCREASE 4 +#define EPC_KEEP 0 + +#define KSPTOUSP +#define USPTOKSP + +#define usp cr<14, 1> + +.macro INCTRAP rx + addi \rx, EPC_INCREASE +.endm + +.macro SAVE_ALL epc_inc + subi sp, 152 + stw tls, (sp, 0) + stw lr, (sp, 4) + + mfcr lr, epc + movi tls, \epc_inc + add lr, tls + stw lr, (sp, 8) + + mfcr lr, epsr + stw lr, (sp, 12) + mfcr lr, usp + stw lr, (sp, 16) + + stw a0, (sp, 20) + stw a0, (sp, 24) + stw a1, (sp, 28) + stw a2, (sp, 32) + stw a3, (sp, 36) + + addi sp, 40 + stm r4-r13, (sp) + + addi sp, 40 + stm r16-r30, (sp) +#ifdef CONFIG_CPU_HAS_HILO + mfhi lr + stw lr, (sp, 60) + mflo lr + stw lr, (sp, 64) +#endif + subi sp, 80 +.endm + +.macro RESTORE_ALL + psrclr ie + ldw tls, (sp, 0) + ldw lr, (sp, 4) + ldw a0, (sp, 8) + mtcr a0, epc + ldw a0, (sp, 12) + mtcr a0, epsr + ldw a0, (sp, 16) + mtcr a0, usp + +#ifdef CONFIG_CPU_HAS_HILO + ldw a0, (sp, 140) + mthi a0 + ldw a0, (sp, 144) + mtlo a0 +#endif + + ldw a0, (sp, 24) + ldw a1, (sp, 28) + ldw a2, (sp, 32) + ldw a3, (sp, 36) + + addi sp, 40 + ldm r4-r13, (sp) + addi sp, 40 + ldm r16-r30, (sp) + addi sp, 72 + rte +.endm + +.macro SAVE_SWITCH_STACK + subi sp, 64 + stm r4-r11, (sp) + stw r15, (sp, 32) + stw r16, (sp, 36) + stw r17, (sp, 40) + stw r26, (sp, 44) + stw r27, (sp, 48) + stw r28, (sp, 52) + stw r29, (sp, 56) + stw r30, (sp, 60) +.endm + +.macro RESTORE_SWITCH_STACK + ldm r4-r11, (sp) + ldw r15, (sp, 32) + ldw r16, (sp, 36) + ldw r17, (sp, 40) + ldw r26, (sp, 44) + ldw r27, (sp, 48) + ldw r28, (sp, 52) + ldw r29, (sp, 56) + ldw r30, (sp, 60) + addi sp, 64 +.endm + +/* MMU registers operators. */ +.macro RD_MIR rx + mfcr \rx, cr<0, 15> +.endm + +.macro RD_MEH rx + mfcr \rx, cr<4, 15> +.endm + +.macro RD_MCIR rx + mfcr \rx, cr<8, 15> +.endm + +.macro RD_PGDR rx + mfcr \rx, cr<29, 15> +.endm + +.macro RD_PGDR_K rx + mfcr \rx, cr<28, 15> +.endm + +.macro WR_MEH rx + mtcr \rx, cr<4, 15> +.endm + +.macro WR_MCIR rx + mtcr \rx, cr<8, 15> +.endm + +.macro SETUP_MMU rx + lrw \rx, PHYS_OFFSET | 0xe + mtcr \rx, cr<30, 15> + lrw \rx, (PHYS_OFFSET + 0x20000000) | 0xe + mtcr \rx, cr<31, 15> +.endm +#endif /* __ASM_CSKY_ENTRY_H */ diff --git a/arch/csky/abiv2/inc/abi/fpu.h b/arch/csky/abiv2/inc/abi/fpu.h new file mode 100644 index 000000000000..22ca3cf2794a --- /dev/null +++ b/arch/csky/abiv2/inc/abi/fpu.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_CSKY_FPU_H +#define __ASM_CSKY_FPU_H + +#include <asm/sigcontext.h> +#include <asm/ptrace.h> + +int fpu_libc_helper(struct pt_regs *regs); +void fpu_fpe(struct pt_regs *regs); +void __init init_fpu(void); + +void save_to_user_fp(struct user_fp *user_fp); +void restore_from_user_fp(struct user_fp *user_fp); + +/* + * Define the fesr bit for fpe handle. 
+ */ +#define FPE_ILLE (1 << 16) /* Illegal instruction */ +#define FPE_FEC (1 << 7) /* Input float-point arithmetic exception */ +#define FPE_IDC (1 << 5) /* Input denormalized exception */ +#define FPE_IXC (1 << 4) /* Inexact exception */ +#define FPE_UFC (1 << 3) /* Underflow exception */ +#define FPE_OFC (1 << 2) /* Overflow exception */ +#define FPE_DZC (1 << 1) /* Divide by zero exception */ +#define FPE_IOC (1 << 0) /* Invalid operation exception */ +#define FPE_REGULAR_EXCEPTION (FPE_IXC | FPE_UFC | FPE_OFC | FPE_DZC | FPE_IOC) + +#ifdef CONFIG_OPEN_FPU_IDE +#define IDE_STAT (1 << 5) +#else +#define IDE_STAT 0 +#endif + +#ifdef CONFIG_OPEN_FPU_IXE +#define IXE_STAT (1 << 4) +#else +#define IXE_STAT 0 +#endif + +#ifdef CONFIG_OPEN_FPU_UFE +#define UFE_STAT (1 << 3) +#else +#define UFE_STAT 0 +#endif + +#ifdef CONFIG_OPEN_FPU_OFE +#define OFE_STAT (1 << 2) +#else +#define OFE_STAT 0 +#endif + +#ifdef CONFIG_OPEN_FPU_DZE +#define DZE_STAT (1 << 1) +#else +#define DZE_STAT 0 +#endif + +#ifdef CONFIG_OPEN_FPU_IOE +#define IOE_STAT (1 << 0) +#else +#define IOE_STAT 0 +#endif + +#endif /* __ASM_CSKY_FPU_H */ diff --git a/arch/csky/abiv2/inc/abi/page.h b/arch/csky/abiv2/inc/abi/page.h new file mode 100644 index 000000000000..0a70cb553dca --- /dev/null +++ b/arch/csky/abiv2/inc/abi/page.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +static inline void clear_user_page(void *addr, unsigned long vaddr, + struct page *page) +{ + clear_page(addr); +} + +static inline void copy_user_page(void *to, void *from, unsigned long vaddr, + struct page *page) +{ + copy_page(to, from); +} diff --git a/arch/csky/abiv2/inc/abi/pgtable-bits.h b/arch/csky/abiv2/inc/abi/pgtable-bits.h new file mode 100644 index 000000000000..b20ae19702e3 --- /dev/null +++ b/arch/csky/abiv2/inc/abi/pgtable-bits.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_CSKY_PGTABLE_BITS_H +#define __ASM_CSKY_PGTABLE_BITS_H + +/* implemented in software */ +#define _PAGE_ACCESSED (1<<7) +#define PAGE_ACCESSED_BIT (7) + +#define _PAGE_READ (1<<8) +#define _PAGE_WRITE (1<<9) +#define _PAGE_PRESENT (1<<10) + +#define _PAGE_MODIFIED (1<<11) +#define PAGE_MODIFIED_BIT (11) + +/* implemented in hardware */ +#define _PAGE_GLOBAL (1<<0) + +#define _PAGE_VALID (1<<1) +#define PAGE_VALID_BIT (1) + +#define _PAGE_DIRTY (1<<2) +#define PAGE_DIRTY_BIT (2) + +#define _PAGE_SO (1<<5) +#define _PAGE_BUF (1<<6) + +#define _PAGE_CACHE (1<<3) + +#define _CACHE_MASK _PAGE_CACHE + +#define _CACHE_CACHED (_PAGE_VALID | _PAGE_CACHE | _PAGE_BUF) +#define _CACHE_UNCACHED (_PAGE_VALID | _PAGE_SO) + +#endif /* __ASM_CSKY_PGTABLE_BITS_H */ diff --git a/arch/csky/abiv2/inc/abi/reg_ops.h b/arch/csky/abiv2/inc/abi/reg_ops.h new file mode 100644 index 000000000000..ae82c3f26a6b --- /dev/null +++ b/arch/csky/abiv2/inc/abi/reg_ops.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
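The fesr-to-si_code mapping that fpu_fpe() earlier in this patch implements with these FPE_* bits can be recapped as a standalone helper, using the same priority order as the kernel code (illustrative only; FPE_FLT* come from the uapi siginfo definitions):

static int fesr_to_sicode(unsigned int fesr)
{
	if (fesr & FPE_IOC) return FPE_FLTINV;	/* invalid operation */
	if (fesr & FPE_DZC) return FPE_FLTDIV;	/* divide by zero    */
	if (fesr & FPE_UFC) return FPE_FLTUND;	/* underflow         */
	if (fesr & FPE_OFC) return FPE_FLTOVF;	/* overflow          */
	if (fesr & FPE_IXC) return FPE_FLTRES;	/* inexact result    */
	return FPE_FLTUNK;			/* unknown           */
}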
+ +#ifndef __ABI_REG_OPS_H +#define __ABI_REG_OPS_H +#include <asm/reg_ops.h> + +static inline unsigned int mfcr_hint(void) +{ + return mfcr("cr31"); +} + +static inline unsigned int mfcr_ccr2(void) +{ + return mfcr("cr23"); +} +#endif /* __ABI_REG_OPS_H */ diff --git a/arch/csky/abiv2/inc/abi/regdef.h b/arch/csky/abiv2/inc/abi/regdef.h new file mode 100644 index 000000000000..c72abb781bdc --- /dev/null +++ b/arch/csky/abiv2/inc/abi/regdef.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_CSKY_REGDEF_H +#define __ASM_CSKY_REGDEF_H + +#define syscallid r7 +#define r11_sig r11 + +#define regs_syscallid(regs) regs->regs[3] + +/* + * PSR format: + * | 31 | 30-24 | 23-16 | 15 14 | 13-10 | 9 | 8-0 | + * S VEC TM MM + * + * S: Super Mode + * VEC: Exception Number + * TM: Trace Mode + * MM: Memory unaligned addr access + */ +#define DEFAULT_PSR_VALUE 0x80000200 + +#define SYSTRACE_SAVENUM 5 + +#endif /* __ASM_CSKY_REGDEF_H */ diff --git a/arch/csky/abiv2/inc/abi/string.h b/arch/csky/abiv2/inc/abi/string.h new file mode 100644 index 000000000000..f01bad2ac4fb --- /dev/null +++ b/arch/csky/abiv2/inc/abi/string.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ABI_CSKY_STRING_H +#define __ABI_CSKY_STRING_H + +#define __HAVE_ARCH_MEMCMP +extern int memcmp(const void *, const void *, __kernel_size_t); + +#define __HAVE_ARCH_MEMCPY +extern void *memcpy(void *, const void *, __kernel_size_t); + +#define __HAVE_ARCH_MEMMOVE +extern void *memmove(void *, const void *, __kernel_size_t); + +#define __HAVE_ARCH_MEMSET +extern void *memset(void *, int, __kernel_size_t); + +#define __HAVE_ARCH_STRCMP +extern int strcmp(const char *, const char *); + +#define __HAVE_ARCH_STRCPY +extern char *strcpy(char *, const char *); + +#define __HAVE_ARCH_STRLEN +extern __kernel_size_t strlen(const char *); + +#endif /* __ABI_CSKY_STRING_H */ diff --git a/arch/csky/abiv2/inc/abi/vdso.h b/arch/csky/abiv2/inc/abi/vdso.h new file mode 100644 index 000000000000..b60d4a070326 --- /dev/null +++ b/arch/csky/abiv2/inc/abi/vdso.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ABI_CSKY_VDSO_H +#define __ABI_CSKY_VDSO_H + +#include <linux/uaccess.h> + +static inline int setup_vdso_page(unsigned short *ptr) +{ + int err = 0; + + /* movi r7, 173 */ + err |= __put_user(0xea07, ptr); + err |= __put_user(0x008b, ptr+1); + + /* trap 0 */ + err |= __put_user(0xc000, ptr+2); + err |= __put_user(0x2020, ptr+3); + + return err; +} + +#endif /* __ABI_CSKY_STRING_H */ diff --git a/arch/csky/abiv2/memcmp.S b/arch/csky/abiv2/memcmp.S new file mode 100644 index 000000000000..bf0d809f09e2 --- /dev/null +++ b/arch/csky/abiv2/memcmp.S @@ -0,0 +1,152 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/linkage.h> +#include "sysdep.h" + +ENTRY(memcmp) + /* Test if len less than 4 bytes. */ + mov r3, r0 + movi r0, 0 + mov r12, r4 + cmplti r2, 4 + bt .L_compare_by_byte + + andi r13, r0, 3 + movi r19, 4 + + /* Test if s1 is not 4 bytes aligned. */ + bnez r13, .L_s1_not_aligned + + LABLE_ALIGN +.L_s1_aligned: + /* If dest is aligned, then copy. */ + zext r18, r2, 31, 4 + /* Test if len less than 16 bytes. */ + bez r18, .L_compare_by_word + +.L_compare_by_4word: + /* If aligned, load word each time. */ + ldw r20, (r3, 0) + ldw r21, (r1, 0) + /* If s1[i] != s2[i], goto .L_byte_check. 
*/ + cmpne r20, r21 + bt .L_byte_check + + ldw r20, (r3, 4) + ldw r21, (r1, 4) + cmpne r20, r21 + bt .L_byte_check + + ldw r20, (r3, 8) + ldw r21, (r1, 8) + cmpne r20, r21 + bt .L_byte_check + + ldw r20, (r3, 12) + ldw r21, (r1, 12) + cmpne r20, r21 + bt .L_byte_check + + PRE_BNEZAD (r18) + addi a3, 16 + addi a1, 16 + + BNEZAD (r18, .L_compare_by_4word) + +.L_compare_by_word: + zext r18, r2, 3, 2 + bez r18, .L_compare_by_byte +.L_compare_by_word_loop: + ldw r20, (r3, 0) + ldw r21, (r1, 0) + addi r3, 4 + PRE_BNEZAD (r18) + cmpne r20, r21 + addi r1, 4 + bt .L_byte_check + BNEZAD (r18, .L_compare_by_word_loop) + +.L_compare_by_byte: + zext r18, r2, 1, 0 + bez r18, .L_return +.L_compare_by_byte_loop: + ldb r0, (r3, 0) + ldb r4, (r1, 0) + addi r3, 1 + subu r0, r4 + PRE_BNEZAD (r18) + addi r1, 1 + bnez r0, .L_return + BNEZAD (r18, .L_compare_by_byte_loop) + +.L_return: + mov r4, r12 + rts + +# ifdef __CSKYBE__ +/* d[i] != s[i] in word, so we check byte 0. */ +.L_byte_check: + xtrb0 r0, r20 + xtrb0 r2, r21 + subu r0, r2 + bnez r0, .L_return + + /* check byte 1 */ + xtrb1 r0, r20 + xtrb1 r2, r21 + subu r0, r2 + bnez r0, .L_return + + /* check byte 2 */ + xtrb2 r0, r20 + xtrb2 r2, r21 + subu r0, r2 + bnez r0, .L_return + + /* check byte 3 */ + xtrb3 r0, r20 + xtrb3 r2, r21 + subu r0, r2 +# else +/* s1[i] != s2[i] in word, so we check byte 3. */ +.L_byte_check: + xtrb3 r0, r20 + xtrb3 r2, r21 + subu r0, r2 + bnez r0, .L_return + + /* check byte 2 */ + xtrb2 r0, r20 + xtrb2 r2, r21 + subu r0, r2 + bnez r0, .L_return + + /* check byte 1 */ + xtrb1 r0, r20 + xtrb1 r2, r21 + subu r0, r2 + bnez r0, .L_return + + /* check byte 0 */ + xtrb0 r0, r20 + xtrb0 r2, r21 + subu r0, r2 + br .L_return +# endif /* !__CSKYBE__ */ + +/* Compare when s1 is not aligned. */ +.L_s1_not_aligned: + sub r13, r19, r13 + sub r2, r13 +.L_s1_not_aligned_loop: + ldb r0, (r3, 0) + ldb r4, (r1, 0) + addi r3, 1 + subu r0, r4 + PRE_BNEZAD (r13) + addi r1, 1 + bnez r0, .L_return + BNEZAD (r13, .L_s1_not_aligned_loop) + br .L_s1_aligned +ENDPROC(memcmp) diff --git a/arch/csky/abiv2/memcpy.S b/arch/csky/abiv2/memcpy.S new file mode 100644 index 000000000000..987fec60ab97 --- /dev/null +++ b/arch/csky/abiv2/memcpy.S @@ -0,0 +1,110 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/linkage.h> +#include "sysdep.h" + +ENTRY(__memcpy) +ENTRY(memcpy) + /* Test if len less than 4 bytes. */ + mov r12, r0 + cmplti r2, 4 + bt .L_copy_by_byte + + andi r13, r0, 3 + movi r19, 4 + /* Test if dest is not 4 bytes aligned. */ + bnez r13, .L_dest_not_aligned + +/* Hardware can handle unaligned access directly. */ +.L_dest_aligned: + /* If dest is aligned, then copy. */ + zext r18, r2, 31, 4 + + /* Test if len less than 16 bytes. 
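+ *
+ * The main loop below moves 16 bytes per iteration and has three
+ * builds: a vector copy (vldx/vstx) when VDSPV2 is available, an
+ * interleaved word copy for CK860 (presumably to suit its pipeline),
+ * and a plain four-word burst otherwise. In rough C (a sketch, not
+ * the generated code):
+ *
+ *    while (n >= 16) {
+ *        copy_16_bytes(dst, src);    // one of the three variants
+ *        dst += 16; src += 16; n -= 16;
+ *    }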
*/ + bez r18, .L_len_less_16bytes + movi r19, 0 + + LABLE_ALIGN +.L_len_larger_16bytes: +#if defined(__CSKY_VDSPV2__) + vldx.8 vr0, (r1), r19 + PRE_BNEZAD (r18) + addi r1, 16 + vstx.8 vr0, (r0), r19 + addi r0, 16 +#elif defined(__CK860__) + ldw r3, (r1, 0) + stw r3, (r0, 0) + ldw r3, (r1, 4) + stw r3, (r0, 4) + ldw r3, (r1, 8) + stw r3, (r0, 8) + ldw r3, (r1, 12) + addi r1, 16 + stw r3, (r0, 12) + addi r0, 16 +#else + ldw r20, (r1, 0) + ldw r21, (r1, 4) + ldw r22, (r1, 8) + ldw r23, (r1, 12) + stw r20, (r0, 0) + stw r21, (r0, 4) + stw r22, (r0, 8) + stw r23, (r0, 12) + PRE_BNEZAD (r18) + addi r1, 16 + addi r0, 16 +#endif + BNEZAD (r18, .L_len_larger_16bytes) + +.L_len_less_16bytes: + zext r18, r2, 3, 2 + bez r18, .L_copy_by_byte +.L_len_less_16bytes_loop: + ldw r3, (r1, 0) + PRE_BNEZAD (r18) + addi r1, 4 + stw r3, (r0, 0) + addi r0, 4 + BNEZAD (r18, .L_len_less_16bytes_loop) + +/* Test if len less than 4 bytes. */ +.L_copy_by_byte: + zext r18, r2, 1, 0 + bez r18, .L_return +.L_copy_by_byte_loop: + ldb r3, (r1, 0) + PRE_BNEZAD (r18) + addi r1, 1 + stb r3, (r0, 0) + addi r0, 1 + BNEZAD (r18, .L_copy_by_byte_loop) + +.L_return: + mov r0, r12 + rts + +/* + * If dest is not aligned, just copying some bytes makes the + * dest align. + */ +.L_dest_not_aligned: + sub r13, r19, r13 + sub r2, r13 + +/* Makes the dest align. */ +.L_dest_not_aligned_loop: + ldb r3, (r1, 0) + PRE_BNEZAD (r13) + addi r1, 1 + stb r3, (r0, 0) + addi r0, 1 + BNEZAD (r13, .L_dest_not_aligned_loop) + cmplti r2, 4 + bt .L_copy_by_byte + + /* Check whether the src is aligned. */ + jbr .L_dest_aligned +ENDPROC(__memcpy) diff --git a/arch/csky/abiv2/memmove.S b/arch/csky/abiv2/memmove.S new file mode 100644 index 000000000000..b0c42ecf1889 --- /dev/null +++ b/arch/csky/abiv2/memmove.S @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/linkage.h> +#include "sysdep.h" + + .weak memmove +ENTRY(__memmove) +ENTRY(memmove) + subu r3, r0, r1 + cmphs r3, r2 + bt memcpy + + mov r12, r0 + addu r0, r0, r2 + addu r1, r1, r2 + + /* Test if len less than 4 bytes. */ + cmplti r2, 4 + bt .L_copy_by_byte + + andi r13, r0, 3 + /* Test if dest is not 4 bytes aligned. */ + bnez r13, .L_dest_not_aligned + /* Hardware can handle unaligned access directly. */ +.L_dest_aligned: + /* If dest is aligned, then copy. */ + zext r18, r2, 31, 4 + /* Test if len less than 16 bytes. */ + bez r18, .L_len_less_16bytes + movi r19, 0 + + /* len > 16 bytes */ + LABLE_ALIGN +.L_len_larger_16bytes: + subi r1, 16 + subi r0, 16 +#if defined(__CSKY_VDSPV2__) + vldx.8 vr0, (r1), r19 + PRE_BNEZAD (r18) + vstx.8 vr0, (r0), r19 +#elif defined(__CK860__) + ldw r3, (r1, 12) + stw r3, (r0, 12) + ldw r3, (r1, 8) + stw r3, (r0, 8) + ldw r3, (r1, 4) + stw r3, (r0, 4) + ldw r3, (r1, 0) + stw r3, (r0, 0) +#else + ldw r20, (r1, 0) + ldw r21, (r1, 4) + ldw r22, (r1, 8) + ldw r23, (r1, 12) + stw r20, (r0, 0) + stw r21, (r0, 4) + stw r22, (r0, 8) + stw r23, (r0, 12) + PRE_BNEZAD (r18) +#endif + BNEZAD (r18, .L_len_larger_16bytes) + +.L_len_less_16bytes: + zext r18, r2, 3, 2 + bez r18, .L_copy_by_byte +.L_len_less_16bytes_loop: + subi r1, 4 + subi r0, 4 + ldw r3, (r1, 0) + PRE_BNEZAD (r18) + stw r3, (r0, 0) + BNEZAD (r18, .L_len_less_16bytes_loop) + + /* Test if len less than 4 bytes. 
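+ *
+ * Reminder: the entry test (subu/cmphs) tail-called memcpy whenever
+ * (unsigned)(dst - src) >= len, i.e. whenever a forward copy is
+ * safe. Only the overlapping case src < dst < src + len gets here,
+ * which is why both pointers were advanced to the buffer ends and
+ * all the loops step downwards with subi. Equivalent C check:
+ *
+ *    if ((uintptr_t)dst - (uintptr_t)src >= n)
+ *        return memcpy(dst, src, n);    // forward copy is safe
+ *    // otherwise copy from the top down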
*/ +.L_copy_by_byte: + zext r18, r2, 1, 0 + bez r18, .L_return +.L_copy_by_byte_loop: + subi r1, 1 + subi r0, 1 + ldb r3, (r1, 0) + PRE_BNEZAD (r18) + stb r3, (r0, 0) + BNEZAD (r18, .L_copy_by_byte_loop) + +.L_return: + mov r0, r12 + rts + + /* If dest is not aligned, just copy some bytes makes the dest + align. */ +.L_dest_not_aligned: + sub r2, r13 +.L_dest_not_aligned_loop: + subi r1, 1 + subi r0, 1 + /* Makes the dest align. */ + ldb r3, (r1, 0) + PRE_BNEZAD (r13) + stb r3, (r0, 0) + BNEZAD (r13, .L_dest_not_aligned_loop) + cmplti r2, 4 + bt .L_copy_by_byte + /* Check whether the src is aligned. */ + jbr .L_dest_aligned +ENDPROC(memmove) +ENDPROC(__memmove) diff --git a/arch/csky/abiv2/memset.S b/arch/csky/abiv2/memset.S new file mode 100644 index 000000000000..a7e7d994b667 --- /dev/null +++ b/arch/csky/abiv2/memset.S @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/linkage.h> +#include "sysdep.h" + + .weak memset +ENTRY(__memset) +ENTRY(memset) + /* Test if len less than 4 bytes. */ + mov r12, r0 + cmplti r2, 8 + bt .L_set_by_byte + + andi r13, r0, 3 + movi r19, 4 + /* Test if dest is not 4 bytes aligned. */ + bnez r13, .L_dest_not_aligned + /* Hardware can handle unaligned access directly. */ +.L_dest_aligned: + zextb r3, r1 + lsli r1, 8 + or r1, r3 + lsli r3, r1, 16 + or r3, r1 + + /* If dest is aligned, then copy. */ + zext r18, r2, 31, 4 + /* Test if len less than 16 bytes. */ + bez r18, .L_len_less_16bytes + + LABLE_ALIGN +.L_len_larger_16bytes: + stw r3, (r0, 0) + stw r3, (r0, 4) + stw r3, (r0, 8) + stw r3, (r0, 12) + PRE_BNEZAD (r18) + addi r0, 16 + BNEZAD (r18, .L_len_larger_16bytes) + +.L_len_less_16bytes: + zext r18, r2, 3, 2 + andi r2, 3 + bez r18, .L_set_by_byte +.L_len_less_16bytes_loop: + stw r3, (r0, 0) + PRE_BNEZAD (r18) + addi r0, 4 + BNEZAD (r18, .L_len_less_16bytes_loop) + + /* Test if len less than 4 bytes. */ +.L_set_by_byte: + zext r18, r2, 2, 0 + bez r18, .L_return +.L_set_by_byte_loop: + stb r1, (r0, 0) + PRE_BNEZAD (r18) + addi r0, 1 + BNEZAD (r18, .L_set_by_byte_loop) + +.L_return: + mov r0, r12 + rts + + /* If dest is not aligned, just set some bytes makes the dest + align. */ + +.L_dest_not_aligned: + sub r13, r19, r13 + sub r2, r13 +.L_dest_not_aligned_loop: + /* Makes the dest align. */ + stb r1, (r0, 0) + PRE_BNEZAD (r13) + addi r0, 1 + BNEZAD (r13, .L_dest_not_aligned_loop) + cmplti r2, 8 + bt .L_set_by_byte + /* Check whether the src is aligned. */ + jbr .L_dest_aligned +ENDPROC(memset) +ENDPROC(__memset) diff --git a/arch/csky/abiv2/strcmp.S b/arch/csky/abiv2/strcmp.S new file mode 100644 index 000000000000..f8403f4d8c2b --- /dev/null +++ b/arch/csky/abiv2/strcmp.S @@ -0,0 +1,168 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/linkage.h> +#include "sysdep.h" + +ENTRY(strcmp) + mov a3, a0 + /* Check if the s1 addr is aligned. */ + xor a2, a3, a1 + andi a2, 0x3 + bnez a2, 7f + andi t1, a0, 0x3 + bnez t1, 5f + +1: + /* If aligned, load word each time. */ + ldw t0, (a3, 0) + ldw t1, (a1, 0) + /* If s1[i] != s2[i], goto 2f. */ + cmpne t0, t1 + bt 2f + /* If s1[i] == s2[i], check if s1 or s2 is at the end. */ + tstnbz t0 + /* If at the end, goto 3f (finish comparing). 
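+ *
+ * tstnbz sets the condition bit only while no byte of the word is
+ * zero, so "bf 3f" exits once the terminator shows up in a pair of
+ * equal words. One step of the loop in C (has_zero_byte() is an
+ * invented name):
+ *
+ *    if (w1 != w2)
+ *        goto diff;     // 2f: locate the differing byte
+ *    if (has_zero_byte(w1))
+ *        goto equal;    // 3f: strings match, return 0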
*/ + bf 3f + + ldw t0, (a3, 4) + ldw t1, (a1, 4) + cmpne t0, t1 + bt 2f + tstnbz t0 + bf 3f + + ldw t0, (a3, 8) + ldw t1, (a1, 8) + cmpne t0, t1 + bt 2f + tstnbz t0 + bf 3f + + ldw t0, (a3, 12) + ldw t1, (a1, 12) + cmpne t0, t1 + bt 2f + tstnbz t0 + bf 3f + + ldw t0, (a3, 16) + ldw t1, (a1, 16) + cmpne t0, t1 + bt 2f + tstnbz t0 + bf 3f + + ldw t0, (a3, 20) + ldw t1, (a1, 20) + cmpne t0, t1 + bt 2f + tstnbz t0 + bf 3f + + ldw t0, (a3, 24) + ldw t1, (a1, 24) + cmpne t0, t1 + bt 2f + tstnbz t0 + bf 3f + + ldw t0, (a3, 28) + ldw t1, (a1, 28) + cmpne t0, t1 + bt 2f + tstnbz t0 + bf 3f + + addi a3, 32 + addi a1, 32 + + br 1b + +# ifdef __CSKYBE__ + /* d[i] != s[i] in word, so we check byte 0. */ +2: + xtrb0 a0, t0 + xtrb0 a2, t1 + subu a0, a2 + bez a2, 4f + bnez a0, 4f + + /* check byte 1 */ + xtrb1 a0, t0 + xtrb1 a2, t1 + subu a0, a2 + bez a2, 4f + bnez a0, 4f + + /* check byte 2 */ + xtrb2 a0, t0 + xtrb2 a2, t1 + subu a0, a2 + bez a2, 4f + bnez a0, 4f + + /* check byte 3 */ + xtrb3 a0, t0 + xtrb3 a2, t1 + subu a0, a2 +# else + /* s1[i] != s2[i] in word, so we check byte 3. */ +2: + xtrb3 a0, t0 + xtrb3 a2, t1 + subu a0, a2 + bez a2, 4f + bnez a0, 4f + + /* check byte 2 */ + xtrb2 a0, t0 + xtrb2 a2, t1 + subu a0, a2 + bez a2, 4f + bnez a0, 4f + + /* check byte 1 */ + xtrb1 a0, t0 + xtrb1 a2, t1 + subu a0, a2 + bez a2, 4f + bnez a0, 4f + + /* check byte 0 */ + xtrb0 a0, t0 + xtrb0 a2, t1 + subu a0, a2 + +# endif /* !__CSKYBE__ */ + jmp lr +3: + movi a0, 0 +4: + jmp lr + + /* Compare when s1 or s2 is not aligned. */ +5: + subi t1, 4 +6: + ldb a0, (a3, 0) + ldb a2, (a1, 0) + subu a0, a2 + bez a2, 4b + bnez a0, 4b + addi t1, 1 + addi a1, 1 + addi a3, 1 + bnez t1, 6b + br 1b + +7: + ldb a0, (a3, 0) + addi a3, 1 + ldb a2, (a1, 0) + addi a1, 1 + subu a0, a2 + bnez a0, 4b + bnez a2, 7b + jmp r15 +ENDPROC(strcmp) diff --git a/arch/csky/abiv2/strcpy.S b/arch/csky/abiv2/strcpy.S new file mode 100644 index 000000000000..3c6d3f6a573a --- /dev/null +++ b/arch/csky/abiv2/strcpy.S @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/linkage.h> +#include "sysdep.h" + +ENTRY(strcpy) + mov a3, a0 + /* Check if the src addr is aligned. */ + andi t0, a1, 3 + bnez t0, 11f +1: + /* Check if all the bytes in the word are not zero. 
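+ *
+ * Each aligned source word is stored only after tstnbz proves it is
+ * NUL-free; the word containing the terminator drops to label 9,
+ * which copies it byte by byte (xtrb) up to and including the NUL.
+ * Rough C equivalent (names invented):
+ *
+ *    while (!has_zero_byte(w = *src++))
+ *        *dst++ = w;
+ *    copy_last_word_bytewise(dst, w);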
*/ + ldw a2, (a1) + tstnbz a2 + bf 9f + stw a2, (a3) + + ldw a2, (a1, 4) + tstnbz a2 + bf 2f + stw a2, (a3, 4) + + ldw a2, (a1, 8) + tstnbz a2 + bf 3f + stw a2, (a3, 8) + + ldw a2, (a1, 12) + tstnbz a2 + bf 4f + stw a2, (a3, 12) + + ldw a2, (a1, 16) + tstnbz a2 + bf 5f + stw a2, (a3, 16) + + ldw a2, (a1, 20) + tstnbz a2 + bf 6f + stw a2, (a3, 20) + + ldw a2, (a1, 24) + tstnbz a2 + bf 7f + stw a2, (a3, 24) + + ldw a2, (a1, 28) + tstnbz a2 + bf 8f + stw a2, (a3, 28) + + addi a3, 32 + addi a1, 32 + br 1b + + +2: + addi a3, 4 + br 9f + +3: + addi a3, 8 + br 9f + +4: + addi a3, 12 + br 9f + +5: + addi a3, 16 + br 9f + +6: + addi a3, 20 + br 9f + +7: + addi a3, 24 + br 9f + +8: + addi a3, 28 +9: +# ifdef __CSKYBE__ + xtrb0 t0, a2 + st.b t0, (a3) + bez t0, 10f + xtrb1 t0, a2 + st.b t0, (a3, 1) + bez t0, 10f + xtrb2 t0, a2 + st.b t0, (a3, 2) + bez t0, 10f + stw a2, (a3) +# else + xtrb3 t0, a2 + st.b t0, (a3) + bez t0, 10f + xtrb2 t0, a2 + st.b t0, (a3, 1) + bez t0, 10f + xtrb1 t0, a2 + st.b t0, (a3, 2) + bez t0, 10f + stw a2, (a3) +# endif /* !__CSKYBE__ */ +10: + jmp lr + +11: + subi t0, 4 +12: + ld.b a2, (a1) + st.b a2, (a3) + bez a2, 10b + addi t0, 1 + addi a1, a1, 1 + addi a3, a3, 1 + bnez t0, 12b + jbr 1b +ENDPROC(strcpy) diff --git a/arch/csky/abiv2/strksyms.c b/arch/csky/abiv2/strksyms.c new file mode 100644 index 000000000000..06da723d8202 --- /dev/null +++ b/arch/csky/abiv2/strksyms.c @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/module.h> + +EXPORT_SYMBOL(memcpy); +EXPORT_SYMBOL(memset); +EXPORT_SYMBOL(memcmp); +EXPORT_SYMBOL(memmove); +EXPORT_SYMBOL(strcmp); +EXPORT_SYMBOL(strcpy); +EXPORT_SYMBOL(strlen); diff --git a/arch/csky/abiv2/strlen.S b/arch/csky/abiv2/strlen.S new file mode 100644 index 000000000000..bcdd70764d08 --- /dev/null +++ b/arch/csky/abiv2/strlen.S @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/linkage.h> +#include "sysdep.h" + +ENTRY(strlen) + /* Check if the start addr is aligned. */ + mov r3, r0 + andi r1, r0, 3 + movi r2, 4 + movi r0, 0 + bnez r1, .L_start_not_aligned + + LABLE_ALIGN +.L_start_addr_aligned: + /* Check if all the bytes in the word are not zero. 
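+ *
+ * Same word-at-a-time trick as strcmp/strcpy: r0 grows by 4 for
+ * every NUL-free word, then .L_string_tail adds the terminator's
+ * offset inside the last word. Worked example: strlen("abcde")
+ * counts 4 for "abcd" plus 1 byte from the tail word = 5.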
*/ + ldw r1, (r3) + tstnbz r1 + bf .L_string_tail + + ldw r1, (r3, 4) + addi r0, 4 + tstnbz r1 + bf .L_string_tail + + ldw r1, (r3, 8) + addi r0, 4 + tstnbz r1 + bf .L_string_tail + + ldw r1, (r3, 12) + addi r0, 4 + tstnbz r1 + bf .L_string_tail + + ldw r1, (r3, 16) + addi r0, 4 + tstnbz r1 + bf .L_string_tail + + ldw r1, (r3, 20) + addi r0, 4 + tstnbz r1 + bf .L_string_tail + + ldw r1, (r3, 24) + addi r0, 4 + tstnbz r1 + bf .L_string_tail + + ldw r1, (r3, 28) + addi r0, 4 + tstnbz r1 + bf .L_string_tail + + addi r0, 4 + addi r3, 32 + br .L_start_addr_aligned + +.L_string_tail: +# ifdef __CSKYBE__ + xtrb0 r3, r1 + bez r3, .L_return + addi r0, 1 + xtrb1 r3, r1 + bez r3, .L_return + addi r0, 1 + xtrb2 r3, r1 + bez r3, .L_return + addi r0, 1 +# else + xtrb3 r3, r1 + bez r3, .L_return + addi r0, 1 + xtrb2 r3, r1 + bez r3, .L_return + addi r0, 1 + xtrb1 r3, r1 + bez r3, .L_return + addi r0, 1 +# endif /* !__CSKYBE__ */ + +.L_return: + rts + +.L_start_not_aligned: + sub r2, r2, r1 +.L_start_not_aligned_loop: + ldb r1, (r3) + PRE_BNEZAD (r2) + addi r3, 1 + bez r1, .L_return + addi r0, 1 + BNEZAD (r2, .L_start_not_aligned_loop) + br .L_start_addr_aligned +ENDPROC(strlen) diff --git a/arch/csky/abiv2/sysdep.h b/arch/csky/abiv2/sysdep.h new file mode 100644 index 000000000000..bbbedfd34777 --- /dev/null +++ b/arch/csky/abiv2/sysdep.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __SYSDEP_H +#define __SYSDEP_H + +#ifdef __ASSEMBLER__ + +#if defined(__CK860__) +#define LABLE_ALIGN \ + .balignw 16, 0x6c03 + +#define PRE_BNEZAD(R) + +#define BNEZAD(R, L) \ + bnezad R, L +#else +#define LABLE_ALIGN \ + .balignw 8, 0x6c03 + +#define PRE_BNEZAD(R) \ + subi R, 1 + +#define BNEZAD(R, L) \ + bnez R, L +#endif + +#endif + +#endif diff --git a/arch/csky/boot/Makefile b/arch/csky/boot/Makefile new file mode 100644 index 000000000000..47d3d723784c --- /dev/null +++ b/arch/csky/boot/Makefile @@ -0,0 +1,24 @@ +targets := Image zImage uImage +targets += $(dtb-y) + +$(obj)/Image: vmlinux FORCE + $(call if_changed,objcopy) + @echo ' Kernel: $@ is ready' + +compress-$(CONFIG_KERNEL_GZIP) = gzip +compress-$(CONFIG_KERNEL_LZO) = lzo +compress-$(CONFIG_KERNEL_LZMA) = lzma +compress-$(CONFIG_KERNEL_XZ) = xzkern +compress-$(CONFIG_KERNEL_LZ4) = lz4 + +$(obj)/zImage: $(obj)/Image FORCE + $(call if_changed,$(compress-y)) + @echo ' Kernel: $@ is ready' + +UIMAGE_ARCH = sandbox +UIMAGE_COMPRESSION = $(compress-y) +UIMAGE_LOADADDR = $(shell $(NM) vmlinux | awk '$$NF == "_start" {print $$1}') + +$(obj)/uImage: $(obj)/zImage + $(call if_changed,uimage) + @echo 'Image: $@ is ready' diff --git a/arch/csky/boot/dts/Makefile b/arch/csky/boot/dts/Makefile new file mode 100644 index 000000000000..305e81a5e91e --- /dev/null +++ b/arch/csky/boot/dts/Makefile @@ -0,0 +1,13 @@ +dtstree := $(srctree)/$(src) + +ifneq '$(CONFIG_CSKY_BUILTIN_DTB)' '""' +builtindtb-y := $(patsubst "%",%,$(CONFIG_CSKY_BUILTIN_DTB)) +dtb-y += $(builtindtb-y).dtb +obj-y += $(builtindtb-y).dtb.o +.SECONDARY: $(obj)/$(builtindtb-y).dtb.S +else +dtb-y := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts)) +endif + +always += $(dtb-y) +clean-files += *.dtb *.dtb.S diff --git a/arch/csky/boot/dts/include/dt-bindings b/arch/csky/boot/dts/include/dt-bindings new file mode 120000 index 000000000000..08c00e4972fa --- /dev/null +++ b/arch/csky/boot/dts/include/dt-bindings @@ -0,0 +1 @@ +../../../../../include/dt-bindings
\ No newline at end of file diff --git a/arch/csky/configs/defconfig b/arch/csky/configs/defconfig new file mode 100644 index 000000000000..7ef42895dfb0 --- /dev/null +++ b/arch/csky/configs/defconfig @@ -0,0 +1,61 @@ +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_DEFAULT_HOSTNAME="csky" +# CONFIG_SWAP is not set +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_AUDIT=y +CONFIG_NO_HZ_IDLE=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_DEFAULT_DEADLINE=y +CONFIG_CPU_CK807=y +CONFIG_CPU_HAS_FPU=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_INET=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=65536 +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_SERIAL_NONSTANDARD=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_TTY_PRINTK=y +# CONFIG_VGA_CONSOLE is not set +CONFIG_CSKY_MPTIMER=y +CONFIG_GX6605S_TIMER=y +CONFIG_PM_DEVFREQ=y +CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y +CONFIG_DEVFREQ_GOV_PERFORMANCE=y +CONFIG_DEVFREQ_GOV_POWERSAVE=y +CONFIG_DEVFREQ_GOV_USERSPACE=y +CONFIG_GENERIC_PHY=y +CONFIG_EXT4_FS=y +CONFIG_FANOTIFY=y +CONFIG_QUOTA=y +CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y +CONFIG_CACHEFILES=m +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_UTF8=y +CONFIG_NTFS_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_CHILDREN=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_CONFIGFS_FS=y +CONFIG_CRAMFS=y +CONFIG_ROMFS_FS=y +CONFIG_NFS_FS=y +CONFIG_PRINTK_TIME=y +CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_FS=y +CONFIG_MAGIC_SYSRQ=y diff --git a/arch/csky/include/asm/Kbuild b/arch/csky/include/asm/Kbuild new file mode 100644 index 000000000000..2a0abe8f2a35 --- /dev/null +++ b/arch/csky/include/asm/Kbuild @@ -0,0 +1,49 @@ +generic-y += asm-offsets.h +generic-y += bugs.h +generic-y += clkdev.h +generic-y += compat.h +generic-y += current.h +generic-y += delay.h +generic-y += device.h +generic-y += div64.h +generic-y += dma.h +generic-y += dma-contiguous.h +generic-y += dma-mapping.h +generic-y += emergency-restart.h +generic-y += exec.h +generic-y += fb.h +generic-y += ftrace.h +generic-y += futex.h +generic-y += gpio.h +generic-y += hardirq.h +generic-y += hw_irq.h +generic-y += irq.h +generic-y += irq_regs.h +generic-y += irq_work.h +generic-y += kdebug.h +generic-y += kmap_types.h +generic-y += kprobes.h +generic-y += kvm_para.h +generic-y += linkage.h +generic-y += local.h +generic-y += local64.h +generic-y += mm-arch-hooks.h +generic-y += module.h +generic-y += mutex.h +generic-y += pci.h +generic-y += percpu.h +generic-y += preempt.h +generic-y += qrwlock.h +generic-y += scatterlist.h +generic-y += sections.h +generic-y += serial.h +generic-y += shm.h +generic-y += timex.h +generic-y += topology.h +generic-y += trace_clock.h +generic-y += unaligned.h +generic-y += user.h +generic-y += vga.h +generic-y += vmlinux.lds.h +generic-y += word-at-a-time.h +generic-y += xor.h diff --git a/arch/csky/include/asm/addrspace.h b/arch/csky/include/asm/addrspace.h new file mode 100644 index 000000000000..d1c2ede692ed --- /dev/null +++ b/arch/csky/include/asm/addrspace.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
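+
+/*
+ * KSEG0 is the cached, direct-mapped kernel segment. KSEG0ADDR()
+ * keeps the low 512MB offset of an address and ors in the segment
+ * base, e.g. KSEG0ADDR(0x00100000) == 0x80100000.
+ */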
+ +#ifndef __ASM_CSKY_ADDRSPACE_H +#define __ASM_CSKY_ADDRSPACE_H + +#define KSEG0 0x80000000ul +#define KSEG0ADDR(a) (((unsigned long)a & 0x1fffffff) | KSEG0) + +#endif /* __ASM_CSKY_ADDRSPACE_H */ diff --git a/arch/csky/include/asm/atomic.h b/arch/csky/include/asm/atomic.h new file mode 100644 index 000000000000..e369d73b13e3 --- /dev/null +++ b/arch/csky/include/asm/atomic.h @@ -0,0 +1,212 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_ATOMIC_H +#define __ASM_CSKY_ATOMIC_H + +#include <linux/version.h> +#include <asm/cmpxchg.h> +#include <asm/barrier.h> + +#ifdef CONFIG_CPU_HAS_LDSTEX + +#define __atomic_add_unless __atomic_add_unless +static inline int __atomic_add_unless(atomic_t *v, int a, int u) +{ + unsigned long tmp, ret; + + smp_mb(); + + asm volatile ( + "1: ldex.w %0, (%3) \n" + " mov %1, %0 \n" + " cmpne %0, %4 \n" + " bf 2f \n" + " add %0, %2 \n" + " stex.w %0, (%3) \n" + " bez %0, 1b \n" + "2: \n" + : "=&r" (tmp), "=&r" (ret) + : "r" (a), "r"(&v->counter), "r"(u) + : "memory"); + + if (ret != u) + smp_mb(); + + return ret; +} + +#define ATOMIC_OP(op, c_op) \ +static inline void atomic_##op(int i, atomic_t *v) \ +{ \ + unsigned long tmp; \ + \ + asm volatile ( \ + "1: ldex.w %0, (%2) \n" \ + " " #op " %0, %1 \n" \ + " stex.w %0, (%2) \n" \ + " bez %0, 1b \n" \ + : "=&r" (tmp) \ + : "r" (i), "r"(&v->counter) \ + : "memory"); \ +} + +#define ATOMIC_OP_RETURN(op, c_op) \ +static inline int atomic_##op##_return(int i, atomic_t *v) \ +{ \ + unsigned long tmp, ret; \ + \ + smp_mb(); \ + asm volatile ( \ + "1: ldex.w %0, (%3) \n" \ + " " #op " %0, %2 \n" \ + " mov %1, %0 \n" \ + " stex.w %0, (%3) \n" \ + " bez %0, 1b \n" \ + : "=&r" (tmp), "=&r" (ret) \ + : "r" (i), "r"(&v->counter) \ + : "memory"); \ + smp_mb(); \ + \ + return ret; \ +} + +#define ATOMIC_FETCH_OP(op, c_op) \ +static inline int atomic_fetch_##op(int i, atomic_t *v) \ +{ \ + unsigned long tmp, ret; \ + \ + smp_mb(); \ + asm volatile ( \ + "1: ldex.w %0, (%3) \n" \ + " mov %1, %0 \n" \ + " " #op " %0, %2 \n" \ + " stex.w %0, (%3) \n" \ + " bez %0, 1b \n" \ + : "=&r" (tmp), "=&r" (ret) \ + : "r" (i), "r"(&v->counter) \ + : "memory"); \ + smp_mb(); \ + \ + return ret; \ +} + +#else /* CONFIG_CPU_HAS_LDSTEX */ + +#include <linux/irqflags.h> + +#define __atomic_add_unless __atomic_add_unless +static inline int __atomic_add_unless(atomic_t *v, int a, int u) +{ + unsigned long tmp, ret, flags; + + raw_local_irq_save(flags); + + asm volatile ( + " ldw %0, (%3) \n" + " mov %1, %0 \n" + " cmpne %0, %4 \n" + " bf 2f \n" + " add %0, %2 \n" + " stw %0, (%3) \n" + "2: \n" + : "=&r" (tmp), "=&r" (ret) + : "r" (a), "r"(&v->counter), "r"(u) + : "memory"); + + raw_local_irq_restore(flags); + + return ret; +} + +#define ATOMIC_OP(op, c_op) \ +static inline void atomic_##op(int i, atomic_t *v) \ +{ \ + unsigned long tmp, flags; \ + \ + raw_local_irq_save(flags); \ + \ + asm volatile ( \ + " ldw %0, (%2) \n" \ + " " #op " %0, %1 \n" \ + " stw %0, (%2) \n" \ + : "=&r" (tmp) \ + : "r" (i), "r"(&v->counter) \ + : "memory"); \ + \ + raw_local_irq_restore(flags); \ +} + +#define ATOMIC_OP_RETURN(op, c_op) \ +static inline int atomic_##op##_return(int i, atomic_t *v) \ +{ \ + unsigned long tmp, ret, flags; \ + \ + raw_local_irq_save(flags); \ + \ + asm volatile ( \ + " ldw %0, (%3) \n" \ + " " #op " %0, %2 \n" \ + " stw %0, (%3) \n" \ + " mov %1, %0 \n" \ + : "=&r" (tmp), "=&r" (ret) \ + : "r" (i), "r"(&v->counter) \ + : "memory"); \ + \ + raw_local_irq_restore(flags); \ + \ + return ret; \ +} + +#define ATOMIC_FETCH_OP(op, 
c_op) \ +static inline int atomic_fetch_##op(int i, atomic_t *v) \ +{ \ + unsigned long tmp, ret, flags; \ + \ + raw_local_irq_save(flags); \ + \ + asm volatile ( \ + " ldw %0, (%3) \n" \ + " mov %1, %0 \n" \ + " " #op " %0, %2 \n" \ + " stw %0, (%3) \n" \ + : "=&r" (tmp), "=&r" (ret) \ + : "r" (i), "r"(&v->counter) \ + : "memory"); \ + \ + raw_local_irq_restore(flags); \ + \ + return ret; \ +} + +#endif /* CONFIG_CPU_HAS_LDSTEX */ + +#define atomic_add_return atomic_add_return +ATOMIC_OP_RETURN(add, +) +#define atomic_sub_return atomic_sub_return +ATOMIC_OP_RETURN(sub, -) + +#define atomic_fetch_add atomic_fetch_add +ATOMIC_FETCH_OP(add, +) +#define atomic_fetch_sub atomic_fetch_sub +ATOMIC_FETCH_OP(sub, -) +#define atomic_fetch_and atomic_fetch_and +ATOMIC_FETCH_OP(and, &) +#define atomic_fetch_or atomic_fetch_or +ATOMIC_FETCH_OP(or, |) +#define atomic_fetch_xor atomic_fetch_xor +ATOMIC_FETCH_OP(xor, ^) + +#define atomic_and atomic_and +ATOMIC_OP(and, &) +#define atomic_or atomic_or +ATOMIC_OP(or, |) +#define atomic_xor atomic_xor +ATOMIC_OP(xor, ^) + +#undef ATOMIC_FETCH_OP +#undef ATOMIC_OP_RETURN +#undef ATOMIC_OP + +#include <asm-generic/atomic.h> + +#endif /* __ASM_CSKY_ATOMIC_H */ diff --git a/arch/csky/include/asm/barrier.h b/arch/csky/include/asm/barrier.h new file mode 100644 index 000000000000..476eb786f22d --- /dev/null +++ b/arch/csky/include/asm/barrier.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_CSKY_BARRIER_H +#define __ASM_CSKY_BARRIER_H + +#ifndef __ASSEMBLY__ + +#define nop() asm volatile ("nop\n":::"memory") + +/* + * sync: completion barrier + * sync.s: completion barrier and shareable to other cores + * sync.i: completion barrier with flush cpu pipeline + * sync.is: completion barrier with flush cpu pipeline and shareable to + * other cores + * + * bar.brwarw: ordering barrier for all load/store instructions before it + * bar.brwarws: ordering barrier for all load/store instructions before it + * and shareable to other cores + * bar.brar: ordering barrier for all load instructions before it + * bar.brars: ordering barrier for all load instructions before it + * and shareable to other cores + * bar.bwaw: ordering barrier for all store instructions before it + * bar.bwaws: ordering barrier for all store instructions before it + * and shareable to other cores + */ + +#ifdef CONFIG_CPU_HAS_CACHEV2 +#define mb() asm volatile ("bar.brwarw\n":::"memory") +#define rmb() asm volatile ("bar.brar\n":::"memory") +#define wmb() asm volatile ("bar.bwaw\n":::"memory") + +#ifdef CONFIG_SMP +#define __smp_mb() asm volatile ("bar.brwarws\n":::"memory") +#define __smp_rmb() asm volatile ("bar.brars\n":::"memory") +#define __smp_wmb() asm volatile ("bar.bwaws\n":::"memory") +#endif /* CONFIG_SMP */ + +#define sync_is() asm volatile ("sync.is\n":::"memory") + +#else /* !CONFIG_CPU_HAS_CACHEV2 */ +#define mb() asm volatile ("sync\n":::"memory") +#endif + +#include <asm-generic/barrier.h> + +#endif /* __ASSEMBLY__ */ +#endif /* __ASM_CSKY_BARRIER_H */ diff --git a/arch/csky/include/asm/bitops.h b/arch/csky/include/asm/bitops.h new file mode 100644 index 000000000000..335f2883fb1e --- /dev/null +++ b/arch/csky/include/asm/bitops.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
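+
+/*
+ * ffs()/fls() below are built from two instructions: brev (bit
+ * reverse) and ff1 (find the first set bit counting from the MSB,
+ * yielding 32 for a zero word). For reference:
+ *
+ *    ffs(0) == 0, ffs(1) == 1, ffs(0x80000000) == 32
+ *    fls(0) == 0, fls(1) == 1, fls(0x80000000) == 32
+ */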
+ +#ifndef __ASM_CSKY_BITOPS_H +#define __ASM_CSKY_BITOPS_H + +#include <linux/compiler.h> +#include <asm/barrier.h> + +/* + * asm-generic/bitops/ffs.h + */ +static inline int ffs(int x) +{ + if (!x) + return 0; + + asm volatile ( + "brev %0\n" + "ff1 %0\n" + "addi %0, 1\n" + : "=&r"(x) + : "0"(x)); + return x; +} + +/* + * asm-generic/bitops/__ffs.h + */ +static __always_inline unsigned long __ffs(unsigned long x) +{ + asm volatile ( + "brev %0\n" + "ff1 %0\n" + : "=&r"(x) + : "0"(x)); + return x; +} + +/* + * asm-generic/bitops/fls.h + */ +static __always_inline int fls(int x) +{ + asm volatile( + "ff1 %0\n" + : "=&r"(x) + : "0"(x)); + + return (32 - x); +} + +/* + * asm-generic/bitops/__fls.h + */ +static __always_inline unsigned long __fls(unsigned long x) +{ + return fls(x) - 1; +} + +#include <asm-generic/bitops/ffz.h> +#include <asm-generic/bitops/fls64.h> +#include <asm-generic/bitops/find.h> + +#ifndef _LINUX_BITOPS_H +#error only <linux/bitops.h> can be included directly +#endif + +#include <asm-generic/bitops/sched.h> +#include <asm-generic/bitops/hweight.h> +#include <asm-generic/bitops/lock.h> +#include <asm-generic/bitops/atomic.h> + +/* + * bug fix, why only could use atomic!!!! + */ +#include <asm-generic/bitops/non-atomic.h> +#define __clear_bit(nr, vaddr) clear_bit(nr, vaddr) + +#include <asm-generic/bitops/le.h> +#include <asm-generic/bitops/ext2-atomic.h> +#endif /* __ASM_CSKY_BITOPS_H */ diff --git a/arch/csky/include/asm/bug.h b/arch/csky/include/asm/bug.h new file mode 100644 index 000000000000..bd7b3235bb84 --- /dev/null +++ b/arch/csky/include/asm/bug.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_CSKY_BUG_H +#define __ASM_CSKY_BUG_H + +#include <linux/compiler.h> +#include <linux/const.h> +#include <linux/types.h> + +#define BUG() \ +do { \ + asm volatile ("bkpt\n"); \ + unreachable(); \ +} while (0) + +#define HAVE_ARCH_BUG + +#include <asm-generic/bug.h> + +struct pt_regs; + +void die_if_kernel(char *str, struct pt_regs *regs, int nr); +void show_regs(struct pt_regs *regs); + +#endif /* __ASM_CSKY_BUG_H */ diff --git a/arch/csky/include/asm/cache.h b/arch/csky/include/asm/cache.h new file mode 100644 index 000000000000..d68373463676 --- /dev/null +++ b/arch/csky/include/asm/cache.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_CACHE_H +#define __ASM_CSKY_CACHE_H + +/* bytes per L1 cache line */ +#define L1_CACHE_SHIFT CONFIG_L1_CACHE_SHIFT + +#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) + +#define ARCH_DMA_MINALIGN L1_CACHE_BYTES + +#ifndef __ASSEMBLY__ + +void dcache_wb_line(unsigned long start); + +void icache_inv_range(unsigned long start, unsigned long end); +void icache_inv_all(void); + +void dcache_wb_range(unsigned long start, unsigned long end); +void dcache_wbinv_all(void); + +void cache_wbinv_range(unsigned long start, unsigned long end); +void cache_wbinv_all(void); + +void dma_wbinv_range(unsigned long start, unsigned long end); +void dma_wb_range(unsigned long start, unsigned long end); + +#endif +#endif /* __ASM_CSKY_CACHE_H */ diff --git a/arch/csky/include/asm/cacheflush.h b/arch/csky/include/asm/cacheflush.h new file mode 100644 index 000000000000..a96da67261ae --- /dev/null +++ b/arch/csky/include/asm/cacheflush.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
+ +#ifndef __ASM_CSKY_CACHEFLUSH_H +#define __ASM_CSKY_CACHEFLUSH_H + +#include <abi/cacheflush.h> + +#endif /* __ASM_CSKY_CACHEFLUSH_H */ diff --git a/arch/csky/include/asm/checksum.h b/arch/csky/include/asm/checksum.h new file mode 100644 index 000000000000..7685824291b1 --- /dev/null +++ b/arch/csky/include/asm/checksum.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_CSKY_CHECKSUM_H +#define __ASM_CSKY_CHECKSUM_H + +#include <linux/in6.h> +#include <asm/byteorder.h> + +static inline __sum16 csum_fold(__wsum csum) +{ + u32 tmp; + + asm volatile( + "mov %1, %0\n" + "rori %0, 16\n" + "addu %0, %1\n" + "lsri %0, 16\n" + : "=r"(csum), "=r"(tmp) + : "0"(csum)); + + return (__force __sum16) ~csum; +} +#define csum_fold csum_fold + +static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, + unsigned short len, unsigned short proto, __wsum sum) +{ + asm volatile( + "clrc\n" + "addc %0, %1\n" + "addc %0, %2\n" + "addc %0, %3\n" + "inct %0\n" + : "=r"(sum) + : "r"((__force u32)saddr), "r"((__force u32)daddr), +#ifdef __BIG_ENDIAN + "r"(proto + len), +#else + "r"((proto + len) << 8), +#endif + "0" ((__force unsigned long)sum) + : "cc"); + return sum; +} +#define csum_tcpudp_nofold csum_tcpudp_nofold + +#include <asm-generic/checksum.h> + +#endif /* __ASM_CSKY_CHECKSUM_H */ diff --git a/arch/csky/include/asm/cmpxchg.h b/arch/csky/include/asm/cmpxchg.h new file mode 100644 index 000000000000..89224530a0ee --- /dev/null +++ b/arch/csky/include/asm/cmpxchg.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_CMPXCHG_H +#define __ASM_CSKY_CMPXCHG_H + +#ifdef CONFIG_CPU_HAS_LDSTEX +#include <asm/barrier.h> + +extern void __bad_xchg(void); + +#define __xchg(new, ptr, size) \ +({ \ + __typeof__(ptr) __ptr = (ptr); \ + __typeof__(new) __new = (new); \ + __typeof__(*(ptr)) __ret; \ + unsigned long tmp; \ + switch (size) { \ + case 4: \ + smp_mb(); \ + asm volatile ( \ + "1: ldex.w %0, (%3) \n" \ + " mov %1, %2 \n" \ + " stex.w %1, (%3) \n" \ + " bez %1, 1b \n" \ + : "=&r" (__ret), "=&r" (tmp) \ + : "r" (__new), "r"(__ptr) \ + :); \ + smp_mb(); \ + break; \ + default: \ + __bad_xchg(); \ + } \ + __ret; \ +}) + +#define xchg(ptr, x) (__xchg((x), (ptr), sizeof(*(ptr)))) + +#define __cmpxchg(ptr, old, new, size) \ +({ \ + __typeof__(ptr) __ptr = (ptr); \ + __typeof__(new) __new = (new); \ + __typeof__(new) __tmp; \ + __typeof__(old) __old = (old); \ + __typeof__(*(ptr)) __ret; \ + switch (size) { \ + case 4: \ + smp_mb(); \ + asm volatile ( \ + "1: ldex.w %0, (%3) \n" \ + " cmpne %0, %4 \n" \ + " bt 2f \n" \ + " mov %1, %2 \n" \ + " stex.w %1, (%3) \n" \ + " bez %1, 1b \n" \ + "2: \n" \ + : "=&r" (__ret), "=&r" (__tmp) \ + : "r" (__new), "r"(__ptr), "r"(__old) \ + :); \ + smp_mb(); \ + break; \ + default: \ + __bad_xchg(); \ + } \ + __ret; \ +}) + +#define cmpxchg(ptr, o, n) \ + (__cmpxchg((ptr), (o), (n), sizeof(*(ptr)))) +#else +#include <asm-generic/cmpxchg.h> +#endif + +#endif /* __ASM_CSKY_CMPXCHG_H */ diff --git a/arch/csky/include/asm/elf.h b/arch/csky/include/asm/elf.h new file mode 100644 index 000000000000..773b133ca297 --- /dev/null +++ b/arch/csky/include/asm/elf.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
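+
+/*
+ * 252 is the registered EM_CSKY machine number; elf_check_arch()
+ * below rejects binaries built for anything else.
+ */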
+ +#ifndef __ASM_CSKY_ELF_H +#define __ASM_CSKY_ELF_H + +#include <asm/ptrace.h> +#include <abi/regdef.h> + +#define ELF_ARCH 252 + +/* CSKY Relocations */ +#define R_CSKY_NONE 0 +#define R_CSKY_32 1 +#define R_CSKY_PCIMM8BY4 2 +#define R_CSKY_PCIMM11BY2 3 +#define R_CSKY_PCIMM4BY2 4 +#define R_CSKY_PC32 5 +#define R_CSKY_PCRELJSR_IMM11BY2 6 +#define R_CSKY_GNU_VTINHERIT 7 +#define R_CSKY_GNU_VTENTRY 8 +#define R_CSKY_RELATIVE 9 +#define R_CSKY_COPY 10 +#define R_CSKY_GLOB_DAT 11 +#define R_CSKY_JUMP_SLOT 12 +#define R_CSKY_ADDR_HI16 24 +#define R_CSKY_ADDR_LO16 25 +#define R_CSKY_PCRELJSR_IMM26BY2 40 + +typedef unsigned long elf_greg_t; + +typedef struct user_fp elf_fpregset_t; + +#define ELF_NGREG (sizeof(struct pt_regs) / sizeof(elf_greg_t)) + +typedef elf_greg_t elf_gregset_t[ELF_NGREG]; + +/* + * This is used to ensure we don't load something for the wrong architecture. + */ +#define elf_check_arch(x) ((x)->e_machine == ELF_ARCH) + +/* + * These are used to set parameters in the core dumps. + */ +#define USE_ELF_CORE_DUMP +#define ELF_EXEC_PAGESIZE 4096 +#define ELF_CLASS ELFCLASS32 +#define ELF_PLAT_INIT(_r, load_addr) { _r->a0 = 0; } + +#ifdef __cskyBE__ +#define ELF_DATA ELFDATA2MSB +#else +#define ELF_DATA ELFDATA2LSB +#endif + +/* + * This is the location that an ET_DYN program is loaded if exec'ed. Typical + * use of this is to invoke "./ld.so someprog" to test out a new version of + * the loader. We need to make sure that it is out of the way of the program + * that it will "exec", and that there is sufficient room for the brk. + */ +#define ELF_ET_DYN_BASE 0x0UL +#include <abi/elf.h> + +/* Similar, but for a thread other than current. */ +struct task_struct; +extern int dump_task_regs(struct task_struct *tsk, elf_gregset_t *elf_regs); +#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs) + +#define ELF_HWCAP (0) + +/* + * This yields a string that ld.so will use to load implementation specific + * libraries for optimization. This is more specific in intent than poking + * at uname or /proc/cpuinfo. + */ +#define ELF_PLATFORM (NULL) +#define SET_PERSONALITY(ex) set_personality(PER_LINUX) + +#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 +struct linux_binprm; +extern int arch_setup_additional_pages(struct linux_binprm *bprm, + int uses_interp); +#endif /* __ASM_CSKY_ELF_H */ diff --git a/arch/csky/include/asm/fixmap.h b/arch/csky/include/asm/fixmap.h new file mode 100644 index 000000000000..380ff0a307df --- /dev/null +++ b/arch/csky/include/asm/fixmap.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_CSKY_FIXMAP_H +#define __ASM_CSKY_FIXMAP_H + +#include <asm/page.h> +#ifdef CONFIG_HIGHMEM +#include <linux/threads.h> +#include <asm/kmap_types.h> +#endif + +enum fixed_addresses { +#ifdef CONFIG_HIGHMEM + FIX_KMAP_BEGIN, + FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1, +#endif + __end_of_fixed_addresses +}; + +#define FIXADDR_TOP 0xffffc000 +#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) +#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) + +#include <asm-generic/fixmap.h> + +#endif /* __ASM_CSKY_FIXMAP_H */ diff --git a/arch/csky/include/asm/highmem.h b/arch/csky/include/asm/highmem.h new file mode 100644 index 000000000000..a345a2f2c22e --- /dev/null +++ b/arch/csky/include/asm/highmem.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
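+
+/*
+ * kmap()/kunmap() are the sleeping mappings backed by the PKMAP
+ * window; kmap_atomic() takes a per-cpu fixmap slot and the caller
+ * must not sleep until the matching unmap. Typical use of the
+ * atomic API declared below (a usage sketch, not code from this
+ * patch; kunmap_atomic() is the generic wrapper around
+ * __kunmap_atomic()):
+ *
+ *    void *kaddr = kmap_atomic(page);
+ *    memcpy(kaddr, buf, PAGE_SIZE);
+ *    kunmap_atomic(kaddr);
+ */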
+ +#ifndef __ASM_CSKY_HIGHMEM_H +#define __ASM_CSKY_HIGHMEM_H + +#ifdef __KERNEL__ + +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/uaccess.h> +#include <asm/kmap_types.h> +#include <asm/cache.h> + +/* undef for production */ +#define HIGHMEM_DEBUG 1 + +/* declarations for highmem.c */ +extern unsigned long highstart_pfn, highend_pfn; + +extern pte_t *pkmap_page_table; + +/* + * Right now we initialize only a single pte table. It can be extended + * easily, subsequent pte tables have to be allocated in one physical + * chunk of RAM. + */ +#define LAST_PKMAP 1024 +#define LAST_PKMAP_MASK (LAST_PKMAP-1) +#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT) +#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) + +extern void *kmap_high(struct page *page); +extern void kunmap_high(struct page *page); + +extern void *kmap(struct page *page); +extern void kunmap(struct page *page); +extern void *kmap_atomic(struct page *page); +extern void __kunmap_atomic(void *kvaddr); +extern void *kmap_atomic_pfn(unsigned long pfn); +extern struct page *kmap_atomic_to_page(void *ptr); + +#define flush_cache_kmaps() do {} while (0) + +extern void kmap_init(void); + +#define kmap_prot PAGE_KERNEL + +#endif /* __KERNEL__ */ + +#endif /* __ASM_CSKY_HIGHMEM_H */ diff --git a/arch/csky/include/asm/io.h b/arch/csky/include/asm/io.h new file mode 100644 index 000000000000..ecae6b358f95 --- /dev/null +++ b/arch/csky/include/asm/io.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_CSKY_IO_H +#define __ASM_CSKY_IO_H + +#include <abi/pgtable-bits.h> +#include <linux/types.h> +#include <linux/version.h> + +extern void __iomem *ioremap(phys_addr_t offset, size_t size); + +extern void iounmap(void *addr); + +extern int remap_area_pages(unsigned long address, phys_addr_t phys_addr, + size_t size, unsigned long flags); + +#define ioremap_nocache(phy, sz) ioremap(phy, sz) +#define ioremap_wc ioremap_nocache +#define ioremap_wt ioremap_nocache + +#include <asm-generic/io.h> + +#endif /* __ASM_CSKY_IO_H */ diff --git a/arch/csky/include/asm/irqflags.h b/arch/csky/include/asm/irqflags.h new file mode 100644 index 000000000000..9e3a569a55d6 --- /dev/null +++ b/arch/csky/include/asm/irqflags.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_IRQFLAGS_H +#define __ASM_CSKY_IRQFLAGS_H +#include <abi/reg_ops.h> + +static inline unsigned long arch_local_irq_save(void) +{ + unsigned long flags; + + flags = mfcr("psr"); + asm volatile("psrclr ie\n":::"memory"); + return flags; +} +#define arch_local_irq_save arch_local_irq_save + +static inline void arch_local_irq_enable(void) +{ + asm volatile("psrset ee, ie\n":::"memory"); +} +#define arch_local_irq_enable arch_local_irq_enable + +static inline void arch_local_irq_disable(void) +{ + asm volatile("psrclr ie\n":::"memory"); +} +#define arch_local_irq_disable arch_local_irq_disable + +static inline unsigned long arch_local_save_flags(void) +{ + return mfcr("psr"); +} +#define arch_local_save_flags arch_local_save_flags + +static inline void arch_local_irq_restore(unsigned long flags) +{ + mtcr("psr", flags); +} +#define arch_local_irq_restore arch_local_irq_restore + +static inline int arch_irqs_disabled_flags(unsigned long flags) +{ + return !(flags & (1<<6)); +} +#define arch_irqs_disabled_flags arch_irqs_disabled_flags + +#include <asm-generic/irqflags.h> + +#endif /* __ASM_CSKY_IRQFLAGS_H */ diff --git a/arch/csky/include/asm/mmu.h 
b/arch/csky/include/asm/mmu.h new file mode 100644 index 000000000000..cb344675ccc4 --- /dev/null +++ b/arch/csky/include/asm/mmu.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_CSKY_MMU_H +#define __ASM_CSKY_MMU_H + +typedef struct { + unsigned long asid[NR_CPUS]; + void *vdso; +} mm_context_t; + +#endif /* __ASM_CSKY_MMU_H */ diff --git a/arch/csky/include/asm/mmu_context.h b/arch/csky/include/asm/mmu_context.h new file mode 100644 index 000000000000..c410aa4fff1a --- /dev/null +++ b/arch/csky/include/asm/mmu_context.h @@ -0,0 +1,150 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_CSKY_MMU_CONTEXT_H +#define __ASM_CSKY_MMU_CONTEXT_H + +#include <asm-generic/mm_hooks.h> +#include <asm/setup.h> +#include <asm/page.h> +#include <asm/cacheflush.h> +#include <asm/tlbflush.h> + +#include <linux/errno.h> +#include <linux/sched.h> +#include <abi/ckmmu.h> + +static inline void tlbmiss_handler_setup_pgd(unsigned long pgd, bool kernel) +{ + pgd &= ~(1<<31); + pgd += PHYS_OFFSET; + pgd |= 1; + setup_pgd(pgd, kernel); +} + +#define TLBMISS_HANDLER_SETUP_PGD(pgd) \ + tlbmiss_handler_setup_pgd((unsigned long)pgd, 0) +#define TLBMISS_HANDLER_SETUP_PGD_KERNEL(pgd) \ + tlbmiss_handler_setup_pgd((unsigned long)pgd, 1) + +static inline unsigned long tlb_get_pgd(void) +{ + return ((get_pgd()|(1<<31)) - PHYS_OFFSET) & ~1; +} + +#define cpu_context(cpu, mm) ((mm)->context.asid[cpu]) +#define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & ASID_MASK) +#define asid_cache(cpu) (cpu_data[cpu].asid_cache) + +#define ASID_FIRST_VERSION (1 << CONFIG_CPU_ASID_BITS) +#define ASID_INC 0x1 +#define ASID_MASK (ASID_FIRST_VERSION - 1) +#define ASID_VERSION_MASK ~ASID_MASK + +#define destroy_context(mm) do {} while (0) +#define enter_lazy_tlb(mm, tsk) do {} while (0) +#define deactivate_mm(tsk, mm) do {} while (0) + +/* + * All unused by hardware upper bits will be considered + * as a software asid extension. + */ +static inline void +get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) +{ + unsigned long asid = asid_cache(cpu); + + asid += ASID_INC; + if (!(asid & ASID_MASK)) { + flush_tlb_all(); /* start new asid cycle */ + if (!asid) /* fix version if needed */ + asid = ASID_FIRST_VERSION; + } + cpu_context(cpu, mm) = asid_cache(cpu) = asid; +} + +/* + * Initialize the context related info for a new mm_struct + * instance. + */ +static inline int +init_new_context(struct task_struct *tsk, struct mm_struct *mm) +{ + int i; + + for_each_online_cpu(i) + cpu_context(i, mm) = 0; + return 0; +} + +static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, + struct task_struct *tsk) +{ + unsigned int cpu = smp_processor_id(); + unsigned long flags; + + local_irq_save(flags); + /* Check if our ASID is of an older version and thus invalid */ + if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK) + get_new_mmu_context(next, cpu); + write_mmu_entryhi(cpu_asid(cpu, next)); + TLBMISS_HANDLER_SETUP_PGD(next->pgd); + + /* + * Mark current->active_mm as not "active" anymore. + * We don't want to mislead possible IPI tlb flush routines. + */ + cpumask_clear_cpu(cpu, mm_cpumask(prev)); + cpumask_set_cpu(cpu, mm_cpumask(next)); + + local_irq_restore(flags); +} + +/* + * After we have set current->mm to a new value, this activates + * the context for the new mm so we see the new mappings. 
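+ *
+ * Unlike switch_mm(), which keeps an ASID that is still of the
+ * current generation, this always allocates a fresh one. Worked
+ * example of the generation test in switch_mm(), assuming
+ * CONFIG_CPU_ASID_BITS=8 (so ASID_MASK == 0xff):
+ *
+ *    cpu_context(cpu, next) = 0x1fe    // generation 1, hw asid 0xfe
+ *    asid_cache(cpu)        = 0x203    // generation 2, hw asid 0x03
+ *    (0x1fe ^ 0x203) & ~0xff = 0x300   // != 0 -> stale, allocate new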
+ */ +static inline void +activate_mm(struct mm_struct *prev, struct mm_struct *next) +{ + unsigned long flags; + int cpu = smp_processor_id(); + + local_irq_save(flags); + + /* Unconditionally get a new ASID. */ + get_new_mmu_context(next, cpu); + + write_mmu_entryhi(cpu_asid(cpu, next)); + TLBMISS_HANDLER_SETUP_PGD(next->pgd); + + /* mark mmu ownership change */ + cpumask_clear_cpu(cpu, mm_cpumask(prev)); + cpumask_set_cpu(cpu, mm_cpumask(next)); + + local_irq_restore(flags); +} + +/* + * If mm is currently active_mm, we can't really drop it. Instead, + * we will get a new one for it. + */ +static inline void +drop_mmu_context(struct mm_struct *mm, unsigned int cpu) +{ + unsigned long flags; + + local_irq_save(flags); + + if (cpumask_test_cpu(cpu, mm_cpumask(mm))) { + get_new_mmu_context(mm, cpu); + write_mmu_entryhi(cpu_asid(cpu, mm)); + } else { + /* will get a new context next time */ + cpu_context(cpu, mm) = 0; + } + + local_irq_restore(flags); +} + +#endif /* __ASM_CSKY_MMU_CONTEXT_H */ diff --git a/arch/csky/include/asm/page.h b/arch/csky/include/asm/page.h new file mode 100644 index 000000000000..73cf2bd66a13 --- /dev/null +++ b/arch/csky/include/asm/page.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_PAGE_H +#define __ASM_CSKY_PAGE_H + +#include <asm/setup.h> +#include <asm/cache.h> +#include <linux/const.h> + +/* + * PAGE_SHIFT determines the page size + */ +#define PAGE_SHIFT 12 +#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) +#define PAGE_MASK (~(PAGE_SIZE - 1)) +#define THREAD_SIZE (PAGE_SIZE * 2) +#define THREAD_MASK (~(THREAD_SIZE - 1)) +#define THREAD_SHIFT (PAGE_SHIFT + 1) + +/* + * NOTE: virtual isn't really correct, actually it should be the offset into the + * memory node, but we have no highmem, so that works for now. + * TODO: implement (fast) pfn<->pgdat_idx conversion functions, this makes lots + * of the shifts unnecessary. + */ + +#ifndef __ASSEMBLY__ + +#include <linux/pfn.h> + +#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) +#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT) + +#define virt_addr_valid(kaddr) ((void *)(kaddr) >= (void *)PAGE_OFFSET && \ + (void *)(kaddr) < high_memory) +#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr) + +extern void *memset(void *dest, int c, size_t l); +extern void *memcpy(void *to, const void *from, size_t l); + +#define clear_page(page) memset((page), 0, PAGE_SIZE) +#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE) + +#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) +#define phys_to_page(paddr) (pfn_to_page(PFN_DOWN(paddr))) + +struct page; + +#include <abi/page.h> + +struct vm_area_struct; + +/* + * These are used to make use of C type-checking.. 
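+ *
+ * Wrapping the raw values in single-member structs makes pte_t,
+ * pgd_t and pgprot_t distinct types, so mixing them up is a compile
+ * error rather than a silent bug; __pte()/pte_val() and friends do
+ * the explicit conversions. For instance:
+ *
+ *    pte_t pte = __pte(_PAGE_PRESENT);    // fine, explicit
+ *    pte_t bad = pgprot;                  // rejected by the compiler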
+ */ +typedef struct { unsigned long pte_low; } pte_t; +#define pte_val(x) ((x).pte_low) + +typedef struct { unsigned long pgd; } pgd_t; +typedef struct { unsigned long pgprot; } pgprot_t; +typedef struct page *pgtable_t; + +#define pgd_val(x) ((x).pgd) +#define pgprot_val(x) ((x).pgprot) + +#define ptep_buddy(x) ((pte_t *)((unsigned long)(x) ^ sizeof(pte_t))) + +#define __pte(x) ((pte_t) { (x) }) +#define __pgd(x) ((pgd_t) { (x) }) +#define __pgprot(x) ((pgprot_t) { (x) }) + +#endif /* !__ASSEMBLY__ */ + +#define PHYS_OFFSET (CONFIG_RAM_BASE & ~(LOWMEM_LIMIT - 1)) +#define PHYS_OFFSET_OFFSET (CONFIG_RAM_BASE & (LOWMEM_LIMIT - 1)) +#define ARCH_PFN_OFFSET PFN_DOWN(CONFIG_RAM_BASE) + +#define PAGE_OFFSET 0x80000000 +#define LOWMEM_LIMIT 0x40000000 + +#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET) +#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - \ + PHYS_OFFSET)) +#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) + +#define MAP_NR(x) PFN_DOWN((unsigned long)(x) - PAGE_OFFSET - \ + PHYS_OFFSET_OFFSET) +#define virt_to_page(x) (mem_map + MAP_NR(x)) + +#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + +/* + * main RAM and kernel working space are coincident at 0x80000000, but to make + * life more interesting, there's also an uncached virtual shadow at 0xb0000000 + * - these mappings are fixed in the MMU + */ + +#define pfn_to_kaddr(x) __va(PFN_PHYS(x)) + +#include <asm-generic/memory_model.h> +#include <asm-generic/getorder.h> + +#endif /* __ASM_CSKY_PAGE_H */ diff --git a/arch/csky/include/asm/pgalloc.h b/arch/csky/include/asm/pgalloc.h new file mode 100644 index 000000000000..bf4f4a0e140e --- /dev/null +++ b/arch/csky/include/asm/pgalloc.h @@ -0,0 +1,115 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
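+
+/*
+ * Note on the 0x1 fill in the pte allocators below: tables that will
+ * map kernel addresses (bit 31 set) are seeded with a bare
+ * _PAGE_GLOBAL instead of 0, matching pte_clear() in pgtable.h.
+ * Presumably, as on MIPS-style TLBs, entries are refilled in pairs
+ * and the global bit has to be consistent across a pair, so even
+ * "empty" kernel ptes keep it set.
+ */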
+ +#ifndef __ASM_CSKY_PGALLOC_H +#define __ASM_CSKY_PGALLOC_H + +#include <linux/highmem.h> +#include <linux/mm.h> +#include <linux/sched.h> + +static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, + pte_t *pte) +{ + set_pmd(pmd, __pmd(__pa(pte))); +} + +static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, + pgtable_t pte) +{ + set_pmd(pmd, __pmd(__pa(page_address(pte)))); +} + +#define pmd_pgtable(pmd) pmd_page(pmd) + +extern void pgd_init(unsigned long *p); + +static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, + unsigned long address) +{ + pte_t *pte; + unsigned long *kaddr, i; + + pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_RETRY_MAYFAIL, + PTE_ORDER); + kaddr = (unsigned long *)pte; + if (address & 0x80000000) + for (i = 0; i < (PAGE_SIZE/4); i++) + *(kaddr + i) = 0x1; + else + clear_page(kaddr); + + return pte; +} + +static inline struct page *pte_alloc_one(struct mm_struct *mm, + unsigned long address) +{ + struct page *pte; + unsigned long *kaddr, i; + + pte = alloc_pages(GFP_KERNEL | __GFP_RETRY_MAYFAIL, PTE_ORDER); + if (pte) { + kaddr = kmap_atomic(pte); + if (address & 0x80000000) { + for (i = 0; i < (PAGE_SIZE/4); i++) + *(kaddr + i) = 0x1; + } else + clear_page(kaddr); + kunmap_atomic(kaddr); + pgtable_page_ctor(pte); + } + return pte; +} + +static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) +{ + free_pages((unsigned long)pte, PTE_ORDER); +} + +static inline void pte_free(struct mm_struct *mm, pgtable_t pte) +{ + pgtable_page_dtor(pte); + __free_pages(pte, PTE_ORDER); +} + +static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) +{ + free_pages((unsigned long)pgd, PGD_ORDER); +} + +static inline pgd_t *pgd_alloc(struct mm_struct *mm) +{ + pgd_t *ret; + pgd_t *init; + + ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER); + if (ret) { + init = pgd_offset(&init_mm, 0UL); + pgd_init((unsigned long *)ret); + memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD, + (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); + /* prevent out of order excute */ + smp_mb(); +#ifdef CONFIG_CPU_NEED_TLBSYNC + dcache_wb_range((unsigned int)ret, + (unsigned int)(ret + PTRS_PER_PGD)); +#endif + } + + return ret; +} + +#define __pte_free_tlb(tlb, pte, address) \ +do { \ + pgtable_page_dtor(pte); \ + tlb_remove_page(tlb, pte); \ +} while (0) + +#define check_pgt_cache() do {} while (0) + +extern void pagetable_init(void); +extern void pre_mmu_init(void); +extern void pre_trap_init(void); + +#endif /* __ASM_CSKY_PGALLOC_H */ diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h new file mode 100644 index 000000000000..edfcbb25fd9f --- /dev/null +++ b/arch/csky/include/asm/pgtable.h @@ -0,0 +1,306 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
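+
+/*
+ * Resulting 32-bit layout with the constants below (plus fixmap.h
+ * and highmem.h), assuming RAM_BASE = 0:
+ *
+ *    0x00000000 - 0x7fffffff   user space (512 pgd entries, 4MB each)
+ *    0x80000000 - ...          lowmem linear mapping
+ *    0xc0008000 - 0xff7fdfff   vmalloc
+ *    0xff800000 - 0xffbfffff   pkmap (LAST_PKMAP = 1024 pages)
+ *    0xffffc000                FIXADDR_TOP, fixmap grows down
+ */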
+ +#ifndef __ASM_CSKY_PGTABLE_H +#define __ASM_CSKY_PGTABLE_H + +#include <asm/fixmap.h> +#include <asm/addrspace.h> +#include <abi/pgtable-bits.h> +#include <asm-generic/pgtable-nopmd.h> + +#define PGDIR_SHIFT 22 +#define PGDIR_SIZE (1UL << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE-1)) + +#define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE) +#define FIRST_USER_ADDRESS 0UL + +#define PKMAP_BASE (0xff800000) + +#define VMALLOC_START (0xc0008000) +#define VMALLOC_END (PKMAP_BASE - 2*PAGE_SIZE) + +/* + * C-SKY is two-level paging structure: + */ +#define PGD_ORDER 0 +#define PTE_ORDER 0 + +#define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t)) +#define PTRS_PER_PMD 1 +#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t)) + +#define pte_ERROR(e) \ + pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low) +#define pgd_ERROR(e) \ + pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) + +/* Find an entry in the third-level page table.. */ +#define __pte_offset_t(address) \ + (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +#define pte_offset_kernel(dir, address) \ + (pmd_page_vaddr(*(dir)) + __pte_offset_t(address)) +#define pte_offset_map(dir, address) \ + ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset_t(address)) +#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT)) +#define pte_clear(mm, addr, ptep) set_pte((ptep), \ + (((unsigned int)addr&0x80000000)?__pte(1):__pte(0))) +#define pte_none(pte) (!(pte_val(pte)&0xfffffffe)) +#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) +#define pte_pfn(x) ((unsigned long)((x).pte_low >> PAGE_SHIFT)) +#define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << PAGE_SHIFT) \ + | pgprot_val(prot)) + +#define __READABLE (_PAGE_READ | _PAGE_VALID | _PAGE_ACCESSED) +#define __WRITEABLE (_PAGE_WRITE | _PAGE_DIRTY | _PAGE_MODIFIED) + +#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | \ + _CACHE_MASK) + +#define pte_unmap(pte) ((void)(pte)) + +#define __swp_type(x) (((x).val >> 4) & 0xff) +#define __swp_offset(x) ((x).val >> 12) +#define __swp_entry(type, offset) ((swp_entry_t) {((type) << 4) | \ + ((offset) << 12) }) +#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) +#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) + +#define pte_page(x) pfn_to_page(pte_pfn(x)) +#define __mk_pte(page_nr, pgprot) __pte(((page_nr) << PAGE_SHIFT) | \ + pgprot_val(pgprot)) + +/* + * CSKY can't do page protection for execute, and considers that the same like + * read. Also, write permissions imply read permissions. This is the closest + * we can get by reasonable means.. 
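+ *
+ * The __Pxxx/__Sxxx tables below are indexed by the mmap PROT bits
+ * in xwr order, private and shared respectively: __P101
+ * (PROT_READ | PROT_EXEC, private) becomes PAGE_READONLY because
+ * exec degrades to read, and __P011 is PAGE_COPY rather than
+ * PAGE_SHARED so private writable mappings fault for copy-on-write.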
+ */ +#define PAGE_NONE __pgprot(_PAGE_PRESENT | _CACHE_CACHED) +#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ + _CACHE_CACHED) +#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_READ | _CACHE_CACHED) +#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_READ | _CACHE_CACHED) +#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \ + _PAGE_GLOBAL | _CACHE_CACHED) +#define PAGE_USERIO __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ + _CACHE_CACHED) + +#define __P000 PAGE_NONE +#define __P001 PAGE_READONLY +#define __P010 PAGE_COPY +#define __P011 PAGE_COPY +#define __P100 PAGE_READONLY +#define __P101 PAGE_READONLY +#define __P110 PAGE_COPY +#define __P111 PAGE_COPY + +#define __S000 PAGE_NONE +#define __S001 PAGE_READONLY +#define __S010 PAGE_SHARED +#define __S011 PAGE_SHARED +#define __S100 PAGE_READONLY +#define __S101 PAGE_READONLY +#define __S110 PAGE_SHARED +#define __S111 PAGE_SHARED + +extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; +#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) + +extern void load_pgd(unsigned long pg_dir); +extern pte_t invalid_pte_table[PTRS_PER_PTE]; + +static inline int pte_special(pte_t pte) { return 0; } +static inline pte_t pte_mkspecial(pte_t pte) { return pte; } + +static inline void set_pte(pte_t *p, pte_t pte) +{ + *p = pte; +#if defined(CONFIG_CPU_NEED_TLBSYNC) + dcache_wb_line((u32)p); +#endif + /* prevent out of order excution */ + smp_mb(); +} +#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval) + +static inline pte_t *pmd_page_vaddr(pmd_t pmd) +{ + unsigned long ptr; + + ptr = pmd_val(pmd); + + return __va(ptr); +} + +#define pmd_phys(pmd) pmd_val(pmd) + +static inline void set_pmd(pmd_t *p, pmd_t pmd) +{ + *p = pmd; +#if defined(CONFIG_CPU_NEED_TLBSYNC) + dcache_wb_line((u32)p); +#endif + /* prevent specul excute */ + smp_mb(); +} + + +static inline int pmd_none(pmd_t pmd) +{ + return pmd_val(pmd) == __pa(invalid_pte_table); +} + +#define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK) + +static inline int pmd_present(pmd_t pmd) +{ + return (pmd_val(pmd) != __pa(invalid_pte_table)); +} + +static inline void pmd_clear(pmd_t *p) +{ + pmd_val(*p) = (__pa(invalid_pte_table)); +#if defined(CONFIG_CPU_NEED_TLBSYNC) + dcache_wb_line((u32)p); +#endif +} + +/* + * The following only work if pte_present() is true. + * Undefined behaviour if not.. 
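+ *
+ * _PAGE_MODIFIED/_PAGE_ACCESSED are the software dirty/young bits;
+ * the hardware only honours _PAGE_DIRTY (write enable) and
+ * _PAGE_VALID. The helpers below set a hardware bit only when the
+ * matching permission allows it, so the first write to or reference
+ * of a clean/old page still faults and the handler can update the
+ * software state. Rough lifecycle:
+ *
+ *    pte = pte_mkwrite(pte);    // _PAGE_WRITE only, hw still read-only
+ *    ...                        // first store traps
+ *    pte = pte_mkdirty(pte);    // _PAGE_MODIFIED plus _PAGE_DIRTY,
+ *                               // because _PAGE_WRITE is already set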
+ */
+static inline int pte_read(pte_t pte)
+{
+	return pte.pte_low & _PAGE_READ;
+}
+
+static inline int pte_write(pte_t pte)
+{
+	return (pte).pte_low & _PAGE_WRITE;
+}
+
+static inline int pte_dirty(pte_t pte)
+{
+	return (pte).pte_low & _PAGE_MODIFIED;
+}
+
+static inline int pte_young(pte_t pte)
+{
+	return (pte).pte_low & _PAGE_ACCESSED;
+}
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
+	return pte;
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+	pte_val(pte) &= ~(_PAGE_MODIFIED|_PAGE_DIRTY);
+	return pte;
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+	pte_val(pte) &= ~(_PAGE_ACCESSED|_PAGE_VALID);
+	return pte;
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+	pte_val(pte) |= _PAGE_WRITE;
+	if (pte_val(pte) & _PAGE_MODIFIED)
+		pte_val(pte) |= _PAGE_DIRTY;
+	return pte;
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+	pte_val(pte) |= _PAGE_MODIFIED;
+	if (pte_val(pte) & _PAGE_WRITE)
+		pte_val(pte) |= _PAGE_DIRTY;
+	return pte;
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+	pte_val(pte) |= _PAGE_ACCESSED;
+	if (pte_val(pte) & _PAGE_READ)
+		pte_val(pte) |= _PAGE_VALID;
+	return pte;
+}
+
+#define __pgd_offset(address)	pgd_index(address)
+#define __pud_offset(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
+#define __pmd_offset(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(address)	pgd_offset(&init_mm, address)
+
+#define pgd_index(address)	((address) >> PGDIR_SHIFT)
+
+/*
+ * Macro to mark a page protection value as "uncacheable".  Note that
+ * "protection" is really a misnomer here as the protection value
+ * contains the memory attribute bits, dirty bits, and various other
+ * bits as well.
+ */
+#define pgprot_noncached pgprot_noncached
+
+static inline pgprot_t pgprot_noncached(pgprot_t _prot)
+{
+	unsigned long prot = pgprot_val(_prot);
+
+	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;
+
+	return __pgprot(prot);
+}
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+	return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
+		(pgprot_val(newprot)));
+}
+
+/* to find an entry in a page-table-directory */
+static inline pgd_t *pgd_offset(struct mm_struct *mm, unsigned long address)
+{
+	return mm->pgd + pgd_index(address);
+}
+
+/* Find an entry in the third-level page table..
*/
+static inline pte_t *pte_offset(pmd_t *dir, unsigned long address)
+{
+	return (pte_t *) (pmd_page_vaddr(*dir)) +
+		((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
+}
+
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+extern void paging_init(void);
+
+extern void show_jtlb_table(void);
+
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
+		pte_t *pte);
+
+/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
+#define kern_addr_valid(addr)	(1)
+
+/*
+ * No page table caches to initialise
+ */
+#define pgtable_cache_init()	do {} while (0)
+
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+	remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+#include <asm-generic/pgtable.h>
+
+#endif /* __ASM_CSKY_PGTABLE_H */
diff --git a/arch/csky/include/asm/processor.h b/arch/csky/include/asm/processor.h
new file mode 100644
index 000000000000..5ad4f0b83092
--- /dev/null
+++ b/arch/csky/include/asm/processor.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_PROCESSOR_H
+#define __ASM_CSKY_PROCESSOR_H
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr() ({ __label__ _l; _l: &&_l; })
+
+#include <linux/bitops.h>
+#include <asm/segment.h>
+#include <asm/ptrace.h>
+#include <asm/current.h>
+#include <asm/cache.h>
+#include <abi/reg_ops.h>
+#include <abi/regdef.h>
+#ifdef CONFIG_CPU_HAS_FPU
+#include <abi/fpu.h>
+#endif
+
+struct cpuinfo_csky {
+	unsigned long udelay_val;
+	unsigned long asid_cache;
+	/*
+	 * Capability and feature descriptor structure for CSKY CPU
+	 */
+	unsigned long options;
+	unsigned int processor_id[4];
+	unsigned int fpu_id;
+} __aligned(SMP_CACHE_BYTES);
+
+extern struct cpuinfo_csky cpu_data[];
+
+/*
+ * User space process size: 2GB. This is hardcoded into a few places,
+ * so don't change it unless you know what you are doing.
+ */
+#define TASK_SIZE	0x7fff8000UL
+
+#ifdef __KERNEL__
+#define STACK_TOP	TASK_SIZE
+#define STACK_TOP_MAX	STACK_TOP
+#endif
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE	(TASK_SIZE / 3)
+
+struct thread_struct {
+	unsigned long  ksp;       /* kernel stack pointer */
+	unsigned long  sr;        /* saved status register */
+	unsigned long  esp0;      /* points to SR of stack frame */
+	unsigned long  hi;
+	unsigned long  lo;
+
+	/* Other stuff associated with the thread. */
+	unsigned long address;      /* Last user fault */
+	unsigned long error_code;
+
+	/* FPU regs */
+	struct user_fp __aligned(16) user_fp;
+};
+
+#define INIT_THREAD  { \
+	.ksp = (unsigned long) init_thread_union.stack + THREAD_SIZE, \
+	.sr = DEFAULT_PSR_VALUE, \
+}
+
+/*
+ * Do necessary setup to start up a newly executed thread.
+ *
+ * pass the data segment into user programs if it exists,
+ * it can't hurt anything as far as I can tell
+ */
+#define start_thread(_regs, _pc, _usp)					\
+do {									\
+	set_fs(USER_DS); /* reads from user space */			\
+	(_regs)->pc = (_pc);						\
+	(_regs)->regs[1] = 0; /* ABIV1 is R7, uClibc_main rtdl arg */	\
+	(_regs)->regs[2] = 0;						\
+	(_regs)->regs[3] = 0; /* ABIV2 is R7, use it?
*/ \ + (_regs)->sr &= ~PS_S; \ + (_regs)->usp = (_usp); \ +} while (0) + +/* Forward declaration, a strange C thing */ +struct task_struct; + +/* Free all resources held by a thread. */ +static inline void release_thread(struct task_struct *dead_task) +{ +} + +/* Prepare to copy thread state - unlazy all lazy status */ +#define prepare_to_copy(tsk) do { } while (0) + +extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); + +#define copy_segments(tsk, mm) do { } while (0) +#define release_segments(mm) do { } while (0) +#define forget_segments() do { } while (0) + +extern unsigned long thread_saved_pc(struct task_struct *tsk); + +unsigned long get_wchan(struct task_struct *p); + +#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc) +#define KSTK_ESP(tsk) (task_pt_regs(tsk)->usp) + +#define task_pt_regs(p) \ + ((struct pt_regs *)(THREAD_SIZE + p->stack) - 1) + +#define cpu_relax() barrier() + +#endif /* __ASM_CSKY_PROCESSOR_H */ diff --git a/arch/csky/include/asm/reg_ops.h b/arch/csky/include/asm/reg_ops.h new file mode 100644 index 000000000000..cccf7d525fe2 --- /dev/null +++ b/arch/csky/include/asm/reg_ops.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_REGS_OPS_H +#define __ASM_REGS_OPS_H + +#define mfcr(reg) \ +({ \ + unsigned int tmp; \ + asm volatile( \ + "mfcr %0, "reg"\n" \ + : "=r"(tmp) \ + : \ + : "memory"); \ + tmp; \ +}) + +#define mtcr(reg, val) \ +({ \ + asm volatile( \ + "mtcr %0, "reg"\n" \ + : \ + : "r"(val) \ + : "memory"); \ +}) + +#endif /* __ASM_REGS_OPS_H */ diff --git a/arch/csky/include/asm/segment.h b/arch/csky/include/asm/segment.h new file mode 100644 index 000000000000..ffdc4c47ff43 --- /dev/null +++ b/arch/csky/include/asm/segment.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_CSKY_SEGMENT_H +#define __ASM_CSKY_SEGMENT_H + +typedef struct { + unsigned long seg; +} mm_segment_t; + +#define KERNEL_DS ((mm_segment_t) { 0xFFFFFFFF }) +#define get_ds() KERNEL_DS + +#define USER_DS ((mm_segment_t) { 0x80000000UL }) +#define get_fs() (current_thread_info()->addr_limit) +#define set_fs(x) (current_thread_info()->addr_limit = (x)) +#define segment_eq(a, b) ((a).seg == (b).seg) + +#endif /* __ASM_CSKY_SEGMENT_H */ diff --git a/arch/csky/include/asm/shmparam.h b/arch/csky/include/asm/shmparam.h new file mode 100644 index 000000000000..efafe4c79fed --- /dev/null +++ b/arch/csky/include/asm/shmparam.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
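A note on the segment scheme defined in segment.h above: the mm_segment_t limit is what access_ok() (added later in this patch, in uaccess.h) compares user pointers against. The following user-space model of that range check is purely illustrative and is not part of the patch; the function names are made up for the example:

	#include <stdio.h>

	/* Stand-in for the kernel's mm_segment_t: KERNEL_DS (0xFFFFFFFF)
	 * passes everything, USER_DS (0x80000000) rejects kernel addresses.
	 * Mirrors the limit comparison in access_ok(); illustrative only. */
	typedef struct { unsigned long seg; } mm_segment_t;

	static int range_ok(mm_segment_t fs, unsigned long addr,
			    unsigned long size)
	{
		return (addr < fs.seg) && (addr + size < fs.seg);
	}

	int main(void)
	{
		mm_segment_t user_ds = { 0x80000000UL };

		/* a buffer in the user half of the address space passes */
		printf("%d\n", range_ok(user_ds, 0x10000000UL, 4096)); /* 1 */
		/* a kernel address does not */
		printf("%d\n", range_ok(user_ds, 0xc0000000UL, 4096)); /* 0 */
		return 0;
	}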
+ +#ifndef __ASM_CSKY_SHMPARAM_H +#define __ASM_CSKY_SHMPARAM_H + +#define SHMLBA (4 * PAGE_SIZE) + +#define __ARCH_FORCE_SHMLBA + +#endif /* __ASM_CSKY_SHMPARAM_H */ diff --git a/arch/csky/include/asm/smp.h b/arch/csky/include/asm/smp.h new file mode 100644 index 000000000000..4a929c4d6437 --- /dev/null +++ b/arch/csky/include/asm/smp.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_SMP_H +#define __ASM_CSKY_SMP_H + +#include <linux/cpumask.h> +#include <linux/irqreturn.h> +#include <linux/threads.h> + +#ifdef CONFIG_SMP + +void __init setup_smp(void); + +void __init setup_smp_ipi(void); + +void arch_send_call_function_ipi_mask(struct cpumask *mask); + +void arch_send_call_function_single_ipi(int cpu); + +void __init set_send_ipi(void (*func)(const struct cpumask *mask), int irq); + +#define raw_smp_processor_id() (current_thread_info()->cpu) + +#endif /* CONFIG_SMP */ + +#endif /* __ASM_CSKY_SMP_H */ diff --git a/arch/csky/include/asm/spinlock.h b/arch/csky/include/asm/spinlock.h new file mode 100644 index 000000000000..7cf3f2b34cea --- /dev/null +++ b/arch/csky/include/asm/spinlock.h @@ -0,0 +1,256 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_SPINLOCK_H +#define __ASM_CSKY_SPINLOCK_H + +#include <linux/spinlock_types.h> +#include <asm/barrier.h> + +#ifdef CONFIG_QUEUED_RWLOCKS + +/* + * Ticket-based spin-locking. + */ +static inline void arch_spin_lock(arch_spinlock_t *lock) +{ + arch_spinlock_t lockval; + u32 ticket_next = 1 << TICKET_NEXT; + u32 *p = &lock->lock; + u32 tmp; + + asm volatile ( + "1: ldex.w %0, (%2) \n" + " mov %1, %0 \n" + " add %0, %3 \n" + " stex.w %0, (%2) \n" + " bez %0, 1b \n" + : "=&r" (tmp), "=&r" (lockval) + : "r"(p), "r"(ticket_next) + : "cc"); + + while (lockval.tickets.next != lockval.tickets.owner) + lockval.tickets.owner = READ_ONCE(lock->tickets.owner); + + smp_mb(); +} + +static inline int arch_spin_trylock(arch_spinlock_t *lock) +{ + u32 tmp, contended, res; + u32 ticket_next = 1 << TICKET_NEXT; + u32 *p = &lock->lock; + + do { + asm volatile ( + " ldex.w %0, (%3) \n" + " movi %2, 1 \n" + " rotli %1, %0, 16 \n" + " cmpne %1, %0 \n" + " bt 1f \n" + " movi %2, 0 \n" + " add %0, %0, %4 \n" + " stex.w %0, (%3) \n" + "1: \n" + : "=&r" (res), "=&r" (tmp), "=&r" (contended) + : "r"(p), "r"(ticket_next) + : "cc"); + } while (!res); + + if (!contended) + smp_mb(); + + return !contended; +} + +static inline void arch_spin_unlock(arch_spinlock_t *lock) +{ + smp_mb(); + WRITE_ONCE(lock->tickets.owner, lock->tickets.owner + 1); +} + +static inline int arch_spin_value_unlocked(arch_spinlock_t lock) +{ + return lock.tickets.owner == lock.tickets.next; +} + +static inline int arch_spin_is_locked(arch_spinlock_t *lock) +{ + return !arch_spin_value_unlocked(READ_ONCE(*lock)); +} + +static inline int arch_spin_is_contended(arch_spinlock_t *lock) +{ + struct __raw_tickets tickets = READ_ONCE(lock->tickets); + + return (tickets.next - tickets.owner) > 1; +} +#define arch_spin_is_contended arch_spin_is_contended + +#include <asm/qrwlock.h> + +/* See include/linux/spinlock.h */ +#define smp_mb__after_spinlock() smp_mb() + +#else /* CONFIG_QUEUED_RWLOCKS */ + +/* + * Test-and-set spin-locking. 
+ */ +static inline void arch_spin_lock(arch_spinlock_t *lock) +{ + u32 *p = &lock->lock; + u32 tmp; + + asm volatile ( + "1: ldex.w %0, (%1) \n" + " bnez %0, 1b \n" + " movi %0, 1 \n" + " stex.w %0, (%1) \n" + " bez %0, 1b \n" + : "=&r" (tmp) + : "r"(p) + : "cc"); + smp_mb(); +} + +static inline void arch_spin_unlock(arch_spinlock_t *lock) +{ + smp_mb(); + WRITE_ONCE(lock->lock, 0); +} + +static inline int arch_spin_trylock(arch_spinlock_t *lock) +{ + u32 *p = &lock->lock; + u32 tmp; + + asm volatile ( + "1: ldex.w %0, (%1) \n" + " bnez %0, 2f \n" + " movi %0, 1 \n" + " stex.w %0, (%1) \n" + " bez %0, 1b \n" + " movi %0, 0 \n" + "2: \n" + : "=&r" (tmp) + : "r"(p) + : "cc"); + + if (!tmp) + smp_mb(); + + return !tmp; +} + +#define arch_spin_is_locked(x) (READ_ONCE((x)->lock) != 0) + +/* + * read lock/unlock/trylock + */ +static inline void arch_read_lock(arch_rwlock_t *lock) +{ + u32 *p = &lock->lock; + u32 tmp; + + asm volatile ( + "1: ldex.w %0, (%1) \n" + " blz %0, 1b \n" + " addi %0, 1 \n" + " stex.w %0, (%1) \n" + " bez %0, 1b \n" + : "=&r" (tmp) + : "r"(p) + : "cc"); + smp_mb(); +} + +static inline void arch_read_unlock(arch_rwlock_t *lock) +{ + u32 *p = &lock->lock; + u32 tmp; + + smp_mb(); + asm volatile ( + "1: ldex.w %0, (%1) \n" + " subi %0, 1 \n" + " stex.w %0, (%1) \n" + " bez %0, 1b \n" + : "=&r" (tmp) + : "r"(p) + : "cc"); +} + +static inline int arch_read_trylock(arch_rwlock_t *lock) +{ + u32 *p = &lock->lock; + u32 tmp; + + asm volatile ( + "1: ldex.w %0, (%1) \n" + " blz %0, 2f \n" + " addi %0, 1 \n" + " stex.w %0, (%1) \n" + " bez %0, 1b \n" + " movi %0, 0 \n" + "2: \n" + : "=&r" (tmp) + : "r"(p) + : "cc"); + + if (!tmp) + smp_mb(); + + return !tmp; +} + +/* + * write lock/unlock/trylock + */ +static inline void arch_write_lock(arch_rwlock_t *lock) +{ + u32 *p = &lock->lock; + u32 tmp; + + asm volatile ( + "1: ldex.w %0, (%1) \n" + " bnez %0, 1b \n" + " subi %0, 1 \n" + " stex.w %0, (%1) \n" + " bez %0, 1b \n" + : "=&r" (tmp) + : "r"(p) + : "cc"); + smp_mb(); +} + +static inline void arch_write_unlock(arch_rwlock_t *lock) +{ + smp_mb(); + WRITE_ONCE(lock->lock, 0); +} + +static inline int arch_write_trylock(arch_rwlock_t *lock) +{ + u32 *p = &lock->lock; + u32 tmp; + + asm volatile ( + "1: ldex.w %0, (%1) \n" + " bnez %0, 2f \n" + " subi %0, 1 \n" + " stex.w %0, (%1) \n" + " bez %0, 1b \n" + " movi %0, 0 \n" + "2: \n" + : "=&r" (tmp) + : "r"(p) + : "cc"); + + if (!tmp) + smp_mb(); + + return !tmp; +} + +#endif /* CONFIG_QUEUED_RWLOCKS */ +#endif /* __ASM_CSKY_SPINLOCK_H */ diff --git a/arch/csky/include/asm/spinlock_types.h b/arch/csky/include/asm/spinlock_types.h new file mode 100644 index 000000000000..88b82438b182 --- /dev/null +++ b/arch/csky/include/asm/spinlock_types.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_SPINLOCK_TYPES_H +#define __ASM_CSKY_SPINLOCK_TYPES_H + +#ifndef __LINUX_SPINLOCK_TYPES_H +# error "please don't include this file directly" +#endif + +#define TICKET_NEXT 16 + +typedef struct { + union { + u32 lock; + struct __raw_tickets { + /* little endian */ + u16 owner; + u16 next; + } tickets; + }; +} arch_spinlock_t; + +#define __ARCH_SPIN_LOCK_UNLOCKED { { 0 } } + +#ifdef CONFIG_QUEUED_RWLOCKS +#include <asm-generic/qrwlock_types.h> + +#else /* CONFIG_NR_CPUS > 2 */ + +typedef struct { + u32 lock; +} arch_rwlock_t; + +#define __ARCH_RW_LOCK_UNLOCKED { 0 } + +#endif /* CONFIG_QUEUED_RWLOCKS */ +#endif /* __ASM_CSKY_SPINLOCK_TYPES_H */ diff --git a/arch/csky/include/asm/string.h b/arch/csky/include/asm/string.h new 
file mode 100644 index 000000000000..73142de18355 --- /dev/null +++ b/arch/csky/include/asm/string.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef _CSKY_STRING_MM_H_ +#define _CSKY_STRING_MM_H_ + +#ifndef __ASSEMBLY__ +#include <linux/types.h> +#include <linux/compiler.h> +#include <abi/string.h> +#endif + +#endif /* _CSKY_STRING_MM_H_ */ diff --git a/arch/csky/include/asm/switch_to.h b/arch/csky/include/asm/switch_to.h new file mode 100644 index 000000000000..35a39e88933d --- /dev/null +++ b/arch/csky/include/asm/switch_to.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_CSKY_SWITCH_TO_H +#define __ASM_CSKY_SWITCH_TO_H + +#include <linux/thread_info.h> +#ifdef CONFIG_CPU_HAS_FPU +#include <abi/fpu.h> +static inline void __switch_to_fpu(struct task_struct *prev, + struct task_struct *next) +{ + save_to_user_fp(&prev->thread.user_fp); + restore_from_user_fp(&next->thread.user_fp); +} +#else +static inline void __switch_to_fpu(struct task_struct *prev, + struct task_struct *next) +{} +#endif + +/* + * Context switching is now performed out-of-line in switch_to.S + */ +extern struct task_struct *__switch_to(struct task_struct *, + struct task_struct *); + +#define switch_to(prev, next, last) \ + do { \ + struct task_struct *__prev = (prev); \ + struct task_struct *__next = (next); \ + __switch_to_fpu(__prev, __next); \ + ((last) = __switch_to((prev), (next))); \ + } while (0) + +#endif /* __ASM_CSKY_SWITCH_TO_H */ diff --git a/arch/csky/include/asm/syscall.h b/arch/csky/include/asm/syscall.h new file mode 100644 index 000000000000..926a64a8b4ee --- /dev/null +++ b/arch/csky/include/asm/syscall.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_SYSCALL_H +#define __ASM_SYSCALL_H + +#include <linux/sched.h> +#include <linux/err.h> +#include <abi/regdef.h> + +static inline int +syscall_get_nr(struct task_struct *task, struct pt_regs *regs) +{ + return regs_syscallid(regs); +} + +static inline void +syscall_rollback(struct task_struct *task, struct pt_regs *regs) +{ + regs->a0 = regs->orig_a0; +} + +static inline long +syscall_get_error(struct task_struct *task, struct pt_regs *regs) +{ + unsigned long error = regs->a0; + + return IS_ERR_VALUE(error) ? 
error : 0;
+}
+
+static inline long
+syscall_get_return_value(struct task_struct *task, struct pt_regs *regs)
+{
+	return regs->a0;
+}
+
+static inline void
+syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
+		int error, long val)
+{
+	regs->a0 = (long) error ?: val;
+}
+
+static inline void
+syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
+	unsigned int i, unsigned int n, unsigned long *args)
+{
+	BUG_ON(i + n > 6);
+	if (i == 0) {
+		args[0] = regs->orig_a0;
+		args++;
+		i++;
+		n--;
+	}
+	/* args 1..5 are stored contiguously in pt_regs starting at a1 */
+	memcpy(args, &regs->a1 + (i - 1), n * sizeof(args[0]));
+}
+
+static inline void
+syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
+	unsigned int i, unsigned int n, const unsigned long *args)
+{
+	BUG_ON(i + n > 6);
+	if (i == 0) {
+		regs->orig_a0 = args[0];
+		args++;
+		i++;
+		n--;
+	}
+	memcpy(&regs->a1 + (i - 1), args, n * sizeof(regs->a1));
+}
+
+#endif	/* __ASM_SYSCALL_H */
diff --git a/arch/csky/include/asm/syscalls.h b/arch/csky/include/asm/syscalls.h
new file mode 100644
index 000000000000..5d48e5e0082e
--- /dev/null
+++ b/arch/csky/include/asm/syscalls.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_SYSCALLS_H
+#define __ASM_CSKY_SYSCALLS_H
+
+#include <asm-generic/syscalls.h>
+
+long sys_cacheflush(void __user *, unsigned long, int);
+
+long sys_set_thread_area(unsigned long addr);
+
+long sys_csky_fadvise64_64(int fd, int advice, loff_t offset, loff_t len);
+
+#endif /* __ASM_CSKY_SYSCALLS_H */
diff --git a/arch/csky/include/asm/thread_info.h b/arch/csky/include/asm/thread_info.h
new file mode 100644
index 000000000000..a2c69a7836f7
--- /dev/null
+++ b/arch/csky/include/asm/thread_info.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef _ASM_CSKY_THREAD_INFO_H
+#define _ASM_CSKY_THREAD_INFO_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/version.h>
+#include <asm/types.h>
+#include <asm/page.h>
+#include <asm/processor.h>
+
+struct thread_info {
+	struct task_struct	*task;
+	void			*dump_exec_domain;
+	unsigned long		flags;
+	int			preempt_count;
+	unsigned long		tp_value;
+	mm_segment_t		addr_limit;
+	struct restart_block	restart_block;
+	struct pt_regs		*regs;
+	unsigned int		cpu;
+};
+
+#define INIT_THREAD_INFO(tsk)			\
+{						\
+	.task		= &tsk,			\
+	.preempt_count	= INIT_PREEMPT_COUNT,	\
+	.addr_limit	= KERNEL_DS,		\
+	.cpu		= 0,			\
+	.restart_block = {			\
+		.fn = do_no_restart_syscall,	\
+	},					\
+}
+
+#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)
+
+static inline struct thread_info *current_thread_info(void)
+{
+	unsigned long sp;
+
+	asm volatile("mov %0, sp\n":"=r"(sp));
+
+	return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
+}
+
+#endif /* !__ASSEMBLY__ */
+
+/* entry.S relies on these definitions!
+ * bits 0-5 are tested at every exception exit + */ +#define TIF_SIGPENDING 0 /* signal pending */ +#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */ +#define TIF_NEED_RESCHED 2 /* rescheduling necessary */ +#define TIF_SYSCALL_TRACE 5 /* syscall trace active */ +#define TIF_DELAYED_TRACE 14 /* single step a syscall */ +#define TIF_POLLING_NRFLAG 16 /* poll_idle() is TIF_NEED_RESCHED */ +#define TIF_MEMDIE 18 /* is terminating due to OOM killer */ +#define TIF_FREEZE 19 /* thread is freezing for suspend */ +#define TIF_RESTORE_SIGMASK 20 /* restore signal mask in do_signal() */ +#define TIF_SECCOMP 21 /* secure computing */ + +#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) +#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) +#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) +#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) +#define _TIF_DELAYED_TRACE (1 << TIF_DELAYED_TRACE) +#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) +#define _TIF_MEMDIE (1 << TIF_MEMDIE) +#define _TIF_FREEZE (1 << TIF_FREEZE) +#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) +#define _TIF_SECCOMP (1 << TIF_SECCOMP) + +#endif /* _ASM_CSKY_THREAD_INFO_H */ diff --git a/arch/csky/include/asm/tlb.h b/arch/csky/include/asm/tlb.h new file mode 100644 index 000000000000..8c7cc097666f --- /dev/null +++ b/arch/csky/include/asm/tlb.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_CSKY_TLB_H +#define __ASM_CSKY_TLB_H + +#include <asm/cacheflush.h> + +#define tlb_start_vma(tlb, vma) \ + do { \ + if (!tlb->fullmm) \ + flush_cache_range(vma, vma->vm_start, vma->vm_end); \ + } while (0) + +#define tlb_end_vma(tlb, vma) \ + do { \ + if (!tlb->fullmm) \ + flush_tlb_range(vma, vma->vm_start, vma->vm_end); \ + } while (0) + +#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) + +#include <asm-generic/tlb.h> + +#endif /* __ASM_CSKY_TLB_H */ diff --git a/arch/csky/include/asm/tlbflush.h b/arch/csky/include/asm/tlbflush.h new file mode 100644 index 000000000000..6845b0667703 --- /dev/null +++ b/arch/csky/include/asm/tlbflush.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_TLBFLUSH_H +#define __ASM_TLBFLUSH_H + +/* + * TLB flushing: + * + * - flush_tlb_all() flushes all processes TLB entries + * - flush_tlb_mm(mm) flushes the specified mm context TLB entries + * - flush_tlb_page(vma, vmaddr) flushes one page + * - flush_tlb_range(vma, start, end) flushes a range of pages + * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages + */ +extern void flush_tlb_all(void); +extern void flush_tlb_mm(struct mm_struct *mm); +extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page); +extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end); +extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); + +extern void flush_tlb_one(unsigned long vaddr); + +#endif diff --git a/arch/csky/include/asm/traps.h b/arch/csky/include/asm/traps.h new file mode 100644 index 000000000000..1c081805b962 --- /dev/null +++ b/arch/csky/include/asm/traps.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
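The TIF_* bit numbers defined above are consumed by entry.S as a combined mask tested on return to user space. A small stand-alone illustration of how the bits are meant to compose (the mask name here is made up for the example; values copied from thread_info.h):

	#include <stdio.h>

	#define TIF_SIGPENDING    0
	#define TIF_NOTIFY_RESUME 1
	#define TIF_NEED_RESCHED  2

	/* hypothetical name; entry.S builds the same mask inline */
	#define _TIF_WORK_MASK ((1 << TIF_SIGPENDING) | \
				(1 << TIF_NOTIFY_RESUME) | \
				(1 << TIF_NEED_RESCHED))

	int main(void)
	{
		unsigned long flags = 1UL << TIF_NEED_RESCHED;

		/* non-zero means the exit path must take the slow route */
		printf("work pending: %s\n",
		       (flags & _TIF_WORK_MASK) ? "yes" : "no");
		return 0;
	}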
+ +#ifndef __ASM_CSKY_TRAPS_H +#define __ASM_CSKY_TRAPS_H + +#define VEC_RESET 0 +#define VEC_ALIGN 1 +#define VEC_ACCESS 2 +#define VEC_ZERODIV 3 +#define VEC_ILLEGAL 4 +#define VEC_PRIV 5 +#define VEC_TRACE 6 +#define VEC_BREAKPOINT 7 +#define VEC_UNRECOVER 8 +#define VEC_SOFTRESET 9 +#define VEC_AUTOVEC 10 +#define VEC_FAUTOVEC 11 +#define VEC_HWACCEL 12 + +#define VEC_TLBMISS 14 +#define VEC_TLBMODIFIED 15 + +#define VEC_TRAP0 16 +#define VEC_TRAP1 17 +#define VEC_TRAP2 18 +#define VEC_TRAP3 19 + +#define VEC_TLBINVALIDL 20 +#define VEC_TLBINVALIDS 21 + +#define VEC_PRFL 29 +#define VEC_FPE 30 + +extern void *vec_base[]; + +#define VEC_INIT(i, func) \ +do { \ + vec_base[i] = (void *)func; \ +} while (0) + +void csky_alignment(struct pt_regs *regs); + +#endif /* __ASM_CSKY_TRAPS_H */ diff --git a/arch/csky/include/asm/uaccess.h b/arch/csky/include/asm/uaccess.h new file mode 100644 index 000000000000..acaf0e210d81 --- /dev/null +++ b/arch/csky/include/asm/uaccess.h @@ -0,0 +1,416 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_CSKY_UACCESS_H +#define __ASM_CSKY_UACCESS_H + +/* + * User space memory access functions + */ +#include <linux/compiler.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/sched.h> +#include <linux/mm.h> +#include <linux/string.h> +#include <linux/version.h> +#include <asm/segment.h> + +#define VERIFY_READ 0 +#define VERIFY_WRITE 1 + +static inline int access_ok(int type, const void *addr, unsigned long size) +{ + unsigned long limit = current_thread_info()->addr_limit.seg; + + return (((unsigned long)addr < limit) && + ((unsigned long)(addr + size) < limit)); +} + +static inline int verify_area(int type, const void *addr, unsigned long size) +{ + return access_ok(type, addr, size) ? 0 : -EFAULT; +} + +#define __addr_ok(addr) (access_ok(VERIFY_READ, addr, 0)) + +extern int __put_user_bad(void); + +/* + * Tell gcc we read from memory instead of writing: this is because + * we do not write to any memory gcc knows about, so there are no + * aliasing issues. + */ + +/* + * These are the main single-value transfer routines. They automatically + * use the right size if we just have the right pointer type. + * + * This gets kind of ugly. We want to return _two_ values in "get_user()" + * and yet we don't want to do any pointers, because that is too much + * of a performance impact. Thus we have a few rather ugly macros here, + * and hide all the ugliness from the user. + * + * The "__xxx" versions of the user access functions are versions that + * do not verify the address space, that must have been done previously + * with a separate "access_ok()" call (this is used when we do multiple + * accesses to the same area of user memory). + * + * As we use the same address space for kernel and user data on + * Ckcore, we can just do these as direct assignments. (Of course, the + * exception handling means that it's no longer "just"...) 
+ */ + +#define put_user(x, ptr) \ + __put_user_check((x), (ptr), sizeof(*(ptr))) + +#define __put_user(x, ptr) \ + __put_user_nocheck((x), (ptr), sizeof(*(ptr))) + +#define __ptr(x) ((unsigned long *)(x)) + +#define get_user(x, ptr) \ + __get_user_check((x), (ptr), sizeof(*(ptr))) + +#define __get_user(x, ptr) \ + __get_user_nocheck((x), (ptr), sizeof(*(ptr))) + +#define __put_user_nocheck(x, ptr, size) \ +({ \ + long __pu_err = 0; \ + typeof(*(ptr)) *__pu_addr = (ptr); \ + typeof(*(ptr)) __pu_val = (typeof(*(ptr)))(x); \ + if (__pu_addr) \ + __put_user_size(__pu_val, (__pu_addr), (size), \ + __pu_err); \ + __pu_err; \ +}) + +#define __put_user_check(x, ptr, size) \ +({ \ + long __pu_err = -EFAULT; \ + typeof(*(ptr)) *__pu_addr = (ptr); \ + typeof(*(ptr)) __pu_val = (typeof(*(ptr)))(x); \ + if (access_ok(VERIFY_WRITE, __pu_addr, size) && __pu_addr) \ + __put_user_size(__pu_val, __pu_addr, (size), __pu_err); \ + __pu_err; \ +}) + +#define __put_user_size(x, ptr, size, retval) \ +do { \ + retval = 0; \ + switch (size) { \ + case 1: \ + __put_user_asm_b(x, ptr, retval); \ + break; \ + case 2: \ + __put_user_asm_h(x, ptr, retval); \ + break; \ + case 4: \ + __put_user_asm_w(x, ptr, retval); \ + break; \ + case 8: \ + __put_user_asm_64(x, ptr, retval); \ + break; \ + default: \ + __put_user_bad(); \ + } \ +} while (0) + +/* + * We don't tell gcc that we are accessing memory, but this is OK + * because we do not write to any memory gcc knows about, so there + * are no aliasing issues. + * + * Note that PC at a fault is the address *after* the faulting + * instruction. + */ +#define __put_user_asm_b(x, ptr, err) \ +do { \ + int errcode; \ + asm volatile( \ + "1: stb %1, (%2,0) \n" \ + " br 3f \n" \ + "2: mov %0, %3 \n" \ + " br 3f \n" \ + ".section __ex_table, \"a\" \n" \ + ".align 2 \n" \ + ".long 1b,2b \n" \ + ".previous \n" \ + "3: \n" \ + : "=r"(err), "=r"(x), "=r"(ptr), "=r"(errcode) \ + : "0"(err), "1"(x), "2"(ptr), "3"(-EFAULT) \ + : "memory"); \ +} while (0) + +#define __put_user_asm_h(x, ptr, err) \ +do { \ + int errcode; \ + asm volatile( \ + "1: sth %1, (%2,0) \n" \ + " br 3f \n" \ + "2: mov %0, %3 \n" \ + " br 3f \n" \ + ".section __ex_table, \"a\" \n" \ + ".align 2 \n" \ + ".long 1b,2b \n" \ + ".previous \n" \ + "3: \n" \ + : "=r"(err), "=r"(x), "=r"(ptr), "=r"(errcode) \ + : "0"(err), "1"(x), "2"(ptr), "3"(-EFAULT) \ + : "memory"); \ +} while (0) + +#define __put_user_asm_w(x, ptr, err) \ +do { \ + int errcode; \ + asm volatile( \ + "1: stw %1, (%2,0) \n" \ + " br 3f \n" \ + "2: mov %0, %3 \n" \ + " br 3f \n" \ + ".section __ex_table,\"a\" \n" \ + ".align 2 \n" \ + ".long 1b, 2b \n" \ + ".previous \n" \ + "3: \n" \ + : "=r"(err), "=r"(x), "=r"(ptr), "=r"(errcode) \ + : "0"(err), "1"(x), "2"(ptr), "3"(-EFAULT) \ + : "memory"); \ +} while (0) + +#define __put_user_asm_64(x, ptr, err) \ +do { \ + int tmp; \ + int errcode; \ + typeof(*(ptr))src = (typeof(*(ptr)))x; \ + typeof(*(ptr))*psrc = &src; \ + \ + asm volatile( \ + " ldw %3, (%1, 0) \n" \ + "1: stw %3, (%2, 0) \n" \ + " ldw %3, (%1, 4) \n" \ + "2: stw %3, (%2, 4) \n" \ + " br 4f \n" \ + "3: mov %0, %4 \n" \ + " br 4f \n" \ + ".section __ex_table, \"a\" \n" \ + ".align 2 \n" \ + ".long 1b, 3b \n" \ + ".long 2b, 3b \n" \ + ".previous \n" \ + "4: \n" \ + : "=r"(err), "=r"(psrc), "=r"(ptr), \ + "=r"(tmp), "=r"(errcode) \ + : "0"(err), "1"(psrc), "2"(ptr), "3"(0), "4"(-EFAULT) \ + : "memory"); \ +} while (0) + +#define __get_user_nocheck(x, ptr, size) \ +({ \ + long __gu_err; \ + __get_user_size(x, (ptr), (size), __gu_err); \ + 
__gu_err; \ +}) + +#define __get_user_check(x, ptr, size) \ +({ \ + int __gu_err = -EFAULT; \ + const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \ + if (access_ok(VERIFY_READ, __gu_ptr, size) && __gu_ptr) \ + __get_user_size(x, __gu_ptr, size, __gu_err); \ + __gu_err; \ +}) + +#define __get_user_size(x, ptr, size, retval) \ +do { \ + switch (size) { \ + case 1: \ + __get_user_asm_common((x), ptr, "ldb", retval); \ + break; \ + case 2: \ + __get_user_asm_common((x), ptr, "ldh", retval); \ + break; \ + case 4: \ + __get_user_asm_common((x), ptr, "ldw", retval); \ + break; \ + default: \ + x = 0; \ + (retval) = __get_user_bad(); \ + } \ +} while (0) + +#define __get_user_asm_common(x, ptr, ins, err) \ +do { \ + int errcode; \ + asm volatile( \ + "1: " ins " %1, (%4,0) \n" \ + " br 3f \n" \ + /* Fix up codes */ \ + "2: mov %0, %2 \n" \ + " movi %1, 0 \n" \ + " br 3f \n" \ + ".section __ex_table,\"a\" \n" \ + ".align 2 \n" \ + ".long 1b, 2b \n" \ + ".previous \n" \ + "3: \n" \ + : "=r"(err), "=r"(x), "=r"(errcode) \ + : "0"(0), "r"(ptr), "2"(-EFAULT) \ + : "memory"); \ +} while (0) + +extern int __get_user_bad(void); + +#define __copy_user(to, from, n) \ +do { \ + int w0, w1, w2, w3; \ + asm volatile( \ + "0: cmpnei %1, 0 \n" \ + " bf 8f \n" \ + " mov %3, %1 \n" \ + " or %3, %2 \n" \ + " andi %3, 3 \n" \ + " cmpnei %3, 0 \n" \ + " bf 1f \n" \ + " br 5f \n" \ + "1: cmplti %0, 16 \n" /* 4W */ \ + " bt 3f \n" \ + " ldw %3, (%2, 0) \n" \ + " ldw %4, (%2, 4) \n" \ + " ldw %5, (%2, 8) \n" \ + " ldw %6, (%2, 12) \n" \ + "2: stw %3, (%1, 0) \n" \ + "9: stw %4, (%1, 4) \n" \ + "10: stw %5, (%1, 8) \n" \ + "11: stw %6, (%1, 12) \n" \ + " addi %2, 16 \n" \ + " addi %1, 16 \n" \ + " subi %0, 16 \n" \ + " br 1b \n" \ + "3: cmplti %0, 4 \n" /* 1W */ \ + " bt 5f \n" \ + " ldw %3, (%2, 0) \n" \ + "4: stw %3, (%1, 0) \n" \ + " addi %2, 4 \n" \ + " addi %1, 4 \n" \ + " subi %0, 4 \n" \ + " br 3b \n" \ + "5: cmpnei %0, 0 \n" /* 1B */ \ + " bf 8f \n" \ + " ldb %3, (%2, 0) \n" \ + "6: stb %3, (%1, 0) \n" \ + " addi %2, 1 \n" \ + " addi %1, 1 \n" \ + " subi %0, 1 \n" \ + " br 5b \n" \ + "7: br 8f \n" \ + ".section __ex_table, \"a\" \n" \ + ".align 2 \n" \ + ".long 2b, 7b \n" \ + ".long 9b, 7b \n" \ + ".long 10b, 7b \n" \ + ".long 11b, 7b \n" \ + ".long 4b, 7b \n" \ + ".long 6b, 7b \n" \ + ".previous \n" \ + "8: \n" \ + : "=r"(n), "=r"(to), "=r"(from), "=r"(w0), \ + "=r"(w1), "=r"(w2), "=r"(w3) \ + : "0"(n), "1"(to), "2"(from) \ + : "memory"); \ +} while (0) + +#define __copy_user_zeroing(to, from, n) \ +do { \ + int tmp; \ + int nsave; \ + asm volatile( \ + "0: cmpnei %1, 0 \n" \ + " bf 7f \n" \ + " mov %3, %1 \n" \ + " or %3, %2 \n" \ + " andi %3, 3 \n" \ + " cmpnei %3, 0 \n" \ + " bf 1f \n" \ + " br 5f \n" \ + "1: cmplti %0, 16 \n" \ + " bt 3f \n" \ + "2: ldw %3, (%2, 0) \n" \ + "10: ldw %4, (%2, 4) \n" \ + " stw %3, (%1, 0) \n" \ + " stw %4, (%1, 4) \n" \ + "11: ldw %3, (%2, 8) \n" \ + "12: ldw %4, (%2, 12) \n" \ + " stw %3, (%1, 8) \n" \ + " stw %4, (%1, 12) \n" \ + " addi %2, 16 \n" \ + " addi %1, 16 \n" \ + " subi %0, 16 \n" \ + " br 1b \n" \ + "3: cmplti %0, 4 \n" \ + " bt 5f \n" \ + "4: ldw %3, (%2, 0) \n" \ + " stw %3, (%1, 0) \n" \ + " addi %2, 4 \n" \ + " addi %1, 4 \n" \ + " subi %0, 4 \n" \ + " br 3b \n" \ + "5: cmpnei %0, 0 \n" \ + " bf 7f \n" \ + "6: ldb %3, (%2, 0) \n" \ + " stb %3, (%1, 0) \n" \ + " addi %2, 1 \n" \ + " addi %1, 1 \n" \ + " subi %0, 1 \n" \ + " br 5b \n" \ + "8: mov %3, %0 \n" \ + " movi %4, 0 \n" \ + "9: stb %4, (%1, 0) \n" \ + " addi %1, 1 \n" \ + " subi %3, 1 \n" \ + " cmpnei 
%3, 0 \n" \ + " bt 9b \n" \ + " br 7f \n" \ + ".section __ex_table, \"a\" \n" \ + ".align 2 \n" \ + ".long 2b, 8b \n" \ + ".long 10b, 8b \n" \ + ".long 11b, 8b \n" \ + ".long 12b, 8b \n" \ + ".long 4b, 8b \n" \ + ".long 6b, 8b \n" \ + ".previous \n" \ + "7: \n" \ + : "=r"(n), "=r"(to), "=r"(from), "=r"(nsave), \ + "=r"(tmp) \ + : "0"(n), "1"(to), "2"(from) \ + : "memory"); \ +} while (0) + +unsigned long raw_copy_from_user(void *to, const void *from, unsigned long n); +unsigned long raw_copy_to_user(void *to, const void *from, unsigned long n); + +unsigned long clear_user(void *to, unsigned long n); +unsigned long __clear_user(void __user *to, unsigned long n); + +long strncpy_from_user(char *dst, const char *src, long count); +long __strncpy_from_user(char *dst, const char *src, long count); + +/* + * Return the size of a string (including the ending 0) + * + * Return 0 on exception, a value greater than N if too long + */ +long strnlen_user(const char *src, long n); + +#define strlen_user(str) strnlen_user(str, 32767) + +struct exception_table_entry { + unsigned long insn; + unsigned long nextinsn; +}; + +extern int fixup_exception(struct pt_regs *regs); + +#endif /* __ASM_CSKY_UACCESS_H */ diff --git a/arch/csky/include/asm/unistd.h b/arch/csky/include/asm/unistd.h new file mode 100644 index 000000000000..284487477a61 --- /dev/null +++ b/arch/csky/include/asm/unistd.h @@ -0,0 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <uapi/asm/unistd.h> diff --git a/arch/csky/include/asm/vdso.h b/arch/csky/include/asm/vdso.h new file mode 100644 index 000000000000..d963d691f3a1 --- /dev/null +++ b/arch/csky/include/asm/vdso.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_VDSO_H +#define __ASM_CSKY_VDSO_H + +#include <abi/vdso.h> + +struct csky_vdso { + unsigned short rt_signal_retcode[4]; +}; + +#endif /* __ASM_CSKY_VDSO_H */ diff --git a/arch/csky/include/uapi/asm/Kbuild b/arch/csky/include/uapi/asm/Kbuild new file mode 100644 index 000000000000..e02fd44e6447 --- /dev/null +++ b/arch/csky/include/uapi/asm/Kbuild @@ -0,0 +1,32 @@ +include include/uapi/asm-generic/Kbuild.asm + +header-y += cachectl.h + +generic-y += auxvec.h +generic-y += param.h +generic-y += bpf_perf_event.h +generic-y += errno.h +generic-y += fcntl.h +generic-y += ioctl.h +generic-y += ioctls.h +generic-y += ipcbuf.h +generic-y += shmbuf.h +generic-y += bitsperlong.h +generic-y += mman.h +generic-y += msgbuf.h +generic-y += poll.h +generic-y += posix_types.h +generic-y += resource.h +generic-y += sembuf.h +generic-y += siginfo.h +generic-y += signal.h +generic-y += socket.h +generic-y += sockios.h +generic-y += statfs.h +generic-y += stat.h +generic-y += setup.h +generic-y += swab.h +generic-y += termbits.h +generic-y += termios.h +generic-y += types.h +generic-y += ucontext.h diff --git a/arch/csky/include/uapi/asm/byteorder.h b/arch/csky/include/uapi/asm/byteorder.h new file mode 100644 index 000000000000..b079ec715cdf --- /dev/null +++ b/arch/csky/include/uapi/asm/byteorder.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
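For context on the uaccess macros above: fixup_exception() consults the insn/nextinsn pairs that the __ex_table sections record for every faultable load/store. A simplified sketch of that lookup, assuming a plain linear scan (real kernels usually binary-search a sorted table, so treat this as illustration only):

	/* restates the struct from uaccess.h for a self-contained example */
	struct exception_table_entry {
		unsigned long insn;	/* address that may fault */
		unsigned long nextinsn;	/* address to resume at */
	};

	static unsigned long
	search_fixup(const struct exception_table_entry *tbl, int n,
		     unsigned long fault_pc)
	{
		int i;

		for (i = 0; i < n; i++)
			if (tbl[i].insn == fault_pc)
				return tbl[i].nextinsn;	/* recover here */
		return 0;	/* no fixup: the fault is fatal */
	}

A fault handler would set regs->pc to the returned nextinsn, which in the macros above lands on the error-handling branch that sets -EFAULT.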
+ +#ifndef __ASM_CSKY_BYTEORDER_H +#define __ASM_CSKY_BYTEORDER_H + +#include <linux/byteorder/little_endian.h> + +#endif /* __ASM_CSKY_BYTEORDER_H */ diff --git a/arch/csky/include/uapi/asm/cachectl.h b/arch/csky/include/uapi/asm/cachectl.h new file mode 100644 index 000000000000..ddf2f39aa925 --- /dev/null +++ b/arch/csky/include/uapi/asm/cachectl.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_CACHECTL_H +#define __ASM_CSKY_CACHECTL_H + +/* + * See "man cacheflush" + */ +#define ICACHE (1<<0) +#define DCACHE (1<<1) +#define BCACHE (ICACHE|DCACHE) + +#endif /* __ASM_CSKY_CACHECTL_H */ diff --git a/arch/csky/include/uapi/asm/ptrace.h b/arch/csky/include/uapi/asm/ptrace.h new file mode 100644 index 000000000000..f10d02c8b09e --- /dev/null +++ b/arch/csky/include/uapi/asm/ptrace.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef _CSKY_PTRACE_H +#define _CSKY_PTRACE_H + +#ifndef __ASSEMBLY__ + +struct pt_regs { + unsigned long tls; + unsigned long lr; + unsigned long pc; + unsigned long sr; + unsigned long usp; + + /* + * a0, a1, a2, a3: + * abiv1: r2, r3, r4, r5 + * abiv2: r0, r1, r2, r3 + */ + unsigned long orig_a0; + unsigned long a0; + unsigned long a1; + unsigned long a2; + unsigned long a3; + + /* + * ABIV2: r4 ~ r13 + * ABIV1: r6 ~ r14, r1 + */ + unsigned long regs[10]; + +#if defined(__CSKYABIV2__) + /* r16 ~ r30 */ + unsigned long exregs[15]; + + unsigned long rhi; + unsigned long rlo; + unsigned long pad; /* reserved */ +#endif +}; + +struct user_fp { + unsigned long vr[96]; + unsigned long fcr; + unsigned long fesr; + unsigned long fid; + unsigned long reserved; +}; + +/* + * Switch stack for switch_to after push pt_regs. + * + * ABI_CSKYV2: r4 ~ r11, r15 ~ r17, r26 ~ r30; + * ABI_CSKYV1: r8 ~ r14, r15; + */ +struct switch_stack { +#if defined(__CSKYABIV2__) + unsigned long r4; + unsigned long r5; + unsigned long r6; + unsigned long r7; + unsigned long r8; + unsigned long r9; + unsigned long r10; + unsigned long r11; +#else + unsigned long r8; + unsigned long r9; + unsigned long r10; + unsigned long r11; + unsigned long r12; + unsigned long r13; + unsigned long r14; +#endif + unsigned long r15; +#if defined(__CSKYABIV2__) + unsigned long r16; + unsigned long r17; + unsigned long r26; + unsigned long r27; + unsigned long r28; + unsigned long r29; + unsigned long r30; +#endif +}; + +#ifdef __KERNEL__ + +#define PS_S 0x80000000 /* Supervisor Mode */ + +#define arch_has_single_step() (1) +#define current_pt_regs() \ +({ (struct pt_regs *)((char *)current_thread_info() + THREAD_SIZE) - 1; }) + +#define user_stack_pointer(regs) ((regs)->usp) + +#define user_mode(regs) (!((regs)->sr & PS_S)) +#define instruction_pointer(regs) ((regs)->pc) +#define profile_pc(regs) instruction_pointer(regs) + +#endif /* __KERNEL__ */ +#endif /* __ASSEMBLY__ */ +#endif /* _CSKY_PTRACE_H */ diff --git a/arch/csky/include/uapi/asm/sigcontext.h b/arch/csky/include/uapi/asm/sigcontext.h new file mode 100644 index 000000000000..e81e7ff11e36 --- /dev/null +++ b/arch/csky/include/uapi/asm/sigcontext.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
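The ICACHE/DCACHE/BCACHE flags in cachectl.h above pair with the cacheflush syscall wired up a few hunks below. A sketch of how user space (for example a JIT emitting instructions) might invoke it; this builds only with a csky toolchain that installs these uapi headers, and is untested illustration:

	#include <unistd.h>
	#include <sys/syscall.h>
	#include <asm/unistd.h>		/* __NR_cacheflush, from this patch */

	/* values from cachectl.h, repeated for a self-contained example */
	#define ICACHE (1 << 0)
	#define DCACHE (1 << 1)
	#define BCACHE (ICACHE | DCACHE)

	/* flush both caches over a freshly written code region */
	static int flush_region(void *addr, unsigned long len)
	{
		return syscall(__NR_cacheflush, addr, len, BCACHE);
	}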
+ +#ifndef __ASM_CSKY_SIGCONTEXT_H +#define __ASM_CSKY_SIGCONTEXT_H + +#include <asm/ptrace.h> + +struct sigcontext { + struct pt_regs sc_pt_regs; + struct user_fp sc_user_fp; +}; + +#endif /* __ASM_CSKY_SIGCONTEXT_H */ diff --git a/arch/csky/include/uapi/asm/unistd.h b/arch/csky/include/uapi/asm/unistd.h new file mode 100644 index 000000000000..224c9a9ab45b --- /dev/null +++ b/arch/csky/include/uapi/asm/unistd.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#define __ARCH_WANT_SYS_CLONE +#include <asm-generic/unistd.h> + +#define __NR_set_thread_area (__NR_arch_specific_syscall + 0) +__SYSCALL(__NR_set_thread_area, sys_set_thread_area) +#define __NR_cacheflush (__NR_arch_specific_syscall + 1) +__SYSCALL(__NR_cacheflush, sys_cacheflush) diff --git a/arch/csky/kernel/Makefile b/arch/csky/kernel/Makefile new file mode 100644 index 000000000000..4422de756cde --- /dev/null +++ b/arch/csky/kernel/Makefile @@ -0,0 +1,8 @@ +extra-y := head.o vmlinux.lds + +obj-y += entry.o atomic.o signal.o traps.o irq.o time.o vdso.o +obj-y += power.o syscall.o syscall_table.o setup.o +obj-y += process.o cpu-probe.o ptrace.o dumpstack.o + +obj-$(CONFIG_MODULES) += module.o +obj-$(CONFIG_SMP) += smp.o diff --git a/arch/csky/kernel/asm-offsets.c b/arch/csky/kernel/asm-offsets.c new file mode 100644 index 000000000000..8d3ed811321f --- /dev/null +++ b/arch/csky/kernel/asm-offsets.c @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/sched.h> +#include <linux/kernel_stat.h> +#include <linux/kbuild.h> +#include <abi/regdef.h> + +int main(void) +{ + /* offsets into the task struct */ + DEFINE(TASK_STATE, offsetof(struct task_struct, state)); + DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack)); + DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags)); + DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace)); + DEFINE(TASK_THREAD, offsetof(struct task_struct, thread)); + DEFINE(TASK_MM, offsetof(struct task_struct, mm)); + DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm)); + + /* offsets into the thread struct */ + DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp)); + DEFINE(THREAD_SR, offsetof(struct thread_struct, sr)); + DEFINE(THREAD_ESP0, offsetof(struct thread_struct, esp0)); + DEFINE(THREAD_FESR, offsetof(struct thread_struct, user_fp.fesr)); + DEFINE(THREAD_FCR, offsetof(struct thread_struct, user_fp.fcr)); + DEFINE(THREAD_FPREG, offsetof(struct thread_struct, user_fp.vr)); + DEFINE(THREAD_DSPHI, offsetof(struct thread_struct, hi)); + DEFINE(THREAD_DSPLO, offsetof(struct thread_struct, lo)); + + /* offsets into the thread_info struct */ + DEFINE(TINFO_FLAGS, offsetof(struct thread_info, flags)); + DEFINE(TINFO_PREEMPT, offsetof(struct thread_info, preempt_count)); + DEFINE(TINFO_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); + DEFINE(TINFO_TP_VALUE, offsetof(struct thread_info, tp_value)); + DEFINE(TINFO_TASK, offsetof(struct thread_info, task)); + + /* offsets into the pt_regs */ + DEFINE(PT_PC, offsetof(struct pt_regs, pc)); + DEFINE(PT_ORIG_AO, offsetof(struct pt_regs, orig_a0)); + DEFINE(PT_SR, offsetof(struct pt_regs, sr)); + + DEFINE(PT_A0, offsetof(struct pt_regs, a0)); + DEFINE(PT_A1, offsetof(struct pt_regs, a1)); + DEFINE(PT_A2, offsetof(struct pt_regs, a2)); + DEFINE(PT_A3, offsetof(struct pt_regs, a3)); + DEFINE(PT_REGS0, offsetof(struct pt_regs, regs[0])); + DEFINE(PT_REGS1, offsetof(struct pt_regs, 
regs[1]));
+	DEFINE(PT_REGS2, offsetof(struct pt_regs, regs[2]));
+	DEFINE(PT_REGS3, offsetof(struct pt_regs, regs[3]));
+	DEFINE(PT_REGS4, offsetof(struct pt_regs, regs[4]));
+	DEFINE(PT_REGS5, offsetof(struct pt_regs, regs[5]));
+	DEFINE(PT_REGS6, offsetof(struct pt_regs, regs[6]));
+	DEFINE(PT_REGS7, offsetof(struct pt_regs, regs[7]));
+	DEFINE(PT_REGS8, offsetof(struct pt_regs, regs[8]));
+	DEFINE(PT_REGS9, offsetof(struct pt_regs, regs[9]));
+	DEFINE(PT_R15, offsetof(struct pt_regs, lr));
+#if defined(__CSKYABIV2__)
+	DEFINE(PT_R16, offsetof(struct pt_regs, exregs[0]));
+	DEFINE(PT_R17, offsetof(struct pt_regs, exregs[1]));
+	DEFINE(PT_R18, offsetof(struct pt_regs, exregs[2]));
+	DEFINE(PT_R19, offsetof(struct pt_regs, exregs[3]));
+	DEFINE(PT_R20, offsetof(struct pt_regs, exregs[4]));
+	DEFINE(PT_R21, offsetof(struct pt_regs, exregs[5]));
+	DEFINE(PT_R22, offsetof(struct pt_regs, exregs[6]));
+	DEFINE(PT_R23, offsetof(struct pt_regs, exregs[7]));
+	DEFINE(PT_R24, offsetof(struct pt_regs, exregs[8]));
+	DEFINE(PT_R25, offsetof(struct pt_regs, exregs[9]));
+	DEFINE(PT_R26, offsetof(struct pt_regs, exregs[10]));
+	DEFINE(PT_R27, offsetof(struct pt_regs, exregs[11]));
+	DEFINE(PT_R28, offsetof(struct pt_regs, exregs[12]));
+	DEFINE(PT_R29, offsetof(struct pt_regs, exregs[13]));
+	DEFINE(PT_R30, offsetof(struct pt_regs, exregs[14]));
+	DEFINE(PT_R31, offsetof(struct pt_regs, exregs[15]));
+	DEFINE(PT_RHI, offsetof(struct pt_regs, rhi));
+	DEFINE(PT_RLO, offsetof(struct pt_regs, rlo));
+#endif
+	DEFINE(PT_USP, offsetof(struct pt_regs, usp));
+
+	/* offsets into the irq_cpustat_t struct */
+	DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t,
+						__softirq_pending));
+
+	/* signal defines */
+	DEFINE(SIGSEGV, SIGSEGV);
+	DEFINE(SIGTRAP, SIGTRAP);
+
+	return 0;
+}
diff --git a/arch/csky/kernel/atomic.S b/arch/csky/kernel/atomic.S
new file mode 100644
index 000000000000..d2357c8f85bd
--- /dev/null
+++ b/arch/csky/kernel/atomic.S
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/linkage.h>
+#include <abi/entry.h>
+
+.text
+
+/*
+ * int csky_cmpxchg(int oldval, int newval, int *ptr)
+ *
+ * If *ptr != oldval, return 1;
+ * else store newval to *ptr and return 0.
+ */
+#ifdef CONFIG_CPU_HAS_LDSTEX
+ENTRY(csky_cmpxchg)
+	USPTOKSP
+	mfcr	a3, epc
+	INCTRAP	a3
+
+	subi	sp, 8
+	stw	a3, (sp, 0)
+	mfcr	a3, epsr
+	stw	a3, (sp, 4)
+
+	psrset	ee
+1:
+	ldex	a3, (a2)
+	cmpne	a0, a3
+	bt16	2f
+	mov	a3, a1
+	stex	a3, (a2)
+	bez	a3, 1b
+2:
+	sync.is
+	mvc	a0
+	ldw	a3, (sp, 0)
+	mtcr	a3, epc
+	ldw	a3, (sp, 4)
+	mtcr	a3, epsr
+	addi	sp, 8
+	KSPTOUSP
+	rte
+END(csky_cmpxchg)
+#else
+ENTRY(csky_cmpxchg)
+	USPTOKSP
+	mfcr	a3, epc
+	INCTRAP	a3
+
+	subi	sp, 8
+	stw	a3, (sp, 0)
+	mfcr	a3, epsr
+	stw	a3, (sp, 4)
+
+	psrset	ee
+1:
+	ldw	a3, (a2)
+	cmpne	a0, a3
+	bt16	3f
+2:
+	stw	a1, (a2)
+3:
+	mvc	a0
+	ldw	a3, (sp, 0)
+	mtcr	a3, epc
+	ldw	a3, (sp, 4)
+	mtcr	a3, epsr
+	addi	sp, 8
+	KSPTOUSP
+	rte
END(csky_cmpxchg)
+
+/*
+ * Called from the tlbmodified exception
+ */
+ENTRY(csky_cmpxchg_fixup)
+	mfcr	a0, epc
+	lrw	a1, 2b
+	cmpne	a1, a0
+	bt	1f
+	subi	a1, (2b - 1b)
+	stw	a1, (sp, LSAVE_PC)
+1:
+	rts
+END(csky_cmpxchg_fixup)
+#endif
diff --git a/arch/csky/kernel/cpu-probe.c b/arch/csky/kernel/cpu-probe.c
new file mode 100644
index 000000000000..5f15ca31d3e8
--- /dev/null
+++ b/arch/csky/kernel/cpu-probe.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
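For reference, the semantics the csky_cmpxchg trap handler above implements can be modelled in plain C. This stand-in is illustrative only; it has none of the atomicity that the ldex/stex loop (or the trap-based fallback plus tlbmodified fixup) provides:

	#include <stdio.h>

	/* if *ptr == oldval, store newval and return 0; else return 1 */
	static int cmpxchg_model(int oldval, int newval, int *ptr)
	{
		if (*ptr != oldval)
			return 1;
		*ptr = newval;
		return 0;
	}

	int main(void)
	{
		int v = 5;

		printf("%d v=%d\n", cmpxchg_model(5, 7, &v), v); /* 0 v=7 */
		printf("%d v=%d\n", cmpxchg_model(5, 9, &v), v); /* 1 v=7 */
		return 0;
	}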
+ +#include <linux/of.h> +#include <linux/init.h> +#include <linux/seq_file.h> +#include <linux/memblock.h> + +#include <abi/reg_ops.h> + +static void percpu_print(void *arg) +{ + struct seq_file *m = (struct seq_file *)arg; + unsigned int cur, next, i; + + seq_printf(m, "processor : %d\n", smp_processor_id()); + seq_printf(m, "C-SKY CPU model : %s\n", CSKYCPU_DEF_NAME); + + /* read processor id, max is 100 */ + cur = mfcr("cr13"); + for (i = 0; i < 100; i++) { + seq_printf(m, "product info[%d] : 0x%08x\n", i, cur); + + next = mfcr("cr13"); + + /* some CPU only has one id reg */ + if (cur == next) + break; + + cur = next; + + /* cpid index is 31-28, reset */ + if (!(next >> 28)) { + while ((mfcr("cr13") >> 28) != i); + break; + } + } + + /* CPU feature regs, setup by bootloader or gdbinit */ + seq_printf(m, "hint (CPU funcs): 0x%08x\n", mfcr_hint()); + seq_printf(m, "ccr (L1C & MMU): 0x%08x\n", mfcr("cr18")); + seq_printf(m, "ccr2 (L2C) : 0x%08x\n", mfcr_ccr2()); + seq_printf(m, "\n"); +} + +static int c_show(struct seq_file *m, void *v) +{ + int cpu; + + for_each_online_cpu(cpu) + smp_call_function_single(cpu, percpu_print, m, true); + +#ifdef CSKY_ARCH_VERSION + seq_printf(m, "arch-version : %s\n", CSKY_ARCH_VERSION); + seq_printf(m, "\n"); +#endif + + return 0; +} + +static void *c_start(struct seq_file *m, loff_t *pos) +{ + return *pos < 1 ? (void *)1 : NULL; +} + +static void *c_next(struct seq_file *m, void *v, loff_t *pos) +{ + ++*pos; + return NULL; +} + +static void c_stop(struct seq_file *m, void *v) {} + +const struct seq_operations cpuinfo_op = { + .start = c_start, + .next = c_next, + .stop = c_stop, + .show = c_show, +}; diff --git a/arch/csky/kernel/dumpstack.c b/arch/csky/kernel/dumpstack.c new file mode 100644 index 000000000000..a9a03ac57ec5 --- /dev/null +++ b/arch/csky/kernel/dumpstack.c @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/ptrace.h> + +int kstack_depth_to_print = 48; + +void show_trace(unsigned long *stack) +{ + unsigned long *endstack; + unsigned long addr; + int i; + + pr_info("Call Trace:\n"); + addr = (unsigned long)stack + THREAD_SIZE - 1; + endstack = (unsigned long *)(addr & -THREAD_SIZE); + i = 0; + while (stack + 1 <= endstack) { + addr = *stack++; + /* + * If the address is either in the text segment of the + * kernel, or in the region which contains vmalloc'ed + * memory, it *may* be the address of a calling + * routine; if so, print it so that someone tracing + * down the cause of the crash will be able to figure + * out the call path that was taken. 
+ */
+		if (__kernel_text_address(addr)) {
+#ifndef CONFIG_KALLSYMS
+			if (i % 5 == 0)
+				pr_cont("\n ");
+#endif
+			pr_cont(" [<%08lx>] %pS\n", addr, (void *)addr);
+			i++;
+		}
+	}
+	pr_cont("\n");
+}
+
+void show_stack(struct task_struct *task, unsigned long *stack)
+{
+	unsigned long *p;
+	unsigned long *endstack;
+	int i;
+
+	if (!stack) {
+		if (task)
+			stack = (unsigned long *)task->thread.esp0;
+		else
+			stack = (unsigned long *)&stack;
+	}
+	endstack = (unsigned long *)
+		(((unsigned long)stack + THREAD_SIZE - 1) & -THREAD_SIZE);
+
+	pr_info("Stack from %08lx:", (unsigned long)stack);
+	p = stack;
+	for (i = 0; i < kstack_depth_to_print; i++) {
+		if (p + 1 > endstack)
+			break;
+		if (i % 8 == 0)
+			pr_cont("\n ");
+		pr_cont(" %08lx", *p++);
+	}
+	pr_cont("\n");
+	show_trace(stack);
+}
diff --git a/arch/csky/kernel/entry.S b/arch/csky/kernel/entry.S
new file mode 100644
index 000000000000..79f92b8606c8
--- /dev/null
+++ b/arch/csky/kernel/entry.S
@@ -0,0 +1,396 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/linkage.h>
+#include <abi/entry.h>
+#include <abi/pgtable-bits.h>
+#include <asm/errno.h>
+#include <asm/setup.h>
+#include <asm/unistd.h>
+#include <asm/asm-offsets.h>
+#include <linux/threads.h>
+#include <asm/setup.h>
+#include <asm/page.h>
+#include <asm/thread_info.h>
+
+#define PTE_INDX_MSK	0xffc
+#define PTE_INDX_SHIFT	10
+#define _PGDIR_SHIFT	22
+
+.macro tlbop_begin name, val0, val1, val2
+ENTRY(csky_\name)
+	mtcr	a3, ss2
+	mtcr	r6, ss3
+	mtcr	a2, ss4
+
+	RD_PGDR	r6
+	RD_MEH	a3
+#ifdef CONFIG_CPU_HAS_TLBI
+	tlbi.vaas a3
+	sync.is
+
+	btsti	a3, 31
+	bf	1f
+	RD_PGDR_K r6
+1:
+#else
+	bgeni	a2, 31
+	WR_MCIR	a2
+	bgeni	a2, 25
+	WR_MCIR	a2
+#endif
+	bclri	r6, 0
+	lrw	a2, PHYS_OFFSET
+	subu	r6, a2
+	bseti	r6, 31
+
+	mov	a2, a3
+	lsri	a2, _PGDIR_SHIFT
+	lsli	a2, 2
+	addu	r6, a2
+	ldw	r6, (r6)
+
+	lrw	a2, PHYS_OFFSET
+	subu	r6, a2
+	bseti	r6, 31
+
+	lsri	a3, PTE_INDX_SHIFT
+	lrw	a2, PTE_INDX_MSK
+	and	a3, a2
+	addu	r6, a3
+	ldw	a3, (r6)
+
+	movi	a2, (_PAGE_PRESENT | \val0)
+	and	a3, a2
+	cmpne	a3, a2
+	bt	\name
+
+	/* On the first read/write of the page, just update the flags */
+	ldw	a3, (r6)
+	bgeni	a2, PAGE_VALID_BIT
+	bseti	a2, PAGE_ACCESSED_BIT
+	bseti	a2, \val1
+	bseti	a2, \val2
+	or	a3, a2
+	stw	a3, (r6)
+
+	/* Some CPUs bypass the cache on TLB hard refill */
+#ifdef CONFIG_CPU_NEED_TLBSYNC
+	movi	a2, 0x22
+	bseti	a2, 6
+	mtcr	r6, cr22
+	mtcr	a2, cr17
+	sync
+#endif
+
+	mfcr	a3, ss2
+	mfcr	r6, ss3
+	mfcr	a2, ss4
+	rte
+\name:
+	mfcr	a3, ss2
+	mfcr	r6, ss3
+	mfcr	a2, ss4
+	SAVE_ALL EPC_KEEP
+.endm
+.macro tlbop_end is_write
+	RD_MEH	a2
+	psrset	ee, ie
+	mov	a0, sp
+	movi	a1, \is_write
+	jbsr	do_page_fault
+	movi	r11_sig, 0	/* r11 = 0, Not a syscall.
*/
+	jmpi	ret_from_exception
+.endm
+
+.text
+
+tlbop_begin tlbinvalidl, _PAGE_READ, PAGE_VALID_BIT, PAGE_ACCESSED_BIT
+tlbop_end 0
+
+tlbop_begin tlbinvalids, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT
+tlbop_end 1
+
+tlbop_begin tlbmodified, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT
+#ifndef CONFIG_CPU_HAS_LDSTEX
+jbsr csky_cmpxchg_fixup
+#endif
+tlbop_end 1
+
+ENTRY(csky_systemcall)
+	SAVE_ALL EPC_INCREASE
+
+	psrset	ee, ie
+
+	/* Save the syscall stack frame, as set_esp0() does */
+	mov	r12, sp
+
+	bmaski	r11, 13
+	andn	r12, r11
+	bgeni	r11, 9
+	addi	r11, 32
+	addu	r12, r11
+	st	sp, (r12, 0)
+
+	lrw	r11, __NR_syscalls
+	cmphs	syscallid, r11		/* Check nr of syscall */
+	bt	ret_from_exception
+
+	lrw	r13, sys_call_table
+	ixw	r13, syscallid
+	ldw	r11, (r13)
+	cmpnei	r11, 0
+	bf	ret_from_exception
+
+	mov	r9, sp
+	bmaski	r10, THREAD_SHIFT
+	andn	r9, r10
+	ldw	r8, (r9, TINFO_FLAGS)
+	btsti	r8, TIF_SYSCALL_TRACE
+	bt	1f
+#if defined(__CSKYABIV2__)
+	subi	sp, 8
+	stw	r5, (sp, 0x4)
+	stw	r4, (sp, 0x0)
+	jsr	r11			/* Do system call */
+	addi	sp, 8
+#else
+	jsr	r11
+#endif
+	stw	a0, (sp, LSAVE_A0)	/* Save return value */
+	jmpi	ret_from_exception
+
+1:
+	movi	a0, 0			/* enter system call */
+	mov	a1, sp			/* sp = pt_regs pointer */
+	jbsr	syscall_trace
+	/* Prepare args before doing the system call */
+	ldw	a0, (sp, LSAVE_A0)
+	ldw	a1, (sp, LSAVE_A1)
+	ldw	a2, (sp, LSAVE_A2)
+	ldw	a3, (sp, LSAVE_A3)
+#if defined(__CSKYABIV2__)
+	subi	sp, 8
+	stw	r5, (sp, 0x4)
+	stw	r4, (sp, 0x0)
+#else
+	ldw	r6, (sp, LSAVE_A4)
+	ldw	r7, (sp, LSAVE_A5)
+#endif
+	jsr	r11			/* Do system call */
+#if defined(__CSKYABIV2__)
+	addi	sp, 8
+#endif
+	stw	a0, (sp, LSAVE_A0)	/* Save return value */
+
+	movi	a0, 1			/* leave system call */
+	mov	a1, sp			/* sp = pt_regs pointer */
+	jbsr	syscall_trace
+
+syscall_exit_work:
+	ld	syscallid, (sp, LSAVE_PSR)
+	btsti	syscallid, 31
+	bt	2f
+
+	jmpi	resume_userspace
+
+2:	RESTORE_ALL
+
+ENTRY(ret_from_kernel_thread)
+	jbsr	schedule_tail
+	mov	a0, r8
+	jsr	r9
+	jbsr	ret_from_exception
+
+ENTRY(ret_from_fork)
+	jbsr	schedule_tail
+	mov	r9, sp
+	bmaski	r10, THREAD_SHIFT
+	andn	r9, r10
+	ldw	r8, (r9, TINFO_FLAGS)
+	movi	r11_sig, 1
+	btsti	r8, TIF_SYSCALL_TRACE
+	bf	3f
+	movi	a0, 1
+	mov	a1, sp			/* sp = pt_regs pointer */
+	jbsr	syscall_trace
+3:
+	jbsr	ret_from_exception
+
+ret_from_exception:
+	ld	syscallid, (sp, LSAVE_PSR)
+	btsti	syscallid, 31
+	bt	1f
+
+	/*
+	 * Load the address of current's thread_info (task_struct is
+	 * reachable from it), then check thread_info->flags for pending
+	 * work such as TIF_NEED_RESCHED.
+	 */
+	mov	r9, sp
+	bmaski	r10, THREAD_SHIFT
+	andn	r9, r10
+
+resume_userspace:
+	ldw	r8, (r9, TINFO_FLAGS)
+	andi	r8, (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED)
+	cmpnei	r8, 0
+	bt	exit_work
+1:	RESTORE_ALL
+
+exit_work:
+	mov	a0, sp			/* Stack address is arg[0] */
+	jbsr	set_esp0		/* Call C level */
+	btsti	r8, TIF_NEED_RESCHED
+	bt	work_resched
+	/* If thread_info->flags is empty, RESTORE_ALL */
+	cmpnei	r8, 0
+	bf	1b
+	mov	a1, sp
+	mov	a0, r8
+	mov	a2, r11_sig		/* syscall? */
+	btsti	r8, TIF_SIGPENDING	/* delivering a signal? */
+	/* prevent further restarts (set r11 = 0) */
+	clrt	r11_sig
+	jbsr	do_notify_resume	/* do signals */
+	br	resume_userspace
+
+work_resched:
+	lrw	syscallid, ret_from_exception
+	mov	r15, syscallid		/* Return address in link */
+	jmpi	schedule
+
+ENTRY(sys_rt_sigreturn)
+	movi	r11_sig, 0
+	jmpi	do_rt_sigreturn
+
+ENTRY(csky_trap)
+	SAVE_ALL EPC_KEEP
+	psrset	ee
+	movi	r11_sig, 0		/* r11 = 0, Not a syscall.
*/
+	mov	a0, sp			/* Push Stack pointer arg */
+	jbsr	trap_c			/* Call C-level trap handler */
+	jmpi	ret_from_exception
+
+/*
+ * Prototype from libc for abiv1:
+ * register unsigned int __result asm("a0");
+ * asm( "trap 3" :"=r"(__result)::);
+ */
+ENTRY(csky_get_tls)
+	USPTOKSP
+
+	/* increase epc to continue after the trap */
+	mfcr	a0, epc
+	INCTRAP	a0
+	mtcr	a0, epc
+
+	/* get the current task's thread_info from the 8KB kernel stack */
+	bmaski	a0, THREAD_SHIFT
+	not	a0
+	subi	sp, 1
+	and	a0, sp
+	addi	sp, 1
+
+	/* get tls */
+	ldw	a0, (a0, TINFO_TP_VALUE)
+
+	KSPTOUSP
+	rte
+
+ENTRY(csky_irq)
+	SAVE_ALL EPC_KEEP
+	psrset	ee
+	movi	r11_sig, 0		/* r11 = 0, Not a syscall. */
+
+#ifdef CONFIG_PREEMPT
+	mov	r9, sp			/* Get current stack pointer */
+	bmaski	r10, THREAD_SHIFT
+	andn	r9, r10			/* Get thread_info */
+
+	/*
+	 * Get task_struct->stack.preempt_count for current,
+	 * and increase it by 1.
+	 */
+	ldw	r8, (r9, TINFO_PREEMPT)
+	addi	r8, 1
+	stw	r8, (r9, TINFO_PREEMPT)
+#endif
+
+	mov	a0, sp
+	jbsr	csky_do_IRQ
+
+#ifdef CONFIG_PREEMPT
+	subi	r8, 1
+	stw	r8, (r9, TINFO_PREEMPT)
+	cmpnei	r8, 0
+	bt	2f
+	ldw	r8, (r9, TINFO_FLAGS)
+	btsti	r8, TIF_NEED_RESCHED
+	bf	2f
+1:
+	jbsr	preempt_schedule_irq	/* irq en/disable is done inside */
+	ldw	r7, (r9, TINFO_FLAGS)	/* get new tasks TI_FLAGS */
+	btsti	r7, TIF_NEED_RESCHED
+	bt	1b			/* go again */
+#endif
+2:
+	jmpi	ret_from_exception
+
+/*
+ * a0 = prev task_struct *
+ * a1 = next task_struct *
+ * a0 = return next
+ */
+ENTRY(__switch_to)
+	lrw	a3, TASK_THREAD
+	addu	a3, a0
+
+	mfcr	a2, psr			/* Save PSR value */
+	stw	a2, (a3, THREAD_SR)	/* Save PSR in task struct */
+	bclri	a2, 6			/* Disable interrupts */
+	mtcr	a2, psr
+
+	SAVE_SWITCH_STACK
+
+	stw	sp, (a3, THREAD_KSP)
+
+#ifdef CONFIG_CPU_HAS_HILO
+	lrw	r10, THREAD_DSPHI
+	add	r10, a3
+	mfhi	r6
+	mflo	r7
+	stw	r6, (r10, 0)		/* THREAD_DSPHI */
+	stw	r7, (r10, 4)		/* THREAD_DSPLO */
+	mfcr	r6, cr14
+	stw	r6, (r10, 8)		/* THREAD_DSPCSR */
+#endif
+
+	/* Set up next process to run */
+	lrw	a3, TASK_THREAD
+	addu	a3, a1
+
+	ldw	sp, (a3, THREAD_KSP)	/* Set next kernel sp */
+
+#ifdef CONFIG_CPU_HAS_HILO
+	lrw	r10, THREAD_DSPHI
+	add	r10, a3
+	ldw	r6, (r10, 8)		/* THREAD_DSPCSR */
+	mtcr	r6, cr14
+	ldw	r6, (r10, 0)		/* THREAD_DSPHI */
+	ldw	r7, (r10, 4)		/* THREAD_DSPLO */
+	mthi	r6
+	mtlo	r7
+#endif
+
+	ldw	a2, (a3, THREAD_SR)	/* Set next PSR */
+	mtcr	a2, psr
+
+#if defined(__CSKYABIV2__)
+	addi	r7, a1, TASK_THREAD_INFO
+	ldw	tls, (r7, TINFO_TP_VALUE)
+#endif
+
+	RESTORE_SWITCH_STACK
+
+	rts
+ENDPROC(__switch_to)
diff --git a/arch/csky/kernel/head.S b/arch/csky/kernel/head.S
new file mode 100644
index 000000000000..9c4ec473b76b
--- /dev/null
+++ b/arch/csky/kernel/head.S
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/page.h>
+#include <abi/entry.h>
+
+__HEAD
+ENTRY(_start)
+	/* set super user mode */
+	lrw	a3, DEFAULT_PSR_VALUE
+	mtcr	a3, psr
+	psrset	ee
+
+	SETUP_MMU a3
+
+	/* set stack pointer */
+	lrw	a3, init_thread_union + THREAD_SIZE
+	mov	sp, a3
+
+	jmpi	csky_start
+END(_start)
+
+#ifdef CONFIG_SMP
+.align 10
+ENTRY(_start_smp_secondary)
+	/* Invalidate I/D-cache, BTB and BHT */
+	movi	a3, 7
+	lsli	a3, 16
+	addi	a3, (1<<4) | 3
+	mtcr	a3, cr17
+
+	tlbi.alls
+
+	/* setup PAGEMASK */
+	movi	a3, 0
+	mtcr	a3, cr<6, 15>
+
+	/* setup MEL0/MEL1 */
+	grs	a0, _start_smp_pc
+_start_smp_pc:
+	bmaski	a1, 13
+	andn	a0, a1
+	movi	a1, 0x00000006
+	movi	a2, 0x00001006
+	or	a1, a0
+	or	a2, a0
+	mtcr	a1, cr<2, 15>
+	mtcr	a2, cr<3, 15>
+
+	/* setup MEH */
+	mtcr	a0, cr<4, 15>
+
+	/*
write TLB */ + bgeni a3, 28 + mtcr a3, cr<8, 15> + + SETUP_MMU a3 + + /* enable MMU */ + movi a3, 1 + mtcr a3, cr18 + + jmpi _goto_mmu_on +_goto_mmu_on: + lrw a3, DEFAULT_PSR_VALUE + mtcr a3, psr + psrset ee + + /* set stack pointer */ + lrw a3, secondary_stack + ld.w a3, (a3, 0) + mov sp, a3 + + jmpi csky_start_secondary +END(_start_smp_secondary) +#endif diff --git a/arch/csky/kernel/irq.c b/arch/csky/kernel/irq.c new file mode 100644 index 000000000000..03a1930f1cbb --- /dev/null +++ b/arch/csky/kernel/irq.c @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqchip.h> +#include <asm/traps.h> +#include <asm/smp.h> + +void __init init_IRQ(void) +{ + irqchip_init(); +#ifdef CONFIG_SMP + setup_smp_ipi(); +#endif +} + +asmlinkage void __irq_entry csky_do_IRQ(struct pt_regs *regs) +{ + handle_arch_irq(regs); +} diff --git a/arch/csky/kernel/module.c b/arch/csky/kernel/module.c new file mode 100644 index 000000000000..65abab0c7a47 --- /dev/null +++ b/arch/csky/kernel/module.c @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/moduleloader.h> +#include <linux/elf.h> +#include <linux/mm.h> +#include <linux/vmalloc.h> +#include <linux/slab.h> +#include <linux/fs.h> +#include <linux/string.h> +#include <linux/kernel.h> +#include <linux/spinlock.h> +#include <asm/pgtable.h> + +#if defined(__CSKYABIV2__) +#define IS_BSR32(hi16, lo16) (((hi16) & 0xFC00) == 0xE000) +#define IS_JSRI32(hi16, lo16) ((hi16) == 0xEAE0) + +#define CHANGE_JSRI_TO_LRW(addr) do { \ + *(uint16_t *)(addr) = (*(uint16_t *)(addr) & 0xFF9F) | 0x001a; \ + *((uint16_t *)(addr) + 1) = *((uint16_t *)(addr) + 1) & 0xFFFF; \ +} while (0) + +#define SET_JSR32_R26(addr) do { \ + *(uint16_t *)(addr) = 0xE8Fa; \ + *((uint16_t *)(addr) + 1) = 0x0000; \ +} while (0) +#endif + +int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab, + unsigned int symindex, unsigned int relsec, struct module *me) +{ + unsigned int i; + Elf32_Rela *rel = (void *) sechdrs[relsec].sh_addr; + Elf32_Sym *sym; + uint32_t *location; + short *temp; +#if defined(__CSKYABIV2__) + uint16_t *location_tmp; +#endif + + for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { + /* This is where to make the change */ + location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + + rel[i].r_offset; + sym = (Elf32_Sym *)sechdrs[symindex].sh_addr + + ELF32_R_SYM(rel[i].r_info); + + switch (ELF32_R_TYPE(rel[i].r_info)) { + case R_CSKY_32: + /* We add the value into the location given */ + *location = rel[i].r_addend + sym->st_value; + break; + case R_CSKY_PC32: + /* Add the value, subtract its position */ + *location = rel[i].r_addend + sym->st_value + - (uint32_t)location; + break; + case R_CSKY_PCRELJSR_IMM11BY2: + break; + case R_CSKY_PCRELJSR_IMM26BY2: +#if defined(__CSKYABIV2__) + location_tmp = (uint16_t *)location; + if (IS_BSR32(*location_tmp, *(location_tmp + 1))) + break; + + if (IS_JSRI32(*location_tmp, *(location_tmp + 1))) { + /* jsri 0x... --> lrw r26, 0x... 
*/ + CHANGE_JSRI_TO_LRW(location); + /* lsli r0, r0 --> jsr r26 */ + SET_JSR32_R26(location + 1); + } +#endif + break; + case R_CSKY_ADDR_HI16: + temp = ((short *)location) + 1; + *temp = (short) + ((rel[i].r_addend + sym->st_value) >> 16); + break; + case R_CSKY_ADDR_LO16: + temp = ((short *)location) + 1; + *temp = (short) + ((rel[i].r_addend + sym->st_value) & 0xffff); + break; + default: + pr_err("module %s: Unknown relocation: %u\n", + me->name, ELF32_R_TYPE(rel[i].r_info)); + return -ENOEXEC; + } + } + return 0; +} diff --git a/arch/csky/kernel/power.c b/arch/csky/kernel/power.c new file mode 100644 index 000000000000..923ee4e381b8 --- /dev/null +++ b/arch/csky/kernel/power.c @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/reboot.h> + +void (*pm_power_off)(void); +EXPORT_SYMBOL(pm_power_off); + +void machine_power_off(void) +{ + local_irq_disable(); + if (pm_power_off) + pm_power_off(); + asm volatile ("bkpt"); +} + +void machine_halt(void) +{ + local_irq_disable(); + if (pm_power_off) + pm_power_off(); + asm volatile ("bkpt"); +} + +void machine_restart(char *cmd) +{ + local_irq_disable(); + do_kernel_restart(cmd); + asm volatile ("bkpt"); +} diff --git a/arch/csky/kernel/process.c b/arch/csky/kernel/process.c new file mode 100644 index 000000000000..8ed20028b160 --- /dev/null +++ b/arch/csky/kernel/process.c @@ -0,0 +1,136 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/module.h> +#include <linux/version.h> +#include <linux/sched.h> +#include <linux/sched/task_stack.h> +#include <linux/sched/debug.h> +#include <linux/delay.h> +#include <linux/kallsyms.h> +#include <linux/uaccess.h> +#include <linux/ptrace.h> + +#include <asm/elf.h> +#include <abi/reg_ops.h> + +struct cpuinfo_csky cpu_data[NR_CPUS]; + +asmlinkage void ret_from_fork(void); +asmlinkage void ret_from_kernel_thread(void); + +/* + * Some archs flush debug and FPU info here + */ +void flush_thread(void){} + +/* + * Return saved PC from a blocked thread + */ +unsigned long thread_saved_pc(struct task_struct *tsk) +{ + struct switch_stack *sw = (struct switch_stack *)tsk->thread.ksp; + + return sw->r15; +} + +int copy_thread(unsigned long clone_flags, + unsigned long usp, + unsigned long kthread_arg, + struct task_struct *p) +{ + struct switch_stack *childstack; + struct pt_regs *childregs = task_pt_regs(p); + +#ifdef CONFIG_CPU_HAS_FPU + save_to_user_fp(&p->thread.user_fp); +#endif + + childstack = ((struct switch_stack *) childregs) - 1; + memset(childstack, 0, sizeof(struct switch_stack)); + + /* setup ksp for switch_to !!! */ + p->thread.ksp = (unsigned long)childstack; + + if (unlikely(p->flags & PF_KTHREAD)) { + memset(childregs, 0, sizeof(struct pt_regs)); + childstack->r15 = (unsigned long) ret_from_kernel_thread; + childstack->r8 = kthread_arg; + childstack->r9 = usp; + childregs->sr = mfcr("psr"); + } else { + *childregs = *(current_pt_regs()); + if (usp) + childregs->usp = usp; + if (clone_flags & CLONE_SETTLS) + task_thread_info(p)->tp_value = childregs->tls + = childregs->regs[0]; + + childregs->a0 = 0; + childstack->r15 = (unsigned long) ret_from_fork; + } + + return 0; +} + +/* Fill in the fpu structure for a core dump. 
*/ +int dump_fpu(struct pt_regs *regs, struct user_fp *fpu) +{ + memcpy(fpu, &current->thread.user_fp, sizeof(*fpu)); + return 1; +} +EXPORT_SYMBOL(dump_fpu); + +int dump_task_regs(struct task_struct *tsk, elf_gregset_t *pr_regs) +{ + struct pt_regs *regs = task_pt_regs(tsk); + + /* NOTE: the usp copied here is not a valid value. */ + ELF_CORE_COPY_REGS((*pr_regs), regs) + + return 1; +} + +unsigned long get_wchan(struct task_struct *p) +{ + unsigned long esp, pc; + unsigned long stack_page; + int count = 0; + + if (!p || p == current || p->state == TASK_RUNNING) + return 0; + + stack_page = (unsigned long)p; + esp = p->thread.esp0; + do { + if (esp < stack_page+sizeof(struct task_struct) || + esp >= 8184+stack_page) + return 0; + /* FIXME: this may be wrong */ + pc = ((unsigned long *)esp)[1]; + /* FIXME: This depends on the order of these functions. */ + if (!in_sched_functions(pc)) + return pc; + esp = *(unsigned long *) esp; + } while (count++ < 16); + return 0; +} +EXPORT_SYMBOL(get_wchan); + +#ifndef CONFIG_CPU_PM_NONE +void arch_cpu_idle(void) +{ +#ifdef CONFIG_CPU_PM_WAIT + asm volatile("wait\n"); +#endif + +#ifdef CONFIG_CPU_PM_DOZE + asm volatile("doze\n"); +#endif + +#ifdef CONFIG_CPU_PM_STOP + asm volatile("stop\n"); +#endif + local_irq_enable(); +} +#endif diff --git a/arch/csky/kernel/ptrace.c b/arch/csky/kernel/ptrace.c new file mode 100644 index 000000000000..34b30257298f --- /dev/null +++ b/arch/csky/kernel/ptrace.c @@ -0,0 +1,314 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/elf.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/ptrace.h> +#include <linux/regset.h> +#include <linux/sched.h> +#include <linux/signal.h> +#include <linux/smp.h> +#include <linux/uaccess.h> +#include <linux/user.h> + +#include <asm/thread_info.h> +#include <asm/page.h> +#include <asm/pgtable.h> +#include <asm/processor.h> +#include <asm/asm-offsets.h> + +#include <abi/regdef.h> + +/* sets the trace bits. */ +#define TRACE_MODE_SI (1 << 14) +#define TRACE_MODE_RUN 0 +#define TRACE_MODE_MASK ~(0x3 << 14) + +/* + * Make sure the single step bit is not set. + */ +static void singlestep_disable(struct task_struct *tsk) +{ + struct pt_regs *regs; + + regs = task_pt_regs(tsk); + regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_RUN; +} + +static void singlestep_enable(struct task_struct *tsk) +{ + struct pt_regs *regs; + + regs = task_pt_regs(tsk); + regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_SI; +} + +/* + * Make sure the single step bit is set. + */ +void user_enable_single_step(struct task_struct *child) +{ + if (child->thread.esp0 == 0) + return; + singlestep_enable(child); +} + +void user_disable_single_step(struct task_struct *child) +{ + if (child->thread.esp0 == 0) + return; + singlestep_disable(child); +} + +enum csky_regset { + REGSET_GPR, + REGSET_FPR, +}; + +static int gpr_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + struct pt_regs *regs; + + regs = task_pt_regs(target); + + /* Abiv1 regs->tls is a placeholder, so sync it from thread_info here. 
*/ + regs->tls = task_thread_info(target)->tp_value; + + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, regs, 0, -1); +} + +static int gpr_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret; + struct pt_regs regs; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0, -1); + if (ret) + return ret; + + regs.sr = task_pt_regs(target)->sr; + + task_thread_info(target)->tp_value = regs.tls; + + *task_pt_regs(target) = regs; + + return 0; +} + +static int fpr_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + struct user_fp *regs = (struct user_fp *)&target->thread.user_fp; + +#if defined(CONFIG_CPU_HAS_FPUV2) && !defined(CONFIG_CPU_HAS_VDSP) + int i; + struct user_fp tmp = *regs; + + for (i = 0; i < 16; i++) { + tmp.vr[i*4] = regs->vr[i*2]; + tmp.vr[i*4 + 1] = regs->vr[i*2 + 1]; + } + + for (i = 0; i < 32; i++) + tmp.vr[64 + i] = regs->vr[32 + i]; + + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &tmp, 0, -1); +#else + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, regs, 0, -1); +#endif +} + +static int fpr_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret; + struct user_fp *regs = (struct user_fp *)&target->thread.user_fp; + +#if defined(CONFIG_CPU_HAS_FPUV2) && !defined(CONFIG_CPU_HAS_VDSP) + int i; + struct user_fp tmp; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tmp, 0, -1); + + *regs = tmp; + + for (i = 0; i < 16; i++) { + regs->vr[i*2] = tmp.vr[i*4]; + regs->vr[i*2 + 1] = tmp.vr[i*4 + 1]; + } + + for (i = 0; i < 32; i++) + regs->vr[32 + i] = tmp.vr[64 + i]; +#else + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs, 0, -1); +#endif + + return ret; +} + +static const struct user_regset csky_regsets[] = { + [REGSET_GPR] = { + .core_note_type = NT_PRSTATUS, + .n = ELF_NGREG, + .size = sizeof(u32), + .align = sizeof(u32), + .get = &gpr_get, + .set = &gpr_set, + }, + [REGSET_FPR] = { + .core_note_type = NT_PRFPREG, + .n = sizeof(struct user_fp) / sizeof(u32), + .size = sizeof(u32), + .align = sizeof(u32), + .get = &fpr_get, + .set = &fpr_set, + }, +}; + +static const struct user_regset_view user_csky_view = { + .name = "csky", + .e_machine = ELF_ARCH, + .regsets = csky_regsets, + .n = ARRAY_SIZE(csky_regsets), +}; + +const struct user_regset_view *task_user_regset_view(struct task_struct *task) +{ + return &user_csky_view; +} + +void ptrace_disable(struct task_struct *child) +{ + singlestep_disable(child); +} + +long arch_ptrace(struct task_struct *child, long request, + unsigned long addr, unsigned long data) +{ + long ret = -EIO; + + switch (request) { + default: + ret = ptrace_request(child, request, addr, data); + break; + } + + return ret; +} + +/* + * If the process's system calls are being traced, do the corresponding + * ptrace handling in this function, both before entering a system call + * and after it returns. + */ +asmlinkage void syscall_trace(int why, struct pt_regs *regs) +{ + long saved_why; + /* + * Save the old value and record 'why', which denotes syscall + * entry (why = 0) or exit (why = 1). + */ + saved_why = regs->regs[SYSTRACE_SAVENUM]; + regs->regs[SYSTRACE_SAVENUM] = why; + + ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) + ? 
0x80 : 0)); + + /* + * this isn't the same as continuing with a signal, but it will do + * for normal use. strace only continues with a signal if the + * stopping signal is not SIGTRAP. -brl + */ + if (current->exit_code) { + send_sig(current->exit_code, current, 1); + current->exit_code = 0; + } + + regs->regs[SYSTRACE_SAVENUM] = saved_why; +} + +void show_regs(struct pt_regs *fp) +{ + unsigned long *sp; + unsigned char *tp; + int i; + + pr_info("\nCURRENT PROCESS:\n\n"); + pr_info("COMM=%s PID=%d\n", current->comm, current->pid); + + if (current->mm) { + pr_info("TEXT=%08x-%08x DATA=%08x-%08x BSS=%08x-%08x\n", + (int) current->mm->start_code, + (int) current->mm->end_code, + (int) current->mm->start_data, + (int) current->mm->end_data, + (int) current->mm->end_data, + (int) current->mm->brk); + pr_info("USER-STACK=%08x KERNEL-STACK=%08x\n\n", + (int) current->mm->start_stack, + (int) (((unsigned long) current) + 2 * PAGE_SIZE)); + } + + pr_info("PC: 0x%08lx\n", (long)fp->pc); + pr_info("orig_a0: 0x%08lx\n", fp->orig_a0); + pr_info("PSR: 0x%08lx\n", (long)fp->sr); + + pr_info("a0: 0x%08lx a1: 0x%08lx a2: 0x%08lx a3: 0x%08lx\n", + fp->a0, fp->a1, fp->a2, fp->a3); +#if defined(__CSKYABIV2__) + pr_info("r4: 0x%08lx r5: 0x%08lx r6: 0x%08lx r7: 0x%08lx\n", + fp->regs[0], fp->regs[1], fp->regs[2], fp->regs[3]); + pr_info("r8: 0x%08lx r9: 0x%08lx r10: 0x%08lx r11: 0x%08lx\n", + fp->regs[4], fp->regs[5], fp->regs[6], fp->regs[7]); + pr_info("r12 0x%08lx r13: 0x%08lx r15: 0x%08lx\n", + fp->regs[8], fp->regs[9], fp->lr); + pr_info("r16:0x%08lx r17: 0x%08lx r18: 0x%08lx r19: 0x%08lx\n", + fp->exregs[0], fp->exregs[1], fp->exregs[2], fp->exregs[3]); + pr_info("r20 0x%08lx r21: 0x%08lx r22: 0x%08lx r23: 0x%08lx\n", + fp->exregs[4], fp->exregs[5], fp->exregs[6], fp->exregs[7]); + pr_info("r24 0x%08lx r25: 0x%08lx r26: 0x%08lx r27: 0x%08lx\n", + fp->exregs[8], fp->exregs[9], fp->exregs[10], fp->exregs[11]); + pr_info("r28 0x%08lx r29: 0x%08lx r30: 0x%08lx tls: 0x%08lx\n", + fp->exregs[12], fp->exregs[13], fp->exregs[14], fp->tls); + pr_info("hi 0x%08lx lo: 0x%08lx\n", + fp->rhi, fp->rlo); +#else + pr_info("r6: 0x%08lx r7: 0x%08lx r8: 0x%08lx r9: 0x%08lx\n", + fp->regs[0], fp->regs[1], fp->regs[2], fp->regs[3]); + pr_info("r10: 0x%08lx r11: 0x%08lx r12: 0x%08lx r13: 0x%08lx\n", + fp->regs[4], fp->regs[5], fp->regs[6], fp->regs[7]); + pr_info("r14 0x%08lx r1: 0x%08lx r15: 0x%08lx\n", + fp->regs[8], fp->regs[9], fp->lr); +#endif + + pr_info("\nCODE:"); + tp = ((unsigned char *) fp->pc) - 0x20; + tp += ((int)tp % 4) ? 2 : 0; + for (sp = (unsigned long *) tp, i = 0; (i < 0x40); i += 4) { + if ((i % 0x10) == 0) + pr_cont("\n%08x: ", (int) (tp + i)); + pr_cont("%08x ", (int) *sp++); + } + pr_cont("\n"); + + pr_info("\nKERNEL STACK:"); + tp = ((unsigned char *) fp) - 0x40; + for (sp = (unsigned long *) tp, i = 0; (i < 0xc0); i += 4) { + if ((i % 0x10) == 0) + pr_cont("\n%08x: ", (int) (tp + i)); + pr_cont("%08x ", (int) *sp++); + } + pr_cont("\n"); +} diff --git a/arch/csky/kernel/setup.c b/arch/csky/kernel/setup.c new file mode 100644 index 000000000000..a5e3ab1d5360 --- /dev/null +++ b/arch/csky/kernel/setup.c @@ -0,0 +1,162 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
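+/* + * Zone sizing in csky_memblock_init() below, in outline (a simplified + * sketch of the single-memblock-region case; "total" and "lowmem" are + * illustrative names, not symbols defined in this file): + * + * lowmem = PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET); + * total = max_pfn - min_low_pfn; + * if (total <= lowmem) + * zone_size[ZONE_NORMAL] = total; + * else { + * zone_size[ZONE_NORMAL] = lowmem; + * zone_size[ZONE_HIGHMEM] = total - lowmem; + * } + * + * With two memblock regions, region 0 backs ZONE_NORMAL and region 1 + * backs ZONE_HIGHMEM instead. + */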
+ +#include <linux/console.h> +#include <linux/memblock.h> +#include <linux/bootmem.h> +#include <linux/initrd.h> +#include <linux/of.h> +#include <linux/of_fdt.h> +#include <linux/start_kernel.h> +#include <linux/dma-contiguous.h> +#include <linux/screen_info.h> +#include <asm/sections.h> +#include <asm/mmu_context.h> +#include <asm/pgalloc.h> + +#ifdef CONFIG_DUMMY_CONSOLE +struct screen_info screen_info = { + .orig_video_lines = 30, + .orig_video_cols = 80, + .orig_video_mode = 0, + .orig_video_ega_bx = 0, + .orig_video_isVGA = 1, + .orig_video_points = 8 +}; +#endif + +phys_addr_t __init_memblock memblock_end_of_REG0(void) +{ + return (memblock.memory.regions[0].base + + memblock.memory.regions[0].size); +} + +phys_addr_t __init_memblock memblock_start_of_REG1(void) +{ + return memblock.memory.regions[1].base; +} + +size_t __init_memblock memblock_size_of_REG1(void) +{ + return memblock.memory.regions[1].size; +} + +static void __init csky_memblock_init(void) +{ + unsigned long zone_size[MAX_NR_ZONES]; + unsigned long zhole_size[MAX_NR_ZONES]; + signed long size; + + memblock_reserve(__pa(_stext), _end - _stext); +#ifdef CONFIG_BLK_DEV_INITRD + memblock_reserve(__pa(initrd_start), initrd_end - initrd_start); +#endif + + early_init_fdt_reserve_self(); + early_init_fdt_scan_reserved_mem(); + + memblock_dump_all(); + + memset(zone_size, 0, sizeof(zone_size)); + memset(zhole_size, 0, sizeof(zhole_size)); + + min_low_pfn = PFN_UP(memblock_start_of_DRAM()); + max_pfn = PFN_DOWN(memblock_end_of_DRAM()); + + max_low_pfn = PFN_UP(memblock_end_of_REG0()); + if (max_low_pfn == 0) + max_low_pfn = max_pfn; + + size = max_pfn - min_low_pfn; + + if (memblock.memory.cnt > 1) { + zone_size[ZONE_NORMAL] = + PFN_DOWN(memblock_start_of_REG1()) - min_low_pfn; + zhole_size[ZONE_NORMAL] = + PFN_DOWN(memblock_start_of_REG1()) - max_low_pfn; + } else { + if (size <= PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET)) + zone_size[ZONE_NORMAL] = max_pfn - min_low_pfn; + else { + zone_size[ZONE_NORMAL] = + PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET); + max_low_pfn = min_low_pfn + zone_size[ZONE_NORMAL]; + } + } + +#ifdef CONFIG_HIGHMEM + size = 0; + if (memblock.memory.cnt > 1) { + size = PFN_DOWN(memblock_size_of_REG1()); + highstart_pfn = PFN_DOWN(memblock_start_of_REG1()); + } else { + size = max_pfn - min_low_pfn - + PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET); + highstart_pfn = min_low_pfn + + PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET); + } + + if (size > 0) + zone_size[ZONE_HIGHMEM] = size; + + highend_pfn = max_pfn; +#endif + memblock_set_current_limit(PFN_PHYS(max_low_pfn)); + + dma_contiguous_reserve(0); + + free_area_init_node(0, zone_size, min_low_pfn, zhole_size); +} + +void __init setup_arch(char **cmdline_p) +{ + *cmdline_p = boot_command_line; + + console_verbose(); + + pr_info("Phys. 
mem: %ldMB\n", + (unsigned long) memblock_phys_mem_size()/1024/1024); + + init_mm.start_code = (unsigned long) _stext; + init_mm.end_code = (unsigned long) _etext; + init_mm.end_data = (unsigned long) _edata; + init_mm.brk = (unsigned long) _end; + + parse_early_param(); + + csky_memblock_init(); + + unflatten_and_copy_device_tree(); + +#ifdef CONFIG_SMP + setup_smp(); +#endif + + sparse_init(); + +#ifdef CONFIG_HIGHMEM + kmap_init(); +#endif + +#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE) + conswitchp = &dummy_con; +#endif +} + +asmlinkage __visible void __init csky_start(unsigned int unused, void *param) +{ + /* Clear the bss section */ + memset(__bss_start, 0, __bss_stop - __bss_start); + + pre_trap_init(); + pre_mmu_init(); + + if (param == NULL) + early_init_dt_scan(__dtb_start); + else + early_init_dt_scan(param); + + start_kernel(); + + asm volatile("br .\n"); +} diff --git a/arch/csky/kernel/signal.c b/arch/csky/kernel/signal.c new file mode 100644 index 000000000000..66e1b729b10b --- /dev/null +++ b/arch/csky/kernel/signal.c @@ -0,0 +1,347 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/sched.h> +#include <linux/mm.h> +#include <linux/kernel.h> +#include <linux/signal.h> +#include <linux/syscalls.h> +#include <linux/errno.h> +#include <linux/wait.h> +#include <linux/ptrace.h> +#include <linux/unistd.h> +#include <linux/stddef.h> +#include <linux/highuid.h> +#include <linux/personality.h> +#include <linux/tty.h> +#include <linux/binfmts.h> +#include <linux/tracehook.h> +#include <linux/freezer.h> +#include <linux/uaccess.h> + +#include <asm/setup.h> +#include <asm/pgtable.h> +#include <asm/traps.h> +#include <asm/ucontext.h> +#include <asm/vdso.h> + +#include <abi/regdef.h> + +#ifdef CONFIG_CPU_HAS_FPU +#include <abi/fpu.h> + +static int restore_fpu_state(struct sigcontext *sc) +{ + int err = 0; + struct user_fp user_fp; + + err = copy_from_user(&user_fp, &sc->sc_user_fp, sizeof(user_fp)); + + restore_from_user_fp(&user_fp); + + return err; +} + +static int save_fpu_state(struct sigcontext *sc) +{ + struct user_fp user_fp; + + save_to_user_fp(&user_fp); + + return copy_to_user(&sc->sc_user_fp, &user_fp, sizeof(user_fp)); +} +#else +static inline int restore_fpu_state(struct sigcontext *sc) { return 0; } +static inline int save_fpu_state(struct sigcontext *sc) { return 0; } +#endif + +struct rt_sigframe { + int sig; + struct siginfo *pinfo; + void *puc; + struct siginfo info; + struct ucontext uc; +}; + +static int +restore_sigframe(struct pt_regs *regs, + struct sigcontext *sc, int *pr2) +{ + int err = 0; + + /* Always make any pending restarted system calls return -EINTR */ + current_thread_info()->task->restart_block.fn = do_no_restart_syscall; + + err |= copy_from_user(regs, &sc->sc_pt_regs, sizeof(struct pt_regs)); + + err |= restore_fpu_state(sc); + + *pr2 = regs->a0; + return err; +} + +asmlinkage int +do_rt_sigreturn(void) +{ + sigset_t set; + int a0; + struct pt_regs *regs = current_pt_regs(); + struct rt_sigframe *frame = (struct rt_sigframe *)(regs->usp); + + if (verify_area(VERIFY_READ, frame, sizeof(*frame))) + goto badframe; + if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) + goto badframe; + + sigdelsetmask(&set, (sigmask(SIGKILL) | sigmask(SIGSTOP))); + spin_lock_irq(&current->sighand->siglock); + current->blocked = set; + recalc_sigpending(); + spin_unlock_irq(&current->sighand->siglock); + + if (restore_sigframe(regs, &frame->uc.uc_mcontext, &a0)) + goto badframe; + + return a0; + 
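+ /* + * For reference, the user-stack frame consumed above, as laid out by + * setup_rt_frame() (a sketch following struct rt_sigframe; offsets are + * illustrative, not ABI-exact): + * + * regs->usp -> sig signal number + * pinfo -> &info + * puc -> &uc + * info siginfo_t + * uc.uc_mcontext saved pt_regs + FPU state + * uc.uc_sigmask blocked mask restored above + */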
+badframe: + force_sig(SIGSEGV, current); + return 0; +} + +static int setup_sigframe(struct sigcontext *sc, struct pt_regs *regs) +{ + int err = 0; + + err |= copy_to_user(&sc->sc_pt_regs, regs, sizeof(struct pt_regs)); + err |= save_fpu_state(sc); + + return err; +} + +static inline void * +get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) +{ + unsigned long usp; + + /* Default to using normal stack. */ + usp = regs->usp; + + /* This is the X/Open sanctioned signal stack switching. */ + if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(usp)) { + if (!on_sig_stack(usp)) + usp = current->sas_ss_sp + current->sas_ss_size; + } + return (void *)((usp - frame_size) & -8UL); +} + +static int +setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) +{ + struct rt_sigframe *frame; + int err = 0; + + struct csky_vdso *vdso = current->mm->context.vdso; + + frame = get_sigframe(&ksig->ka, regs, sizeof(*frame)); + if (!frame) + return 1; + + err |= __put_user(ksig->sig, &frame->sig); + err |= __put_user(&frame->info, &frame->pinfo); + err |= __put_user(&frame->uc, &frame->puc); + err |= copy_siginfo_to_user(&frame->info, &ksig->info); + + /* Create the ucontext. */ + err |= __put_user(0, &frame->uc.uc_flags); + err |= __put_user(0, &frame->uc.uc_link); + err |= __put_user((void *)current->sas_ss_sp, + &frame->uc.uc_stack.ss_sp); + err |= __put_user(sas_ss_flags(regs->usp), + &frame->uc.uc_stack.ss_flags); + err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); + err |= setup_sigframe(&frame->uc.uc_mcontext, regs); + err |= copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); + + if (err) + goto give_sigsegv; + + /* Set up registers for signal handler */ + regs->usp = (unsigned long)frame; + regs->pc = (unsigned long)ksig->ka.sa.sa_handler; + regs->lr = (unsigned long)vdso->rt_signal_retcode; + +adjust_stack: + regs->a0 = ksig->sig; /* first arg is signo */ + regs->a1 = (unsigned long)(&(frame->info)); + regs->a2 = (unsigned long)(&(frame->uc)); + return err; + +give_sigsegv: + if (ksig->sig == SIGSEGV) + ksig->ka.sa.sa_handler = SIG_DFL; + force_sig(SIGSEGV, current); + goto adjust_stack; +} + +/* + * OK, we're invoking a handler + */ +static int +handle_signal(struct ksignal *ksig, struct pt_regs *regs) +{ + int ret; + sigset_t *oldset = sigmask_to_save(); + + /* + * set up the stack frame, regardless of SA_SIGINFO, + * and pass info anyway. + */ + ret = setup_rt_frame(ksig, oldset, regs); + + if (ret != 0) { + force_sigsegv(ksig->sig, current); + return ret; + } + + /* Block the signal if we were successful. */ + spin_lock_irq(&current->sighand->siglock); + sigorsets(&current->blocked, &current->blocked, &ksig->ka.sa.sa_mask); + if (!(ksig->ka.sa.sa_flags & SA_NODEFER)) + sigaddset(&current->blocked, ksig->sig); + recalc_sigpending(); + spin_unlock_irq(&current->sighand->siglock); + + return 0; +} + +/* + * Note that 'init' is a special process: it doesn't get signals it doesn't + * want to handle. Thus you cannot kill init even with a SIGKILL even by + * mistake. + * + * Note that we go through the signals twice: once to check the signals + * that the kernel can handle, and then we build all the user-level signal + * handling stack-frames in one go after that. + */ +static void do_signal(struct pt_regs *regs, int syscall) +{ + unsigned int retval = 0, continue_addr = 0, restart_addr = 0; + struct ksignal ksig; + + /* + * We want the common case to go fast, which + * is why we may in certain cases get here from + * kernel mode. 
Just return without doing anything + * if so. + */ + if (!user_mode(regs)) + return; + + current->thread.esp0 = (unsigned long)regs; + + /* + * If we were from a system call, check for system call restarting... + */ + if (syscall) { + continue_addr = regs->pc; +#if defined(__CSKYABIV2__) + restart_addr = continue_addr - 4; +#else + restart_addr = continue_addr - 2; +#endif + retval = regs->a0; + + /* + * Prepare for system call restart. We do this here so that a + * debugger will see the already-changed registers. + */ + switch (retval) { + case -ERESTARTNOHAND: + case -ERESTARTSYS: + case -ERESTARTNOINTR: + regs->a0 = regs->orig_a0; + regs->pc = restart_addr; + break; + case -ERESTART_RESTARTBLOCK: + regs->a0 = -EINTR; + break; + } + } + + if (try_to_freeze()) + goto no_signal; + + /* + * Get the signal to deliver. When running under ptrace, at this + * point the debugger may change all our registers ... + */ + if (get_signal(&ksig)) { + /* + * Depending on the signal settings we may need to revert the + * decision to restart the system call. But skip this if a + * debugger has chosen to restart at a different PC. + */ + if (regs->pc == restart_addr) { + if (retval == -ERESTARTNOHAND || + (retval == -ERESTARTSYS && + !(ksig.ka.sa.sa_flags & SA_RESTART))) { + regs->a0 = -EINTR; + regs->pc = continue_addr; + } + } + + /* Whee! Actually deliver the signal. */ + if (handle_signal(&ksig, regs) == 0) { + /* + * A signal was successfully delivered; the saved + * sigmask will have been stored in the signal frame, + * and will be restored by sigreturn, so we can simply + * clear the TIF_RESTORE_SIGMASK flag. + */ + if (test_thread_flag(TIF_RESTORE_SIGMASK)) + clear_thread_flag(TIF_RESTORE_SIGMASK); + } + return; + } + +no_signal: + if (syscall) { + /* + * Handle restarting a different system call. As above, + * if a debugger has chosen to restart at a different PC, + * ignore the restart. + */ + if (retval == -ERESTART_RESTARTBLOCK + && regs->pc == continue_addr) { +#if defined(__CSKYABIV2__) + regs->regs[3] = __NR_restart_syscall; + regs->pc -= 4; +#else + regs->regs[9] = __NR_restart_syscall; + regs->pc -= 2; +#endif + } + + /* + * If there's no signal to deliver, we just put the saved + * sigmask back. 
+ */ + if (test_thread_flag(TIF_RESTORE_SIGMASK)) { + clear_thread_flag(TIF_RESTORE_SIGMASK); + sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); + } + } +} + +asmlinkage void +do_notify_resume(unsigned int thread_flags, struct pt_regs *regs, int syscall) +{ + if (thread_flags & _TIF_SIGPENDING) + do_signal(regs, syscall); + + if (thread_flags & _TIF_NOTIFY_RESUME) { + clear_thread_flag(TIF_NOTIFY_RESUME); + tracehook_notify_resume(regs); + } +} diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c new file mode 100644 index 000000000000..36ebaf9834e1 --- /dev/null +++ b/arch/csky/kernel/smp.c @@ -0,0 +1,237 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/sched.h> +#include <linux/kernel_stat.h> +#include <linux/notifier.h> +#include <linux/cpu.h> +#include <linux/percpu.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/of.h> +#include <linux/sched/task_stack.h> +#include <linux/sched/mm.h> +#include <asm/irq.h> +#include <asm/traps.h> +#include <asm/sections.h> +#include <asm/mmu_context.h> +#include <asm/pgalloc.h> + +struct ipi_data_struct { + unsigned long bits ____cacheline_aligned; +}; +static DEFINE_PER_CPU(struct ipi_data_struct, ipi_data); + +enum ipi_message_type { + IPI_EMPTY, + IPI_RESCHEDULE, + IPI_CALL_FUNC, + IPI_MAX +}; + +static irqreturn_t handle_ipi(int irq, void *dev) +{ + while (true) { + unsigned long ops; + + ops = xchg(&this_cpu_ptr(&ipi_data)->bits, 0); + if (ops == 0) + return IRQ_HANDLED; + + if (ops & (1 << IPI_RESCHEDULE)) + scheduler_ipi(); + + if (ops & (1 << IPI_CALL_FUNC)) + generic_smp_call_function_interrupt(); + + BUG_ON((ops >> IPI_MAX) != 0); + } + + return IRQ_HANDLED; +} + +static void (*send_arch_ipi)(const struct cpumask *mask); + +static int ipi_irq; +void __init set_send_ipi(void (*func)(const struct cpumask *mask), int irq) +{ + if (send_arch_ipi) + return; + + send_arch_ipi = func; + ipi_irq = irq; +} + +static void +send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation) +{ + int i; + + for_each_cpu(i, to_whom) + set_bit(operation, &per_cpu_ptr(&ipi_data, i)->bits); + + smp_mb(); + send_arch_ipi(to_whom); +} + +void arch_send_call_function_ipi_mask(struct cpumask *mask) +{ + send_ipi_message(mask, IPI_CALL_FUNC); +} + +void arch_send_call_function_single_ipi(int cpu) +{ + send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC); +} + +static void ipi_stop(void *unused) +{ + while (1); +} + +void smp_send_stop(void) +{ + on_each_cpu(ipi_stop, NULL, 1); +} + +void smp_send_reschedule(int cpu) +{ + send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE); +} + +void __init smp_prepare_boot_cpu(void) +{ +} + +void __init smp_prepare_cpus(unsigned int max_cpus) +{ +} + +static void __init enable_smp_ipi(void) +{ + enable_percpu_irq(ipi_irq, 0); +} + +static int ipi_dummy_dev; +void __init setup_smp_ipi(void) +{ + int rc; + + if (ipi_irq == 0) + panic("%s IRQ mapping failed\n", __func__); + + rc = request_percpu_irq(ipi_irq, handle_ipi, "IPI Interrupt", + &ipi_dummy_dev); + if (rc) + panic("%s IRQ request failed\n", __func__); + + enable_smp_ipi(); +} + +void __init setup_smp(void) +{ + struct device_node *node = NULL; + int cpu; + + while ((node = of_find_node_by_type(node, "cpu"))) { + if (!of_device_is_available(node)) + continue; + + if (of_property_read_u32(node, "reg", &cpu)) + continue; + + if (cpu >= NR_CPUS) + continue; + + 
set_cpu_possible(cpu, true); + set_cpu_present(cpu, true); + } +} + +extern void _start_smp_secondary(void); + +volatile unsigned int secondary_hint; +volatile unsigned int secondary_ccr; +volatile unsigned int secondary_stack; + +int __cpu_up(unsigned int cpu, struct task_struct *tidle) +{ + unsigned int tmp; + + secondary_stack = (unsigned int)tidle->stack + THREAD_SIZE; + + secondary_hint = mfcr("cr31"); + + secondary_ccr = mfcr("cr18"); + + /* + * The other CPUs are still held in reset, so flush the data out of + * the cache here; the secondary CPUs will consume it in + * csky_start_secondary() before their own caches are enabled. + */ + mtcr("cr17", 0x22); + + /* Enable the cpu in the SMP reset ctrl reg */ + tmp = mfcr("cr<29, 0>"); + tmp |= 1 << cpu; + mtcr("cr<29, 0>", tmp); + + /* Wait for the cpu to come online */ + while (!cpu_online(cpu)); + + secondary_stack = 0; + + return 0; +} + +void __init smp_cpus_done(unsigned int max_cpus) +{ +} + +int setup_profiling_timer(unsigned int multiplier) +{ + return -EINVAL; +} + +void csky_start_secondary(void) +{ + struct mm_struct *mm = &init_mm; + unsigned int cpu = smp_processor_id(); + + mtcr("cr31", secondary_hint); + mtcr("cr18", secondary_ccr); + + mtcr("vbr", vec_base); + + flush_tlb_all(); + write_mmu_pagemask(0); + TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir); + TLBMISS_HANDLER_SETUP_PGD_KERNEL(swapper_pg_dir); + + asid_cache(smp_processor_id()) = ASID_FIRST_VERSION; + +#ifdef CONFIG_CPU_HAS_FPU + init_fpu(); +#endif + + enable_smp_ipi(); + + mmget(mm); + mmgrab(mm); + current->active_mm = mm; + cpumask_set_cpu(cpu, mm_cpumask(mm)); + + notify_cpu_starting(cpu); + set_cpu_online(cpu, true); + + pr_info("CPU%u Online: %s...\n", cpu, __func__); + + local_irq_enable(); + preempt_disable(); + cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); +} diff --git a/arch/csky/kernel/syscall.c b/arch/csky/kernel/syscall.c new file mode 100644 index 000000000000..3d30e58a45d2 --- /dev/null +++ b/arch/csky/kernel/syscall.c @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/syscalls.h> + +SYSCALL_DEFINE1(set_thread_area, unsigned long, addr) +{ + struct thread_info *ti = task_thread_info(current); + struct pt_regs *reg = current_pt_regs(); + + reg->tls = addr; + ti->tp_value = addr; + + return 0; +} + +SYSCALL_DEFINE6(mmap2, + unsigned long, addr, + unsigned long, len, + unsigned long, prot, + unsigned long, flags, + unsigned long, fd, + off_t, offset) +{ + if (unlikely(offset & (~PAGE_MASK >> 12))) + return -EINVAL; + + return ksys_mmap_pgoff(addr, len, prot, flags, fd, + offset >> (PAGE_SHIFT - 12)); +} + +/* + * For abiv1, 64-bit arguments must be passed in even/odd register pairs, + * so move the advice argument forward, ahead of offset and len. + */ +SYSCALL_DEFINE4(csky_fadvise64_64, + int, fd, + int, advice, + loff_t, offset, + loff_t, len) +{ + return ksys_fadvise64_64(fd, offset, len, advice); +} diff --git a/arch/csky/kernel/syscall_table.c b/arch/csky/kernel/syscall_table.c new file mode 100644 index 000000000000..a0c238c5377a --- /dev/null +++ b/arch/csky/kernel/syscall_table.c @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/syscalls.h> +#include <asm/syscalls.h> + +#undef __SYSCALL +#define __SYSCALL(nr, call)[nr] = (call), + +#define sys_fadvise64_64 sys_csky_fadvise64_64 +void * const sys_call_table[__NR_syscalls] __page_aligned_data = { + [0 ... 
__NR_syscalls - 1] = sys_ni_syscall, +#include <asm/unistd.h> +}; diff --git a/arch/csky/kernel/time.c b/arch/csky/kernel/time.c new file mode 100644 index 000000000000..b5fc9447d93f --- /dev/null +++ b/arch/csky/kernel/time.c @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/clk-provider.h> +#include <linux/clocksource.h> + +void __init time_init(void) +{ + of_clk_init(NULL); + timer_probe(); +} diff --git a/arch/csky/kernel/traps.c b/arch/csky/kernel/traps.c new file mode 100644 index 000000000000..a8368ed43517 --- /dev/null +++ b/arch/csky/kernel/traps.c @@ -0,0 +1,169 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/sched.h> +#include <linux/signal.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/user.h> +#include <linux/string.h> +#include <linux/linkage.h> +#include <linux/init.h> +#include <linux/ptrace.h> +#include <linux/kallsyms.h> +#include <linux/rtc.h> +#include <linux/uaccess.h> + +#include <asm/setup.h> +#include <asm/traps.h> +#include <asm/pgalloc.h> +#include <asm/siginfo.h> + +#include <asm/mmu_context.h> + +#ifdef CONFIG_CPU_HAS_FPU +#include <abi/fpu.h> +#endif + +/* Defined in entry.S */ +asmlinkage void csky_trap(void); + +asmlinkage void csky_systemcall(void); +asmlinkage void csky_cmpxchg(void); +asmlinkage void csky_get_tls(void); +asmlinkage void csky_irq(void); + +asmlinkage void csky_tlbinvalidl(void); +asmlinkage void csky_tlbinvalids(void); +asmlinkage void csky_tlbmodified(void); + +/* Defined in head.S */ +asmlinkage void _start_smp_secondary(void); + +void __init pre_trap_init(void) +{ + int i; + + mtcr("vbr", vec_base); + + for (i = 1; i < 128; i++) + VEC_INIT(i, csky_trap); +} + +void __init trap_init(void) +{ + VEC_INIT(VEC_AUTOVEC, csky_irq); + + /* setup trap0 trap2 trap3 */ + VEC_INIT(VEC_TRAP0, csky_systemcall); + VEC_INIT(VEC_TRAP2, csky_cmpxchg); + VEC_INIT(VEC_TRAP3, csky_get_tls); + + /* setup MMU TLB exception */ + VEC_INIT(VEC_TLBINVALIDL, csky_tlbinvalidl); + VEC_INIT(VEC_TLBINVALIDS, csky_tlbinvalids); + VEC_INIT(VEC_TLBMODIFIED, csky_tlbmodified); + +#ifdef CONFIG_CPU_HAS_FPU + init_fpu(); +#endif + +#ifdef CONFIG_SMP + mtcr("cr<28, 0>", virt_to_phys(vec_base)); + + VEC_INIT(VEC_RESET, (void *)virt_to_phys(_start_smp_secondary)); +#endif +} + +void die_if_kernel(char *str, struct pt_regs *regs, int nr) +{ + if (user_mode(regs)) + return; + + console_verbose(); + pr_err("%s: %08x\n", str, nr); + show_regs(regs); + add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); + do_exit(SIGSEGV); +} + +void buserr(struct pt_regs *regs) +{ +#ifdef CONFIG_CPU_CK810 + static unsigned long prev_pc; + + if ((regs->pc == prev_pc) && prev_pc != 0) { + prev_pc = 0; + } else { + prev_pc = regs->pc; + return; + } +#endif + + die_if_kernel("Kernel mode BUS error", regs, 0); + + pr_err("User mode Bus Error\n"); + show_regs(regs); + + current->thread.esp0 = (unsigned long) regs; + force_sig_fault(SIGSEGV, 0, (void __user *)regs->pc, current); +} + +#define USR_BKPT 0x1464 +asmlinkage void trap_c(struct pt_regs *regs) +{ + int sig; + unsigned long vector; + siginfo_t info; + + vector = (mfcr("psr") >> 16) & 0xff; + + switch (vector) { + case VEC_ZERODIV: + sig = SIGFPE; + break; + /* ptrace */ + case VEC_TRACE: + info.si_code = TRAP_TRACE; + sig = SIGTRAP; + break; + case VEC_ILLEGAL: +#ifndef CONFIG_CPU_NO_USER_BKPT + if (*(uint16_t *)instruction_pointer(regs) != USR_BKPT) 
+#endif + { + sig = SIGILL; + break; + } + /* gdbserver breakpoint */ + case VEC_TRAP1: + /* jtagserver breakpoint */ + case VEC_BREAKPOINT: + info.si_code = TRAP_BRKPT; + sig = SIGTRAP; + break; + case VEC_ACCESS: + return buserr(regs); +#ifdef CONFIG_CPU_NEED_SOFTALIGN + case VEC_ALIGN: + return csky_alignment(regs); +#endif +#ifdef CONFIG_CPU_HAS_FPU + case VEC_FPE: + return fpu_fpe(regs); + case VEC_PRIV: + if (fpu_libc_helper(regs)) + return; +#endif + default: + sig = SIGSEGV; + break; + } + send_sig(sig, current, 0); +} + +asmlinkage void set_esp0(unsigned long ssp) +{ + current->thread.esp0 = ssp; +} diff --git a/arch/csky/kernel/vdso.c b/arch/csky/kernel/vdso.c new file mode 100644 index 000000000000..60ff7adfad1d --- /dev/null +++ b/arch/csky/kernel/vdso.c @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/kernel.h> +#include <linux/err.h> +#include <linux/sched.h> +#include <linux/mm.h> +#include <linux/init.h> +#include <linux/binfmts.h> +#include <linux/elf.h> +#include <linux/vmalloc.h> +#include <linux/unistd.h> +#include <linux/uaccess.h> + +#include <asm/vdso.h> +#include <asm/cacheflush.h> + +static struct page *vdso_page; + +static int __init init_vdso(void) +{ + struct csky_vdso *vdso; + int err = 0; + + vdso_page = alloc_page(GFP_KERNEL); + if (!vdso_page) + panic("Cannot allocate vdso"); + + vdso = vmap(&vdso_page, 1, 0, PAGE_KERNEL); + if (!vdso) + panic("Cannot map vdso"); + + clear_page(vdso); + + err = setup_vdso_page(vdso->rt_signal_retcode); + if (err) + panic("Cannot set signal return code, err: %x.", err); + + dcache_wb_range((unsigned long)vdso, (unsigned long)vdso + 16); + + vunmap(vdso); + + return 0; +} +subsys_initcall(init_vdso); + +int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) +{ + int ret; + unsigned long addr; + struct mm_struct *mm = current->mm; + + down_write(&mm->mmap_sem); + + addr = get_unmapped_area(NULL, STACK_TOP, PAGE_SIZE, 0, 0); + if (IS_ERR_VALUE(addr)) { + ret = addr; + goto up_fail; + } + + ret = install_special_mapping( + mm, + addr, + PAGE_SIZE, + VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, + &vdso_page); + if (ret) + goto up_fail; + + mm->context.vdso = (void *)addr; + +up_fail: + up_write(&mm->mmap_sem); + return ret; +} + +const char *arch_vma_name(struct vm_area_struct *vma) +{ + if (vma->vm_mm == NULL) + return NULL; + + if (vma->vm_start == (long)vma->vm_mm->context.vdso) + return "[vdso]"; + else + return NULL; +} diff --git a/arch/csky/kernel/vmlinux.lds.S b/arch/csky/kernel/vmlinux.lds.S new file mode 100644 index 000000000000..ae7961b973f2 --- /dev/null +++ b/arch/csky/kernel/vmlinux.lds.S @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include <asm/vmlinux.lds.h> +#include <asm/page.h> + +OUTPUT_ARCH(csky) +ENTRY(_start) + +#ifndef __cskyBE__ +jiffies = jiffies_64; +#else +jiffies = jiffies_64 + 4; +#endif + +#define VBR_BASE \ + . = ALIGN(1024); \ + vec_base = .; \ + . += 512; + +SECTIONS +{ + . = PAGE_OFFSET + PHYS_OFFSET_OFFSET; + + _stext = .; + __init_begin = .; + HEAD_TEXT_SECTION + INIT_TEXT_SECTION(PAGE_SIZE) + INIT_DATA_SECTION(PAGE_SIZE) + PERCPU_SECTION(L1_CACHE_BYTES) + . 
= ALIGN(PAGE_SIZE); + __init_end = .; + + .text : AT(ADDR(.text) - LOAD_OFFSET) { + _text = .; + IRQENTRY_TEXT + SOFTIRQENTRY_TEXT + TEXT_TEXT + SCHED_TEXT + CPUIDLE_TEXT + LOCK_TEXT + KPROBES_TEXT + *(.fixup) + *(.gnu.warning) + } = 0 + _etext = .; + + /* __init_begin __init_end must be page aligned for free_initmem */ + . = ALIGN(PAGE_SIZE); + + + _sdata = .; + RO_DATA_SECTION(PAGE_SIZE) + RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) + _edata = .; + + NOTES + EXCEPTION_TABLE(L1_CACHE_BYTES) + BSS_SECTION(L1_CACHE_BYTES, PAGE_SIZE, L1_CACHE_BYTES) + VBR_BASE + _end = . ; + + STABS_DEBUG + DWARF_DEBUG + + DISCARDS +} diff --git a/arch/csky/lib/Makefile b/arch/csky/lib/Makefile new file mode 100644 index 000000000000..d1f368c59ef6 --- /dev/null +++ b/arch/csky/lib/Makefile @@ -0,0 +1 @@ +lib-y := usercopy.o delay.o diff --git a/arch/csky/lib/delay.c b/arch/csky/lib/delay.c new file mode 100644 index 000000000000..22570b0790d6 --- /dev/null +++ b/arch/csky/lib/delay.c @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/delay.h> + +void __delay(unsigned long loops) +{ + asm volatile ( + "mov r0, r0\n" + "1:declt %0\n" + "bf 1b" + : "=r"(loops) + : "0"(loops)); +} +EXPORT_SYMBOL(__delay); + +void __const_udelay(unsigned long xloops) +{ + unsigned long long loops; + + loops = (unsigned long long)xloops * loops_per_jiffy * HZ; + + __delay(loops >> 32); +} +EXPORT_SYMBOL(__const_udelay); + +void __udelay(unsigned long usecs) +{ + __const_udelay(usecs * 0x10C7UL); /* 2**32 / 1000000 (rounded up) */ +} +EXPORT_SYMBOL(__udelay); + +void __ndelay(unsigned long nsecs) +{ + __const_udelay(nsecs * 0x5UL); /* 2**32 / 1000000000 (rounded up) */ +} +EXPORT_SYMBOL(__ndelay); diff --git a/arch/csky/lib/usercopy.c b/arch/csky/lib/usercopy.c new file mode 100644 index 000000000000..ac9170e2cbb8 --- /dev/null +++ b/arch/csky/lib/usercopy.c @@ -0,0 +1,262 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/uaccess.h> +#include <linux/types.h> + +unsigned long raw_copy_from_user(void *to, const void *from, + unsigned long n) +{ + if (access_ok(VERIFY_READ, from, n)) + __copy_user_zeroing(to, from, n); + else + memset(to, 0, n); + return n; +} +EXPORT_SYMBOL(raw_copy_from_user); + +unsigned long raw_copy_to_user(void *to, const void *from, + unsigned long n) +{ + if (access_ok(VERIFY_WRITE, to, n)) + __copy_user(to, from, n); + return n; +} +EXPORT_SYMBOL(raw_copy_to_user); + + +/* + * copy a null terminated string from userspace. + */ +#define __do_strncpy_from_user(dst, src, count, res) \ +do { \ + int tmp; \ + long faultres; \ + asm volatile( \ + " cmpnei %3, 0 \n" \ + " bf 4f \n" \ + "1: cmpnei %1, 0 \n" \ + " bf 5f \n" \ + "2: ldb %4, (%3, 0) \n" \ + " stb %4, (%2, 0) \n" \ + " cmpnei %4, 0 \n" \ + " bf 3f \n" \ + " addi %3, 1 \n" \ + " addi %2, 1 \n" \ + " subi %1, 1 \n" \ + " br 1b \n" \ + "3: subu %0, %1 \n" \ + " br 5f \n" \ + "4: mov %0, %5 \n" \ + " br 5f \n" \ + ".section __ex_table, \"a\" \n" \ + ".align 2 \n" \ + ".long 2b, 4b \n" \ + ".previous \n" \ + "5: \n" \ + : "=r"(res), "=r"(count), "=r"(dst), \ + "=r"(src), "=r"(tmp), "=r"(faultres) \ + : "5"(-EFAULT), "0"(count), "1"(count), \ + "2"(dst), "3"(src) \ + : "memory", "cc"); \ +} while (0) + +/* + * __strncpy_from_user: - Copy a NUL terminated string from userspace, + * with less checking. 
+ * @dst: Destination address, in kernel space. This buffer must be at + * least @count bytes long. + * @src: Source address, in user space. + * @count: Maximum number of bytes to copy, including the trailing NUL. + * + * Copies a NUL-terminated string from userspace to kernel space. + * Caller must check the specified block with access_ok() before calling + * this function. + * + * On success, returns the length of the string (not including the trailing + * NUL). + * + * If access to userspace fails, returns -EFAULT (some data may have been + * copied). + * + * If @count is smaller than the length of the string, copies @count bytes + * and returns @count. + */ +long __strncpy_from_user(char *dst, const char *src, long count) +{ + long res; + + __do_strncpy_from_user(dst, src, count, res); + return res; +} +EXPORT_SYMBOL(__strncpy_from_user); + +/* + * strncpy_from_user: - Copy a NUL terminated string from userspace. + * @dst: Destination address, in kernel space. This buffer must be at + * least @count bytes long. + * @src: Source address, in user space. + * @count: Maximum number of bytes to copy, including the trailing NUL. + * + * Copies a NUL-terminated string from userspace to kernel space. + * + * On success, returns the length of the string (not including the trailing + * NUL). + * + * If access to userspace fails, returns -EFAULT (some data may have been + * copied). + * + * If @count is smaller than the length of the string, copies @count bytes + * and returns @count. + */ +long strncpy_from_user(char *dst, const char *src, long count) +{ + long res = -EFAULT; + + if (access_ok(VERIFY_READ, src, 1)) + __do_strncpy_from_user(dst, src, count, res); + return res; +} +EXPORT_SYMBOL(strncpy_from_user); + +/* + * strnlen_user: - Get the size of a string in user space. + * @s: The string to measure. + * @n: The maximum valid length. + * + * Get the size of a NUL-terminated string in user space. + * + * Returns the size of the string INCLUDING the terminating NUL. + * On exception, returns 0. + * If the string is too long, returns a value greater than @n. 
+ */ +long strnlen_user(const char *s, long n) +{ + unsigned long res, tmp; + + if (s == NULL) + return 0; + + asm volatile( + " cmpnei %1, 0 \n" + " bf 3f \n" + "1: cmpnei %0, 0 \n" + " bf 3f \n" + "2: ldb %3, (%1, 0) \n" + " cmpnei %3, 0 \n" + " bf 3f \n" + " subi %0, 1 \n" + " addi %1, 1 \n" + " br 1b \n" + "3: subu %2, %0 \n" + " addi %2, 1 \n" + " br 5f \n" + "4: movi %0, 0 \n" + " br 5f \n" + ".section __ex_table, \"a\" \n" + ".align 2 \n" + ".long 2b, 4b \n" + ".previous \n" + "5: \n" + : "=r"(n), "=r"(s), "=r"(res), "=r"(tmp) + : "0"(n), "1"(s), "2"(n) + : "memory", "cc"); + + return res; +} +EXPORT_SYMBOL(strnlen_user); + +#define __do_clear_user(addr, size) \ +do { \ + int __d0, zvalue, tmp; \ + \ + asm volatile( \ + "0: cmpnei %1, 0 \n" \ + " bf 7f \n" \ + " mov %3, %1 \n" \ + " andi %3, 3 \n" \ + " cmpnei %3, 0 \n" \ + " bf 1f \n" \ + " br 5f \n" \ + "1: cmplti %0, 32 \n" /* 4W */ \ + " bt 3f \n" \ + "8: stw %2, (%1, 0) \n" \ + "10: stw %2, (%1, 4) \n" \ + "11: stw %2, (%1, 8) \n" \ + "12: stw %2, (%1, 12) \n" \ + "13: stw %2, (%1, 16) \n" \ + "14: stw %2, (%1, 20) \n" \ + "15: stw %2, (%1, 24) \n" \ + "16: stw %2, (%1, 28) \n" \ + " addi %1, 32 \n" \ + " subi %0, 32 \n" \ + " br 1b \n" \ + "3: cmplti %0, 4 \n" /* 1W */ \ + " bt 5f \n" \ + "4: stw %2, (%1, 0) \n" \ + " addi %1, 4 \n" \ + " subi %0, 4 \n" \ + " br 3b \n" \ + "5: cmpnei %0, 0 \n" /* 1B */ \ + "9: bf 7f \n" \ + "6: stb %2, (%1, 0) \n" \ + " addi %1, 1 \n" \ + " subi %0, 1 \n" \ + " br 5b \n" \ + ".section __ex_table,\"a\" \n" \ + ".align 2 \n" \ + ".long 8b, 9b \n" \ + ".long 10b, 9b \n" \ + ".long 11b, 9b \n" \ + ".long 12b, 9b \n" \ + ".long 13b, 9b \n" \ + ".long 14b, 9b \n" \ + ".long 15b, 9b \n" \ + ".long 16b, 9b \n" \ + ".long 4b, 9b \n" \ + ".long 6b, 9b \n" \ + ".previous \n" \ + "7: \n" \ + : "=r"(size), "=r" (__d0), \ + "=r"(zvalue), "=r"(tmp) \ + : "0"(size), "1"(addr), "2"(0) \ + : "memory", "cc"); \ +} while (0) + +/* + * clear_user: - Zero a block of memory in user space. + * @to: Destination address, in user space. + * @n: Number of bytes to zero. + * + * Zero a block of memory in user space. + * + * Returns number of bytes that could not be cleared. + * On success, this will be zero. + */ +unsigned long +clear_user(void __user *to, unsigned long n) +{ + if (access_ok(VERIFY_WRITE, to, n)) + __do_clear_user(to, n); + return n; +} +EXPORT_SYMBOL(clear_user); + +/* + * __clear_user: - Zero a block of memory in user space, with less checking. + * @to: Destination address, in user space. + * @n: Number of bytes to zero. + * + * Zero a block of memory in user space. Caller must check + * the specified block with access_ok() before calling this function. + * + * Returns number of bytes that could not be cleared. + * On success, this will be zero. 
+ */ +unsigned long +__clear_user(void __user *to, unsigned long n) +{ + __do_clear_user(to, n); + return n; +} +EXPORT_SYMBOL(__clear_user); diff --git a/arch/csky/mm/Makefile b/arch/csky/mm/Makefile new file mode 100644 index 000000000000..c870eb36efbc --- /dev/null +++ b/arch/csky/mm/Makefile @@ -0,0 +1,13 @@ +ifeq ($(CONFIG_CPU_HAS_CACHEV2),y) +obj-y += cachev2.o +else +obj-y += cachev1.o +endif + +obj-y += dma-mapping.o +obj-y += fault.o +obj-$(CONFIG_HIGHMEM) += highmem.o +obj-y += init.o +obj-y += ioremap.o +obj-y += syscache.o +obj-y += tlb.o diff --git a/arch/csky/mm/cachev1.c b/arch/csky/mm/cachev1.c new file mode 100644 index 000000000000..b8a75cce0b8c --- /dev/null +++ b/arch/csky/mm/cachev1.c @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/spinlock.h> +#include <asm/cache.h> +#include <abi/reg_ops.h> + +/* for L1-cache */ +#define INS_CACHE (1 << 0) +#define DATA_CACHE (1 << 1) +#define CACHE_INV (1 << 4) +#define CACHE_CLR (1 << 5) +#define CACHE_OMS (1 << 6) +#define CACHE_ITS (1 << 7) +#define CACHE_LICF (1 << 31) + +/* for L2-cache */ +#define CR22_LEVEL_SHIFT (1) +#define CR22_SET_SHIFT (7) +#define CR22_WAY_SHIFT (30) +#define CR22_WAY_SHIFT_L2 (29) + +static DEFINE_SPINLOCK(cache_lock); + +static inline void cache_op_line(unsigned long i, unsigned int val) +{ + mtcr("cr22", i); + mtcr("cr17", val); +} + +#define CCR2_L2E (1 << 3) +static void cache_op_all(unsigned int value, unsigned int l2) +{ + mtcr("cr17", value | CACHE_CLR); + mb(); + + if (l2 && (mfcr_ccr2() & CCR2_L2E)) { + mtcr("cr24", value | CACHE_CLR); + mb(); + } +} + +static void cache_op_range( + unsigned int start, + unsigned int end, + unsigned int value, + unsigned int l2) +{ + unsigned long i, flags; + unsigned int val = value | CACHE_CLR | CACHE_OMS; + bool l2_sync; + + if (unlikely((end - start) >= PAGE_SIZE) || + unlikely(start < PAGE_OFFSET) || + unlikely(start >= PAGE_OFFSET + LOWMEM_LIMIT)) { + cache_op_all(value, l2); + return; + } + + if ((mfcr_ccr2() & CCR2_L2E) && l2) + l2_sync = 1; + else + l2_sync = 0; + + spin_lock_irqsave(&cache_lock, flags); + + i = start & ~(L1_CACHE_BYTES - 1); + for (; i < end; i += L1_CACHE_BYTES) { + cache_op_line(i, val); + if (l2_sync) { + mb(); + mtcr("cr24", val); + } + } + spin_unlock_irqrestore(&cache_lock, flags); + + mb(); +} + +void dcache_wb_line(unsigned long start) +{ + asm volatile("idly4\n":::"memory"); + cache_op_line(start, DATA_CACHE|CACHE_CLR); + mb(); +} + +void icache_inv_range(unsigned long start, unsigned long end) +{ + cache_op_range(start, end, INS_CACHE|CACHE_INV, 0); +} + +void icache_inv_all(void) +{ + cache_op_all(INS_CACHE|CACHE_INV, 0); +} + +void dcache_wb_range(unsigned long start, unsigned long end) +{ + cache_op_range(start, end, DATA_CACHE|CACHE_CLR, 0); +} + +void dcache_wbinv_all(void) +{ + cache_op_all(DATA_CACHE|CACHE_CLR|CACHE_INV, 0); +} + +void cache_wbinv_range(unsigned long start, unsigned long end) +{ + cache_op_range(start, end, INS_CACHE|DATA_CACHE|CACHE_CLR|CACHE_INV, 0); +} +EXPORT_SYMBOL(cache_wbinv_range); + +void cache_wbinv_all(void) +{ + cache_op_all(INS_CACHE|DATA_CACHE|CACHE_CLR|CACHE_INV, 0); +} + +void dma_wbinv_range(unsigned long start, unsigned long end) +{ + cache_op_range(start, end, DATA_CACHE|CACHE_CLR|CACHE_INV, 1); +} + +void dma_wb_range(unsigned long start, unsigned long end) +{ + cache_op_range(start, end, DATA_CACHE|CACHE_INV, 1); +} diff --git a/arch/csky/mm/cachev2.c b/arch/csky/mm/cachev2.c new file mode 
100644 index 000000000000..baaf05d69f44 --- /dev/null +++ b/arch/csky/mm/cachev2.c @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/spinlock.h> +#include <linux/smp.h> +#include <asm/cache.h> +#include <asm/barrier.h> + +inline void dcache_wb_line(unsigned long start) +{ + asm volatile("dcache.cval1 %0\n"::"r"(start):"memory"); + sync_is(); +} + +void icache_inv_range(unsigned long start, unsigned long end) +{ + unsigned long i = start & ~(L1_CACHE_BYTES - 1); + + for (; i < end; i += L1_CACHE_BYTES) + asm volatile("icache.iva %0\n"::"r"(i):"memory"); + sync_is(); +} + +void icache_inv_all(void) +{ + asm volatile("icache.ialls\n":::"memory"); + sync_is(); +} + +void dcache_wb_range(unsigned long start, unsigned long end) +{ + unsigned long i = start & ~(L1_CACHE_BYTES - 1); + + for (; i < end; i += L1_CACHE_BYTES) + asm volatile("dcache.cval1 %0\n"::"r"(i):"memory"); + sync_is(); +} + +void dcache_inv_range(unsigned long start, unsigned long end) +{ + unsigned long i = start & ~(L1_CACHE_BYTES - 1); + + for (; i < end; i += L1_CACHE_BYTES) + asm volatile("dcache.civa %0\n"::"r"(i):"memory"); + sync_is(); +} + +void cache_wbinv_range(unsigned long start, unsigned long end) +{ + unsigned long i = start & ~(L1_CACHE_BYTES - 1); + + for (; i < end; i += L1_CACHE_BYTES) + asm volatile("dcache.cval1 %0\n"::"r"(i):"memory"); + sync_is(); + + i = start & ~(L1_CACHE_BYTES - 1); + for (; i < end; i += L1_CACHE_BYTES) + asm volatile("icache.iva %0\n"::"r"(i):"memory"); + sync_is(); +} +EXPORT_SYMBOL(cache_wbinv_range); + +void dma_wbinv_range(unsigned long start, unsigned long end) +{ + unsigned long i = start & ~(L1_CACHE_BYTES - 1); + + for (; i < end; i += L1_CACHE_BYTES) + asm volatile("dcache.civa %0\n"::"r"(i):"memory"); + sync_is(); +} + +void dma_wb_range(unsigned long start, unsigned long end) +{ + unsigned long i = start & ~(L1_CACHE_BYTES - 1); + + for (; i < end; i += L1_CACHE_BYTES) + asm volatile("dcache.civa %0\n"::"r"(i):"memory"); + sync_is(); +} diff --git a/arch/csky/mm/dma-mapping.c b/arch/csky/mm/dma-mapping.c new file mode 100644 index 000000000000..85437b21e045 --- /dev/null +++ b/arch/csky/mm/dma-mapping.c @@ -0,0 +1,254 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
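+/* + * Allocation strategy implemented below, in outline (a sketch of the + * control flow, not additional code): + * + * arch_dma_alloc(dev, size, gfp, attrs) + * blocking gfp? -> csky_dma_alloc_nonatomic(): CMA or + * alloc_pages(), then an uncached remap + * atomic gfp? -> csky_dma_alloc_atomic(): carve from the + * preallocated atomic_pool + * + * The atomic pool exists because atomic contexts cannot sleep in the page + * allocator or in the remap path; it defaults to SZ_256K and can be tuned + * with the "coherent_pool=" early parameter. + */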
+ +#include <linux/cache.h> +#include <linux/dma-mapping.h> +#include <linux/dma-contiguous.h> +#include <linux/dma-noncoherent.h> +#include <linux/genalloc.h> +#include <linux/highmem.h> +#include <linux/io.h> +#include <linux/mm.h> +#include <linux/scatterlist.h> +#include <linux/types.h> +#include <linux/version.h> +#include <asm/cache.h> + +static struct gen_pool *atomic_pool; +static size_t atomic_pool_size __initdata = SZ_256K; + +static int __init early_coherent_pool(char *p) +{ + atomic_pool_size = memparse(p, &p); + return 0; +} +early_param("coherent_pool", early_coherent_pool); + +static int __init atomic_pool_init(void) +{ + struct page *page; + size_t size = atomic_pool_size; + void *ptr; + int ret; + + atomic_pool = gen_pool_create(PAGE_SHIFT, -1); + if (!atomic_pool) + BUG(); + + page = alloc_pages(GFP_KERNEL | GFP_DMA, get_order(size)); + if (!page) + BUG(); + + ptr = dma_common_contiguous_remap(page, size, VM_ALLOC, + pgprot_noncached(PAGE_KERNEL), + __builtin_return_address(0)); + if (!ptr) + BUG(); + + ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr, + page_to_phys(page), atomic_pool_size, -1); + if (ret) + BUG(); + + gen_pool_set_algo(atomic_pool, gen_pool_first_fit_order_align, NULL); + + pr_info("DMA: preallocated %zu KiB pool for atomic coherent pool\n", + atomic_pool_size / 1024); + + pr_info("DMA: vaddr: 0x%x phy: 0x%lx,\n", (unsigned int)ptr, + page_to_phys(page)); + + return 0; +} +postcore_initcall(atomic_pool_init); + +static void *csky_dma_alloc_atomic(struct device *dev, size_t size, + dma_addr_t *dma_handle) +{ + unsigned long addr; + + addr = gen_pool_alloc(atomic_pool, size); + if (addr) + *dma_handle = gen_pool_virt_to_phys(atomic_pool, addr); + + return (void *)addr; +} + +static void csky_dma_free_atomic(struct device *dev, size_t size, void *vaddr, + dma_addr_t dma_handle, unsigned long attrs) +{ + gen_pool_free(atomic_pool, (unsigned long)vaddr, size); +} + +static void __dma_clear_buffer(struct page *page, size_t size) +{ + if (PageHighMem(page)) { + unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; + + do { + void *ptr = kmap_atomic(page); + size_t _size = (size < PAGE_SIZE) ? 
size : PAGE_SIZE; + + memset(ptr, 0, _size); + dma_wbinv_range((unsigned long)ptr, + (unsigned long)ptr + _size); + + kunmap_atomic(ptr); + + page++; + size -= PAGE_SIZE; + count--; + } while (count); + } else { + void *ptr = page_address(page); + + memset(ptr, 0, size); + dma_wbinv_range((unsigned long)ptr, (unsigned long)ptr + size); + } +} + +static void *csky_dma_alloc_nonatomic(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, + unsigned long attrs) +{ + void *vaddr; + struct page *page; + unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; + + if (DMA_ATTR_NON_CONSISTENT & attrs) { + pr_err("csky %s: DMA_ATTR_NON_CONSISTENT is not supported.\n", __func__); + return NULL; + } + + if (IS_ENABLED(CONFIG_DMA_CMA)) + page = dma_alloc_from_contiguous(dev, count, get_order(size), + gfp); + else + page = alloc_pages(gfp, get_order(size)); + + if (!page) { + pr_err("csky %s: no more free pages.\n", __func__); + return NULL; + } + + *dma_handle = page_to_phys(page); + + __dma_clear_buffer(page, size); + + if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) + return page; + + vaddr = dma_common_contiguous_remap(page, PAGE_ALIGN(size), VM_USERMAP, + pgprot_noncached(PAGE_KERNEL), __builtin_return_address(0)); + if (!vaddr) + BUG(); + + return vaddr; +} + +static void csky_dma_free_nonatomic( + struct device *dev, + size_t size, + void *vaddr, + dma_addr_t dma_handle, + unsigned long attrs + ) +{ + struct page *page = phys_to_page(dma_handle); + unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; + + if ((unsigned int)vaddr >= VMALLOC_START) + dma_common_free_remap(vaddr, size, VM_USERMAP); + + if (IS_ENABLED(CONFIG_DMA_CMA)) + dma_release_from_contiguous(dev, page, count); + else + __free_pages(page, get_order(size)); +} + +void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, + gfp_t gfp, unsigned long attrs) +{ + if (gfpflags_allow_blocking(gfp)) + return csky_dma_alloc_nonatomic(dev, size, dma_handle, gfp, + attrs); + else + return csky_dma_alloc_atomic(dev, size, dma_handle); +} + +void arch_dma_free(struct device *dev, size_t size, void *vaddr, + dma_addr_t dma_handle, unsigned long attrs) +{ + if (!addr_in_gen_pool(atomic_pool, (unsigned int) vaddr, size)) + csky_dma_free_nonatomic(dev, size, vaddr, dma_handle, attrs); + else + csky_dma_free_atomic(dev, size, vaddr, dma_handle, attrs); +} + +static inline void cache_op(phys_addr_t paddr, size_t size, + void (*fn)(unsigned long start, unsigned long end)) +{ + struct page *page = pfn_to_page(paddr >> PAGE_SHIFT); + unsigned int offset = paddr & ~PAGE_MASK; + size_t left = size; + unsigned long start; + + do { + size_t len = left; + + if (PageHighMem(page)) { + void *addr; + + if (offset + len > PAGE_SIZE) { + if (offset >= PAGE_SIZE) { + page += offset >> PAGE_SHIFT; + offset &= ~PAGE_MASK; + } + len = PAGE_SIZE - offset; + } + + addr = kmap_atomic(page); + start = (unsigned long)(addr + offset); + fn(start, start + len); + kunmap_atomic(addr); + } else { + start = (unsigned long)phys_to_virt(paddr); + fn(start, start + len); + } + offset = 0; + page++; + left -= len; + } while (left); +} + +void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr, + size_t size, enum dma_data_direction dir) +{ + switch (dir) { + case DMA_TO_DEVICE: + cache_op(paddr, size, dma_wb_range); + break; + case DMA_FROM_DEVICE: + case DMA_BIDIRECTIONAL: + cache_op(paddr, size, dma_wbinv_range); + break; + default: + BUG(); + } +} + +void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, + size_t size, enum
dma_data_direction dir) +{ + switch (dir) { + case DMA_TO_DEVICE: + cache_op(paddr, size, dma_wb_range); + break; + case DMA_FROM_DEVICE: + case DMA_BIDIRECTIONAL: + cache_op(paddr, size, dma_wbinv_range); + break; + default: + BUG(); + } +} diff --git a/arch/csky/mm/fault.c b/arch/csky/mm/fault.c new file mode 100644 index 000000000000..7df57f90b52c --- /dev/null +++ b/arch/csky/mm/fault.c @@ -0,0 +1,211 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/signal.h> +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/string.h> +#include <linux/types.h> +#include <linux/ptrace.h> +#include <linux/mman.h> +#include <linux/mm.h> +#include <linux/smp.h> +#include <linux/vt_kern.h> +#include <linux/extable.h> +#include <linux/uaccess.h> + +#include <asm/hardirq.h> +#include <asm/mmu_context.h> +#include <asm/traps.h> +#include <asm/page.h> + +int fixup_exception(struct pt_regs *regs) +{ + const struct exception_table_entry *fixup; + + fixup = search_exception_tables(instruction_pointer(regs)); + if (fixup) { + regs->pc = fixup->nextinsn; + + return 1; + } + + return 0; +} + +/* + * This routine handles page faults. It determines the address, + * and the problem, and then passes it off to one of the appropriate + * routines. + */ +asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write, + unsigned long mmu_meh) +{ + struct vm_area_struct *vma = NULL; + struct task_struct *tsk = current; + struct mm_struct *mm = tsk->mm; + int si_code; + int fault; + unsigned long address = mmu_meh & PAGE_MASK; + + si_code = SEGV_MAPERR; + +#ifndef CONFIG_CPU_HAS_TLBI + /* + * We fault-in kernel-space virtual memory on-demand. The + * 'reference' page table is init_mm.pgd. + * + * NOTE! We MUST NOT take any locks for this case. We may + * be in an interrupt or a critical region, and should + * only copy the information from the master page table, + * nothing more. + */ + if (unlikely(address >= VMALLOC_START) && + unlikely(address <= VMALLOC_END)) { + /* + * Synchronize this task's top level page-table + * with the 'reference' page table. + * + * Do _not_ use "tsk" here. We might be inside + * an interrupt in the middle of a task switch.. + */ + int offset = __pgd_offset(address); + pgd_t *pgd, *pgd_k; + pud_t *pud, *pud_k; + pmd_t *pmd, *pmd_k; + pte_t *pte_k; + + unsigned long pgd_base; + + pgd_base = tlb_get_pgd(); + pgd = (pgd_t *)pgd_base + offset; + pgd_k = init_mm.pgd + offset; + + if (!pgd_present(*pgd_k)) + goto no_context; + set_pgd(pgd, *pgd_k); + + pud = (pud_t *)pgd; + pud_k = (pud_t *)pgd_k; + if (!pud_present(*pud_k)) + goto no_context; + + pmd = pmd_offset(pud, address); + pmd_k = pmd_offset(pud_k, address); + if (!pmd_present(*pmd_k)) + goto no_context; + set_pmd(pmd, *pmd_k); + + pte_k = pte_offset_kernel(pmd_k, address); + if (!pte_present(*pte_k)) + goto no_context; + return; + } +#endif + /* + * If we're in an interrupt or have no user + * context, we must not take the fault..
+ */ + if (in_atomic() || !mm) + goto bad_area_nosemaphore; + + down_read(&mm->mmap_sem); + vma = find_vma(mm, address); + if (!vma) + goto bad_area; + if (vma->vm_start <= address) + goto good_area; + if (!(vma->vm_flags & VM_GROWSDOWN)) + goto bad_area; + if (expand_stack(vma, address)) + goto bad_area; + /* + * Ok, we have a good vm_area for this memory access, so + * we can handle it.. + */ +good_area: + si_code = SEGV_ACCERR; + + if (write) { + if (!(vma->vm_flags & VM_WRITE)) + goto bad_area; + } else { + if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))) + goto bad_area; + } + + /* + * If for any reason at all we couldn't handle the fault, + * make sure we exit gracefully rather than endlessly redo + * the fault. + */ + fault = handle_mm_fault(vma, address, write ? FAULT_FLAG_WRITE : 0); + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) { + up_read(&mm->mmap_sem); + goto out_of_memory; + } else if (fault & VM_FAULT_SIGBUS) + goto do_sigbus; + else if (fault & VM_FAULT_SIGSEGV) + goto bad_area; + BUG(); + } + if (fault & VM_FAULT_MAJOR) + tsk->maj_flt++; + else + tsk->min_flt++; + + up_read(&mm->mmap_sem); + return; + + /* + * Something tried to access memory that isn't in our memory map.. + * Fix it, but check if it's kernel or user first.. + */ +bad_area: + up_read(&mm->mmap_sem); + +bad_area_nosemaphore: + /* User mode accesses just cause a SIGSEGV */ + if (user_mode(regs)) { + tsk->thread.address = address; + tsk->thread.error_code = write; + force_sig_fault(SIGSEGV, si_code, (void __user *)address, current); + return; + } + +no_context: + /* Are we prepared to handle this kernel fault? */ + if (fixup_exception(regs)) + return; + + /* + * Oops. The kernel tried to access some bad page. We'll have to + * terminate things with extreme prejudice. + */ + bust_spinlocks(1); + pr_alert("Unable to handle kernel paging request at vaddr: %08lx, epc: %08lx\n", + address, regs->pc); + die_if_kernel("Oops", regs, write); + +out_of_memory: + /* + * We ran out of memory, call the OOM killer, and return to userspace + * (which will retry the fault, or kill us if we got oom-killed). + */ + pagefault_out_of_memory(); + return; + +do_sigbus: + up_read(&mm->mmap_sem); + + /* Kernel mode? Handle exceptions or die */ + if (!user_mode(regs)) + goto no_context; + + tsk->thread.address = address; + force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address, current); +} diff --git a/arch/csky/mm/highmem.c b/arch/csky/mm/highmem.c new file mode 100644 index 000000000000..e168ac087ccb --- /dev/null +++ b/arch/csky/mm/highmem.c @@ -0,0 +1,199 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
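The user-visible half of the bad_area path in fault.c above is ordinary POSIX signal delivery: si_addr and si_code reach the process from the force_sig_fault() call. A hedged user-space sketch that exercises it (standard C/POSIX, nothing csky-specific; printf in a signal handler is not async-signal-safe and is used here only for brevity):

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void on_segv(int sig, siginfo_t *info, void *ctx)
{
	/* si_addr/si_code were filled in by the kernel's fault handler */
	printf("SIGSEGV at %p, si_code=%d\n", info->si_addr, info->si_code);
	_exit(0);
}

int main(void)
{
	struct sigaction sa = { 0 };

	sa.sa_sigaction = on_segv;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGSEGV, &sa, NULL);

	*(volatile int *)16 = 1;	/* unmapped address: expect SEGV_MAPERR */
	return 1;
}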
+ +#include <linux/module.h> +#include <linux/highmem.h> +#include <linux/smp.h> +#include <linux/bootmem.h> +#include <asm/fixmap.h> +#include <asm/tlbflush.h> +#include <asm/cacheflush.h> + +static pte_t *kmap_pte; + +unsigned long highstart_pfn, highend_pfn; + +void *kmap(struct page *page) +{ + void *addr; + + might_sleep(); + if (!PageHighMem(page)) + return page_address(page); + addr = kmap_high(page); + flush_tlb_one((unsigned long)addr); + + return addr; +} +EXPORT_SYMBOL(kmap); + +void kunmap(struct page *page) +{ + BUG_ON(in_interrupt()); + if (!PageHighMem(page)) + return; + kunmap_high(page); +} +EXPORT_SYMBOL(kunmap); + +void *kmap_atomic(struct page *page) +{ + unsigned long vaddr; + int idx, type; + + preempt_disable(); + pagefault_disable(); + if (!PageHighMem(page)) + return page_address(page); + + type = kmap_atomic_idx_push(); + idx = type + KM_TYPE_NR*smp_processor_id(); + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); +#ifdef CONFIG_DEBUG_HIGHMEM + BUG_ON(!pte_none(*(kmap_pte - idx))); +#endif + set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL)); + flush_tlb_one((unsigned long)vaddr); + + return (void *)vaddr; +} +EXPORT_SYMBOL(kmap_atomic); + +void __kunmap_atomic(void *kvaddr) +{ + unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; + int idx; + + if (vaddr < FIXADDR_START) + goto out; + +#ifdef CONFIG_DEBUG_HIGHMEM + idx = KM_TYPE_NR*smp_processor_id() + kmap_atomic_idx(); + + BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); + + pte_clear(&init_mm, vaddr, kmap_pte - idx); + flush_tlb_one(vaddr); +#else + (void) idx; /* to kill a warning */ +#endif + kmap_atomic_idx_pop(); +out: + pagefault_enable(); + preempt_enable(); +} +EXPORT_SYMBOL(__kunmap_atomic); + +/* + * This is the same as kmap_atomic() but can map memory that doesn't + * have a struct page associated with it. 
+ */ +void *kmap_atomic_pfn(unsigned long pfn) +{ + unsigned long vaddr; + int idx, type; + + preempt_disable(); + pagefault_disable(); + + type = kmap_atomic_idx_push(); + idx = type + KM_TYPE_NR*smp_processor_id(); + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); + set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL)); + flush_tlb_one(vaddr); + + return (void *) vaddr; +} + +struct page *kmap_atomic_to_page(void *ptr) +{ + unsigned long idx, vaddr = (unsigned long)ptr; + pte_t *pte; + + if (vaddr < FIXADDR_START) + return virt_to_page(ptr); + + idx = virt_to_fix(vaddr); + pte = kmap_pte - (idx - FIX_KMAP_BEGIN); + return pte_page(*pte); +} + +static void __init fixrange_init(unsigned long start, unsigned long end, + pgd_t *pgd_base) +{ +#ifdef CONFIG_HIGHMEM + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + int i, j, k; + unsigned long vaddr; + + vaddr = start; + i = __pgd_offset(vaddr); + j = __pud_offset(vaddr); + k = __pmd_offset(vaddr); + pgd = pgd_base + i; + + for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) { + pud = (pud_t *)pgd; + for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) { + pmd = (pmd_t *)pud; + for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) { + if (pmd_none(*pmd)) { + pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); + set_pmd(pmd, __pmd(__pa(pte))); + BUG_ON(pte != pte_offset_kernel(pmd, 0)); + } + vaddr += PMD_SIZE; + } + k = 0; + } + j = 0; + } +#endif +} + +void __init fixaddr_kmap_pages_init(void) +{ + unsigned long vaddr; + pgd_t *pgd_base; +#ifdef CONFIG_HIGHMEM + pgd_t *pgd; + pmd_t *pmd; + pud_t *pud; + pte_t *pte; +#endif + pgd_base = swapper_pg_dir; + + /* + * Fixed mappings: + */ + vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; + fixrange_init(vaddr, 0, pgd_base); + +#ifdef CONFIG_HIGHMEM + /* + * Permanent kmaps: + */ + vaddr = PKMAP_BASE; + fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base); + + pgd = swapper_pg_dir + __pgd_offset(vaddr); + pud = (pud_t *)pgd; + pmd = pmd_offset(pud, vaddr); + pte = pte_offset_kernel(pmd, vaddr); + pkmap_page_table = pte; +#endif +} + +void __init kmap_init(void) +{ + unsigned long vaddr; + + fixaddr_kmap_pages_init(); + + vaddr = __fix_to_virt(FIX_KMAP_BEGIN); + + kmap_pte = pte_offset_kernel((pmd_t *)pgd_offset_k(vaddr), vaddr); +} diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c new file mode 100644 index 000000000000..ce2711e050ad --- /dev/null +++ b/arch/csky/mm/init.c @@ -0,0 +1,122 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
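A hedged sketch of how kernel code consumes the atomic kmap implemented in highmem.c above. The helper name is made up; the kmap_atomic()/kunmap_atomic() pairing is the standard API of this kernel generation (kunmap_atomic() is the wrapper that lands in __kunmap_atomic() above):

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical example: zero a page that may live in high memory. */
static void zero_any_page(struct page *page)
{
	void *va = kmap_atomic(page);	/* lowmem: direct map; highmem: FIX_KMAP slot */

	memset(va, 0, PAGE_SIZE);
	kunmap_atomic(va);		/* pops the slot, re-enables pagefaults/preemption */
}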
+ +#include <linux/bug.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/signal.h> +#include <linux/sched.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/string.h> +#include <linux/types.h> +#include <linux/pagemap.h> +#include <linux/ptrace.h> +#include <linux/mman.h> +#include <linux/mm.h> +#include <linux/bootmem.h> +#include <linux/highmem.h> +#include <linux/memblock.h> +#include <linux/swap.h> +#include <linux/proc_fs.h> +#include <linux/pfn.h> + +#include <asm/setup.h> +#include <asm/cachectl.h> +#include <asm/dma.h> +#include <asm/pgtable.h> +#include <asm/pgalloc.h> +#include <asm/mmu_context.h> +#include <asm/sections.h> +#include <asm/tlb.h> + +pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss; +pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss; +unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] + __page_aligned_bss; +EXPORT_SYMBOL(empty_zero_page); + +void __init mem_init(void) +{ +#ifdef CONFIG_HIGHMEM + unsigned long tmp; + + max_mapnr = highend_pfn; +#else + max_mapnr = max_low_pfn; +#endif + high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); + + free_all_bootmem(); + +#ifdef CONFIG_HIGHMEM + for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) { + struct page *page = pfn_to_page(tmp); + + /* FIXME: is memblock_is_reserved() the right check here? */ + if (!memblock_is_reserved(tmp << PAGE_SHIFT)) + free_highmem_page(page); + } +#endif + mem_init_print_info(NULL); +} + +#ifdef CONFIG_BLK_DEV_INITRD +void free_initrd_mem(unsigned long start, unsigned long end) +{ + if (start < end) + pr_info("Freeing initrd memory: %ldk freed\n", + (end - start) >> 10); + + for (; start < end; start += PAGE_SIZE) { + ClearPageReserved(virt_to_page(start)); + init_page_count(virt_to_page(start)); + free_page(start); + totalram_pages++; + } +} +#endif + +extern char __init_begin[], __init_end[]; + +void free_initmem(void) +{ + unsigned long addr; + + addr = (unsigned long) &__init_begin; + + while (addr < (unsigned long) &__init_end) { + ClearPageReserved(virt_to_page(addr)); + init_page_count(virt_to_page(addr)); + free_page(addr); + totalram_pages++; + addr += PAGE_SIZE; + } + + pr_info("Freeing unused kernel memory: %dk freed\n", + ((unsigned int)&__init_end - (unsigned int)&__init_begin) >> 10); +} + +void pgd_init(unsigned long *p) +{ + int i; + + for (i = 0; i < PTRS_PER_PGD; i++) + p[i] = __pa(invalid_pte_table); +} + +void __init pre_mmu_init(void) +{ + /* + * Set up the page table and enable TLB hardware refill. + */ + flush_tlb_all(); + pgd_init((unsigned long *)swapper_pg_dir); + TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir); + TLBMISS_HANDLER_SETUP_PGD_KERNEL(swapper_pg_dir); + + asid_cache(smp_processor_id()) = ASID_FIRST_VERSION; + + /* Set page mask to 4k */ + write_mmu_pagemask(0); +} diff --git a/arch/csky/mm/ioremap.c b/arch/csky/mm/ioremap.c new file mode 100644 index 000000000000..7ad3ff103f4a --- /dev/null +++ b/arch/csky/mm/ioremap.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
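pre_mmu_init() and pgd_init() in init.c above give every top-level entry a safe default: each slot holds the physical address of invalid_pte_table, whose PTEs are all zero. A hedged illustration of the resulting invariant (comment only, not kernel code):

/* After pgd_init((unsigned long *)swapper_pg_dir):
 *
 *   swapper_pg_dir[0] == swapper_pg_dir[1] == ...
 *                     == swapper_pg_dir[PTRS_PER_PGD - 1]
 *                     == __pa(invalid_pte_table)
 *
 * so a hardware TLB refill on an unmapped address fetches a zero
 * (non-present) PTE and traps into do_page_fault() instead of
 * dereferencing an uninitialized pointer.
 */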
+ +#include <linux/export.h> +#include <linux/mm.h> +#include <linux/vmalloc.h> +#include <linux/io.h> + +#include <asm/pgtable.h> + +void __iomem *ioremap(phys_addr_t addr, size_t size) +{ + phys_addr_t last_addr; + unsigned long offset, vaddr; + struct vm_struct *area; + pgprot_t prot; + + last_addr = addr + size - 1; + if (!size || last_addr < addr) + return NULL; + + offset = addr & (~PAGE_MASK); + addr &= PAGE_MASK; + size = PAGE_ALIGN(size + offset); + + area = get_vm_area_caller(size, VM_ALLOC, __builtin_return_address(0)); + if (!area) + return NULL; + + vaddr = (unsigned long)area->addr; + + prot = __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | + _PAGE_GLOBAL | _CACHE_UNCACHED); + + if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) { + free_vm_area(area); + return NULL; + } + + return (void __iomem *)(vaddr + offset); +} +EXPORT_SYMBOL(ioremap); + +void iounmap(void __iomem *addr) +{ + vunmap((void *)((unsigned long)addr & PAGE_MASK)); +} +EXPORT_SYMBOL(iounmap); diff --git a/arch/csky/mm/syscache.c b/arch/csky/mm/syscache.c new file mode 100644 index 000000000000..c4645e4e97f4 --- /dev/null +++ b/arch/csky/mm/syscache.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/syscalls.h> +#include <asm/page.h> +#include <asm/cache.h> +#include <asm/cachectl.h> + +SYSCALL_DEFINE3(cacheflush, + void __user *, addr, + unsigned long, bytes, + int, cache) +{ + switch (cache) { + case ICACHE: + icache_inv_range((unsigned long)addr, + (unsigned long)addr + bytes); + break; + case DCACHE: + dcache_wb_range((unsigned long)addr, + (unsigned long)addr + bytes); + break; + case BCACHE: + cache_wbinv_range((unsigned long)addr, + (unsigned long)addr + bytes); + break; + default: + return -EINVAL; + } + + return 0; +} diff --git a/arch/csky/mm/tlb.c b/arch/csky/mm/tlb.c new file mode 100644 index 000000000000..08b8394e5b8f --- /dev/null +++ b/arch/csky/mm/tlb.c @@ -0,0 +1,216 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
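User space reaches the cacheflush handler in syscache.c above through the arch-specific syscall. A hedged sketch of the typical JIT use case (it assumes the uapi headers export __NR_cacheflush, and that BCACHE is ICACHE|DCACHE = 3 per the usual asm/cachectl.h convention; both are worth verifying against the real headers):

#include <sys/syscall.h>
#include <unistd.h>

#define BCACHE 3	/* assumed: ICACHE (1) | DCACHE (2), see asm/cachectl.h */

/* After writing instructions into buf, push them from D-cache to I-cache. */
static long flush_jit_region(void *buf, unsigned long len)
{
	return syscall(__NR_cacheflush, buf, len, BCACHE);
}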
+ +#include <linux/init.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/sched.h> + +#include <asm/mmu_context.h> +#include <asm/pgtable.h> +#include <asm/setup.h> + +#define CSKY_TLB_SIZE CONFIG_CPU_TLB_SIZE + +void flush_tlb_all(void) +{ + tlb_invalid_all(); +} + +void flush_tlb_mm(struct mm_struct *mm) +{ + int cpu = smp_processor_id(); + + if (cpu_context(cpu, mm) != 0) + drop_mmu_context(mm, cpu); + + tlb_invalid_all(); +} + +#define restore_asid_inv_utlb(oldpid, newpid) \ +do { \ + if ((oldpid & ASID_MASK) == newpid) \ + write_mmu_entryhi(oldpid + 1); \ + write_mmu_entryhi(oldpid); \ +} while (0) + +void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end) +{ + struct mm_struct *mm = vma->vm_mm; + int cpu = smp_processor_id(); + + if (cpu_context(cpu, mm) != 0) { + unsigned long size, flags; + int newpid = cpu_asid(cpu, mm); + + local_irq_save(flags); + size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; + size = (size + 1) >> 1; + if (size <= CSKY_TLB_SIZE/2) { + start &= (PAGE_MASK << 1); + end += ((PAGE_SIZE << 1) - 1); + end &= (PAGE_MASK << 1); +#ifdef CONFIG_CPU_HAS_TLBI + while (start < end) { + asm volatile("tlbi.vaas %0" + ::"r"(start | newpid)); + start += (PAGE_SIZE << 1); + } + sync_is(); +#else + { + int oldpid = read_mmu_entryhi(); + + while (start < end) { + int idx; + + write_mmu_entryhi(start | newpid); + start += (PAGE_SIZE << 1); + tlb_probe(); + idx = read_mmu_index(); + if (idx >= 0) + tlb_invalid_indexed(); + } + restore_asid_inv_utlb(oldpid, newpid); + } +#endif + } else { + drop_mmu_context(mm, cpu); + } + local_irq_restore(flags); + } +} + +void flush_tlb_kernel_range(unsigned long start, unsigned long end) +{ + unsigned long size, flags; + + local_irq_save(flags); + size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; + if (size <= CSKY_TLB_SIZE) { + start &= (PAGE_MASK << 1); + end += ((PAGE_SIZE << 1) - 1); + end &= (PAGE_MASK << 1); +#ifdef CONFIG_CPU_HAS_TLBI + while (start < end) { + asm volatile("tlbi.vaas %0"::"r"(start)); + start += (PAGE_SIZE << 1); + } + sync_is(); +#else + { + int oldpid = read_mmu_entryhi(); + + while (start < end) { + int idx; + + write_mmu_entryhi(start); + start += (PAGE_SIZE << 1); + tlb_probe(); + idx = read_mmu_index(); + if (idx >= 0) + tlb_invalid_indexed(); + } + restore_asid_inv_utlb(oldpid, 0); + } +#endif + } else { + flush_tlb_all(); + } + + local_irq_restore(flags); +} + +void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) +{ + int cpu = smp_processor_id(); + int newpid = cpu_asid(cpu, vma->vm_mm); + + if (cpu_context(cpu, vma->vm_mm) != 0) { + page &= (PAGE_MASK << 1); + +#ifdef CONFIG_CPU_HAS_TLBI + asm volatile("tlbi.vaas %0"::"r"(page | newpid)); + sync_is(); +#else + { + int oldpid, idx; + unsigned long flags; + + local_irq_save(flags); + oldpid = read_mmu_entryhi(); + write_mmu_entryhi(page | newpid); + tlb_probe(); + idx = read_mmu_index(); + if (idx >= 0) + tlb_invalid_indexed(); + + restore_asid_inv_utlb(oldpid, newpid); + local_irq_restore(flags); + } +#endif + } +} + +/* + * Remove one kernel space TLB entry. This entry is assumed to be marked + * global so we don't do the ASID thing.
+ */ +void flush_tlb_one(unsigned long page) +{ + int oldpid; + + oldpid = read_mmu_entryhi(); + page &= (PAGE_MASK << 1); + +#ifdef CONFIG_CPU_HAS_TLBI + page = page | (oldpid & 0xfff); + asm volatile("tlbi.vaas %0"::"r"(page)); + sync_is(); +#else + { + int idx; + unsigned long flags; + + page = page | (oldpid & 0xff); + + local_irq_save(flags); + write_mmu_entryhi(page); + tlb_probe(); + idx = read_mmu_index(); + if (idx >= 0) + tlb_invalid_indexed(); + restore_asid_inv_utlb(oldpid, oldpid); + local_irq_restore(flags); + } +#endif +} +EXPORT_SYMBOL(flush_tlb_one); + +/* Dump all jtlb entries */ +void show_jtlb_table(void) +{ + unsigned long flags; + int entryhi, entrylo0, entrylo1; + int entry; + int oldpid; + + local_irq_save(flags); + entry = 0; + pr_info("\n\n\n"); + + oldpid = read_mmu_entryhi(); + while (entry < CSKY_TLB_SIZE) { + write_mmu_index(entry); + tlb_read(); + entryhi = read_mmu_entryhi(); + entrylo0 = read_mmu_entrylo0(); + entrylo1 = read_mmu_entrylo1(); + pr_info("jtlb[%d]: entryhi - 0x%x; entrylo0 - 0x%x; entrylo1 - 0x%x\n", + entry, entryhi, entrylo0, entrylo1); + entry++; + } + write_mmu_entryhi(oldpid); + local_irq_restore(flags); +}
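The size check in flush_tlb_range() above trades per-entry invalidation against dropping the whole mm context; since the MMU handles entries as even/odd page pairs, the threshold is CSKY_TLB_SIZE/2 pairs. A worked, illustration-only example (host C, assuming CONFIG_CPU_TLB_SIZE = 128 and 4 KiB pages):

#include <stdio.h>

#define PAGE_SHIFT	12
#define CSKY_TLB_SIZE	128	/* assumed CONFIG_CPU_TLB_SIZE for ck810 */

int main(void)
{
	unsigned long len = 256 * 1024;			/* flush 256 KiB */
	unsigned long pages = len >> PAGE_SHIFT;	/* 64 pages */
	unsigned long pairs = (pages + 1) >> 1;		/* 32 even/odd pairs */

	if (pairs <= CSKY_TLB_SIZE / 2)			/* 32 <= 64 */
		printf("invalidate %lu pairs one by one\n", pairs);
	else						/* e.g. 1 MiB -> 128 pairs */
		printf("cheaper to drop the whole mm context\n");
	return 0;
}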