Diffstat (limited to 'arch/csky/include/asm')
-rw-r--r--  arch/csky/include/asm/Kbuild            |  49
-rw-r--r--  arch/csky/include/asm/addrspace.h       |  10
-rw-r--r--  arch/csky/include/asm/atomic.h          | 212
-rw-r--r--  arch/csky/include/asm/barrier.h         |  49
-rw-r--r--  arch/csky/include/asm/bitops.h          |  82
-rw-r--r--  arch/csky/include/asm/bug.h             |  26
-rw-r--r--  arch/csky/include/asm/cache.h           |  30
-rw-r--r--  arch/csky/include/asm/cacheflush.h      |   9
-rw-r--r--  arch/csky/include/asm/checksum.h        |  50
-rw-r--r--  arch/csky/include/asm/cmpxchg.h         |  73
-rw-r--r--  arch/csky/include/asm/elf.h             |  85
-rw-r--r--  arch/csky/include/asm/fixmap.h          |  27
-rw-r--r--  arch/csky/include/asm/highmem.h         |  51
-rw-r--r--  arch/csky/include/asm/io.h              |  24
-rw-r--r--  arch/csky/include/asm/irqflags.h        |  49
-rw-r--r--  arch/csky/include/asm/mmu.h             |  12
-rw-r--r--  arch/csky/include/asm/mmu_context.h     | 150
-rw-r--r--  arch/csky/include/asm/page.h            | 104
-rw-r--r--  arch/csky/include/asm/pgalloc.h         | 115
-rw-r--r--  arch/csky/include/asm/pgtable.h         | 306
-rw-r--r--  arch/csky/include/asm/processor.h       | 115
-rw-r--r--  arch/csky/include/asm/reg_ops.h         |  26
-rw-r--r--  arch/csky/include/asm/segment.h         |  19
-rw-r--r--  arch/csky/include/asm/shmparam.h        |  11
-rw-r--r--  arch/csky/include/asm/smp.h             |  26
-rw-r--r--  arch/csky/include/asm/spinlock.h        | 256
-rw-r--r--  arch/csky/include/asm/spinlock_types.h  |  37
-rw-r--r--  arch/csky/include/asm/string.h          |  13
-rw-r--r--  arch/csky/include/asm/switch_to.h       |  36
-rw-r--r--  arch/csky/include/asm/syscall.h         |  71
-rw-r--r--  arch/csky/include/asm/syscalls.h        |  15
-rw-r--r--  arch/csky/include/asm/thread_info.h     |  75
-rw-r--r--  arch/csky/include/asm/tlb.h             |  25
-rw-r--r--  arch/csky/include/asm/tlbflush.h        |  25
-rw-r--r--  arch/csky/include/asm/traps.h           |  44
-rw-r--r--  arch/csky/include/asm/uaccess.h         | 416
-rw-r--r--  arch/csky/include/asm/unistd.h          |   4
-rw-r--r--  arch/csky/include/asm/vdso.h            |  12
38 files changed, 2739 insertions, 0 deletions
diff --git a/arch/csky/include/asm/Kbuild b/arch/csky/include/asm/Kbuild
new file mode 100644
index 000000000000..2a0abe8f2a35
--- /dev/null
+++ b/arch/csky/include/asm/Kbuild
@@ -0,0 +1,49 @@
+generic-y += asm-offsets.h
+generic-y += bugs.h
+generic-y += clkdev.h
+generic-y += compat.h
+generic-y += current.h
+generic-y += delay.h
+generic-y += device.h
+generic-y += div64.h
+generic-y += dma.h
+generic-y += dma-contiguous.h
+generic-y += dma-mapping.h
+generic-y += emergency-restart.h
+generic-y += exec.h
+generic-y += fb.h
+generic-y += ftrace.h
+generic-y += futex.h
+generic-y += gpio.h
+generic-y += hardirq.h
+generic-y += hw_irq.h
+generic-y += irq.h
+generic-y += irq_regs.h
+generic-y += irq_work.h
+generic-y += kdebug.h
+generic-y += kmap_types.h
+generic-y += kprobes.h
+generic-y += kvm_para.h
+generic-y += linkage.h
+generic-y += local.h
+generic-y += local64.h
+generic-y += mm-arch-hooks.h
+generic-y += module.h
+generic-y += mutex.h
+generic-y += pci.h
+generic-y += percpu.h
+generic-y += preempt.h
+generic-y += qrwlock.h
+generic-y += scatterlist.h
+generic-y += sections.h
+generic-y += serial.h
+generic-y += shm.h
+generic-y += timex.h
+generic-y += topology.h
+generic-y += trace_clock.h
+generic-y += unaligned.h
+generic-y += user.h
+generic-y += vga.h
+generic-y += vmlinux.lds.h
+generic-y += word-at-a-time.h
+generic-y += xor.h
diff --git a/arch/csky/include/asm/addrspace.h b/arch/csky/include/asm/addrspace.h
new file mode 100644
index 000000000000..d1c2ede692ed
--- /dev/null
+++ b/arch/csky/include/asm/addrspace.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_ADDRSPACE_H
+#define __ASM_CSKY_ADDRSPACE_H
+
+#define KSEG0 0x80000000ul
+#define KSEG0ADDR(a) (((unsigned long)(a) & 0x1fffffff) | KSEG0)
+
+#endif /* __ASM_CSKY_ADDRSPACE_H */
diff --git a/arch/csky/include/asm/atomic.h b/arch/csky/include/asm/atomic.h
new file mode 100644
index 000000000000..e369d73b13e3
--- /dev/null
+++ b/arch/csky/include/asm/atomic.h
@@ -0,0 +1,212 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_ATOMIC_H
+#define __ASM_CSKY_ATOMIC_H
+
+#include <linux/version.h>
+#include <asm/cmpxchg.h>
+#include <asm/barrier.h>
+
+#ifdef CONFIG_CPU_HAS_LDSTEX
+
+#define __atomic_add_unless __atomic_add_unless
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+ unsigned long tmp, ret;
+
+ smp_mb();
+
+ asm volatile (
+ "1: ldex.w %0, (%3) \n"
+ " mov %1, %0 \n"
+ " cmpne %0, %4 \n"
+ " bf 2f \n"
+ " add %0, %2 \n"
+ " stex.w %0, (%3) \n"
+ " bez %0, 1b \n"
+ "2: \n"
+ : "=&r" (tmp), "=&r" (ret)
+ : "r" (a), "r"(&v->counter), "r"(u)
+ : "memory");
+
+ if (ret != u)
+ smp_mb();
+
+ return ret;
+}
+
+#define ATOMIC_OP(op, c_op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ unsigned long tmp; \
+ \
+ asm volatile ( \
+ "1: ldex.w %0, (%2) \n" \
+ " " #op " %0, %1 \n" \
+ " stex.w %0, (%2) \n" \
+ " bez %0, 1b \n" \
+ : "=&r" (tmp) \
+ : "r" (i), "r"(&v->counter) \
+ : "memory"); \
+}
+
+#define ATOMIC_OP_RETURN(op, c_op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ unsigned long tmp, ret; \
+ \
+ smp_mb(); \
+ asm volatile ( \
+ "1: ldex.w %0, (%3) \n" \
+ " " #op " %0, %2 \n" \
+ " mov %1, %0 \n" \
+ " stex.w %0, (%3) \n" \
+ " bez %0, 1b \n" \
+ : "=&r" (tmp), "=&r" (ret) \
+ : "r" (i), "r"(&v->counter) \
+ : "memory"); \
+ smp_mb(); \
+ \
+ return ret; \
+}
+
+#define ATOMIC_FETCH_OP(op, c_op) \
+static inline int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ unsigned long tmp, ret; \
+ \
+ smp_mb(); \
+ asm volatile ( \
+ "1: ldex.w %0, (%3) \n" \
+ " mov %1, %0 \n" \
+ " " #op " %0, %2 \n" \
+ " stex.w %0, (%3) \n" \
+ " bez %0, 1b \n" \
+ : "=&r" (tmp), "=&r" (ret) \
+ : "r" (i), "r"(&v->counter) \
+ : "memory"); \
+ smp_mb(); \
+ \
+ return ret; \
+}
+
+#else /* CONFIG_CPU_HAS_LDSTEX */
+
+#include <linux/irqflags.h>
+
+#define __atomic_add_unless __atomic_add_unless
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+ unsigned long tmp, ret, flags;
+
+ raw_local_irq_save(flags);
+
+ asm volatile (
+ " ldw %0, (%3) \n"
+ " mov %1, %0 \n"
+ " cmpne %0, %4 \n"
+ " bf 2f \n"
+ " add %0, %2 \n"
+ " stw %0, (%3) \n"
+ "2: \n"
+ : "=&r" (tmp), "=&r" (ret)
+ : "r" (a), "r"(&v->counter), "r"(u)
+ : "memory");
+
+ raw_local_irq_restore(flags);
+
+ return ret;
+}
+
+#define ATOMIC_OP(op, c_op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ unsigned long tmp, flags; \
+ \
+ raw_local_irq_save(flags); \
+ \
+ asm volatile ( \
+ " ldw %0, (%2) \n" \
+ " " #op " %0, %1 \n" \
+ " stw %0, (%2) \n" \
+ : "=&r" (tmp) \
+ : "r" (i), "r"(&v->counter) \
+ : "memory"); \
+ \
+ raw_local_irq_restore(flags); \
+}
+
+#define ATOMIC_OP_RETURN(op, c_op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ unsigned long tmp, ret, flags; \
+ \
+ raw_local_irq_save(flags); \
+ \
+ asm volatile ( \
+ " ldw %0, (%3) \n" \
+ " " #op " %0, %2 \n" \
+ " stw %0, (%3) \n" \
+ " mov %1, %0 \n" \
+ : "=&r" (tmp), "=&r" (ret) \
+ : "r" (i), "r"(&v->counter) \
+ : "memory"); \
+ \
+ raw_local_irq_restore(flags); \
+ \
+ return ret; \
+}
+
+#define ATOMIC_FETCH_OP(op, c_op) \
+static inline int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ unsigned long tmp, ret, flags; \
+ \
+ raw_local_irq_save(flags); \
+ \
+ asm volatile ( \
+ " ldw %0, (%3) \n" \
+ " mov %1, %0 \n" \
+ " " #op " %0, %2 \n" \
+ " stw %0, (%3) \n" \
+ : "=&r" (tmp), "=&r" (ret) \
+ : "r" (i), "r"(&v->counter) \
+ : "memory"); \
+ \
+ raw_local_irq_restore(flags); \
+ \
+ return ret; \
+}
+
+#endif /* CONFIG_CPU_HAS_LDSTEX */
+
+#define atomic_add_return atomic_add_return
+ATOMIC_OP_RETURN(add, +)
+#define atomic_sub_return atomic_sub_return
+ATOMIC_OP_RETURN(sub, -)
+
+#define atomic_fetch_add atomic_fetch_add
+ATOMIC_FETCH_OP(add, +)
+#define atomic_fetch_sub atomic_fetch_sub
+ATOMIC_FETCH_OP(sub, -)
+#define atomic_fetch_and atomic_fetch_and
+ATOMIC_FETCH_OP(and, &)
+#define atomic_fetch_or atomic_fetch_or
+ATOMIC_FETCH_OP(or, |)
+#define atomic_fetch_xor atomic_fetch_xor
+ATOMIC_FETCH_OP(xor, ^)
+
+#define atomic_and atomic_and
+ATOMIC_OP(and, &)
+#define atomic_or atomic_or
+ATOMIC_OP(or, |)
+#define atomic_xor atomic_xor
+ATOMIC_OP(xor, ^)
+
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+#include <asm-generic/atomic.h>
+
+#endif /* __ASM_CSKY_ATOMIC_H */
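[Reviewer note, not part of the patch] The ldex.w/stex.w pairs above are C-SKY's load-exclusive/store-exclusive primitives: stex.w writes a status back into its source register, and the following "bez %0, 1b" retries the whole sequence when the exclusive store failed. A minimal portable-C sketch of what atomic_add_return() implements, using the kernel's cmpxchg() as the retry primitive (sketch only; the real implementation is the asm above):

    static inline int atomic_add_return_sketch(int i, atomic_t *v)
    {
            int old, new;

            do {
                    old = READ_ONCE(v->counter);             /* ldex.w       */
                    new = old + i;                           /* add          */
            } while (cmpxchg(&v->counter, old, new) != old); /* stex.w + bez */

            return new; /* smp_mb() on both sides makes this fully ordered */
    }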
diff --git a/arch/csky/include/asm/barrier.h b/arch/csky/include/asm/barrier.h
new file mode 100644
index 000000000000..476eb786f22d
--- /dev/null
+++ b/arch/csky/include/asm/barrier.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_BARRIER_H
+#define __ASM_CSKY_BARRIER_H
+
+#ifndef __ASSEMBLY__
+
+#define nop() asm volatile ("nop\n":::"memory")
+
+/*
+ * sync: completion barrier
+ * sync.s: completion barrier and shareable to other cores
+ * sync.i: completion barrier with flush cpu pipeline
+ * sync.is: completion barrier with flush cpu pipeline and shareable to
+ * other cores
+ *
+ * bar.brwarw: ordering barrier for all load/store instructions before it
+ * bar.brwarws: ordering barrier for all load/store instructions before it
+ * and shareable to other cores
+ * bar.brar: ordering barrier for all load instructions before it
+ * bar.brars: ordering barrier for all load instructions before it
+ * and shareable to other cores
+ * bar.bwaw: ordering barrier for all store instructions before it
+ * bar.bwaws: ordering barrier for all store instructions before it
+ * and shareable to other cores
+ */
+
+#ifdef CONFIG_CPU_HAS_CACHEV2
+#define mb() asm volatile ("bar.brwarw\n":::"memory")
+#define rmb() asm volatile ("bar.brar\n":::"memory")
+#define wmb() asm volatile ("bar.bwaw\n":::"memory")
+
+#ifdef CONFIG_SMP
+#define __smp_mb() asm volatile ("bar.brwarws\n":::"memory")
+#define __smp_rmb() asm volatile ("bar.brars\n":::"memory")
+#define __smp_wmb() asm volatile ("bar.bwaws\n":::"memory")
+#endif /* CONFIG_SMP */
+
+#define sync_is() asm volatile ("sync.is\n":::"memory")
+
+#else /* !CONFIG_CPU_HAS_CACHEV2 */
+#define mb() asm volatile ("sync\n":::"memory")
+#endif
+
+#include <asm-generic/barrier.h>
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_CSKY_BARRIER_H */
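[Reviewer note] A quick illustration of the ordering the SMP variants provide — the classic message-passing pattern, annotated with the CACHEV2 instruction each primitive expands to:

    /* Writer (CPU 0)                    Reader (CPU 1)
     *
     * WRITE_ONCE(data, 42);             while (!READ_ONCE(ready))
     * __smp_wmb();   // bar.bwaws               ;
     * WRITE_ONCE(ready, 1);             __smp_rmb();   // bar.brars
     *                                   r = READ_ONCE(data); // sees 42
     */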
diff --git a/arch/csky/include/asm/bitops.h b/arch/csky/include/asm/bitops.h
new file mode 100644
index 000000000000..335f2883fb1e
--- /dev/null
+++ b/arch/csky/include/asm/bitops.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_BITOPS_H
+#define __ASM_CSKY_BITOPS_H
+
+#include <linux/compiler.h>
+#include <asm/barrier.h>
+
+/*
+ * asm-generic/bitops/ffs.h
+ */
+static inline int ffs(int x)
+{
+ if (!x)
+ return 0;
+
+ asm volatile (
+ "brev %0\n"
+ "ff1 %0\n"
+ "addi %0, 1\n"
+ : "=&r"(x)
+ : "0"(x));
+ return x;
+}
+
+/*
+ * asm-generic/bitops/__ffs.h
+ */
+static __always_inline unsigned long __ffs(unsigned long x)
+{
+ asm volatile (
+ "brev %0\n"
+ "ff1 %0\n"
+ : "=&r"(x)
+ : "0"(x));
+ return x;
+}
+
+/*
+ * asm-generic/bitops/fls.h
+ */
+static __always_inline int fls(int x)
+{
+ asm volatile(
+ "ff1 %0\n"
+ : "=&r"(x)
+ : "0"(x));
+
+ return (32 - x);
+}
+
+/*
+ * asm-generic/bitops/__fls.h
+ */
+static __always_inline unsigned long __fls(unsigned long x)
+{
+ return fls(x) - 1;
+}
+
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/find.h>
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
+#include <asm-generic/bitops/atomic.h>
+
+/*
+ * Bug workaround: only the atomic variant is safe here, so alias the
+ * non-atomic __clear_bit() to clear_bit().
+ */
+#include <asm-generic/bitops/non-atomic.h>
+#define __clear_bit(nr, vaddr) clear_bit(nr, vaddr)
+
+#include <asm-generic/bitops/le.h>
+#include <asm-generic/bitops/ext2-atomic.h>
+#endif /* __ASM_CSKY_BITOPS_H */
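[Reviewer note] fls() above implies that ff1 counts the zero bits above the most-significant set bit, so ffs() has to bit-reverse first. A worked example of the three-instruction sequence, assuming that ff1 semantic:

    /* ffs(0x10):
     *   x          = 0x00000010   bit 4 set
     *   brev x     = 0x08000000   bit 4 -> bit 27
     *   ff1 x      = 4            four zero bits above bit 27
     *   addi x, 1  = 5            ffs() is 1-based: ffs(0x10) == 5
     */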
diff --git a/arch/csky/include/asm/bug.h b/arch/csky/include/asm/bug.h
new file mode 100644
index 000000000000..bd7b3235bb84
--- /dev/null
+++ b/arch/csky/include/asm/bug.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_BUG_H
+#define __ASM_CSKY_BUG_H
+
+#include <linux/compiler.h>
+#include <linux/const.h>
+#include <linux/types.h>
+
+#define BUG() \
+do { \
+ asm volatile ("bkpt\n"); \
+ unreachable(); \
+} while (0)
+
+#define HAVE_ARCH_BUG
+
+#include <asm-generic/bug.h>
+
+struct pt_regs;
+
+void die_if_kernel(char *str, struct pt_regs *regs, int nr);
+void show_regs(struct pt_regs *regs);
+
+#endif /* __ASM_CSKY_BUG_H */
diff --git a/arch/csky/include/asm/cache.h b/arch/csky/include/asm/cache.h
new file mode 100644
index 000000000000..d68373463676
--- /dev/null
+++ b/arch/csky/include/asm/cache.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_CACHE_H
+#define __ASM_CSKY_CACHE_H
+
+/* bytes per L1 cache line */
+#define L1_CACHE_SHIFT CONFIG_L1_CACHE_SHIFT
+
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+
+#ifndef __ASSEMBLY__
+
+void dcache_wb_line(unsigned long start);
+
+void icache_inv_range(unsigned long start, unsigned long end);
+void icache_inv_all(void);
+
+void dcache_wb_range(unsigned long start, unsigned long end);
+void dcache_wbinv_all(void);
+
+void cache_wbinv_range(unsigned long start, unsigned long end);
+void cache_wbinv_all(void);
+
+void dma_wbinv_range(unsigned long start, unsigned long end);
+void dma_wb_range(unsigned long start, unsigned long end);
+
+#endif
+#endif /* __ASM_CSKY_CACHE_H */
diff --git a/arch/csky/include/asm/cacheflush.h b/arch/csky/include/asm/cacheflush.h
new file mode 100644
index 000000000000..a96da67261ae
--- /dev/null
+++ b/arch/csky/include/asm/cacheflush.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_CACHEFLUSH_H
+#define __ASM_CSKY_CACHEFLUSH_H
+
+#include <abi/cacheflush.h>
+
+#endif /* __ASM_CSKY_CACHEFLUSH_H */
diff --git a/arch/csky/include/asm/checksum.h b/arch/csky/include/asm/checksum.h
new file mode 100644
index 000000000000..7685824291b1
--- /dev/null
+++ b/arch/csky/include/asm/checksum.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_CHECKSUM_H
+#define __ASM_CSKY_CHECKSUM_H
+
+#include <linux/in6.h>
+#include <asm/byteorder.h>
+
+static inline __sum16 csum_fold(__wsum csum)
+{
+ u32 tmp;
+
+ asm volatile(
+ "mov %1, %0\n"
+ "rori %0, 16\n"
+ "addu %0, %1\n"
+ "lsri %0, 16\n"
+ : "=r"(csum), "=r"(tmp)
+ : "0"(csum));
+
+ return (__force __sum16) ~csum;
+}
+#define csum_fold csum_fold
+
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+ unsigned short len, unsigned short proto, __wsum sum)
+{
+ asm volatile(
+ "clrc\n"
+ "addc %0, %1\n"
+ "addc %0, %2\n"
+ "addc %0, %3\n"
+ "inct %0\n"
+ : "=r"(sum)
+ : "r"((__force u32)saddr), "r"((__force u32)daddr),
+#ifdef __BIG_ENDIAN
+ "r"(proto + len),
+#else
+ "r"((proto + len) << 8),
+#endif
+ "0" ((__force unsigned long)sum)
+ : "cc");
+ return sum;
+}
+#define csum_tcpudp_nofold csum_tcpudp_nofold
+
+#include <asm-generic/checksum.h>
+
+#endif /* __ASM_CSKY_CHECKSUM_H */
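[Reviewer note] csum_fold() performs an end-around-carry fold: rori swaps the half-words, addu adds them (the low-half carry propagates into the high half), and lsri extracts the folded 16 bits. A portable-C sketch of the same operation:

    static inline __sum16 csum_fold_sketch(__wsum csum)
    {
            u32 sum = (__force u32)csum;

            sum = (sum >> 16) + (sum & 0xffff); /* add the two half-words */
            sum += sum >> 16;                   /* absorb the carry bit   */
            return (__force __sum16)~sum;
    }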
diff --git a/arch/csky/include/asm/cmpxchg.h b/arch/csky/include/asm/cmpxchg.h
new file mode 100644
index 000000000000..89224530a0ee
--- /dev/null
+++ b/arch/csky/include/asm/cmpxchg.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_CMPXCHG_H
+#define __ASM_CSKY_CMPXCHG_H
+
+#ifdef CONFIG_CPU_HAS_LDSTEX
+#include <asm/barrier.h>
+
+extern void __bad_xchg(void);
+
+#define __xchg(new, ptr, size) \
+({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(new) __new = (new); \
+ __typeof__(*(ptr)) __ret; \
+ unsigned long tmp; \
+ switch (size) { \
+ case 4: \
+ smp_mb(); \
+ asm volatile ( \
+ "1: ldex.w %0, (%3) \n" \
+ " mov %1, %2 \n" \
+ " stex.w %1, (%3) \n" \
+ " bez %1, 1b \n" \
+ : "=&r" (__ret), "=&r" (tmp) \
+ : "r" (__new), "r"(__ptr) \
+ :); \
+ smp_mb(); \
+ break; \
+ default: \
+ __bad_xchg(); \
+ } \
+ __ret; \
+})
+
+#define xchg(ptr, x) (__xchg((x), (ptr), sizeof(*(ptr))))
+
+#define __cmpxchg(ptr, old, new, size) \
+({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(new) __new = (new); \
+ __typeof__(new) __tmp; \
+ __typeof__(old) __old = (old); \
+ __typeof__(*(ptr)) __ret; \
+ switch (size) { \
+ case 4: \
+ smp_mb(); \
+ asm volatile ( \
+ "1: ldex.w %0, (%3) \n" \
+ " cmpne %0, %4 \n" \
+ " bt 2f \n" \
+ " mov %1, %2 \n" \
+ " stex.w %1, (%3) \n" \
+ " bez %1, 1b \n" \
+ "2: \n" \
+ : "=&r" (__ret), "=&r" (__tmp) \
+ : "r" (__new), "r"(__ptr), "r"(__old) \
+ :); \
+ smp_mb(); \
+ break; \
+ default: \
+ __bad_xchg(); \
+ } \
+ __ret; \
+})
+
+#define cmpxchg(ptr, o, n) \
+ (__cmpxchg((ptr), (o), (n), sizeof(*(ptr))))
+#else
+#include <asm-generic/cmpxchg.h>
+#endif
+
+#endif /* __ASM_CSKY_CMPXCHG_H */
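[Reviewer note] Because size is a compile-time constant, the "default: __bad_xchg()" arm is optimized away for 4-byte operands and becomes a link-time error for every other size (__bad_xchg() is declared but never defined). A usage sketch of the cmpxchg() above:

    /* Lock-free increment built on cmpxchg(); 32-bit operands only. */
    static inline void inc_u32_sketch(u32 *p)
    {
            u32 old;

            do {
                    old = READ_ONCE(*p);
            } while (cmpxchg(p, old, old + 1) != old);
    }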
diff --git a/arch/csky/include/asm/elf.h b/arch/csky/include/asm/elf.h
new file mode 100644
index 000000000000..773b133ca297
--- /dev/null
+++ b/arch/csky/include/asm/elf.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_ELF_H
+#define __ASM_CSKY_ELF_H
+
+#include <asm/ptrace.h>
+#include <abi/regdef.h>
+
+#define ELF_ARCH 252
+
+/* CSKY Relocations */
+#define R_CSKY_NONE 0
+#define R_CSKY_32 1
+#define R_CSKY_PCIMM8BY4 2
+#define R_CSKY_PCIMM11BY2 3
+#define R_CSKY_PCIMM4BY2 4
+#define R_CSKY_PC32 5
+#define R_CSKY_PCRELJSR_IMM11BY2 6
+#define R_CSKY_GNU_VTINHERIT 7
+#define R_CSKY_GNU_VTENTRY 8
+#define R_CSKY_RELATIVE 9
+#define R_CSKY_COPY 10
+#define R_CSKY_GLOB_DAT 11
+#define R_CSKY_JUMP_SLOT 12
+#define R_CSKY_ADDR_HI16 24
+#define R_CSKY_ADDR_LO16 25
+#define R_CSKY_PCRELJSR_IMM26BY2 40
+
+typedef unsigned long elf_greg_t;
+
+typedef struct user_fp elf_fpregset_t;
+
+#define ELF_NGREG (sizeof(struct pt_regs) / sizeof(elf_greg_t))
+
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) ((x)->e_machine == ELF_ARCH)
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define USE_ELF_CORE_DUMP
+#define ELF_EXEC_PAGESIZE 4096
+#define ELF_CLASS ELFCLASS32
+#define ELF_PLAT_INIT(_r, load_addr) { _r->a0 = 0; }
+
+#ifdef __cskyBE__
+#define ELF_DATA ELFDATA2MSB
+#else
+#define ELF_DATA ELFDATA2LSB
+#endif
+
+/*
+ * This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ * use of this is to invoke "./ld.so someprog" to test out a new version of
+ * the loader. We need to make sure that it is out of the way of the program
+ * that it will "exec", and that there is sufficient room for the brk.
+ */
+#define ELF_ET_DYN_BASE 0x0UL
+#include <abi/elf.h>
+
+/* Similar, but for a thread other than current. */
+struct task_struct;
+extern int dump_task_regs(struct task_struct *tsk, elf_gregset_t *elf_regs);
+#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs)
+
+#define ELF_HWCAP (0)
+
+/*
+ * This yields a string that ld.so will use to load implementation specific
+ * libraries for optimization. This is more specific in intent than poking
+ * at uname or /proc/cpuinfo.
+ */
+#define ELF_PLATFORM (NULL)
+#define SET_PERSONALITY(ex) set_personality(PER_LINUX)
+
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+struct linux_binprm;
+extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+ int uses_interp);
+#endif /* __ASM_CSKY_ELF_H */
diff --git a/arch/csky/include/asm/fixmap.h b/arch/csky/include/asm/fixmap.h
new file mode 100644
index 000000000000..380ff0a307df
--- /dev/null
+++ b/arch/csky/include/asm/fixmap.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_FIXMAP_H
+#define __ASM_CSKY_FIXMAP_H
+
+#include <asm/page.h>
+#ifdef CONFIG_HIGHMEM
+#include <linux/threads.h>
+#include <asm/kmap_types.h>
+#endif
+
+enum fixed_addresses {
+#ifdef CONFIG_HIGHMEM
+ FIX_KMAP_BEGIN,
+ FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
+#endif
+ __end_of_fixed_addresses
+};
+
+#define FIXADDR_TOP 0xffffc000
+#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
+
+#include <asm-generic/fixmap.h>
+
+#endif /* __ASM_CSKY_FIXMAP_H */
diff --git a/arch/csky/include/asm/highmem.h b/arch/csky/include/asm/highmem.h
new file mode 100644
index 000000000000..a345a2f2c22e
--- /dev/null
+++ b/arch/csky/include/asm/highmem.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_HIGHMEM_H
+#define __ASM_CSKY_HIGHMEM_H
+
+#ifdef __KERNEL__
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/uaccess.h>
+#include <asm/kmap_types.h>
+#include <asm/cache.h>
+
+/* undef for production */
+#define HIGHMEM_DEBUG 1
+
+/* declarations for highmem.c */
+extern unsigned long highstart_pfn, highend_pfn;
+
+extern pte_t *pkmap_page_table;
+
+/*
+ * Right now we initialize only a single pte table. It can be extended
+ * easily, subsequent pte tables have to be allocated in one physical
+ * chunk of RAM.
+ */
+#define LAST_PKMAP 1024
+#define LAST_PKMAP_MASK (LAST_PKMAP-1)
+#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
+#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
+
+extern void *kmap_high(struct page *page);
+extern void kunmap_high(struct page *page);
+
+extern void *kmap(struct page *page);
+extern void kunmap(struct page *page);
+extern void *kmap_atomic(struct page *page);
+extern void __kunmap_atomic(void *kvaddr);
+extern void *kmap_atomic_pfn(unsigned long pfn);
+extern struct page *kmap_atomic_to_page(void *ptr);
+
+#define flush_cache_kmaps() do {} while (0)
+
+extern void kmap_init(void);
+
+#define kmap_prot PAGE_KERNEL
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_CSKY_HIGHMEM_H */
diff --git a/arch/csky/include/asm/io.h b/arch/csky/include/asm/io.h
new file mode 100644
index 000000000000..ecae6b358f95
--- /dev/null
+++ b/arch/csky/include/asm/io.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_IO_H
+#define __ASM_CSKY_IO_H
+
+#include <abi/pgtable-bits.h>
+#include <linux/types.h>
+#include <linux/version.h>
+
+extern void __iomem *ioremap(phys_addr_t offset, size_t size);
+
+extern void iounmap(void *addr);
+
+extern int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
+ size_t size, unsigned long flags);
+
+#define ioremap_nocache(phy, sz) ioremap(phy, sz)
+#define ioremap_wc ioremap_nocache
+#define ioremap_wt ioremap_nocache
+
+#include <asm-generic/io.h>
+
+#endif /* __ASM_CSKY_IO_H */
diff --git a/arch/csky/include/asm/irqflags.h b/arch/csky/include/asm/irqflags.h
new file mode 100644
index 000000000000..9e3a569a55d6
--- /dev/null
+++ b/arch/csky/include/asm/irqflags.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_IRQFLAGS_H
+#define __ASM_CSKY_IRQFLAGS_H
+#include <abi/reg_ops.h>
+
+static inline unsigned long arch_local_irq_save(void)
+{
+ unsigned long flags;
+
+ flags = mfcr("psr");
+ asm volatile("psrclr ie\n":::"memory");
+ return flags;
+}
+#define arch_local_irq_save arch_local_irq_save
+
+static inline void arch_local_irq_enable(void)
+{
+ asm volatile("psrset ee, ie\n":::"memory");
+}
+#define arch_local_irq_enable arch_local_irq_enable
+
+static inline void arch_local_irq_disable(void)
+{
+ asm volatile("psrclr ie\n":::"memory");
+}
+#define arch_local_irq_disable arch_local_irq_disable
+
+static inline unsigned long arch_local_save_flags(void)
+{
+ return mfcr("psr");
+}
+#define arch_local_save_flags arch_local_save_flags
+
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+ mtcr("psr", flags);
+}
+#define arch_local_irq_restore arch_local_irq_restore
+
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+ return !(flags & (1<<6));
+}
+#define arch_irqs_disabled_flags arch_irqs_disabled_flags
+
+#include <asm-generic/irqflags.h>
+
+#endif /* __ASM_CSKY_IRQFLAGS_H */
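[Reviewer note] Bit 6 of PSR is the IE (interrupt-enable) flag, which is what arch_irqs_disabled_flags() tests. A sketch of the usual save/restore pairing:

    static void update_counter_sketch(unsigned long *counter)
    {
            unsigned long flags;

            flags = arch_local_irq_save();  /* mfcr psr, then psrclr ie */
            (*counter)++;                   /* safe from local IRQs     */
            arch_local_irq_restore(flags);  /* mtcr psr                 */
    }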
diff --git a/arch/csky/include/asm/mmu.h b/arch/csky/include/asm/mmu.h
new file mode 100644
index 000000000000..cb344675ccc4
--- /dev/null
+++ b/arch/csky/include/asm/mmu.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_MMU_H
+#define __ASM_CSKY_MMU_H
+
+typedef struct {
+ unsigned long asid[NR_CPUS];
+ void *vdso;
+} mm_context_t;
+
+#endif /* __ASM_CSKY_MMU_H */
diff --git a/arch/csky/include/asm/mmu_context.h b/arch/csky/include/asm/mmu_context.h
new file mode 100644
index 000000000000..c410aa4fff1a
--- /dev/null
+++ b/arch/csky/include/asm/mmu_context.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_MMU_CONTEXT_H
+#define __ASM_CSKY_MMU_CONTEXT_H
+
+#include <asm-generic/mm_hooks.h>
+#include <asm/setup.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <abi/ckmmu.h>
+
+static inline void tlbmiss_handler_setup_pgd(unsigned long pgd, bool kernel)
+{
+ pgd &= ~(1<<31);
+ pgd += PHYS_OFFSET;
+ pgd |= 1;
+ setup_pgd(pgd, kernel);
+}
+
+#define TLBMISS_HANDLER_SETUP_PGD(pgd) \
+ tlbmiss_handler_setup_pgd((unsigned long)pgd, 0)
+#define TLBMISS_HANDLER_SETUP_PGD_KERNEL(pgd) \
+ tlbmiss_handler_setup_pgd((unsigned long)pgd, 1)
+
+static inline unsigned long tlb_get_pgd(void)
+{
+ return ((get_pgd()|(1<<31)) - PHYS_OFFSET) & ~1;
+}
+
+#define cpu_context(cpu, mm) ((mm)->context.asid[cpu])
+#define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & ASID_MASK)
+#define asid_cache(cpu) (cpu_data[cpu].asid_cache)
+
+#define ASID_FIRST_VERSION (1 << CONFIG_CPU_ASID_BITS)
+#define ASID_INC 0x1
+#define ASID_MASK (ASID_FIRST_VERSION - 1)
+#define ASID_VERSION_MASK ~ASID_MASK
+
+#define destroy_context(mm) do {} while (0)
+#define enter_lazy_tlb(mm, tsk) do {} while (0)
+#define deactivate_mm(tsk, mm) do {} while (0)
+
+/*
+ * All the upper bits left unused by the hardware are treated as a
+ * software ASID version extension.
+ */
+static inline void
+get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
+{
+ unsigned long asid = asid_cache(cpu);
+
+ asid += ASID_INC;
+ if (!(asid & ASID_MASK)) {
+ flush_tlb_all(); /* start new asid cycle */
+ if (!asid) /* fix version if needed */
+ asid = ASID_FIRST_VERSION;
+ }
+ cpu_context(cpu, mm) = asid_cache(cpu) = asid;
+}
+
+/*
+ * Initialize the context related info for a new mm_struct
+ * instance.
+ */
+static inline int
+init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+ int i;
+
+ for_each_online_cpu(i)
+ cpu_context(i, mm) = 0;
+ return 0;
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ unsigned int cpu = smp_processor_id();
+ unsigned long flags;
+
+ local_irq_save(flags);
+ /* Check if our ASID is of an older version and thus invalid */
+ if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
+ get_new_mmu_context(next, cpu);
+ write_mmu_entryhi(cpu_asid(cpu, next));
+ TLBMISS_HANDLER_SETUP_PGD(next->pgd);
+
+ /*
+ * Mark current->active_mm as not "active" anymore.
+ * We don't want to mislead possible IPI tlb flush routines.
+ */
+ cpumask_clear_cpu(cpu, mm_cpumask(prev));
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+
+ local_irq_restore(flags);
+}
+
+/*
+ * After we have set current->mm to a new value, this activates
+ * the context for the new mm so we see the new mappings.
+ */
+static inline void
+activate_mm(struct mm_struct *prev, struct mm_struct *next)
+{
+ unsigned long flags;
+ int cpu = smp_processor_id();
+
+ local_irq_save(flags);
+
+ /* Unconditionally get a new ASID. */
+ get_new_mmu_context(next, cpu);
+
+ write_mmu_entryhi(cpu_asid(cpu, next));
+ TLBMISS_HANDLER_SETUP_PGD(next->pgd);
+
+ /* mark mmu ownership change */
+ cpumask_clear_cpu(cpu, mm_cpumask(prev));
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+
+ local_irq_restore(flags);
+}
+
+/*
+ * If mm is currently active_mm, we can't really drop it. Instead,
+ * we will get a new one for it.
+ */
+static inline void
+drop_mmu_context(struct mm_struct *mm, unsigned int cpu)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
+ get_new_mmu_context(mm, cpu);
+ write_mmu_entryhi(cpu_asid(cpu, mm));
+ } else {
+ /* will get a new context next time */
+ cpu_context(cpu, mm) = 0;
+ }
+
+ local_irq_restore(flags);
+}
+
+#endif /* __ASM_CSKY_MMU_CONTEXT_H */
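[Reviewer note] A worked example of the version/ASID split, assuming CONFIG_CPU_ASID_BITS = 8 (hypothetical value):

    /* ASID_FIRST_VERSION = 0x100, ASID_MASK = 0xff
     *
     *   asid_cache(cpu) = 0x1ff          last ASID of generation 0x1
     *   get_new_mmu_context(): 0x1ff + 1 = 0x200
     *     low bits wrapped to 0 -> flush_tlb_all(), new generation
     *   Any mm still tagged 0x1xx now differs in the version bits, so
     *   the ASID_VERSION_MASK check in switch_mm() assigns it a fresh
     *   ASID instead of trusting stale TLB entries.
     */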
diff --git a/arch/csky/include/asm/page.h b/arch/csky/include/asm/page.h
new file mode 100644
index 000000000000..73cf2bd66a13
--- /dev/null
+++ b/arch/csky/include/asm/page.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_PAGE_H
+#define __ASM_CSKY_PAGE_H
+
+#include <asm/setup.h>
+#include <asm/cache.h>
+#include <linux/const.h>
+
+/*
+ * PAGE_SHIFT determines the page size
+ */
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE - 1))
+#define THREAD_SIZE (PAGE_SIZE * 2)
+#define THREAD_MASK (~(THREAD_SIZE - 1))
+#define THREAD_SHIFT (PAGE_SHIFT + 1)
+
+/*
+ * NOTE: virtual isn't really correct, actually it should be the offset into the
+ * memory node, but we have no highmem, so that works for now.
+ * TODO: implement (fast) pfn<->pgdat_idx conversion functions, this makes lots
+ * of the shifts unnecessary.
+ */
+
+#ifndef __ASSEMBLY__
+
+#include <linux/pfn.h>
+
+#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
+#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
+
+#define virt_addr_valid(kaddr) ((void *)(kaddr) >= (void *)PAGE_OFFSET && \
+ (void *)(kaddr) < high_memory)
+#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
+
+extern void *memset(void *dest, int c, size_t l);
+extern void *memcpy(void *to, const void *from, size_t l);
+
+#define clear_page(page) memset((page), 0, PAGE_SIZE)
+#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
+
+#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
+#define phys_to_page(paddr) (pfn_to_page(PFN_DOWN(paddr)))
+
+struct page;
+
+#include <abi/page.h>
+
+struct vm_area_struct;
+
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct { unsigned long pte_low; } pte_t;
+#define pte_val(x) ((x).pte_low)
+
+typedef struct { unsigned long pgd; } pgd_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+typedef struct page *pgtable_t;
+
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define ptep_buddy(x) ((pte_t *)((unsigned long)(x) ^ sizeof(pte_t)))
+
+#define __pte(x) ((pte_t) { (x) })
+#define __pgd(x) ((pgd_t) { (x) })
+#define __pgprot(x) ((pgprot_t) { (x) })
+
+#endif /* !__ASSEMBLY__ */
+
+#define PHYS_OFFSET (CONFIG_RAM_BASE & ~(LOWMEM_LIMIT - 1))
+#define PHYS_OFFSET_OFFSET (CONFIG_RAM_BASE & (LOWMEM_LIMIT - 1))
+#define ARCH_PFN_OFFSET PFN_DOWN(CONFIG_RAM_BASE)
+
+#define PAGE_OFFSET 0x80000000
+#define LOWMEM_LIMIT 0x40000000
+
+#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)
+#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - \
+ PHYS_OFFSET))
+#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
+
+#define MAP_NR(x) PFN_DOWN((unsigned long)(x) - PAGE_OFFSET - \
+ PHYS_OFFSET_OFFSET)
+#define virt_to_page(x) (mem_map + MAP_NR(x))
+
+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+/*
+ * main RAM and kernel working space are coincident at 0x80000000, but to make
+ * life more interesting, there's also an uncached virtual shadow at 0xb0000000
+ * - these mappings are fixed in the MMU
+ */
+
+#define pfn_to_kaddr(x) __va(PFN_PHYS(x))
+
+#include <asm-generic/memory_model.h>
+#include <asm-generic/getorder.h>
+
+#endif /* __ASM_CSKY_PAGE_H */
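[Reviewer note] A worked __pa()/__va() round trip, assuming CONFIG_RAM_BASE = 0x40000000 (so PHYS_OFFSET = 0x40000000 and PHYS_OFFSET_OFFSET = 0):

    /* __pa(0x80001000) = 0x80001000 - PAGE_OFFSET + PHYS_OFFSET
     *                  = 0x80001000 - 0x80000000 + 0x40000000
     *                  = 0x40001000
     * __va(0x40001000) = 0x80001000   (exact inverse)
     */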
diff --git a/arch/csky/include/asm/pgalloc.h b/arch/csky/include/asm/pgalloc.h
new file mode 100644
index 000000000000..bf4f4a0e140e
--- /dev/null
+++ b/arch/csky/include/asm/pgalloc.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_PGALLOC_H
+#define __ASM_CSKY_PGALLOC_H
+
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+
+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
+ pte_t *pte)
+{
+ set_pmd(pmd, __pmd(__pa(pte)));
+}
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
+ pgtable_t pte)
+{
+ set_pmd(pmd, __pmd(__pa(page_address(pte))));
+}
+
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+extern void pgd_init(unsigned long *p);
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+ unsigned long address)
+{
+ pte_t *pte;
+ unsigned long *kaddr, i;
+
+ pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_RETRY_MAYFAIL,
+ PTE_ORDER);
+ if (!pte)
+ return NULL;
+ kaddr = (unsigned long *)pte;
+ if (address & 0x80000000)
+ for (i = 0; i < (PAGE_SIZE/4); i++)
+ *(kaddr + i) = 0x1;
+ else
+ clear_page(kaddr);
+
+ return pte;
+}
+
+static inline struct page *pte_alloc_one(struct mm_struct *mm,
+ unsigned long address)
+{
+ struct page *pte;
+ unsigned long *kaddr, i;
+
+ pte = alloc_pages(GFP_KERNEL | __GFP_RETRY_MAYFAIL, PTE_ORDER);
+ if (pte) {
+ kaddr = kmap_atomic(pte);
+ if (address & 0x80000000) {
+ for (i = 0; i < (PAGE_SIZE/4); i++)
+ *(kaddr + i) = 0x1;
+ } else
+ clear_page(kaddr);
+ kunmap_atomic(kaddr);
+ pgtable_page_ctor(pte);
+ }
+ return pte;
+}
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ free_pages((unsigned long)pte, PTE_ORDER);
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
+{
+ pgtable_page_dtor(pte);
+ __free_pages(pte, PTE_ORDER);
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ free_pages((unsigned long)pgd, PGD_ORDER);
+}
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ pgd_t *ret;
+ pgd_t *init;
+
+ ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
+ if (ret) {
+ init = pgd_offset(&init_mm, 0UL);
+ pgd_init((unsigned long *)ret);
+ memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+ /* prevent out-of-order execution */
+ smp_mb();
+#ifdef CONFIG_CPU_NEED_TLBSYNC
+ dcache_wb_range((unsigned int)ret,
+ (unsigned int)(ret + PTRS_PER_PGD));
+#endif
+ }
+
+ return ret;
+}
+
+#define __pte_free_tlb(tlb, pte, address) \
+do { \
+ pgtable_page_dtor(pte); \
+ tlb_remove_page(tlb, pte); \
+} while (0)
+
+#define check_pgt_cache() do {} while (0)
+
+extern void pagetable_init(void);
+extern void pre_mmu_init(void);
+extern void pre_trap_init(void);
+
+#endif /* __ASM_CSKY_PGALLOC_H */
diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h
new file mode 100644
index 000000000000..edfcbb25fd9f
--- /dev/null
+++ b/arch/csky/include/asm/pgtable.h
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_PGTABLE_H
+#define __ASM_CSKY_PGTABLE_H
+
+#include <asm/fixmap.h>
+#include <asm/addrspace.h>
+#include <abi/pgtable-bits.h>
+#include <asm-generic/pgtable-nopmd.h>
+
+#define PGDIR_SHIFT 22
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+#define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE)
+#define FIRST_USER_ADDRESS 0UL
+
+#define PKMAP_BASE (0xff800000)
+
+#define VMALLOC_START (0xc0008000)
+#define VMALLOC_END (PKMAP_BASE - 2*PAGE_SIZE)
+
+/*
+ * C-SKY uses a two-level paging structure:
+ */
+#define PGD_ORDER 0
+#define PTE_ORDER 0
+
+#define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
+#define PTRS_PER_PMD 1
+#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
+
+#define pte_ERROR(e) \
+ pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
+#define pgd_ERROR(e) \
+ pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+/* Find an entry in the third-level page table.. */
+#define __pte_offset_t(address) \
+ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir, address) \
+ (pmd_page_vaddr(*(dir)) + __pte_offset_t(address))
+#define pte_offset_map(dir, address) \
+ ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset_t(address))
+#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
+#define pte_clear(mm, addr, ptep) set_pte((ptep), \
+ (((unsigned int)addr&0x80000000)?__pte(1):__pte(0)))
+#define pte_none(pte) (!(pte_val(pte)&0xfffffffe))
+#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
+#define pte_pfn(x) ((unsigned long)((x).pte_low >> PAGE_SHIFT))
+#define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << PAGE_SHIFT) \
+ | pgprot_val(prot))
+
+#define __READABLE (_PAGE_READ | _PAGE_VALID | _PAGE_ACCESSED)
+#define __WRITEABLE (_PAGE_WRITE | _PAGE_DIRTY | _PAGE_MODIFIED)
+
+#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | \
+ _CACHE_MASK)
+
+#define pte_unmap(pte) ((void)(pte))
+
+#define __swp_type(x) (((x).val >> 4) & 0xff)
+#define __swp_offset(x) ((x).val >> 12)
+#define __swp_entry(type, offset) ((swp_entry_t) {((type) << 4) | \
+ ((offset) << 12) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+#define pte_page(x) pfn_to_page(pte_pfn(x))
+#define __mk_pte(page_nr, pgprot) __pte(((page_nr) << PAGE_SHIFT) | \
+ pgprot_val(pgprot))
+
+/*
+ * C-SKY can't enforce execute permission separately and treats it the
+ * same as read. Also, write permissions imply read permissions. This is
+ * the closest we can get by reasonable means.
+ */
+#define PAGE_NONE __pgprot(_PAGE_PRESENT | _CACHE_CACHED)
+#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+ _CACHE_CACHED)
+#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_READ | _CACHE_CACHED)
+#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_READ | _CACHE_CACHED)
+#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
+ _PAGE_GLOBAL | _CACHE_CACHED)
+#define PAGE_USERIO __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+ _CACHE_CACHED)
+
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY
+#define __P100 PAGE_READONLY
+#define __P101 PAGE_READONLY
+#define __P110 PAGE_COPY
+#define __P111 PAGE_COPY
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED
+#define __S100 PAGE_READONLY
+#define __S101 PAGE_READONLY
+#define __S110 PAGE_SHARED
+#define __S111 PAGE_SHARED
+
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+extern void load_pgd(unsigned long pg_dir);
+extern pte_t invalid_pte_table[PTRS_PER_PTE];
+
+static inline int pte_special(pte_t pte) { return 0; }
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+
+static inline void set_pte(pte_t *p, pte_t pte)
+{
+ *p = pte;
+#if defined(CONFIG_CPU_NEED_TLBSYNC)
+ dcache_wb_line((u32)p);
+#endif
+ /* prevent out-of-order execution */
+ smp_mb();
+}
+#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
+
+static inline pte_t *pmd_page_vaddr(pmd_t pmd)
+{
+ unsigned long ptr;
+
+ ptr = pmd_val(pmd);
+
+ return __va(ptr);
+}
+
+#define pmd_phys(pmd) pmd_val(pmd)
+
+static inline void set_pmd(pmd_t *p, pmd_t pmd)
+{
+ *p = pmd;
+#if defined(CONFIG_CPU_NEED_TLBSYNC)
+ dcache_wb_line((u32)p);
+#endif
+ /* prevent speculative execution */
+ smp_mb();
+}
+
+
+static inline int pmd_none(pmd_t pmd)
+{
+ return pmd_val(pmd) == __pa(invalid_pte_table);
+}
+
+#define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK)
+
+static inline int pmd_present(pmd_t pmd)
+{
+ return (pmd_val(pmd) != __pa(invalid_pte_table));
+}
+
+static inline void pmd_clear(pmd_t *p)
+{
+ pmd_val(*p) = (__pa(invalid_pte_table));
+#if defined(CONFIG_CPU_NEED_TLBSYNC)
+ dcache_wb_line((u32)p);
+#endif
+}
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+static inline int pte_read(pte_t pte)
+{
+ return pte.pte_low & _PAGE_READ;
+}
+
+static inline int pte_write(pte_t pte)
+{
+ return (pte).pte_low & _PAGE_WRITE;
+}
+
+static inline int pte_dirty(pte_t pte)
+{
+ return (pte).pte_low & _PAGE_MODIFIED;
+}
+
+static inline int pte_young(pte_t pte)
+{
+ return (pte).pte_low & _PAGE_ACCESSED;
+}
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
+ return pte;
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+ pte_val(pte) &= ~(_PAGE_MODIFIED|_PAGE_DIRTY);
+ return pte;
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+ pte_val(pte) &= ~(_PAGE_ACCESSED|_PAGE_VALID);
+ return pte;
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_WRITE;
+ if (pte_val(pte) & _PAGE_MODIFIED)
+ pte_val(pte) |= _PAGE_DIRTY;
+ return pte;
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_MODIFIED;
+ if (pte_val(pte) & _PAGE_WRITE)
+ pte_val(pte) |= _PAGE_DIRTY;
+ return pte;
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_ACCESSED;
+ if (pte_val(pte) & _PAGE_READ)
+ pte_val(pte) |= _PAGE_VALID;
+ return pte;
+}
+
+#define __pgd_offset(address) pgd_index(address)
+#define __pud_offset(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
+#define __pmd_offset(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+#define pgd_index(address) ((address) >> PGDIR_SHIFT)
+
+/*
+ * Macro to mark a page protection value as "uncacheable". Note
+ * that "protection" is really a misnomer here as the protection value
+ * contains the memory attribute bits, dirty bits, and various other
+ * bits as well.
+ */
+#define pgprot_noncached pgprot_noncached
+
+static inline pgprot_t pgprot_noncached(pgprot_t _prot)
+{
+ unsigned long prot = pgprot_val(_prot);
+
+ prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;
+
+ return __pgprot(prot);
+}
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
+ (pgprot_val(newprot)));
+}
+
+/* to find an entry in a page-table-directory */
+static inline pgd_t *pgd_offset(struct mm_struct *mm, unsigned long address)
+{
+ return mm->pgd + pgd_index(address);
+}
+
+/* Find an entry in the third-level page table.. */
+static inline pte_t *pte_offset(pmd_t *dir, unsigned long address)
+{
+ return (pte_t *) (pmd_page_vaddr(*dir)) +
+ ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
+}
+
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+extern void paging_init(void);
+
+extern void show_jtlb_table(void);
+
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
+ pte_t *pte);
+
+/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
+#define kern_addr_valid(addr) (1)
+
+/*
+ * No page table caches to initialise
+ */
+#define pgtable_cache_init() do {} while (0)
+
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+#include <asm-generic/pgtable.h>
+
+#endif /* __ASM_CSKY_PGTABLE_H */
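[Reviewer note] With PGDIR_SHIFT = 22 and 4 KiB pages, each pgd entry maps 4 MiB and the pmd level is folded away by pgtable-nopmd. A sketch of a software walk with the helpers above (the pgd-to-pmd cast stands in for the folded pud/pmd steps):

    static pte_t *walk_sketch(struct mm_struct *mm, unsigned long addr)
    {
            pgd_t *pgd = pgd_offset(mm, addr);   /* indexed by bits 31..22 */
            pmd_t *pmd = (pmd_t *)pgd;           /* folded pud/pmd levels  */

            if (pmd_none(*pmd) || pmd_bad(*pmd))
                    return NULL;

            return pte_offset(pmd, addr);        /* indexed by bits 21..12 */
    }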
diff --git a/arch/csky/include/asm/processor.h b/arch/csky/include/asm/processor.h
new file mode 100644
index 000000000000..b1748659b2e9
--- /dev/null
+++ b/arch/csky/include/asm/processor.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_PROCESSOR_H
+#define __ASM_CSKY_PROCESSOR_H
+
+#include <linux/bitops.h>
+#include <asm/segment.h>
+#include <asm/ptrace.h>
+#include <asm/current.h>
+#include <asm/cache.h>
+#include <abi/reg_ops.h>
+#include <abi/regdef.h>
+#ifdef CONFIG_CPU_HAS_FPU
+#include <abi/fpu.h>
+#endif
+
+struct cpuinfo_csky {
+ unsigned long udelay_val;
+ unsigned long asid_cache;
+ /*
+ * Capability and feature descriptor structure for CSKY CPU
+ */
+ unsigned long options;
+ unsigned int processor_id[4];
+ unsigned int fpu_id;
+} __aligned(SMP_CACHE_BYTES);
+
+extern struct cpuinfo_csky cpu_data[];
+
+/*
+ * User space process size: 2GB. This is hardcoded into a few places,
+ * so don't change it unless you know what you are doing.
+ */
+#define TASK_SIZE 0x7fff8000UL
+
+#ifdef __KERNEL__
+#define STACK_TOP TASK_SIZE
+#define STACK_TOP_MAX STACK_TOP
+#endif
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE (TASK_SIZE / 3)
+
+struct thread_struct {
+ unsigned long ksp; /* kernel stack pointer */
+ unsigned long sr; /* saved status register */
+ unsigned long esp0; /* points to SR of stack frame */
+ unsigned long hi;
+ unsigned long lo;
+
+ /* Other stuff associated with the thread. */
+ unsigned long address; /* Last user fault */
+ unsigned long error_code;
+
+ /* FPU regs */
+ struct user_fp __aligned(16) user_fp;
+};
+
+#define INIT_THREAD { \
+ .ksp = (unsigned long) init_thread_union.stack + THREAD_SIZE, \
+ .sr = DEFAULT_PSR_VALUE, \
+}
+
+/*
+ * Do necessary setup to start up a newly executed thread.
+ *
+ * Pass the data segment into user programs if it exists;
+ * it can't hurt anything as far as I can tell.
+ */
+#define start_thread(_regs, _pc, _usp) \
+do { \
+ set_fs(USER_DS); /* reads from user space */ \
+ (_regs)->pc = (_pc); \
+ (_regs)->regs[1] = 0; /* ABIV1 is R7, uClibc_main rtdl arg */ \
+ (_regs)->regs[2] = 0; \
+ (_regs)->regs[3] = 0; /* ABIV2 is R7, use it? */ \
+ (_regs)->sr &= ~PS_S; \
+ (_regs)->usp = (_usp); \
+} while (0)
+
+/* Forward declaration, a strange C thing */
+struct task_struct;
+
+/* Free all resources held by a thread. */
+static inline void release_thread(struct task_struct *dead_task)
+{
+}
+
+/* Prepare to copy thread state - unlazy all lazy status */
+#define prepare_to_copy(tsk) do { } while (0)
+
+extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
+
+#define copy_segments(tsk, mm) do { } while (0)
+#define release_segments(mm) do { } while (0)
+#define forget_segments() do { } while (0)
+
+extern unsigned long thread_saved_pc(struct task_struct *tsk);
+
+unsigned long get_wchan(struct task_struct *p);
+
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->usp)
+
+#define task_pt_regs(p) \
+ ((struct pt_regs *)(THREAD_SIZE + p->stack) - 1)
+
+#define cpu_relax() barrier()
+
+#endif /* __ASM_CSKY_PROCESSOR_H */
diff --git a/arch/csky/include/asm/reg_ops.h b/arch/csky/include/asm/reg_ops.h
new file mode 100644
index 000000000000..cccf7d525fe2
--- /dev/null
+++ b/arch/csky/include/asm/reg_ops.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_REGS_OPS_H
+#define __ASM_REGS_OPS_H
+
+#define mfcr(reg) \
+({ \
+ unsigned int tmp; \
+ asm volatile( \
+ "mfcr %0, "reg"\n" \
+ : "=r"(tmp) \
+ : \
+ : "memory"); \
+ tmp; \
+})
+
+#define mtcr(reg, val) \
+({ \
+ asm volatile( \
+ "mtcr %0, "reg"\n" \
+ : \
+ : "r"(val) \
+ : "memory"); \
+})
+
+#endif /* __ASM_REGS_OPS_H */
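[Reviewer note] The register name is pasted directly into the instruction string, so it must be a string literal. A usage sketch (bit 6 of PSR is the IE flag used by irqflags.h):

    static inline void psr_set_ie_sketch(void)
    {
            unsigned int psr = mfcr("psr");   /* mfcr rX, psr */

            mtcr("psr", psr | (1 << 6));      /* set IE       */
    }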
diff --git a/arch/csky/include/asm/segment.h b/arch/csky/include/asm/segment.h
new file mode 100644
index 000000000000..ffdc4c47ff43
--- /dev/null
+++ b/arch/csky/include/asm/segment.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_SEGMENT_H
+#define __ASM_CSKY_SEGMENT_H
+
+typedef struct {
+ unsigned long seg;
+} mm_segment_t;
+
+#define KERNEL_DS ((mm_segment_t) { 0xFFFFFFFF })
+#define get_ds() KERNEL_DS
+
+#define USER_DS ((mm_segment_t) { 0x80000000UL })
+#define get_fs() (current_thread_info()->addr_limit)
+#define set_fs(x) (current_thread_info()->addr_limit = (x))
+#define segment_eq(a, b) ((a).seg == (b).seg)
+
+#endif /* __ASM_CSKY_SEGMENT_H */
diff --git a/arch/csky/include/asm/shmparam.h b/arch/csky/include/asm/shmparam.h
new file mode 100644
index 000000000000..efafe4c79fed
--- /dev/null
+++ b/arch/csky/include/asm/shmparam.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_SHMPARAM_H
+#define __ASM_CSKY_SHMPARAM_H
+
+#define SHMLBA (4 * PAGE_SIZE)
+
+#define __ARCH_FORCE_SHMLBA
+
+#endif /* __ASM_CSKY_SHMPARAM_H */
diff --git a/arch/csky/include/asm/smp.h b/arch/csky/include/asm/smp.h
new file mode 100644
index 000000000000..4a929c4d6437
--- /dev/null
+++ b/arch/csky/include/asm/smp.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_SMP_H
+#define __ASM_CSKY_SMP_H
+
+#include <linux/cpumask.h>
+#include <linux/irqreturn.h>
+#include <linux/threads.h>
+
+#ifdef CONFIG_SMP
+
+void __init setup_smp(void);
+
+void __init setup_smp_ipi(void);
+
+void arch_send_call_function_ipi_mask(struct cpumask *mask);
+
+void arch_send_call_function_single_ipi(int cpu);
+
+void __init set_send_ipi(void (*func)(const struct cpumask *mask), int irq);
+
+#define raw_smp_processor_id() (current_thread_info()->cpu)
+
+#endif /* CONFIG_SMP */
+
+#endif /* __ASM_CSKY_SMP_H */
diff --git a/arch/csky/include/asm/spinlock.h b/arch/csky/include/asm/spinlock.h
new file mode 100644
index 000000000000..7cf3f2b34cea
--- /dev/null
+++ b/arch/csky/include/asm/spinlock.h
@@ -0,0 +1,256 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_SPINLOCK_H
+#define __ASM_CSKY_SPINLOCK_H
+
+#include <linux/spinlock_types.h>
+#include <asm/barrier.h>
+
+#ifdef CONFIG_QUEUED_RWLOCKS
+
+/*
+ * Ticket-based spin-locking.
+ */
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ arch_spinlock_t lockval;
+ u32 ticket_next = 1 << TICKET_NEXT;
+ u32 *p = &lock->lock;
+ u32 tmp;
+
+ asm volatile (
+ "1: ldex.w %0, (%2) \n"
+ " mov %1, %0 \n"
+ " add %0, %3 \n"
+ " stex.w %0, (%2) \n"
+ " bez %0, 1b \n"
+ : "=&r" (tmp), "=&r" (lockval)
+ : "r"(p), "r"(ticket_next)
+ : "cc");
+
+ while (lockval.tickets.next != lockval.tickets.owner)
+ lockval.tickets.owner = READ_ONCE(lock->tickets.owner);
+
+ smp_mb();
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ u32 tmp, contended, res;
+ u32 ticket_next = 1 << TICKET_NEXT;
+ u32 *p = &lock->lock;
+
+ do {
+ asm volatile (
+ " ldex.w %0, (%3) \n"
+ " movi %2, 1 \n"
+ " rotli %1, %0, 16 \n"
+ " cmpne %1, %0 \n"
+ " bt 1f \n"
+ " movi %2, 0 \n"
+ " add %0, %0, %4 \n"
+ " stex.w %0, (%3) \n"
+ "1: \n"
+ : "=&r" (res), "=&r" (tmp), "=&r" (contended)
+ : "r"(p), "r"(ticket_next)
+ : "cc");
+ } while (!res);
+
+ if (!contended)
+ smp_mb();
+
+ return !contended;
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ smp_mb();
+ WRITE_ONCE(lock->tickets.owner, lock->tickets.owner + 1);
+}
+
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+ return lock.tickets.owner == lock.tickets.next;
+}
+
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+ return !arch_spin_value_unlocked(READ_ONCE(*lock));
+}
+
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
+{
+ struct __raw_tickets tickets = READ_ONCE(lock->tickets);
+
+ return (tickets.next - tickets.owner) > 1;
+}
+#define arch_spin_is_contended arch_spin_is_contended
+
+#include <asm/qrwlock.h>
+
+/* See include/linux/spinlock.h */
+#define smp_mb__after_spinlock() smp_mb()
+
+#else /* CONFIG_QUEUED_RWLOCKS */
+
+/*
+ * Test-and-set spin-locking.
+ */
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ u32 *p = &lock->lock;
+ u32 tmp;
+
+ asm volatile (
+ "1: ldex.w %0, (%1) \n"
+ " bnez %0, 1b \n"
+ " movi %0, 1 \n"
+ " stex.w %0, (%1) \n"
+ " bez %0, 1b \n"
+ : "=&r" (tmp)
+ : "r"(p)
+ : "cc");
+ smp_mb();
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ smp_mb();
+ WRITE_ONCE(lock->lock, 0);
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ u32 *p = &lock->lock;
+ u32 tmp;
+
+ asm volatile (
+ "1: ldex.w %0, (%1) \n"
+ " bnez %0, 2f \n"
+ " movi %0, 1 \n"
+ " stex.w %0, (%1) \n"
+ " bez %0, 1b \n"
+ " movi %0, 0 \n"
+ "2: \n"
+ : "=&r" (tmp)
+ : "r"(p)
+ : "cc");
+
+ if (!tmp)
+ smp_mb();
+
+ return !tmp;
+}
+
+#define arch_spin_is_locked(x) (READ_ONCE((x)->lock) != 0)
+
+/*
+ * read lock/unlock/trylock
+ */
+static inline void arch_read_lock(arch_rwlock_t *lock)
+{
+ u32 *p = &lock->lock;
+ u32 tmp;
+
+ asm volatile (
+ "1: ldex.w %0, (%1) \n"
+ " blz %0, 1b \n"
+ " addi %0, 1 \n"
+ " stex.w %0, (%1) \n"
+ " bez %0, 1b \n"
+ : "=&r" (tmp)
+ : "r"(p)
+ : "cc");
+ smp_mb();
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *lock)
+{
+ u32 *p = &lock->lock;
+ u32 tmp;
+
+ smp_mb();
+ asm volatile (
+ "1: ldex.w %0, (%1) \n"
+ " subi %0, 1 \n"
+ " stex.w %0, (%1) \n"
+ " bez %0, 1b \n"
+ : "=&r" (tmp)
+ : "r"(p)
+ : "cc");
+}
+
+static inline int arch_read_trylock(arch_rwlock_t *lock)
+{
+ u32 *p = &lock->lock;
+ u32 tmp;
+
+ asm volatile (
+ "1: ldex.w %0, (%1) \n"
+ " blz %0, 2f \n"
+ " addi %0, 1 \n"
+ " stex.w %0, (%1) \n"
+ " bez %0, 1b \n"
+ " movi %0, 0 \n"
+ "2: \n"
+ : "=&r" (tmp)
+ : "r"(p)
+ : "cc");
+
+ if (!tmp)
+ smp_mb();
+
+ return !tmp;
+}
+
+/*
+ * write lock/unlock/trylock
+ */
+static inline void arch_write_lock(arch_rwlock_t *lock)
+{
+ u32 *p = &lock->lock;
+ u32 tmp;
+
+ asm volatile (
+ "1: ldex.w %0, (%1) \n"
+ " bnez %0, 1b \n"
+ " subi %0, 1 \n"
+ " stex.w %0, (%1) \n"
+ " bez %0, 1b \n"
+ : "=&r" (tmp)
+ : "r"(p)
+ : "cc");
+ smp_mb();
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *lock)
+{
+ smp_mb();
+ WRITE_ONCE(lock->lock, 0);
+}
+
+static inline int arch_write_trylock(arch_rwlock_t *lock)
+{
+ u32 *p = &lock->lock;
+ u32 tmp;
+
+ asm volatile (
+ "1: ldex.w %0, (%1) \n"
+ " bnez %0, 2f \n"
+ " subi %0, 1 \n"
+ " stex.w %0, (%1) \n"
+ " bez %0, 1b \n"
+ " movi %0, 0 \n"
+ "2: \n"
+ : "=&r" (tmp)
+ : "r"(p)
+ : "cc");
+
+ if (!tmp)
+ smp_mb();
+
+ return !tmp;
+}
+
+#endif /* CONFIG_QUEUED_RWLOCKS */
+#endif /* __ASM_CSKY_SPINLOCK_H */
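[Reviewer note] In the ticket variant, each locker atomically snapshots the pre-increment lock word into lockval while bumping the "next" half by 1 << TICKET_NEXT, then spins until "owner" reaches its ticket. A worked trace:

    /*  next/owner   action
     *  0 / 0        CPU0 lock():   ticket 0, next -> 1, 0 == owner: acquired
     *  1 / 0        CPU1 lock():   ticket 1, next -> 2, spins on owner
     *  2 / 0        CPU0 unlock(): owner -> 1
     *  2 / 1        CPU1's ticket matches: acquired, in FIFO order
     */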
diff --git a/arch/csky/include/asm/spinlock_types.h b/arch/csky/include/asm/spinlock_types.h
new file mode 100644
index 000000000000..88b82438b182
--- /dev/null
+++ b/arch/csky/include/asm/spinlock_types.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_SPINLOCK_TYPES_H
+#define __ASM_CSKY_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+#define TICKET_NEXT 16
+
+typedef struct {
+ union {
+ u32 lock;
+ struct __raw_tickets {
+ /* little endian */
+ u16 owner;
+ u16 next;
+ } tickets;
+ };
+} arch_spinlock_t;
+
+#define __ARCH_SPIN_LOCK_UNLOCKED { { 0 } }
+
+#ifdef CONFIG_QUEUED_RWLOCKS
+#include <asm-generic/qrwlock_types.h>
+
+#else /* !CONFIG_QUEUED_RWLOCKS */
+
+typedef struct {
+ u32 lock;
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
+
+#endif /* CONFIG_QUEUED_RWLOCKS */
+#endif /* __ASM_CSKY_SPINLOCK_TYPES_H */
diff --git a/arch/csky/include/asm/string.h b/arch/csky/include/asm/string.h
new file mode 100644
index 000000000000..73142de18355
--- /dev/null
+++ b/arch/csky/include/asm/string.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef _CSKY_STRING_MM_H_
+#define _CSKY_STRING_MM_H_
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <abi/string.h>
+#endif
+
+#endif /* _CSKY_STRING_MM_H_ */
diff --git a/arch/csky/include/asm/switch_to.h b/arch/csky/include/asm/switch_to.h
new file mode 100644
index 000000000000..35a39e88933d
--- /dev/null
+++ b/arch/csky/include/asm/switch_to.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_SWITCH_TO_H
+#define __ASM_CSKY_SWITCH_TO_H
+
+#include <linux/thread_info.h>
+#ifdef CONFIG_CPU_HAS_FPU
+#include <abi/fpu.h>
+static inline void __switch_to_fpu(struct task_struct *prev,
+ struct task_struct *next)
+{
+ save_to_user_fp(&prev->thread.user_fp);
+ restore_from_user_fp(&next->thread.user_fp);
+}
+#else
+static inline void __switch_to_fpu(struct task_struct *prev,
+ struct task_struct *next)
+{}
+#endif
+
+/*
+ * Context switching is now performed out-of-line in switch_to.S
+ */
+extern struct task_struct *__switch_to(struct task_struct *,
+ struct task_struct *);
+
+#define switch_to(prev, next, last) \
+ do { \
+ struct task_struct *__prev = (prev); \
+ struct task_struct *__next = (next); \
+ __switch_to_fpu(__prev, __next); \
+ ((last) = __switch_to((prev), (next))); \
+ } while (0)
+
+#endif /* __ASM_CSKY_SWITCH_TO_H */
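[Reviewer note] FPU state is swapped eagerly, before the register/stack
switch; the third argument exists because once __switch_to() returns we are
already on next's stack, so the scheduler needs the outgoing task handed
back. Illustrative call site, as kernel/sched/core.c uses it:

	/* In context_switch(): prev is returned in 'last' on the new stack. */
	switch_to(prev, next, prev);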
diff --git a/arch/csky/include/asm/syscall.h b/arch/csky/include/asm/syscall.h
new file mode 100644
index 000000000000..926a64a8b4ee
--- /dev/null
+++ b/arch/csky/include/asm/syscall.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_SYSCALL_H
+#define __ASM_SYSCALL_H
+
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <abi/regdef.h>
+
+static inline int
+syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
+{
+ return regs_syscallid(regs);
+}
+
+static inline void
+syscall_rollback(struct task_struct *task, struct pt_regs *regs)
+{
+ regs->a0 = regs->orig_a0;
+}
+
+static inline long
+syscall_get_error(struct task_struct *task, struct pt_regs *regs)
+{
+ unsigned long error = regs->a0;
+
+ return IS_ERR_VALUE(error) ? error : 0;
+}
+
+static inline long
+syscall_get_return_value(struct task_struct *task, struct pt_regs *regs)
+{
+ return regs->a0;
+}
+
+static inline void
+syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
+ int error, long val)
+{
+ regs->a0 = (long) error ?: val;
+}
+
+static inline void
+syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
+ unsigned int i, unsigned int n, unsigned long *args)
+{
+ BUG_ON(i + n > 6);
+ if (i == 0) {
+ args[0] = regs->orig_a0;
+ args++;
+ i++;
+ n--;
+ }
+ memcpy(args, &regs->a1 + i, n * sizeof(args[0]));
+}
+
+static inline void
+syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
+ unsigned int i, unsigned int n, const unsigned long *args)
+{
+ BUG_ON(i + n > 6);
+ if (i == 0) {
+ regs->orig_a0 = args[0];
+ args++;
+ i++;
+ n--;
+ }
+ memcpy(&regs->a1 + i, args, n * sizeof(regs->a0));
+}
+
+#endif /* __ASM_SYSCALL_H */
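[Reviewer note] Argument 0 must be read from orig_a0 because a0 is reused
for the return value, while arguments 1..5 stay in a1..a5; that is what the
i == 0 special case above handles. (Note &regs->a1 is an unsigned long *, so
pointer arithmetic already scales by the element size; the extra
sizeof(regs->a1) factor in the original hunk indexed past the registers.)
A hedged usage sketch from a hypothetical tracer at syscall entry:

	unsigned long args[6];

	/* Fetch all six arguments of the syscall 'task' is entering. */
	syscall_get_arguments(task, regs, 0, 6, args);
	/* args[0] == regs->orig_a0, args[1..5] == regs->a1..regs->a5 */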
diff --git a/arch/csky/include/asm/syscalls.h b/arch/csky/include/asm/syscalls.h
new file mode 100644
index 000000000000..5d48e5e0082e
--- /dev/null
+++ b/arch/csky/include/asm/syscalls.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_SYSCALLS_H
+#define __ASM_CSKY_SYSCALLS_H
+
+#include <asm-generic/syscalls.h>
+
+long sys_cacheflush(void __user *, unsigned long, int);
+
+long sys_set_thread_area(unsigned long addr);
+
+long sys_csky_fadvise64_64(int fd, int advice, loff_t offset, loff_t len);
+
+#endif /* __ASM_CSKY_SYSCALLS_H */
diff --git a/arch/csky/include/asm/thread_info.h b/arch/csky/include/asm/thread_info.h
new file mode 100644
index 000000000000..a2c69a7836f7
--- /dev/null
+++ b/arch/csky/include/asm/thread_info.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef _ASM_CSKY_THREAD_INFO_H
+#define _ASM_CSKY_THREAD_INFO_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/version.h>
+#include <asm/types.h>
+#include <asm/page.h>
+#include <asm/processor.h>
+
+struct thread_info {
+ struct task_struct *task;
+ void *dump_exec_domain;
+ unsigned long flags;
+ int preempt_count;
+ unsigned long tp_value;
+ mm_segment_t addr_limit;
+ struct restart_block restart_block;
+ struct pt_regs *regs;
+ unsigned int cpu;
+};
+
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .task = &tsk, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+ .addr_limit = KERNEL_DS, \
+ .cpu = 0, \
+ .restart_block = { \
+ .fn = do_no_restart_syscall, \
+ }, \
+}
+
+#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)
+
+static inline struct thread_info *current_thread_info(void)
+{
+ unsigned long sp;
+
+ asm volatile("mov %0, sp\n":"=r"(sp));
+
+ return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
+}
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * entry.S relies on these definitions!
+ * bits 0-5 are tested at every exception exit
+ */
+#define TIF_SIGPENDING 0 /* signal pending */
+#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */
+#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
+#define TIF_SYSCALL_TRACE 5 /* syscall trace active */
+#define TIF_DELAYED_TRACE 14 /* single step a syscall */
+#define TIF_POLLING_NRFLAG 16 /* poll_idle() is polling TIF_NEED_RESCHED */
+#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
+#define TIF_FREEZE 19 /* thread is freezing for suspend */
+#define TIF_RESTORE_SIGMASK 20 /* restore signal mask in do_signal() */
+#define TIF_SECCOMP 21 /* secure computing */
+
+#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+#define _TIF_DELAYED_TRACE (1 << TIF_DELAYED_TRACE)
+#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
+#define _TIF_MEMDIE (1 << TIF_MEMDIE)
+#define _TIF_FREEZE (1 << TIF_FREEZE)
+#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
+#define _TIF_SECCOMP (1 << TIF_SECCOMP)
+
+#endif /* _ASM_CSKY_THREAD_INFO_H */
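[Reviewer note] current_thread_info() works because thread_info sits at the
base of the THREAD_SIZE-aligned kernel stack, so masking the stack pointer
recovers it. Worked example, assuming THREAD_SIZE = 0x2000 (8 KiB):

	/* sp                      = 0xc0a13e58                       */
	/* sp & ~(THREAD_SIZE - 1) = 0xc0a13e58 & 0xffffe000          */
	/*                         = 0xc0a12000  <- this stack's base */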
diff --git a/arch/csky/include/asm/tlb.h b/arch/csky/include/asm/tlb.h
new file mode 100644
index 000000000000..8c7cc097666f
--- /dev/null
+++ b/arch/csky/include/asm/tlb.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_TLB_H
+#define __ASM_CSKY_TLB_H
+
+#include <asm/cacheflush.h>
+
+#define tlb_start_vma(tlb, vma) \
+ do { \
+ if (!tlb->fullmm) \
+ flush_cache_range(vma, vma->vm_start, vma->vm_end); \
+ } while (0)
+
+#define tlb_end_vma(tlb, vma) \
+ do { \
+ if (!tlb->fullmm) \
+ flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
+ } while (0)
+
+#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+
+#include <asm-generic/tlb.h>
+
+#endif /* __ASM_CSKY_TLB_H */
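[Reviewer note] These hooks bracket the unmap path: caches are flushed
before the PTEs go away, stale TLB entries afterwards. Roughly what the
generic mmu_gather code ends up doing per VMA (sketch, not literal code):

	tlb_start_vma(tlb, vma);   /* flush_cache_range(vma, start, end) */
	/* ... clear PTEs and batch the underlying pages ... */
	tlb_end_vma(tlb, vma);     /* flush_tlb_range(vma, start, end)   */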
diff --git a/arch/csky/include/asm/tlbflush.h b/arch/csky/include/asm/tlbflush.h
new file mode 100644
index 000000000000..6845b0667703
--- /dev/null
+++ b/arch/csky/include/asm/tlbflush.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_TLBFLUSH_H
+#define __ASM_TLBFLUSH_H
+
+/*
+ * TLB flushing:
+ *
+ * - flush_tlb_all() flushes all processes' TLB entries
+ * - flush_tlb_mm(mm) flushes the specified mm context TLB entries
+ * - flush_tlb_page(vma, vmaddr) flushes one page
+ * - flush_tlb_range(vma, start, end) flushes a range of pages
+ * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
+ */
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+
+extern void flush_tlb_one(unsigned long vaddr);
+
+#endif
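[Reviewer note] A minimal sketch of picking the narrowest primitive — the
surrounding function is hypothetical; the flush call is one declared above:

	/* After changing the protection of a single user page: */
	static void example_update_pte(struct vm_area_struct *vma,
				       unsigned long addr, pte_t *ptep,
				       pte_t pte)
	{
		set_pte(ptep, pte);		/* rewrite the PTE        */
		flush_tlb_page(vma, addr);	/* drop the one stale TLB */
	}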
diff --git a/arch/csky/include/asm/traps.h b/arch/csky/include/asm/traps.h
new file mode 100644
index 000000000000..1c081805b962
--- /dev/null
+++ b/arch/csky/include/asm/traps.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_TRAPS_H
+#define __ASM_CSKY_TRAPS_H
+
+#define VEC_RESET 0
+#define VEC_ALIGN 1
+#define VEC_ACCESS 2
+#define VEC_ZERODIV 3
+#define VEC_ILLEGAL 4
+#define VEC_PRIV 5
+#define VEC_TRACE 6
+#define VEC_BREAKPOINT 7
+#define VEC_UNRECOVER 8
+#define VEC_SOFTRESET 9
+#define VEC_AUTOVEC 10
+#define VEC_FAUTOVEC 11
+#define VEC_HWACCEL 12
+
+#define VEC_TLBMISS 14
+#define VEC_TLBMODIFIED 15
+
+#define VEC_TRAP0 16
+#define VEC_TRAP1 17
+#define VEC_TRAP2 18
+#define VEC_TRAP3 19
+
+#define VEC_TLBINVALIDL 20
+#define VEC_TLBINVALIDS 21
+
+#define VEC_PRFL 29
+#define VEC_FPE 30
+
+extern void *vec_base[];
+
+#define VEC_INIT(i, func) \
+do { \
+ vec_base[i] = (void *)func; \
+} while (0)
+
+void csky_alignment(struct pt_regs *regs);
+
+#endif /* __ASM_CSKY_TRAPS_H */
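[Reviewer note] VEC_INIT() just patches one slot of the in-memory vector
table. A hedged sketch of wiring a handler at boot (handle_tlbmiss is a
placeholder name):

	extern void handle_tlbmiss(void);

	static void __init setup_vectors_sketch(void)
	{
		/* Route vector 14 (TLB miss) to its handler. */
		VEC_INIT(VEC_TLBMISS, handle_tlbmiss);
	}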
diff --git a/arch/csky/include/asm/uaccess.h b/arch/csky/include/asm/uaccess.h
new file mode 100644
index 000000000000..acaf0e210d81
--- /dev/null
+++ b/arch/csky/include/asm/uaccess.h
@@ -0,0 +1,416 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_UACCESS_H
+#define __ASM_CSKY_UACCESS_H
+
+/*
+ * User space memory access functions
+ */
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/version.h>
+#include <asm/segment.h>
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+static inline int access_ok(int type, const void *addr, unsigned long size)
+{
+ unsigned long limit = current_thread_info()->addr_limit.seg;
+
+ return (((unsigned long)addr < limit) &&
+ ((unsigned long)(addr + size) < limit));
+}
+
+static inline int verify_area(int type, const void *addr, unsigned long size)
+{
+ return access_ok(type, addr, size) ? 0 : -EFAULT;
+}
+
+#define __addr_ok(addr) (access_ok(VERIFY_READ, addr, 0))
+
+extern int __put_user_bad(void);
+
+/*
+ * Tell gcc we read from memory instead of writing: this is because
+ * we do not write to any memory gcc knows about, so there are no
+ * aliasing issues.
+ */
+
+/*
+ * These are the main single-value transfer routines. They automatically
+ * use the right size if we just have the right pointer type.
+ *
+ * This gets kind of ugly. We want to return _two_ values in "get_user()"
+ * and yet we don't want to do any pointers, because that is too much
+ * of a performance impact. Thus we have a few rather ugly macros here,
+ * and hide all the ugliness from the user.
+ *
+ * The "__xxx" versions of the user access functions are versions that
+ * do not verify the address space, that must have been done previously
+ * with a separate "access_ok()" call (this is used when we do multiple
+ * accesses to the same area of user memory).
+ *
+ * As we use the same address space for kernel and user data on
+ * Ckcore, we can just do these as direct assignments. (Of course, the
+ * exception handling means that it's no longer "just"...)
+ */
+
+#define put_user(x, ptr) \
+ __put_user_check((x), (ptr), sizeof(*(ptr)))
+
+#define __put_user(x, ptr) \
+ __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
+
+#define __ptr(x) ((unsigned long *)(x))
+
+#define get_user(x, ptr) \
+ __get_user_check((x), (ptr), sizeof(*(ptr)))
+
+#define __get_user(x, ptr) \
+ __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+
+#define __put_user_nocheck(x, ptr, size) \
+({ \
+ long __pu_err = 0; \
+ typeof(*(ptr)) *__pu_addr = (ptr); \
+ typeof(*(ptr)) __pu_val = (typeof(*(ptr)))(x); \
+ if (__pu_addr) \
+ __put_user_size(__pu_val, (__pu_addr), (size), \
+ __pu_err); \
+ __pu_err; \
+})
+
+#define __put_user_check(x, ptr, size) \
+({ \
+ long __pu_err = -EFAULT; \
+ typeof(*(ptr)) *__pu_addr = (ptr); \
+ typeof(*(ptr)) __pu_val = (typeof(*(ptr)))(x); \
+ if (access_ok(VERIFY_WRITE, __pu_addr, size) && __pu_addr) \
+ __put_user_size(__pu_val, __pu_addr, (size), __pu_err); \
+ __pu_err; \
+})
+
+#define __put_user_size(x, ptr, size, retval) \
+do { \
+ retval = 0; \
+ switch (size) { \
+ case 1: \
+ __put_user_asm_b(x, ptr, retval); \
+ break; \
+ case 2: \
+ __put_user_asm_h(x, ptr, retval); \
+ break; \
+ case 4: \
+ __put_user_asm_w(x, ptr, retval); \
+ break; \
+ case 8: \
+ __put_user_asm_64(x, ptr, retval); \
+ break; \
+ default: \
+ __put_user_bad(); \
+ } \
+} while (0)
+
+/*
+ * We don't tell gcc that we are accessing memory, but this is OK
+ * because we do not write to any memory gcc knows about, so there
+ * are no aliasing issues.
+ *
+ * Note that PC at a fault is the address *after* the faulting
+ * instruction.
+ */
+#define __put_user_asm_b(x, ptr, err) \
+do { \
+ int errcode; \
+ asm volatile( \
+ "1: stb %1, (%2,0) \n" \
+ " br 3f \n" \
+ "2: mov %0, %3 \n" \
+ " br 3f \n" \
+ ".section __ex_table, \"a\" \n" \
+ ".align 2 \n" \
+ ".long 1b,2b \n" \
+ ".previous \n" \
+ "3: \n" \
+ : "=r"(err), "=r"(x), "=r"(ptr), "=r"(errcode) \
+ : "0"(err), "1"(x), "2"(ptr), "3"(-EFAULT) \
+ : "memory"); \
+} while (0)
+
+#define __put_user_asm_h(x, ptr, err) \
+do { \
+ int errcode; \
+ asm volatile( \
+ "1: sth %1, (%2,0) \n" \
+ " br 3f \n" \
+ "2: mov %0, %3 \n" \
+ " br 3f \n" \
+ ".section __ex_table, \"a\" \n" \
+ ".align 2 \n" \
+ ".long 1b,2b \n" \
+ ".previous \n" \
+ "3: \n" \
+ : "=r"(err), "=r"(x), "=r"(ptr), "=r"(errcode) \
+ : "0"(err), "1"(x), "2"(ptr), "3"(-EFAULT) \
+ : "memory"); \
+} while (0)
+
+#define __put_user_asm_w(x, ptr, err) \
+do { \
+ int errcode; \
+ asm volatile( \
+ "1: stw %1, (%2,0) \n" \
+ " br 3f \n" \
+ "2: mov %0, %3 \n" \
+ " br 3f \n" \
+ ".section __ex_table,\"a\" \n" \
+ ".align 2 \n" \
+ ".long 1b, 2b \n" \
+ ".previous \n" \
+ "3: \n" \
+ : "=r"(err), "=r"(x), "=r"(ptr), "=r"(errcode) \
+ : "0"(err), "1"(x), "2"(ptr), "3"(-EFAULT) \
+ : "memory"); \
+} while (0)
+
+#define __put_user_asm_64(x, ptr, err) \
+do { \
+ int tmp; \
+ int errcode; \
+ typeof(*(ptr)) src = (typeof(*(ptr)))x; \
+ typeof(*(ptr)) *psrc = &src; \
+ \
+ asm volatile( \
+ " ldw %3, (%1, 0) \n" \
+ "1: stw %3, (%2, 0) \n" \
+ " ldw %3, (%1, 4) \n" \
+ "2: stw %3, (%2, 4) \n" \
+ " br 4f \n" \
+ "3: mov %0, %4 \n" \
+ " br 4f \n" \
+ ".section __ex_table, \"a\" \n" \
+ ".align 2 \n" \
+ ".long 1b, 3b \n" \
+ ".long 2b, 3b \n" \
+ ".previous \n" \
+ "4: \n" \
+ : "=r"(err), "=r"(psrc), "=r"(ptr), \
+ "=r"(tmp), "=r"(errcode) \
+ : "0"(err), "1"(psrc), "2"(ptr), "3"(0), "4"(-EFAULT) \
+ : "memory"); \
+} while (0)
+
+#define __get_user_nocheck(x, ptr, size) \
+({ \
+ long __gu_err; \
+ __get_user_size(x, (ptr), (size), __gu_err); \
+ __gu_err; \
+})
+
+#define __get_user_check(x, ptr, size) \
+({ \
+ int __gu_err = -EFAULT; \
+ const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
+ if (access_ok(VERIFY_READ, __gu_ptr, size) && __gu_ptr) \
+ __get_user_size(x, __gu_ptr, size, __gu_err); \
+ __gu_err; \
+})
+
+#define __get_user_size(x, ptr, size, retval) \
+do { \
+ switch (size) { \
+ case 1: \
+ __get_user_asm_common((x), ptr, "ldb", retval); \
+ break; \
+ case 2: \
+ __get_user_asm_common((x), ptr, "ldh", retval); \
+ break; \
+ case 4: \
+ __get_user_asm_common((x), ptr, "ldw", retval); \
+ break; \
+ default: \
+ x = 0; \
+ (retval) = __get_user_bad(); \
+ } \
+} while (0)
+
+#define __get_user_asm_common(x, ptr, ins, err) \
+do { \
+ int errcode; \
+ asm volatile( \
+ "1: " ins " %1, (%4,0) \n" \
+ " br 3f \n" \
+ /* Fix up codes */ \
+ "2: mov %0, %2 \n" \
+ " movi %1, 0 \n" \
+ " br 3f \n" \
+ ".section __ex_table,\"a\" \n" \
+ ".align 2 \n" \
+ ".long 1b, 2b \n" \
+ ".previous \n" \
+ "3: \n" \
+ : "=r"(err), "=r"(x), "=r"(errcode) \
+ : "0"(0), "r"(ptr), "2"(-EFAULT) \
+ : "memory"); \
+} while (0)
+
+extern int __get_user_bad(void);
+
+#define __copy_user(to, from, n) \
+do { \
+ int w0, w1, w2, w3; \
+ asm volatile( \
+ "0: cmpnei %1, 0 \n" \
+ " bf 8f \n" \
+ " mov %3, %1 \n" \
+ " or %3, %2 \n" \
+ " andi %3, 3 \n" \
+ " cmpnei %3, 0 \n" \
+ " bf 1f \n" \
+ " br 5f \n" \
+ "1: cmplti %0, 16 \n" /* 4W */ \
+ " bt 3f \n" \
+ " ldw %3, (%2, 0) \n" \
+ " ldw %4, (%2, 4) \n" \
+ " ldw %5, (%2, 8) \n" \
+ " ldw %6, (%2, 12) \n" \
+ "2: stw %3, (%1, 0) \n" \
+ "9: stw %4, (%1, 4) \n" \
+ "10: stw %5, (%1, 8) \n" \
+ "11: stw %6, (%1, 12) \n" \
+ " addi %2, 16 \n" \
+ " addi %1, 16 \n" \
+ " subi %0, 16 \n" \
+ " br 1b \n" \
+ "3: cmplti %0, 4 \n" /* 1W */ \
+ " bt 5f \n" \
+ " ldw %3, (%2, 0) \n" \
+ "4: stw %3, (%1, 0) \n" \
+ " addi %2, 4 \n" \
+ " addi %1, 4 \n" \
+ " subi %0, 4 \n" \
+ " br 3b \n" \
+ "5: cmpnei %0, 0 \n" /* 1B */ \
+ " bf 8f \n" \
+ " ldb %3, (%2, 0) \n" \
+ "6: stb %3, (%1, 0) \n" \
+ " addi %2, 1 \n" \
+ " addi %1, 1 \n" \
+ " subi %0, 1 \n" \
+ " br 5b \n" \
+ "7: br 8f \n" \
+ ".section __ex_table, \"a\" \n" \
+ ".align 2 \n" \
+ ".long 2b, 7b \n" \
+ ".long 9b, 7b \n" \
+ ".long 10b, 7b \n" \
+ ".long 11b, 7b \n" \
+ ".long 4b, 7b \n" \
+ ".long 6b, 7b \n" \
+ ".previous \n" \
+ "8: \n" \
+ : "=r"(n), "=r"(to), "=r"(from), "=r"(w0), \
+ "=r"(w1), "=r"(w2), "=r"(w3) \
+ : "0"(n), "1"(to), "2"(from) \
+ : "memory"); \
+} while (0)
+
+#define __copy_user_zeroing(to, from, n) \
+do { \
+ int tmp; \
+ int nsave; \
+ asm volatile( \
+ "0: cmpnei %1, 0 \n" \
+ " bf 7f \n" \
+ " mov %3, %1 \n" \
+ " or %3, %2 \n" \
+ " andi %3, 3 \n" \
+ " cmpnei %3, 0 \n" \
+ " bf 1f \n" \
+ " br 5f \n" \
+ "1: cmplti %0, 16 \n" \
+ " bt 3f \n" \
+ "2: ldw %3, (%2, 0) \n" \
+ "10: ldw %4, (%2, 4) \n" \
+ " stw %3, (%1, 0) \n" \
+ " stw %4, (%1, 4) \n" \
+ "11: ldw %3, (%2, 8) \n" \
+ "12: ldw %4, (%2, 12) \n" \
+ " stw %3, (%1, 8) \n" \
+ " stw %4, (%1, 12) \n" \
+ " addi %2, 16 \n" \
+ " addi %1, 16 \n" \
+ " subi %0, 16 \n" \
+ " br 1b \n" \
+ "3: cmplti %0, 4 \n" \
+ " bt 5f \n" \
+ "4: ldw %3, (%2, 0) \n" \
+ " stw %3, (%1, 0) \n" \
+ " addi %2, 4 \n" \
+ " addi %1, 4 \n" \
+ " subi %0, 4 \n" \
+ " br 3b \n" \
+ "5: cmpnei %0, 0 \n" \
+ " bf 7f \n" \
+ "6: ldb %3, (%2, 0) \n" \
+ " stb %3, (%1, 0) \n" \
+ " addi %2, 1 \n" \
+ " addi %1, 1 \n" \
+ " subi %0, 1 \n" \
+ " br 5b \n" \
+ "8: mov %3, %0 \n" \
+ " movi %4, 0 \n" \
+ "9: stb %4, (%1, 0) \n" \
+ " addi %1, 1 \n" \
+ " subi %3, 1 \n" \
+ " cmpnei %3, 0 \n" \
+ " bt 9b \n" \
+ " br 7f \n" \
+ ".section __ex_table, \"a\" \n" \
+ ".align 2 \n" \
+ ".long 2b, 8b \n" \
+ ".long 10b, 8b \n" \
+ ".long 11b, 8b \n" \
+ ".long 12b, 8b \n" \
+ ".long 4b, 8b \n" \
+ ".long 6b, 8b \n" \
+ ".previous \n" \
+ "7: \n" \
+ : "=r"(n), "=r"(to), "=r"(from), "=r"(nsave), \
+ "=r"(tmp) \
+ : "0"(n), "1"(to), "2"(from) \
+ : "memory"); \
+} while (0)
+
+unsigned long raw_copy_from_user(void *to, const void *from, unsigned long n);
+unsigned long raw_copy_to_user(void *to, const void *from, unsigned long n);
+
+unsigned long clear_user(void __user *to, unsigned long n);
+unsigned long __clear_user(void __user *to, unsigned long n);
+
+long strncpy_from_user(char *dst, const char *src, long count);
+long __strncpy_from_user(char *dst, const char *src, long count);
+
+/*
+ * Return the size of a string (including the ending 0)
+ *
+ * Return 0 on exception, a value greater than n if too long
+ */
+long strnlen_user(const char *src, long n);
+
+#define strlen_user(str) strnlen_user(str, 32767)
+
+struct exception_table_entry {
+ unsigned long insn;
+ unsigned long nextinsn;
+};
+
+extern int fixup_exception(struct pt_regs *regs);
+
+#endif /* __ASM_CSKY_UACCESS_H */
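[Reviewer note] Caller pattern for the single-value helpers: get_user() and
put_user() verify the address themselves; the __ variants assume a prior
access_ok(). Minimal sketch (hypothetical ioctl-style handler):

	static long example_handler_sketch(unsigned int __user *uptr)
	{
		unsigned int val;

		if (get_user(val, uptr))	/* nonzero means -EFAULT */
			return -EFAULT;
		val |= 0x1;			/* set a flag bit        */
		if (put_user(val, uptr))
			return -EFAULT;
		return 0;
	}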
diff --git a/arch/csky/include/asm/unistd.h b/arch/csky/include/asm/unistd.h
new file mode 100644
index 000000000000..284487477a61
--- /dev/null
+++ b/arch/csky/include/asm/unistd.h
@@ -0,0 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <uapi/asm/unistd.h>
diff --git a/arch/csky/include/asm/vdso.h b/arch/csky/include/asm/vdso.h
new file mode 100644
index 000000000000..d963d691f3a1
--- /dev/null
+++ b/arch/csky/include/asm/vdso.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_VDSO_H
+#define __ASM_CSKY_VDSO_H
+
+#include <abi/vdso.h>
+
+struct csky_vdso {
+ unsigned short rt_signal_retcode[4];
+};
+
+#endif /* __ASM_CSKY_VDSO_H */