Diffstat (limited to 'include/asm-generic')
 include/asm-generic/Kbuild            |  1
 include/asm-generic/atomic.h          |  2
 include/asm-generic/barrier.h         | 19
 include/asm-generic/bug.h             | 10
 include/asm-generic/cacheflush.h      |  5
 include/asm-generic/hugetlb.h         |  2
 include/asm-generic/io.h              |  4
 include/asm-generic/mmiowb.h          |  6
 include/asm-generic/qspinlock.h       |  1
 include/asm-generic/qspinlock_types.h |  8
 include/asm-generic/rwonce.h          | 90
 include/asm-generic/seccomp.h         |  2
 include/asm-generic/tlb.h             | 55
 include/asm-generic/vmlinux.lds.h     | 29
 14 files changed, 180 insertions(+), 54 deletions(-)
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
index 44ec80e70518..74b0612601dd 100644
--- a/include/asm-generic/Kbuild
+++ b/include/asm-generic/Kbuild
@@ -45,6 +45,7 @@ mandatory-y += pci.h
mandatory-y += percpu.h
mandatory-y += pgalloc.h
mandatory-y += preempt.h
+mandatory-y += rwonce.h
mandatory-y += sections.h
mandatory-y += serial.h
mandatory-y += shmparam.h
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 286867f593d2..11f96f40f4a7 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -159,8 +159,6 @@ ATOMIC_OP(xor, ^)
* resource counting etc..
*/
-#define ATOMIC_INIT(i) { (i) }
-
/**
* atomic_read - read atomic variable
* @v: pointer of type atomic_t
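The ATOMIC_INIT() definition disappears from the generic atomic.h because the same series consolidates it into a common header, so every architecture now picks up one shared definition. Usage is unchanged; a minimal sketch (not part of this diff):

#include <linux/atomic.h>

/* Static initialisation still spells ATOMIC_INIT(); only its home moved. */
static atomic_t nr_events = ATOMIC_INIT(0);

static inline void record_event(void)
{
	atomic_inc(&nr_events);		/* runtime atomic ops are untouched */
}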
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 2eacaf7d62f6..fec97dc34de7 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -13,7 +13,7 @@
#ifndef __ASSEMBLY__
-#include <linux/compiler.h>
+#include <asm/rwonce.h>
#ifndef nop
#define nop() asm volatile ("nop")
@@ -46,10 +46,6 @@
#define dma_wmb() wmb()
#endif
-#ifndef read_barrier_depends
-#define read_barrier_depends() do { } while (0)
-#endif
-
#ifndef __smp_mb
#define __smp_mb() mb()
#endif
@@ -62,10 +58,6 @@
#define __smp_wmb() wmb()
#endif
-#ifndef __smp_read_barrier_depends
-#define __smp_read_barrier_depends() read_barrier_depends()
-#endif
-
#ifdef CONFIG_SMP
#ifndef smp_mb
@@ -80,10 +72,6 @@
#define smp_wmb() __smp_wmb()
#endif
-#ifndef smp_read_barrier_depends
-#define smp_read_barrier_depends() __smp_read_barrier_depends()
-#endif
-
#else /* !CONFIG_SMP */
#ifndef smp_mb
@@ -98,10 +86,6 @@
#define smp_wmb() barrier()
#endif
-#ifndef smp_read_barrier_depends
-#define smp_read_barrier_depends() do { } while (0)
-#endif
-
#endif /* CONFIG_SMP */
#ifndef __smp_store_mb
@@ -196,7 +180,6 @@ do { \
#define virt_mb() __smp_mb()
#define virt_rmb() __smp_rmb()
#define virt_wmb() __smp_wmb()
-#define virt_read_barrier_depends() __smp_read_barrier_depends()
#define virt_store_mb(var, value) __smp_store_mb(var, value)
#define virt_mb__before_atomic() __smp_mb__before_atomic()
#define virt_mb__after_atomic() __smp_mb__after_atomic()
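All of the read_barrier_depends() flavours go away because address-dependency ordering is now provided by READ_ONCE() itself, via the <asm/rwonce.h> include added at the top of this file, so no caller needs an explicit dependency barrier. A minimal sketch of the publish/consume pattern that used to require smp_read_barrier_depends() on Alpha (the struct and globals are hypothetical):

struct foo {
	int a;
};

static struct foo *gp;			/* shared, published pointer */

/* Publisher: initialise the object, then make the pointer visible. */
void publish(struct foo *p)
{
	p->a = 42;
	smp_store_release(&gp, p);	/* orders the init before publication */
}

/* Consumer: READ_ONCE() alone now orders the dependent dereference. */
int consume(void)
{
	struct foo *p = READ_ONCE(gp);

	return p ? p->a : -1;		/* no smp_read_barrier_depends() */
}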
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 384b5c835ced..18b0f4eee8cb 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -3,6 +3,7 @@
#define _ASM_GENERIC_BUG_H
#include <linux/compiler.h>
+#include <linux/instrumentation.h>
#define CUT_HERE "------------[ cut here ]------------\n"
@@ -83,14 +84,19 @@ extern __printf(4, 5)
void warn_slowpath_fmt(const char *file, const int line, unsigned taint,
const char *fmt, ...);
#define __WARN() __WARN_printf(TAINT_WARN, NULL)
-#define __WARN_printf(taint, arg...) \
- warn_slowpath_fmt(__FILE__, __LINE__, taint, arg)
+#define __WARN_printf(taint, arg...) do { \
+ instrumentation_begin(); \
+ warn_slowpath_fmt(__FILE__, __LINE__, taint, arg); \
+ instrumentation_end(); \
+ } while (0)
#else
extern __printf(1, 2) void __warn_printk(const char *fmt, ...);
#define __WARN() __WARN_FLAGS(BUGFLAG_TAINT(TAINT_WARN))
#define __WARN_printf(taint, arg...) do { \
+ instrumentation_begin(); \
__warn_printk(arg); \
__WARN_FLAGS(BUGFLAG_NO_CUT_HERE | BUGFLAG_TAINT(taint));\
+ instrumentation_end(); \
} while (0)
#define WARN_ON_ONCE(condition) ({ \
int __ret_warn_on = !!(condition); \
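The WARN() slow paths gain instrumentation_begin()/instrumentation_end() so they can be reached from noinstr code: the markers tell objtool that the instrumentable printk machinery in between is intentional. Roughly how that is exercised (the entry helper below is hypothetical):

#include <linux/bug.h>
#include <linux/instrumentation.h>

/* Hypothetical low-level entry check; instrumentation is normally off here. */
noinstr void hypothetical_entry_check(unsigned long bad_state)
{
	/*
	 * The __WARN*() helpers this expands to now bracket their printk
	 * machinery with instrumentation_begin()/instrumentation_end(),
	 * which keeps objtool's noinstr validation quiet.
	 */
	WARN_ON_ONCE(bad_state);
}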
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
index 907fa5d16494..4a674db4e1fa 100644
--- a/include/asm-generic/cacheflush.h
+++ b/include/asm-generic/cacheflush.h
@@ -2,6 +2,11 @@
#ifndef _ASM_GENERIC_CACHEFLUSH_H
#define _ASM_GENERIC_CACHEFLUSH_H
+struct mm_struct;
+struct vm_area_struct;
+struct page;
+struct address_space;
+
/*
* The cache doesn't need to be flushed when TLB entries change when
* the cache is mapped to physical memory, not virtual memory
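The forward declarations let the generic cacheflush prototypes stand alone without dragging in <linux/mm.h>: an incomplete type is sufficient when only pointers cross the interface, as in this small illustration (function name hypothetical):

struct vm_area_struct;			/* incomplete type is enough here */

void hypothetical_flush_range(struct vm_area_struct *vma,
			      unsigned long start, unsigned long end);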
diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h
index 40f85decc2ee..8e1e6244a89d 100644
--- a/include/asm-generic/hugetlb.h
+++ b/include/asm-generic/hugetlb.h
@@ -122,7 +122,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
#ifndef __HAVE_ARCH_HUGE_PTEP_GET
static inline pte_t huge_ptep_get(pte_t *ptep)
{
- return READ_ONCE(*ptep);
+ return ptep_get(ptep);
}
#endif
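huge_ptep_get() now goes through ptep_get() rather than a bare READ_ONCE(*ptep), so architectures that override ptep_get() (for PTEs wider than a machine word, or needing a special read sequence) are honoured on the hugetlb path too. For reference, the generic fallback behaves the same as before (a sketch of the shape, not copied from this diff):

/* Generic fallback: ptep_get() is just an untorn read of the entry... */
static inline pte_t generic_ptep_get(pte_t *ptep)
{
	return READ_ONCE(*ptep);
}
/*
 * ...but an architecture may provide its own ptep_get(), and
 * huge_ptep_get() now inherits that override instead of bypassing it.
 */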
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 8b1e020e9a03..30a3aab312e6 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -456,7 +456,7 @@ static inline void writesq(volatile void __iomem *addr, const void *buffer,
#if !defined(inb) && !defined(_inb)
#define _inb _inb
-static inline u16 _inb(unsigned long addr)
+static inline u8 _inb(unsigned long addr)
{
u8 val;
@@ -482,7 +482,7 @@ static inline u16 _inw(unsigned long addr)
#if !defined(inl) && !defined(_inl)
#define _inl _inl
-static inline u16 _inl(unsigned long addr)
+static inline u32 _inl(unsigned long addr)
{
u32 val;
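Both hunks fix copy-and-paste return types in the generic port accessors: _inb() reads a single byte and _inl() a 32-bit value, so u16 was wrong in each case (it widened the byte and would truncate the long-word). The three widths, for reference (the driver snippet is hypothetical):

static void probe_ports(unsigned long base)	/* hypothetical port base */
{
	u8  b = inb(base);		/* 8-bit  port read */
	u16 w = inw(base + 2);		/* 16-bit port read */
	u32 l = inl(base + 4);		/* 32-bit port read */

	(void)b; (void)w; (void)l;
}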
diff --git a/include/asm-generic/mmiowb.h b/include/asm-generic/mmiowb.h
index 9439ff037b2d..5698fca3bf56 100644
--- a/include/asm-generic/mmiowb.h
+++ b/include/asm-generic/mmiowb.h
@@ -27,7 +27,7 @@
#include <asm/smp.h>
DECLARE_PER_CPU(struct mmiowb_state, __mmiowb_state);
-#define __mmiowb_state() this_cpu_ptr(&__mmiowb_state)
+#define __mmiowb_state() raw_cpu_ptr(&__mmiowb_state)
#else
#define __mmiowb_state() arch_mmiowb_state()
#endif /* arch_mmiowb_state */
@@ -35,7 +35,9 @@ DECLARE_PER_CPU(struct mmiowb_state, __mmiowb_state);
static inline void mmiowb_set_pending(void)
{
struct mmiowb_state *ms = __mmiowb_state();
- ms->mmiowb_pending = ms->nesting_count;
+
+ if (likely(ms->nesting_count))
+ ms->mmiowb_pending = ms->nesting_count;
}
static inline void mmiowb_spin_lock(void)
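mmiowb_set_pending() is called from the MMIO write accessors; it now records a pending barrier only while nesting_count is non-zero, i.e. while at least one spinlock is held, and __mmiowb_state() switches to raw_cpu_ptr() so the accessor can also be reached from preemptible context without tripping the per-CPU debug checks. The pattern being tracked, sketched with hypothetical driver state:

static DEFINE_SPINLOCK(dev_lock);		/* hypothetical device state */
static void __iomem *dev_regs;

static void kick_device(u32 cmd)
{
	spin_lock(&dev_lock);
	/* writel() marks a pending mmiowb(); the unlock path flushes it so
	 * the posted MMIO write is ordered against handing the lock over. */
	writel(cmd, dev_regs);
	spin_unlock(&dev_lock);
}

static void kick_device_unlocked(u32 cmd)
{
	/* Outside any spinlock, nesting_count is zero, so this write no
	 * longer leaves a stale "pending" flag behind. */
	writel(cmd, dev_regs);
}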
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index fde943d180e0..2b26cd729b94 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -11,6 +11,7 @@
#define __ASM_GENERIC_QSPINLOCK_H
#include <asm-generic/qspinlock_types.h>
+#include <linux/atomic.h>
/**
* queued_spin_is_locked - is the spinlock locked?
diff --git a/include/asm-generic/qspinlock_types.h b/include/asm-generic/qspinlock_types.h
index 56d1309d32f8..2fd1fb89ec36 100644
--- a/include/asm-generic/qspinlock_types.h
+++ b/include/asm-generic/qspinlock_types.h
@@ -9,15 +9,7 @@
#ifndef __ASM_GENERIC_QSPINLOCK_TYPES_H
#define __ASM_GENERIC_QSPINLOCK_TYPES_H
-/*
- * Including atomic.h with PARAVIRT on will cause compilation errors because
- * of recursive header file incluson via paravirt_types.h. So don't include
- * it if PARAVIRT is on.
- */
-#ifndef CONFIG_PARAVIRT
#include <linux/types.h>
-#include <linux/atomic.h>
-#endif
typedef struct qspinlock {
union {
diff --git a/include/asm-generic/rwonce.h b/include/asm-generic/rwonce.h
new file mode 100644
index 000000000000..8d0a6280e982
--- /dev/null
+++ b/include/asm-generic/rwonce.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Prevent the compiler from merging or refetching reads or writes. The
+ * compiler is also forbidden from reordering successive instances of
+ * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
+ * particular ordering. One way to make the compiler aware of ordering is to
+ * put the two invocations of READ_ONCE or WRITE_ONCE in different C
+ * statements.
+ *
+ * These two macros will also work on aggregate data types like structs or
+ * unions.
+ *
+ * Their two major use cases are: (1) Mediating communication between
+ * process-level code and irq/NMI handlers, all running on the same CPU,
+ * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
+ * mutilate accesses that either do not require ordering or that interact
+ * with an explicit memory barrier or atomic instruction that provides the
+ * required ordering.
+ */
+#ifndef __ASM_GENERIC_RWONCE_H
+#define __ASM_GENERIC_RWONCE_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/compiler_types.h>
+#include <linux/kasan-checks.h>
+#include <linux/kcsan-checks.h>
+
+/*
+ * Yes, this permits 64-bit accesses on 32-bit architectures. These will
+ * actually be atomic in some cases (namely Armv7 + LPAE), but for others we
+ * rely on the access being split into 2x32-bit accesses for a 32-bit quantity
+ * (e.g. a virtual address) and a strong prevailing wind.
+ */
+#define compiletime_assert_rwonce_type(t) \
+ compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long), \
+ "Unsupported access size for {READ,WRITE}_ONCE().")
+
+/*
+ * Use __READ_ONCE() instead of READ_ONCE() if you do not require any
+ * atomicity. Note that this may result in tears!
+ */
+#ifndef __READ_ONCE
+#define __READ_ONCE(x) (*(const volatile __unqual_scalar_typeof(x) *)&(x))
+#endif
+
+#define READ_ONCE(x) \
+({ \
+ compiletime_assert_rwonce_type(x); \
+ __READ_ONCE(x); \
+})
+
+#define __WRITE_ONCE(x, val) \
+do { \
+ *(volatile typeof(x) *)&(x) = (val); \
+} while (0)
+
+#define WRITE_ONCE(x, val) \
+do { \
+ compiletime_assert_rwonce_type(x); \
+ __WRITE_ONCE(x, val); \
+} while (0)
+
+static __no_sanitize_or_inline
+unsigned long __read_once_word_nocheck(const void *addr)
+{
+ return __READ_ONCE(*(unsigned long *)addr);
+}
+
+/*
+ * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need to load a
+ * word from memory atomically but without telling KASAN/KCSAN. This is
+ * usually used by unwinding code when walking the stack of a running process.
+ */
+#define READ_ONCE_NOCHECK(x) \
+({ \
+ compiletime_assert(sizeof(x) == sizeof(unsigned long), \
+ "Unsupported access size for READ_ONCE_NOCHECK()."); \
+ (typeof(x))__read_once_word_nocheck(&(x)); \
+})
+
+static __no_kasan_or_inline
+unsigned long read_word_at_a_time(const void *addr)
+{
+ kasan_check_read(addr, 1);
+ return *(unsigned long *)addr;
+}
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_GENERIC_RWONCE_H */
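The header comment spells out the two intended use cases; a minimal sketch of the first (process-level code and an interrupt handler sharing a flag on the same CPU), using only the macros defined above (names are hypothetical):

static int work_pending;			/* shared flag */

/* IRQ handler: publish the flag with a single, untorn store. */
static void hypothetical_irq_handler(void)
{
	WRITE_ONCE(work_pending, 1);
}

/* Process context: the compiler must reload the flag on every iteration
 * and may not fuse, tear or hoist the access out of the loop. */
static void hypothetical_poll(void)
{
	while (!READ_ONCE(work_pending))
		cpu_relax();

	WRITE_ONCE(work_pending, 0);
}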
diff --git a/include/asm-generic/seccomp.h b/include/asm-generic/seccomp.h
index 1321ac7821d7..6b6f42bc58f9 100644
--- a/include/asm-generic/seccomp.h
+++ b/include/asm-generic/seccomp.h
@@ -33,7 +33,7 @@ static inline const int *get_compat_mode1_syscalls(void)
static const int mode1_syscalls_32[] = {
__NR_seccomp_read_32, __NR_seccomp_write_32,
__NR_seccomp_exit_32, __NR_seccomp_sigreturn_32,
- 0, /* null terminated */
+ -1, /* negative terminated */
};
return mode1_syscalls_32;
}
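The sentinel moves from 0 to -1 because 0 is itself a valid syscall number on several ABIs (e.g. __NR_read), so a zero terminator could never appear as a real entry in the table. Callers walk the list until they hit a negative value; a sketch of such a walk (helper name hypothetical):

/* Return true if this_syscall appears in a negative-terminated list. */
static bool mode1_allowed(const int *list, int this_syscall)
{
	for (; *list >= 0; list++) {
		if (*list == this_syscall)
			return true;
	}
	return false;
}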
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 3f1649a8cf55..ef75ec86f865 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -512,6 +512,38 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
}
#endif
+/*
+ * tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end,
+ * and set corresponding cleared_*.
+ */
+static inline void tlb_flush_pte_range(struct mmu_gather *tlb,
+ unsigned long address, unsigned long size)
+{
+ __tlb_adjust_range(tlb, address, size);
+ tlb->cleared_ptes = 1;
+}
+
+static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
+ unsigned long address, unsigned long size)
+{
+ __tlb_adjust_range(tlb, address, size);
+ tlb->cleared_pmds = 1;
+}
+
+static inline void tlb_flush_pud_range(struct mmu_gather *tlb,
+ unsigned long address, unsigned long size)
+{
+ __tlb_adjust_range(tlb, address, size);
+ tlb->cleared_puds = 1;
+}
+
+static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
+ unsigned long address, unsigned long size)
+{
+ __tlb_adjust_range(tlb, address, size);
+ tlb->cleared_p4ds = 1;
+}
+
#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif
@@ -525,19 +557,17 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
*/
#define tlb_remove_tlb_entry(tlb, ptep, address) \
do { \
- __tlb_adjust_range(tlb, address, PAGE_SIZE); \
- tlb->cleared_ptes = 1; \
+ tlb_flush_pte_range(tlb, address, PAGE_SIZE); \
__tlb_remove_tlb_entry(tlb, ptep, address); \
} while (0)
#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
do { \
unsigned long _sz = huge_page_size(h); \
- __tlb_adjust_range(tlb, address, _sz); \
if (_sz == PMD_SIZE) \
- tlb->cleared_pmds = 1; \
+ tlb_flush_pmd_range(tlb, address, _sz); \
else if (_sz == PUD_SIZE) \
- tlb->cleared_puds = 1; \
+ tlb_flush_pud_range(tlb, address, _sz); \
__tlb_remove_tlb_entry(tlb, ptep, address); \
} while (0)
@@ -551,8 +581,7 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address) \
do { \
- __tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE); \
- tlb->cleared_pmds = 1; \
+ tlb_flush_pmd_range(tlb, address, HPAGE_PMD_SIZE); \
__tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \
} while (0)
@@ -566,8 +595,7 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
#define tlb_remove_pud_tlb_entry(tlb, pudp, address) \
do { \
- __tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE); \
- tlb->cleared_puds = 1; \
+ tlb_flush_pud_range(tlb, address, HPAGE_PUD_SIZE); \
__tlb_remove_pud_tlb_entry(tlb, pudp, address); \
} while (0)
@@ -592,9 +620,8 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
#ifndef pte_free_tlb
#define pte_free_tlb(tlb, ptep, address) \
do { \
- __tlb_adjust_range(tlb, address, PAGE_SIZE); \
+ tlb_flush_pmd_range(tlb, address, PAGE_SIZE); \
tlb->freed_tables = 1; \
- tlb->cleared_pmds = 1; \
__pte_free_tlb(tlb, ptep, address); \
} while (0)
#endif
@@ -602,9 +629,8 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address) \
do { \
- __tlb_adjust_range(tlb, address, PAGE_SIZE); \
+ tlb_flush_pud_range(tlb, address, PAGE_SIZE); \
tlb->freed_tables = 1; \
- tlb->cleared_puds = 1; \
__pmd_free_tlb(tlb, pmdp, address); \
} while (0)
#endif
@@ -612,9 +638,8 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address) \
do { \
- __tlb_adjust_range(tlb, address, PAGE_SIZE); \
+ tlb_flush_p4d_range(tlb, address, PAGE_SIZE); \
tlb->freed_tables = 1; \
- tlb->cleared_p4ds = 1; \
__pud_free_tlb(tlb, pudp, address); \
} while (0)
#endif
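The new tlb_flush_{pte,pmd,pud,p4d}_range() helpers bundle the range adjustment with the matching cleared_* flag, replacing the open-coded pairs in the macros above and giving architecture code one call that records exactly which page-table level was invalidated. A sketch of an external caller, assuming an mmu_gather that is already set up (function name hypothetical):

/* Record that a PMD-level (huge page) entry covering addr was cleared:
 * grows tlb->start/end and sets tlb->cleared_pmds in one step. */
static void hypothetical_zap_huge_pmd(struct mmu_gather *tlb,
				      unsigned long addr)
{
	tlb_flush_pmd_range(tlb, addr, HPAGE_PMD_SIZE);
	/* ... the actual pmdp clear and page freeing are elided ... */
}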
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index db600ef218d7..de8493cc3082 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -109,12 +109,31 @@
#endif
/*
- * Align to a 32 byte boundary equal to the
- * alignment gcc 4.5 uses for a struct
+ * GCC 4.5 and later have a 32 bytes section alignment for structures.
+ * Except GCC 4.9, that feels the need to align on 64 bytes.
*/
+#if __GNUC__ == 4 && __GNUC_MINOR__ == 9
+#define STRUCT_ALIGNMENT 64
+#else
#define STRUCT_ALIGNMENT 32
+#endif
#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)
+/*
+ * The order of the sched class addresses are important, as they are
+ * used to determine the order of the priority of each sched class in
+ * relation to each other.
+ */
+#define SCHED_DATA \
+ STRUCT_ALIGN(); \
+ __begin_sched_classes = .; \
+ *(__idle_sched_class) \
+ *(__fair_sched_class) \
+ *(__rt_sched_class) \
+ *(__dl_sched_class) \
+ *(__stop_sched_class) \
+ __end_sched_classes = .;
+
/* The actual configuration determine if the init/exit sections
* are handled as text/data or they can be discarded (which
* often happens at runtime)
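SCHED_DATA lays the five sched_class descriptors out back to back in ascending priority order (idle lowest, stop highest), bracketed by __begin_sched_classes/__end_sched_classes. With that guarantee the scheduler can compare class priorities by comparing the pointers themselves rather than walking a ->next chain; a sketch of the kind of test this enables (helper name hypothetical):

/* With the linker placing classes lowest-to-highest, "is a a higher
 * priority class than b?" reduces to pointer comparison. */
static inline bool class_above(const struct sched_class *a,
			       const struct sched_class *b)
{
	return a > b;
}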
@@ -341,7 +360,8 @@
#define PAGE_ALIGNED_DATA(page_align) \
. = ALIGN(page_align); \
- *(.data..page_aligned)
+ *(.data..page_aligned) \
+ . = ALIGN(page_align);
#define READ_MOSTLY_DATA(align) \
. = ALIGN(align); \
@@ -388,6 +408,7 @@
.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
__start_rodata = .; \
*(.rodata) *(.rodata.*) \
+ SCHED_DATA \
RO_AFTER_INIT_DATA /* Read only after init */ \
. = ALIGN(8); \
__start___tracepoints_ptrs = .; \
@@ -737,7 +758,9 @@
. = ALIGN(bss_align); \
.bss : AT(ADDR(.bss) - LOAD_OFFSET) { \
BSS_FIRST_SECTIONS \
+ . = ALIGN(PAGE_SIZE); \
*(.bss..page_aligned) \
+ . = ALIGN(PAGE_SIZE); \
*(.dynbss) \
*(BSS_MAIN) \
*(COMMON) \