Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/Kbuild           |   1
-rw-r--r--  include/asm-generic/cacheflush.h     |  33
-rw-r--r--  include/asm-generic/export.h         |   8
-rw-r--r--  include/asm-generic/io.h             |   4
-rw-r--r--  include/asm-generic/iomap.h          |   4
-rw-r--r--  include/asm-generic/mm_hooks.h       |   5
-rw-r--r--  include/asm-generic/percpu.h         |  10
-rw-r--r--  include/asm-generic/pgtable.h        |  20
-rw-r--r--  include/asm-generic/tlb.h            | 120
-rw-r--r--  include/asm-generic/vdso/vsyscall.h  |   4
10 files changed, 139 insertions, 70 deletions
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
index ddfee1bd9dc1..cd17d50697cc 100644
--- a/include/asm-generic/Kbuild
+++ b/include/asm-generic/Kbuild
@@ -4,5 +4,6 @@
# (This file is not included when SRCARCH=um since UML borrows several
# asm headers from the host architecture.)
+mandatory-y += dma-contiguous.h
mandatory-y += msi.h
mandatory-y += simd.h
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
index a950a22c4890..cac7404b2bdd 100644
--- a/include/asm-generic/cacheflush.h
+++ b/include/asm-generic/cacheflush.h
@@ -11,71 +11,102 @@
* The cache doesn't need to be flushed when TLB entries change if the
* cache is mapped to physical memory, not virtual memory
*/
+#ifndef flush_cache_all
static inline void flush_cache_all(void)
{
}
+#endif
+#ifndef flush_cache_mm
static inline void flush_cache_mm(struct mm_struct *mm)
{
}
+#endif
+#ifndef flush_cache_dup_mm
static inline void flush_cache_dup_mm(struct mm_struct *mm)
{
}
+#endif
+#ifndef flush_cache_range
static inline void flush_cache_range(struct vm_area_struct *vma,
unsigned long start,
unsigned long end)
{
}
+#endif
+#ifndef flush_cache_page
static inline void flush_cache_page(struct vm_area_struct *vma,
unsigned long vmaddr,
unsigned long pfn)
{
}
+#endif
+#ifndef flush_dcache_page
static inline void flush_dcache_page(struct page *page)
{
}
+#endif
+#ifndef flush_dcache_mmap_lock
static inline void flush_dcache_mmap_lock(struct address_space *mapping)
{
}
+#endif
+#ifndef flush_dcache_mmap_unlock
static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
{
}
+#endif
+#ifndef flush_icache_range
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
}
+#endif
+#ifndef flush_icache_page
static inline void flush_icache_page(struct vm_area_struct *vma,
struct page *page)
{
}
+#endif
+#ifndef flush_icache_user_range
static inline void flush_icache_user_range(struct vm_area_struct *vma,
struct page *page,
unsigned long addr, int len)
{
}
+#endif
+#ifndef flush_cache_vmap
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
}
+#endif
+#ifndef flush_cache_vunmap
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}
+#endif
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+#ifndef copy_to_user_page
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
memcpy(dst, src, len); \
flush_icache_user_range(vma, page, vaddr, len); \
} while (0)
+#endif
+
+#ifndef copy_from_user_page
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
memcpy(dst, src, len)
+#endif
#endif /* __ASM_CACHEFLUSH_H */
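With every stub now guarded, an architecture can override exactly the hooks
it needs and inherit the no-op defaults for everything else. A minimal sketch
of the arch side (the arch/foo path and the extern are hypothetical):

	/* arch/foo/include/asm/cacheflush.h -- hypothetical */
	#define flush_dcache_page flush_dcache_page	/* suppress the generic stub */
	extern void flush_dcache_page(struct page *page);

	#include <asm-generic/cacheflush.h>	/* supplies the remaining no-ops */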
diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h
index afddc5442e92..365345f9a9e3 100644
--- a/include/asm-generic/export.h
+++ b/include/asm-generic/export.h
@@ -27,9 +27,11 @@
.endm
/*
- * note on .section use: @progbits vs %progbits nastiness doesn't matter,
- * since we immediately emit into those sections anyway.
+ * note on .section use: we specify progbits since usage of the "M" (SHF_MERGE)
+ * section flag requires it. Use '%progbits' instead of '@progbits' since the
+ * former apparently works on all arches according to the binutils source.
*/
+
.macro ___EXPORT_SYMBOL name,val,sec
#ifdef CONFIG_MODULES
.section ___ksymtab\sec+\name,"a"
@@ -37,7 +39,7 @@
__ksymtab_\name:
__put \val, __kstrtab_\name
.previous
- .section __ksymtab_strings,"a"
+ .section __ksymtab_strings,"aMS",%progbits,1
__kstrtab_\name:
.asciz "\name"
.previous
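The "aMS",%progbits,1 flags mark __ksymtab_strings as allocatable, mergeable,
and made of NUL-terminated strings with 1-byte units, so the linker can
deduplicate identical symbol names. Compilers already use the same mechanism
for ordinary string literals; a quick way to observe it (hypothetical demo,
inspect the output of gcc -S):

	const char *a = "shared";
	const char *b = "shared";
	/*
	 * Both literals are emitted into .rodata.str1.1 via
	 * .section .rodata.str1.1,"aMS",@progbits,1 and the linker keeps a
	 * single copy -- the behaviour the new __ksymtab_strings flags request.
	 */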
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 325fc98cc9ff..d39ac997dda8 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -960,10 +960,6 @@ static inline void __iomem *ioremap(phys_addr_t addr, size_t size)
}
#endif /* !CONFIG_MMU || CONFIG_GENERIC_IOREMAP */
-#ifndef ioremap_nocache
-#define ioremap_nocache ioremap
-#endif
-
#ifndef ioremap_wc
#define ioremap_wc ioremap
#endif
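With the ioremap_nocache alias gone, plain ioremap() is the uncached MMIO
mapping, and the _wc/_wt variants above simply fall back to it. A minimal
driver-style sketch (the physical address and register offset are
hypothetical):

	void __iomem *regs = ioremap(0xfe000000, 0x1000);	/* uncached */

	if (!regs)
		return -ENOMEM;
	writel(1, regs + 0x10);		/* hypothetical control register */
	iounmap(regs);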
diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h
index a008f504a2d0..9d28a5e82f73 100644
--- a/include/asm-generic/iomap.h
+++ b/include/asm-generic/iomap.h
@@ -94,11 +94,11 @@ extern void ioport_unmap(void __iomem *);
#endif
#ifndef ARCH_HAS_IOREMAP_WC
-#define ioremap_wc ioremap_nocache
+#define ioremap_wc ioremap
#endif
#ifndef ARCH_HAS_IOREMAP_WT
-#define ioremap_wt ioremap_nocache
+#define ioremap_wt ioremap
#endif
#ifdef CONFIG_PCI
diff --git a/include/asm-generic/mm_hooks.h b/include/asm-generic/mm_hooks.h
index 6736ed2f632b..4dbb177d1150 100644
--- a/include/asm-generic/mm_hooks.h
+++ b/include/asm-generic/mm_hooks.h
@@ -22,11 +22,6 @@ static inline void arch_unmap(struct mm_struct *mm,
{
}
-static inline void arch_bprm_mm_init(struct mm_struct *mm,
- struct vm_area_struct *vma)
-{
-}
-
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
bool write, bool execute, bool foreign)
{
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index c2de013b2cf4..35e4a53b83e6 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -74,7 +74,7 @@ do { \
#define raw_cpu_generic_add_return(pcp, val) \
({ \
- typeof(&(pcp)) __p = raw_cpu_ptr(&(pcp)); \
+ typeof(pcp) *__p = raw_cpu_ptr(&(pcp)); \
\
*__p += val; \
*__p; \
@@ -82,7 +82,7 @@ do { \
#define raw_cpu_generic_xchg(pcp, nval) \
({ \
- typeof(&(pcp)) __p = raw_cpu_ptr(&(pcp)); \
+ typeof(pcp) *__p = raw_cpu_ptr(&(pcp)); \
typeof(pcp) __ret; \
__ret = *__p; \
*__p = nval; \
@@ -91,7 +91,7 @@ do { \
#define raw_cpu_generic_cmpxchg(pcp, oval, nval) \
({ \
- typeof(&(pcp)) __p = raw_cpu_ptr(&(pcp)); \
+ typeof(pcp) *__p = raw_cpu_ptr(&(pcp)); \
typeof(pcp) __ret; \
__ret = *__p; \
if (__ret == (oval)) \
@@ -101,8 +101,8 @@ do { \
#define raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
({ \
- typeof(&(pcp1)) __p1 = raw_cpu_ptr(&(pcp1)); \
- typeof(&(pcp2)) __p2 = raw_cpu_ptr(&(pcp2)); \
+ typeof(pcp1) *__p1 = raw_cpu_ptr(&(pcp1)); \
+ typeof(pcp2) *__p2 = raw_cpu_ptr(&(pcp2)); \
int __ret = 0; \
if (*__p1 == (oval1) && *__p2 == (oval2)) { \
*__p1 = nval1; \
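Declaring the temporary as typeof(pcp) * gives it the plain pointer type that
raw_cpu_ptr() returns; typeof(&(pcp)) could instead pick up qualifiers from
the address-of expression (e.g. sparse's __percpu address space). The
semantics are unchanged; a user-space reduction of the cmpxchg macro
(raw_cpu_ptr() mocked as the identity, one fake CPU) shows the behaviour it
implements:

	#include <stdio.h>

	#define raw_cpu_ptr(p) (p)	/* stand-in for the real per-CPU lookup */

	#define raw_cpu_generic_cmpxchg(pcp, oval, nval)	\
	({							\
		typeof(pcp) *__p = raw_cpu_ptr(&(pcp));		\
		typeof(pcp) __ret = *__p;			\
		if (__ret == (oval))				\
			*__p = nval;				\
		__ret;						\
	})

	int main(void)
	{
		int counter = 5;

		printf("%d\n", raw_cpu_generic_cmpxchg(counter, 5, 9)); /* 5: swapped */
		printf("%d\n", raw_cpu_generic_cmpxchg(counter, 5, 1)); /* 9: no swap */
		printf("%d\n", counter);                                 /* 9 */
		return 0;
	}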
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 798ea36a0549..e2e2bef07dd2 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -1238,4 +1238,24 @@ static inline bool arch_has_pfn_modify_check(void)
#define mm_pmd_folded(mm) __is_defined(__PAGETABLE_PMD_FOLDED)
#endif
+/*
+ * p?d_leaf() - true if this entry is a final mapping to a physical address.
+ * This differs from p?d_huge() in that it is always available (if the
+ * architecture supports large pages at the appropriate level) even when
+ * CONFIG_HUGETLB_PAGE is not defined.
+ * Only meaningful when called on a valid entry.
+ */
+#ifndef pgd_leaf
+#define pgd_leaf(x) 0
+#endif
+#ifndef p4d_leaf
+#define p4d_leaf(x) 0
+#endif
+#ifndef pud_leaf
+#define pud_leaf(x) 0
+#endif
+#ifndef pmd_leaf
+#define pmd_leaf(x) 0
+#endif
+
#endif /* _ASM_GENERIC_PGTABLE_H */
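Because the fallbacks evaluate to 0, generic page-table walkers can test for
leaf entries at every level without #ifdefs; only architectures that can map
large pages at a given level return non-zero. A sketch of the intended use
(the helpers around the predicate are hypothetical):

	static int walk_pmd(pud_t *pud, unsigned long addr)
	{
		pmd_t *pmd = pmd_offset(pud, addr);

		if (pmd_leaf(*pmd))
			/* final mapping, e.g. a 2M page on x86-64 */
			return note_leaf_mapping(pmd, addr);	/* hypothetical */

		return walk_ptes(pmd, addr);			/* hypothetical */
	}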
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 2b10036fefd0..f391f6b500b4 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -56,6 +56,15 @@
* Defaults to flushing at tlb_end_vma() to reset the range; helps when
* there's large holes between the VMAs.
*
+ * - tlb_remove_table()
+ *
+ * tlb_remove_table() is the basic primitive to free page-table directories
+ * (__p*_free_tlb()). In its most primitive form it is an alias for
+ * tlb_remove_page() below, for when page directories are pages and have no
+ * additional constraints.
+ *
+ * See also MMU_GATHER_TABLE_FREE and MMU_GATHER_RCU_TABLE_FREE.
+ *
* - tlb_remove_page() / __tlb_remove_page()
* - tlb_remove_page_size() / __tlb_remove_page_size()
*
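In the simple case a page-directory page goes through the same batching as
data pages; a representative __pmd_free_tlb(), simplified from the pattern
several architectures use:

	#define __pmd_free_tlb(tlb, pmd, addr)			\
	do {							\
		pgtable_pmd_page_dtor(virt_to_page(pmd));	\
		tlb_remove_table((tlb), virt_to_page(pmd));	\
	} while (0)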
@@ -121,65 +130,53 @@
*
* Additionally there are a few opt-in features:
*
- * HAVE_MMU_GATHER_PAGE_SIZE
+ * MMU_GATHER_PAGE_SIZE
*
* This ensures we call tlb_flush() every time tlb_change_page_size() actually
* changes the size and provides mmu_gather::page_size to tlb_flush().
*
- * HAVE_RCU_TABLE_FREE
+ * This might be useful if your architecture has size-specific TLB
+ * invalidation instructions.
+ *
+ * MMU_GATHER_TABLE_FREE
*
* This provides tlb_remove_table(), to be used instead of tlb_remove_page()
- * for page directories (__p*_free_tlb()). This provides separate freeing of
- * the page-table pages themselves in a semi-RCU fashion (see comment below).
- * Useful if your architecture doesn't use IPIs for remote TLB invalidates
- * and therefore doesn't naturally serialize with software page-table walkers.
+ * for page directories (__p*_free_tlb()).
+ *
+ * Useful if your architecture has non-page page directories.
*
* When used, an architecture is expected to provide __tlb_remove_table()
* which does the actual freeing of these pages.
*
- * HAVE_RCU_TABLE_NO_INVALIDATE
+ * MMU_GATHER_RCU_TABLE_FREE
*
- * This makes HAVE_RCU_TABLE_FREE avoid calling tlb_flush_mmu_tlbonly() before
- * freeing the page-table pages. This can be avoided if you use
- * HAVE_RCU_TABLE_FREE and your architecture does _NOT_ use the Linux
- * page-tables natively.
+ * Like MMU_GATHER_TABLE_FREE, and adds semi-RCU semantics to the free (see
+ * comment below).
+ *
+ * Useful if your architecture doesn't use IPIs for remote TLB invalidates
+ * and therefore doesn't naturally serialize with software page-table walkers.
*
* MMU_GATHER_NO_RANGE
*
* Use this if your architecture lacks an efficient flush_tlb_range().
- */
-
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-/*
- * Semi RCU freeing of the page directories.
- *
- * This is needed by some architectures to implement software pagetable walkers.
*
- * gup_fast() and other software pagetable walkers do a lockless page-table
- * walk and therefore needs some synchronization with the freeing of the page
- * directories. The chosen means to accomplish that is by disabling IRQs over
- * the walk.
+ * MMU_GATHER_NO_GATHER
*
- * Architectures that use IPIs to flush TLBs will then automagically DTRT,
- * since we unlink the page, flush TLBs, free the page. Since the disabling of
- * IRQs delays the completion of the TLB flush we can never observe an already
- * freed page.
- *
- * Architectures that do not have this (PPC) need to delay the freeing by some
- * other means, this is that means.
- *
- * What we do is batch the freed directory pages (tables) and RCU free them.
- * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
- * holds off grace periods.
- *
- * However, in order to batch these pages we need to allocate storage, this
- * allocation is deep inside the MM code and can thus easily fail on memory
- * pressure. To guarantee progress we fall back to single table freeing, see
- * the implementation of tlb_remove_table_one().
+ * If the option is set, the mmu_gather will no longer track individual pages
+ * for delayed page free. A platform that enables the option needs to provide
+ * its own implementation of the __tlb_remove_page_size() function to free
+ * pages.
*
+ * This is useful if your architecture already flushes TLB entries in the
+ * various ptep_get_and_clear() functions.
*/
+
+#ifdef CONFIG_MMU_GATHER_TABLE_FREE
+
struct mmu_table_batch {
+#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
struct rcu_head rcu;
+#endif
unsigned int nr;
void *tables[0];
};
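Under MMU_GATHER_NO_GATHER the page batching is bypassed entirely and the
architecture frees pages as it goes; the hook it must supply looks roughly
like this (an illustrative sketch, not any one architecture's code):

	bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
				    int page_size)
	{
		free_page_and_swap_cache(page);
		return false;	/* no batch to fill, so never force a flush */
	}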
@@ -189,9 +186,35 @@ struct mmu_table_batch {
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
+#else /* !CONFIG_MMU_GATHER_TABLE_FREE */
+
+/*
+ * Without MMU_GATHER_TABLE_FREE the architecture is assumed to have page based
+ * page directories and we can use the normal page batching to free them.
+ */
+#define tlb_remove_table(tlb, page) tlb_remove_page((tlb), (page))
+
+#endif /* CONFIG_MMU_GATHER_TABLE_FREE */
+
+#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
+/*
+ * This allows an architecture that does not use the linux page-tables for
+ * hardware to skip the TLBI when freeing page tables.
+ */
+#ifndef tlb_needs_table_invalidate
+#define tlb_needs_table_invalidate() (true)
#endif
-#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+#else
+
+#ifdef tlb_needs_table_invalidate
+#error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
+#endif
+
+#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */
+
+
+#ifndef CONFIG_MMU_GATHER_NO_GATHER
/*
* If we can't allocate a page to make a big batch of page pointers
* to work on, then just handle a few from the on-stack structure.
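An architecture opting in to MMU_GATHER_RCU_TABLE_FREE supplies the actual
free routine, and can override the invalidate predicate when the hardware
never walks the Linux page-tables. A sketch of the arch side (modelled on
existing users, but illustrative rather than any one architecture's code):

	/* arch/foo/include/asm/tlb.h -- hypothetical */
	#define tlb_needs_table_invalidate()	cpu_uses_shadow_tables()	/* hypothetical */

	/* arch/foo/mm/pgtable.c */
	void __tlb_remove_table(void *table)
	{
		/* page directories are plain pages here */
		free_page_and_swap_cache(table);
	}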
@@ -227,7 +250,7 @@ extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
struct mmu_gather {
struct mm_struct *mm;
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+#ifdef CONFIG_MMU_GATHER_TABLE_FREE
struct mmu_table_batch *batch;
#endif
@@ -266,22 +289,18 @@ struct mmu_gather {
unsigned int batch_count;
-#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+#ifndef CONFIG_MMU_GATHER_NO_GATHER
struct mmu_gather_batch *active;
struct mmu_gather_batch local;
struct page *__pages[MMU_GATHER_BUNDLE];
-#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
+#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
unsigned int page_size;
#endif
#endif
};
-void arch_tlb_gather_mmu(struct mmu_gather *tlb,
- struct mm_struct *mm, unsigned long start, unsigned long end);
void tlb_flush_mmu(struct mmu_gather *tlb);
-void arch_tlb_finish_mmu(struct mmu_gather *tlb,
- unsigned long start, unsigned long end, bool force);
static inline void __tlb_adjust_range(struct mmu_gather *tlb,
unsigned long address,
@@ -394,7 +413,12 @@ tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
- if (!tlb->end)
+ /*
+ * Anything calling __tlb_adjust_range() also sets at least one of
+ * these bits.
+ */
+ if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
+ tlb->cleared_puds || tlb->cleared_p4ds))
return;
tlb_flush(tlb);
@@ -426,7 +450,7 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
static inline void tlb_change_page_size(struct mmu_gather *tlb,
unsigned int page_size)
{
-#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
+#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
if (tlb->page_size && tlb->page_size != page_size) {
if (!tlb->fullmm && !tlb->need_flush_all)
tlb_flush_mmu(tlb);
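Callers announce the page size they are about to tear down; when it differs
from what the gather already holds, the pending batch is flushed first so a
later size-specific tlb_flush() stays accurate. A sketch of the calling
convention (hypothetical unmap helper, real tlb_* API):

	static void unmap_huge_pmd(struct mmu_gather *tlb, pmd_t *pmd,
				   unsigned long addr)
	{
		tlb_change_page_size(tlb, HPAGE_PMD_SIZE);	/* may flush */
		/* ...clear the PMD entry, then record it: */
		tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
	}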
diff --git a/include/asm-generic/vdso/vsyscall.h b/include/asm-generic/vdso/vsyscall.h
index ce4103208619..cec543d9e87b 100644
--- a/include/asm-generic/vdso/vsyscall.h
+++ b/include/asm-generic/vdso/vsyscall.h
@@ -12,9 +12,9 @@ static __always_inline struct vdso_data *__arch_get_k_vdso_data(void)
#endif /* __arch_get_k_vdso_data */
#ifndef __arch_update_vdso_data
-static __always_inline int __arch_update_vdso_data(void)
+static __always_inline bool __arch_update_vdso_data(void)
{
- return 0;
+ return true;
}
#endif /* __arch_update_vdso_data */
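The stub's return type now matches the question it answers: whether the
generic vdso data should be updated at all. An architecture can veto the
update by overriding the macro; a minimal sketch (the predicate is
hypothetical):

	/* arch/foo/include/asm/vdso/vsyscall.h -- hypothetical */
	#define __arch_update_vdso_data __arch_update_vdso_data
	static __always_inline bool __arch_update_vdso_data(void)
	{
		/* skip the update when the arch clock isn't usable from the vDSO */
		return foo_clock_vdso_capable();	/* hypothetical */
	}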