Diffstat (limited to 'arch/mips/mm')
-rw-r--r--  arch/mips/mm/c-r4k.c            | 17
-rw-r--r--  arch/mips/mm/dma-noncoherent.c  | 51
-rw-r--r--  arch/mips/mm/init.c             |  2
-rw-r--r--  arch/mips/mm/page.c             | 16
-rw-r--r--  arch/mips/mm/sc-mips.c          |  2
-rw-r--r--  arch/mips/mm/tlbex.c            |  8
-rw-r--r--  arch/mips/mm/uasm.c             |  2
7 files changed, 58 insertions, 40 deletions
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 0ef717093262..9cede7ce37e6 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -130,9 +130,10 @@ struct bcache_ops *bcops = &no_sc_ops;
 
 #define R4600_HIT_CACHEOP_WAR_IMPL					\
 do {									\
-	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())		\
+	if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) &&		\
+	    cpu_is_r4600_v2_x())					\
 		*(volatile unsigned long *)CKSEG1;			\
-	if (R4600_V1_HIT_CACHEOP_WAR)					\
+	if (IS_ENABLED(CONFIG_WAR_R4600_V1_HIT_CACHEOP))		\
 		__asm__ __volatile__("nop;nop;nop;nop");		\
 } while (0)
 
@@ -238,7 +239,7 @@ static void r4k_blast_dcache_setup(void)
 		r4k_blast_dcache = blast_dcache128;
 }
 
-/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
+/* force code alignment (used for CONFIG_WAR_TX49XX_ICACHE_INDEX_INV) */
 #define JUMP_TO_ALIGN(order)						\
 	__asm__ __volatile__(						\
 		"b\t1f\n\t"						\
@@ -366,10 +367,11 @@ static void r4k_blast_icache_page_indexed_setup(void)
 	else if (ic_lsize == 16)
 		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
 	else if (ic_lsize == 32) {
-		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
+		if (IS_ENABLED(CONFIG_WAR_R4600_V1_INDEX_ICACHEOP) &&
+		    cpu_is_r4600_v1_x())
 			r4k_blast_icache_page_indexed =
 				blast_icache32_r4600_v1_page_indexed;
-		else if (TX49XX_ICACHE_INDEX_INV_WAR)
+		else if (IS_ENABLED(CONFIG_WAR_TX49XX_ICACHE_INDEX_INV))
 			r4k_blast_icache_page_indexed =
 				tx49_blast_icache32_page_indexed;
 		else if (current_cpu_type() == CPU_LOONGSON2EF)
@@ -394,9 +396,10 @@ static void r4k_blast_icache_setup(void)
 	else if (ic_lsize == 16)
 		r4k_blast_icache = blast_icache16;
 	else if (ic_lsize == 32) {
-		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
+		if (IS_ENABLED(CONFIG_WAR_R4600_V1_INDEX_ICACHEOP) &&
+		    cpu_is_r4600_v1_x())
 			r4k_blast_icache = blast_r4600_v1_icache32;
-		else if (TX49XX_ICACHE_INDEX_INV_WAR)
+		else if (IS_ENABLED(CONFIG_WAR_TX49XX_ICACHE_INDEX_INV))
 			r4k_blast_icache = tx49_blast_icache32;
 		else if (current_cpu_type() == CPU_LOONGSON2EF)
 			r4k_blast_icache = loongson2_blast_icache32;
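
The c-r4k.c conversion swaps the old <asm/war.h> R4600_*_WAR and TX49XX_* constants for IS_ENABLED(CONFIG_WAR_*) tests, so the workaround code is always parsed and type-checked and the disabled branch is discarded by constant folding rather than by the preprocessor. A minimal stand-alone sketch of the trick follows, with the helper macros simplified from include/linux/kconfig.h (the real IS_ENABLED() additionally checks the CONFIG_*_MODULE variant) and the CONFIG_ define planted by hand to play the role of Kconfig:

/* Simplified IS_ENABLED() plumbing, after include/linux/kconfig.h:
 * expands to constant 1 if CONFIG_FOO is #defined to 1 (Kconfig =y),
 * to constant 0 otherwise. */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)
#define IS_ENABLED(option) __is_defined(option)

#define CONFIG_WAR_R4600_V2_HIT_CACHEOP 1 /* hand-rolled stand-in for Kconfig =y */

int main(void)
{
	/* folds to 1 here; drop the #define above and it folds to 0,
	 * while the guarded code stays visible to the compiler */
	return IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) ? 0 : 1;
}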
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
index 563c2c0d0c81..38d3d9143b47 100644
--- a/arch/mips/mm/dma-noncoherent.c
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -5,8 +5,7 @@
  * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
  */
 #include <linux/dma-direct.h>
-#include <linux/dma-noncoherent.h>
-#include <linux/dma-contiguous.h>
+#include <linux/dma-map-ops.h>
 #include <linux/highmem.h>
 
 #include <asm/cache.h>
@@ -55,22 +54,34 @@ void *arch_dma_set_uncached(void *addr, size_t size)
 	return (void *)(__pa(addr) + UNCAC_BASE);
 }
 
-static inline void dma_sync_virt(void *addr, size_t size,
+static inline void dma_sync_virt_for_device(void *addr, size_t size,
 		enum dma_data_direction dir)
 {
 	switch (dir) {
 	case DMA_TO_DEVICE:
 		dma_cache_wback((unsigned long)addr, size);
 		break;
-
 	case DMA_FROM_DEVICE:
 		dma_cache_inv((unsigned long)addr, size);
 		break;
-
 	case DMA_BIDIRECTIONAL:
 		dma_cache_wback_inv((unsigned long)addr, size);
 		break;
+	default:
+		BUG();
+	}
+}
+
+static inline void dma_sync_virt_for_cpu(void *addr, size_t size,
+		enum dma_data_direction dir)
+{
+	switch (dir) {
+	case DMA_TO_DEVICE:
+		break;
+	case DMA_FROM_DEVICE:
+	case DMA_BIDIRECTIONAL:
+		dma_cache_inv((unsigned long)addr, size);
+		break;
 
 	default:
 		BUG();
 	}
@@ -82,7 +93,7 @@ static inline void dma_sync_virt(void *addr, size_t size,
  * configured then the bulk of this loop gets optimized out.
  */
 static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
-		enum dma_data_direction dir)
+		enum dma_data_direction dir, bool for_device)
 {
 	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
 	unsigned long offset = paddr & ~PAGE_MASK;
@@ -90,18 +101,20 @@ static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
 
 	do {
 		size_t len = left;
+		void *addr;
 
 		if (PageHighMem(page)) {
-			void *addr;
-
 			if (offset + len > PAGE_SIZE)
 				len = PAGE_SIZE - offset;
+		}
+
+		addr = kmap_atomic(page);
+		if (for_device)
+			dma_sync_virt_for_device(addr + offset, len, dir);
+		else
+			dma_sync_virt_for_cpu(addr + offset, len, dir);
+		kunmap_atomic(addr);
 
-			addr = kmap_atomic(page);
-			dma_sync_virt(addr + offset, len, dir);
-			kunmap_atomic(addr);
-		} else
-			dma_sync_virt(page_address(page) + offset, size, dir);
 		offset = 0;
 		page++;
 		left -= len;
@@ -111,7 +124,7 @@ static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
 void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
 		enum dma_data_direction dir)
 {
-	dma_sync_phys(paddr, size, dir);
+	dma_sync_phys(paddr, size, dir, true);
 }
 
 #ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
@@ -119,18 +132,10 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
 		enum dma_data_direction dir)
 {
 	if (cpu_needs_post_dma_flush())
-		dma_sync_phys(paddr, size, dir);
+		dma_sync_phys(paddr, size, dir, false);
 }
 #endif
 
-void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-		enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-
-	dma_sync_virt(vaddr, size, direction);
-}
-
 #ifdef CONFIG_DMA_PERDEV_COHERENT
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 		const struct iommu_ops *iommu, bool coherent)
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 6c7bbfe35ba3..07e84a774938 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -569,7 +569,7 @@ unsigned long pgd_current[NR_CPUS];
  * size, and waste space.  So we place it in its own section and align
  * it in the linker script.
  */
-pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
 
 #ifndef __PAGETABLE_PUD_FOLDED
 pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
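
The dma-noncoherent.c split encodes the direction rules once: dma_sync_virt_for_device() writes back and/or invalidates before a transfer, while dma_sync_virt_for_cpu() only ever invalidates afterwards, since a completed DMA_TO_DEVICE transfer needs no CPU-side maintenance. A hedged driver's-eye sketch of the path that lands in these hooks on a noncoherent MIPS system; rx_one(), dev and buf are invented for the illustration, and the dma_* calls are the ordinary <linux/dma-mapping.h> streaming API:

#include <linux/dma-mapping.h>

/* receive one buffer via streaming DMA (illustration only) */
static int rx_one(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* map: arch_sync_dma_for_device() -> dma_sync_virt_for_device(),
	 * an invalidate for DMA_FROM_DEVICE so no dirty line can be
	 * written back on top of the incoming data */
	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... device DMAs into buf ... */

	/* unmap: arch_sync_dma_for_cpu() -> dma_sync_virt_for_cpu(),
	 * which after this patch only invalidates lines the CPU may
	 * have speculatively fetched during the transfer, and does
	 * nothing at all for DMA_TO_DEVICE */
	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
	return 0;
}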
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index cd805b005509..504bc4047c4c 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -250,14 +250,16 @@ static inline void build_clear_pref(u32 **buf, int off)
 		if (cpu_has_cache_cdex_s) {
 			uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
 		} else if (cpu_has_cache_cdex_p) {
-			if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
+			if (IS_ENABLED(CONFIG_WAR_R4600_V1_HIT_CACHEOP) &&
+			    cpu_is_r4600_v1_x()) {
 				uasm_i_nop(buf);
 				uasm_i_nop(buf);
 				uasm_i_nop(buf);
 				uasm_i_nop(buf);
 			}
 
-			if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
+			if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) &&
+			    cpu_is_r4600_v2_x())
 				uasm_i_lw(buf, ZERO, ZERO, AT);
 
 			uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
@@ -302,7 +304,7 @@ void build_clear_page(void)
 	else
 		uasm_i_ori(&buf, A2, A0, off);
 
-	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
+	if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) && cpu_is_r4600_v2_x())
 		uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000));
 
 	off = cache_line_size ? min(8, pref_bias_clear_store / cache_line_size)
@@ -402,14 +404,16 @@ static inline void build_copy_store_pref(u32 **buf, int off)
 		if (cpu_has_cache_cdex_s) {
 			uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
 		} else if (cpu_has_cache_cdex_p) {
-			if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
+			if (IS_ENABLED(CONFIG_WAR_R4600_V1_HIT_CACHEOP) &&
+			    cpu_is_r4600_v1_x()) {
 				uasm_i_nop(buf);
 				uasm_i_nop(buf);
 				uasm_i_nop(buf);
 				uasm_i_nop(buf);
 			}
 
-			if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
+			if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) &&
+			    cpu_is_r4600_v2_x())
 				uasm_i_lw(buf, ZERO, ZERO, AT);
 
 			uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
@@ -453,7 +457,7 @@ void build_copy_page(void)
 	else
 		uasm_i_ori(&buf, A2, A0, off);
 
-	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
+	if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) && cpu_is_r4600_v2_x())
 		uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000));
 
 	off = cache_line_size ? min(8, pref_bias_copy_load / cache_line_size) *
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index 97dc0511e63f..dd0a5becaabd 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -228,6 +228,7 @@ static inline int __init mips_sc_probe(void)
 		 * contradicted by all documentation.
 		 */
 		case MACH_INGENIC_JZ4770:
+		case MACH_INGENIC_JZ4775:
 			c->scache.ways = 4;
 			break;
 
@@ -236,6 +237,7 @@ static inline int __init mips_sc_probe(void)
 		 * but that is contradicted by all documentation.
 		 */
 		case MACH_INGENIC_X1000:
+		case MACH_INGENIC_X1000E:
 			c->scache.sets = 256;
 			c->scache.ways = 4;
 			break;
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 14f8ba93367f..a7521b8f7658 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -83,14 +83,18 @@ static inline int r4k_250MHZhwbug(void)
 	return 0;
 }
 
+extern int sb1250_m3_workaround_needed(void);
+
 static inline int __maybe_unused bcm1250_m3_war(void)
 {
-	return BCM1250_M3_WAR;
+	if (IS_ENABLED(CONFIG_SB1_PASS_2_WORKAROUNDS))
+		return sb1250_m3_workaround_needed();
+	return 0;
 }
 
 static inline int __maybe_unused r10000_llsc_war(void)
 {
-	return R10000_LLSC_WAR;
+	return IS_ENABLED(CONFIG_WAR_R10000_LLSC);
 }
 
 static int use_bbit_insns(void)
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index c56f129c9a4b..81dd226d6b6b 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -394,7 +394,7 @@ I_u2u1u3(_lddir)
 void uasm_i_pref(u32 **buf, unsigned int a, signed int b,
 		 unsigned int c)
 {
-	if (CAVIUM_OCTEON_DCACHE_PREFETCH_WAR && a <= 24 && a != 5)
+	if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && a <= 24 && a != 5)
 		/*
 		 * As per erratum Core-14449, replace prefetches 0-4,
 		 * 6-24 with 'pref 28'.
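
In tlbex.c the compile-time BCM1250_M3_WAR and R10000_LLSC_WAR constants likewise become plain C expressions: IS_ENABLED() folds to 0 or 1, so the compiler discards the sb1250_m3_workaround_needed() call on kernels built without CONFIG_SB1_PASS_2_WORKAROUNDS and the symbol never has to exist there. A sketch of the same shape with invented names (CONFIG_FOO_WORKAROUND, slow_path_fixup()); it relies on the dead call being eliminated, which the kernel's -O2 build does in practice:

extern int slow_path_fixup(void);	/* provided only when the option is =y */

/* always compiled, never #ifdef'ed: the probe collapses to "return 0"
 * when CONFIG_FOO_WORKAROUND is off, and the call above it is elided */
static inline int foo_war(void)
{
	if (IS_ENABLED(CONFIG_FOO_WORKAROUND))
		return slow_path_fixup();
	return 0;
}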
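
The uasm.c hunk above cuts off inside uasm_i_pref(). For context, a simplified sketch of the whole function after the change; build_insn() and insn_pref are uasm internals and the body is abbreviated, so treat it as illustrative rather than authoritative:

void uasm_i_pref(u32 **buf, unsigned int a, signed int b, unsigned int c)
{
	if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && a <= 24 && a != 5)
		/* erratum Core-14449: emit 'pref 28' for hints 0-4 and 6-24 */
		build_insn(buf, insn_pref, c, 28, b);
	else
		build_insn(buf, insn_pref, c, a, b);
}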