Diffstat (limited to 'arch/mips/mm')
-rw-r--r--  arch/mips/mm/Makefile          |  10
-rw-r--r--  arch/mips/mm/c-octeon.c        |   2
-rw-r--r--  arch/mips/mm/c-r3k.c           |   8
-rw-r--r--  arch/mips/mm/c-r4k.c           |  63
-rw-r--r--  arch/mips/mm/c-tx39.c          |  12
-rw-r--r--  arch/mips/mm/cache.c           |   1
-rw-r--r--  arch/mips/mm/cerr-sb1.c        |  30
-rw-r--r--  arch/mips/mm/cex-gen.S         |   6
-rw-r--r--  arch/mips/mm/cex-oct.S         |  36
-rw-r--r--  arch/mips/mm/cex-sb1.S         |   8
-rw-r--r--  arch/mips/mm/dma-default.c     |  27
-rw-r--r--  arch/mips/mm/fault.c           |   4
-rw-r--r--  arch/mips/mm/gup.c             |   2
-rw-r--r--  arch/mips/mm/init.c            |  40
-rw-r--r--  arch/mips/mm/ioremap.c         |   4
-rw-r--r--  arch/mips/mm/page.c            |  16
-rw-r--r--  arch/mips/mm/pgtable-64.c      |   4
-rw-r--r--  arch/mips/mm/sc-ip22.c         |   2
-rw-r--r--  arch/mips/mm/sc-mips.c         |   6
-rw-r--r--  arch/mips/mm/sc-r5k.c          |   4
-rw-r--r--  arch/mips/mm/tlb-r3k.c         |  20
-rw-r--r--  arch/mips/mm/tlb-r4k.c         |   6
-rw-r--r--  arch/mips/mm/tlb-r8k.c         |   2
-rw-r--r--  arch/mips/mm/tlbex.c           | 165
-rw-r--r--  arch/mips/mm/uasm-micromips.c  | 221
-rw-r--r--  arch/mips/mm/uasm-mips.c       | 205
-rw-r--r--  arch/mips/mm/uasm.c            | 330
27 files changed, 804 insertions(+), 430 deletions(-)
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index 90ceb963aaf1..e87aae1f2e80 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -4,7 +4,7 @@
obj-y += cache.o dma-default.o extable.o fault.o \
gup.o init.o mmap.o page.o page-funcs.o \
- tlbex.o tlbex-fault.o uasm.o
+ tlbex.o tlbex-fault.o uasm-mips.o
obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o
obj-$(CONFIG_64BIT) += pgtable-64.o
@@ -16,9 +16,11 @@ obj-$(CONFIG_CPU_R3000) += c-r3k.o tlb-r3k.o
obj-$(CONFIG_CPU_R8000) += c-r4k.o cex-gen.o tlb-r8k.o
obj-$(CONFIG_CPU_SB1) += c-r4k.o cerr-sb1.o cex-sb1.o tlb-r4k.o
obj-$(CONFIG_CPU_TX39XX) += c-tx39.o tlb-r3k.o
-obj-$(CONFIG_CPU_CAVIUM_OCTEON) += c-octeon.o cex-oct.o tlb-r4k.o
+obj-$(CONFIG_CPU_CAVIUM_OCTEON) += c-octeon.o cex-oct.o tlb-r4k.o
obj-$(CONFIG_IP22_CPU_SCACHE) += sc-ip22.o
-obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o
-obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o
+obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o
+obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o
obj-$(CONFIG_MIPS_CPU_SCACHE) += sc-mips.o
+
+obj-$(CONFIG_SYS_SUPPORTS_MICROMIPS) += uasm-micromips.o
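
The Makefile change above is the pivot of the series: the old single uasm.o is replaced by uasm-mips.o, and uasm-micromips.o is built in addition whenever the platform supports microMIPS, so one kernel image can carry instruction encoders for both ISAs. Both new files are thin front ends that pin the ISA and the register-field layout, then include the shared core; a minimal sketch of that pattern (the shift values here are illustrative, the real ones appear in the new files further down):

/*
 * Front-end pattern shared by uasm-mips.c and uasm-micromips.c:
 * pin the ISA, define the ISA-specific field layout, then pull in
 * the common table-driven core.
 */
#define UASM_ISA _UASM_ISA_CLASSIC	/* or _UASM_ISA_MICROMIPS */
#include <asm/uasm.h>

#define RS_SH	21			/* classic layout; microMIPS swaps RS/RT */
#define RT_SH	16

#include "uasm.c"			/* shared encoder core */
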
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 6ec04daf4231..8557fb552863 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -106,7 +106,7 @@ static void octeon_flush_icache_all(void)
* Called to flush all memory associated with a memory
* context.
*
- * @mm: Memory context to flush
+ * @mm: Memory context to flush
*/
static void octeon_flush_cache_mm(struct mm_struct *mm)
{
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index 031c4c2cdf2e..704dc735a59d 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -119,7 +119,7 @@ static void r3k_flush_icache_range(unsigned long start, unsigned long end)
write_c0_status((ST0_ISC|ST0_SWC|flags)&~ST0_IEC);
for (i = 0; i < size; i += 0x080) {
- asm( "sb\t$0, 0x000(%0)\n\t"
+ asm( "sb\t$0, 0x000(%0)\n\t"
"sb\t$0, 0x004(%0)\n\t"
"sb\t$0, 0x008(%0)\n\t"
"sb\t$0, 0x00c(%0)\n\t"
@@ -176,7 +176,7 @@ static void r3k_flush_dcache_range(unsigned long start, unsigned long end)
write_c0_status((ST0_ISC|flags)&~ST0_IEC);
for (i = 0; i < size; i += 0x080) {
- asm( "sb\t$0, 0x000(%0)\n\t"
+ asm( "sb\t$0, 0x000(%0)\n\t"
"sb\t$0, 0x004(%0)\n\t"
"sb\t$0, 0x008(%0)\n\t"
"sb\t$0, 0x00c(%0)\n\t"
@@ -285,13 +285,13 @@ static void r3k_flush_cache_sigtramp(unsigned long addr)
write_c0_status(flags&~ST0_IEC);
/* Fill the TLB to avoid an exception with caches isolated. */
- asm( "lw\t$0, 0x000(%0)\n\t"
+ asm( "lw\t$0, 0x000(%0)\n\t"
"lw\t$0, 0x004(%0)\n\t"
: : "r" (addr) );
write_c0_status((ST0_ISC|ST0_SWC|flags)&~ST0_IEC);
- asm( "sb\t$0, 0x000(%0)\n\t"
+ asm( "sb\t$0, 0x000(%0)\n\t"
"sb\t$0, 0x004(%0)\n\t"
: : "r" (addr) );
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 0f7d788e8810..21813beec7a5 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -33,6 +33,7 @@
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */
#include <asm/traps.h>
+#include <asm/dma-coherence.h>
/*
* Special Variant of smp_call_function for use by cache functions:
@@ -136,7 +137,8 @@ static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed;
}
-static void (* r4k_blast_dcache)(void);
+void (* r4k_blast_dcache)(void);
+EXPORT_SYMBOL(r4k_blast_dcache);
static void __cpuinit r4k_blast_dcache_setup(void)
{
@@ -160,7 +162,7 @@ static void __cpuinit r4k_blast_dcache_setup(void)
"1:\n\t" \
)
#define CACHE32_UNROLL32_ALIGN JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
-#define CACHE32_UNROLL32_ALIGN2 JUMP_TO_ALIGN(11)
+#define CACHE32_UNROLL32_ALIGN2 JUMP_TO_ALIGN(11)
static inline void blast_r4600_v1_icache32(void)
{
@@ -177,7 +179,7 @@ static inline void tx49_blast_icache32(void)
unsigned long end = start + current_cpu_data.icache.waysize;
unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
unsigned long ws_end = current_cpu_data.icache.ways <<
- current_cpu_data.icache.waybit;
+ current_cpu_data.icache.waybit;
unsigned long ws, addr;
CACHE32_UNROLL32_ALIGN2;
@@ -208,7 +210,7 @@ static inline void tx49_blast_icache32_page_indexed(unsigned long page)
unsigned long end = start + PAGE_SIZE;
unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
unsigned long ws_end = current_cpu_data.icache.ways <<
- current_cpu_data.icache.waybit;
+ current_cpu_data.icache.waybit;
unsigned long ws, addr;
CACHE32_UNROLL32_ALIGN2;
@@ -264,7 +266,8 @@ static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}
-static void (* r4k_blast_icache)(void);
+void (* r4k_blast_icache)(void);
+EXPORT_SYMBOL(r4k_blast_icache);
static void __cpuinit r4k_blast_icache_setup(void)
{
@@ -637,7 +640,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
* for the cache instruction on MIPS processors and
* some processors, among them the RM5200 and RM7000
* QED processors will throw an address error for cache
- * hit ops with insufficient alignment. Solved by
+ * hit ops with insufficient alignment. Solved by
* aligning the address to cache line size.
*/
blast_inv_scache_range(addr, addr + size);
@@ -864,7 +867,7 @@ static void __cpuinit probe_pcache(void)
icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
c->icache.ways = 1;
- c->icache.waybit = 0; /* doesn't matter */
+ c->icache.waybit = 0; /* doesn't matter */
dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
@@ -923,7 +926,7 @@ static void __cpuinit probe_pcache(void)
icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
c->icache.ways = 1;
- c->icache.waybit = 0; /* doesn't matter */
+ c->icache.waybit = 0; /* doesn't matter */
dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
@@ -986,8 +989,8 @@ static void __cpuinit probe_pcache(void)
c->icache.ways = 1 + ((config1 >> 16) & 7);
icache_size = c->icache.sets *
- c->icache.ways *
- c->icache.linesz;
+ c->icache.ways *
+ c->icache.linesz;
c->icache.waybit = __ffs(icache_size/c->icache.ways);
if (config & 0x8) /* VI bit */
@@ -1006,8 +1009,8 @@ static void __cpuinit probe_pcache(void)
c->dcache.ways = 1 + ((config1 >> 7) & 7);
dcache_size = c->dcache.sets *
- c->dcache.ways *
- c->dcache.linesz;
+ c->dcache.ways *
+ c->dcache.linesz;
c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);
c->options |= MIPS_CPU_PREFETCH;
@@ -1016,7 +1019,7 @@ static void __cpuinit probe_pcache(void)
/*
* Processor configuration sanity check for the R4000SC erratum
- * #5. With page sizes larger than 32kB there is no possibility
+ * #5. With page sizes larger than 32kB there is no possibility
* to get a VCE exception anymore so we don't care about this
* misconfiguration. The case is rather theoretical anyway;
* presumably no vendor is shipping his hardware in the "bad"
@@ -1057,6 +1060,7 @@ static void __cpuinit probe_pcache(void)
break;
case CPU_M14KC:
+ case CPU_M14KEC:
case CPU_24K:
case CPU_34K:
case CPU_74K:
@@ -1088,7 +1092,7 @@ static void __cpuinit probe_pcache(void)
break;
}
-#ifdef CONFIG_CPU_LOONGSON2
+#ifdef CONFIG_CPU_LOONGSON2
/*
* LOONGSON2 has 4 way icache, but when using indexed cache op,
* one op will act on all 4 ways
@@ -1228,7 +1232,7 @@ static void __cpuinit setup_scache(void)
#ifdef CONFIG_R5000_CPU_SCACHE
r5k_sc_init();
#endif
- return;
+ return;
case CPU_RM7000:
#ifdef CONFIG_RM7000_CPU_SCACHE
@@ -1246,10 +1250,8 @@ static void __cpuinit setup_scache(void)
return;
default:
- if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
- c->isa_level == MIPS_CPU_ISA_M32R2 ||
- c->isa_level == MIPS_CPU_ISA_M64R1 ||
- c->isa_level == MIPS_CPU_ISA_M64R2) {
+ if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
+ MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
#ifdef CONFIG_MIPS_CPU_SCACHE
if (mips_sc_init ()) {
scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
@@ -1378,20 +1380,6 @@ static void __cpuinit coherency_setup(void)
}
}
-#if defined(CONFIG_DMA_NONCOHERENT)
-
-static int __cpuinitdata coherentio;
-
-static int __init setcoherentio(char *str)
-{
- coherentio = 1;
-
- return 0;
-}
-
-early_param("coherentio", setcoherentio);
-#endif
-
static void __cpuinit r4k_cache_error_setup(void)
{
extern char __weak except_vec2_generic;
@@ -1473,9 +1461,14 @@ void __cpuinit r4k_cache_init(void)
build_clear_page();
build_copy_page();
-#if !defined(CONFIG_MIPS_CMP)
+
+ /*
+ * We want to run CMP kernels on core with and without coherent
+ * caches. Therefore, do not use CONFIG_MIPS_CMP to decide whether
+ * or not to flush caches.
+ */
local_r4k___flush_cache_all(NULL);
-#endif
+
coherency_setup();
board_cache_error_setup = r4k_cache_error_setup;
}
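
Besides the whitespace cleanup, the c-r4k.c hunks export the dcache/icache blast hooks, stop gating the full cache flush on CONFIG_MIPS_CMP, move the coherentio command-line handling out to dma-default.c, and rewrite the ISA check in setup_scache() as a mask test. The mask test works because the MIPS_CPU_ISA_M32R1/M32R2/M64R1/M64R2 values are one-hot bit flags (a matching <asm/cpu.h> change in the same series), so membership in a set of ISAs collapses to a single AND; a hedged sketch with illustrative flag values:

/* One-hot ISA flags turn "is this one of these ISAs?" into one AND.
 * The flag values below are illustrative, not the kernel's real ones. */
#define MIPS_CPU_ISA_M32R1	0x00000020
#define MIPS_CPU_ISA_M32R2	0x00000040
#define MIPS_CPU_ISA_M64R1	0x00000080
#define MIPS_CPU_ISA_M64R2	0x00000100

static int cpu_is_mips32_or_mips64(unsigned int isa_level)
{
	return (isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
			     MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) != 0;
}
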
diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c
index 87d23cada6d6..ba9da270289f 100644
--- a/arch/mips/mm/c-tx39.c
+++ b/arch/mips/mm/c-tx39.c
@@ -33,9 +33,9 @@ extern int r3k_have_wired_reg; /* in r3k-tlb.c */
/* This sequence is required to ensure icache is disabled immediately */
#define TX39_STOP_STREAMING() \
__asm__ __volatile__( \
- ".set push\n\t" \
- ".set noreorder\n\t" \
- "b 1f\n\t" \
+ ".set push\n\t" \
+ ".set noreorder\n\t" \
+ "b 1f\n\t" \
"nop\n\t" \
"1:\n\t" \
".set pop" \
@@ -361,7 +361,7 @@ void __cpuinit tx39_cache_init(void)
/* TX39/H core (writethru direct-map cache) */
__flush_cache_vmap = tx39__flush_cache_vmap;
__flush_cache_vunmap = tx39__flush_cache_vunmap;
- flush_cache_all = tx39h_flush_icache_all;
+ flush_cache_all = tx39h_flush_icache_all;
__flush_cache_all = tx39h_flush_icache_all;
flush_cache_mm = (void *) tx39h_flush_icache_all;
flush_cache_range = (void *) tx39h_flush_icache_all;
@@ -409,8 +409,8 @@ void __cpuinit tx39_cache_init(void)
_dma_cache_inv = tx39_dma_cache_inv;
shm_align_mask = max_t(unsigned long,
- (dcache_size / current_cpu_data.dcache.ways) - 1,
- PAGE_SIZE - 1);
+ (dcache_size / current_cpu_data.dcache.ways) - 1,
+ PAGE_SIZE - 1);
break;
}
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 07cec4407b0c..5aeb3eb0b72f 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -48,6 +48,7 @@ void (*flush_icache_all)(void);
EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
EXPORT_SYMBOL(flush_data_cache_page);
+EXPORT_SYMBOL(flush_icache_all);
#ifdef CONFIG_DMA_NONCOHERENT
diff --git a/arch/mips/mm/cerr-sb1.c b/arch/mips/mm/cerr-sb1.c
index 3571090ba178..576add33bf5b 100644
--- a/arch/mips/mm/cerr-sb1.c
+++ b/arch/mips/mm/cerr-sb1.c
@@ -27,7 +27,7 @@
/*
* We'd like to dump the L2_ECC_TAG register on errors, but errata make
- * that unsafe... So for now we don't. (BCM1250/BCM112x erratum SOC-48.)
+ * that unsafe... So for now we don't. (BCM1250/BCM112x erratum SOC-48.)
*/
#undef DUMP_L2_ECC_TAG_ON_ERROR
@@ -48,7 +48,7 @@
#define CP0_CERRI_EXTERNAL (1 << 26)
#define CP0_CERRI_IDX_VALID(c) (!((c) & CP0_CERRI_EXTERNAL))
-#define CP0_CERRI_DATA (CP0_CERRI_DATA_PARITY)
+#define CP0_CERRI_DATA (CP0_CERRI_DATA_PARITY)
#define CP0_CERRD_MULTIPLE (1 << 31)
#define CP0_CERRD_TAG_STATE (1 << 30)
@@ -56,8 +56,8 @@
#define CP0_CERRD_DATA_SBE (1 << 28)
#define CP0_CERRD_DATA_DBE (1 << 27)
#define CP0_CERRD_EXTERNAL (1 << 26)
-#define CP0_CERRD_LOAD (1 << 25)
-#define CP0_CERRD_STORE (1 << 24)
+#define CP0_CERRD_LOAD (1 << 25)
+#define CP0_CERRD_STORE (1 << 24)
#define CP0_CERRD_FILLWB (1 << 23)
#define CP0_CERRD_COHERENCY (1 << 22)
#define CP0_CERRD_DUPTAG (1 << 21)
@@ -69,10 +69,10 @@
(CP0_CERRD_LOAD | CP0_CERRD_STORE | CP0_CERRD_FILLWB | CP0_CERRD_COHERENCY | CP0_CERRD_DUPTAG)
#define CP0_CERRD_TYPES \
(CP0_CERRD_TAG_STATE | CP0_CERRD_TAG_ADDRESS | CP0_CERRD_DATA_SBE | CP0_CERRD_DATA_DBE | CP0_CERRD_EXTERNAL)
-#define CP0_CERRD_DATA (CP0_CERRD_DATA_SBE | CP0_CERRD_DATA_DBE)
+#define CP0_CERRD_DATA (CP0_CERRD_DATA_SBE | CP0_CERRD_DATA_DBE)
-static uint32_t extract_ic(unsigned short addr, int data);
-static uint32_t extract_dc(unsigned short addr, int data);
+static uint32_t extract_ic(unsigned short addr, int data);
+static uint32_t extract_dc(unsigned short addr, int data);
static inline void breakout_errctl(unsigned int val)
{
@@ -209,11 +209,11 @@ asmlinkage void sb1_cache_error(void)
"=r" (dpahi), "=r" (dpalo), "=r" (eepc));
cerr_dpa = (((uint64_t)dpahi) << 32) | dpalo;
- printk(" c0_errorepc == %08x\n", eepc);
- printk(" c0_errctl == %08x", errctl);
+ printk(" c0_errorepc == %08x\n", eepc);
+ printk(" c0_errctl == %08x", errctl);
breakout_errctl(errctl);
if (errctl & CP0_ERRCTL_ICACHE) {
- printk(" c0_cerr_i == %08x", cerr_i);
+ printk(" c0_cerr_i == %08x", cerr_i);
breakout_cerri(cerr_i);
if (CP0_CERRI_IDX_VALID(cerr_i)) {
/* Check index of EPC, allowing for delay slot */
@@ -229,7 +229,7 @@ asmlinkage void sb1_cache_error(void)
}
}
if (errctl & CP0_ERRCTL_DCACHE) {
- printk(" c0_cerr_d == %08x", cerr_d);
+ printk(" c0_cerr_d == %08x", cerr_d);
breakout_cerrd(cerr_d);
if (CP0_CERRD_DPA_VALID(cerr_d)) {
printk(" c0_cerr_dpa == %010llx\n", cerr_dpa);
@@ -256,7 +256,7 @@ asmlinkage void sb1_cache_error(void)
/*
* Calling panic() when a fatal cache error occurs scrambles the
* state of the system (and the cache), making it difficult to
- * investigate after the fact. However, if you just stall the CPU,
+ * investigate after the fact. However, if you just stall the CPU,
* the other CPU may keep on running, which is typically very
* undesirable.
*/
@@ -411,7 +411,7 @@ static uint32_t extract_ic(unsigned short addr, int data)
" dmfc0 $1, $28, 1\n\t"
" dsrl32 %1, $1, 0 \n\t"
" sll %2, $1, 0 \n\t"
- " .set pop \n"
+ " .set pop \n"
: "=r" (datahi), "=r" (insta), "=r" (instb)
: "r" ((way << 13) | addr | (offset << 3)));
predecode = (datahi >> 8) & 0xff;
@@ -441,8 +441,8 @@ static uint8_t dc_ecc(uint64_t dword)
{
uint64_t t;
uint32_t w;
- uint8_t p;
- int i;
+ uint8_t p;
+ int i;
p = 0;
for (i = 7; i >= 0; i--)
diff --git a/arch/mips/mm/cex-gen.S b/arch/mips/mm/cex-gen.S
index e743622fd24d..45dff5cd4b8e 100644
--- a/arch/mips/mm/cex-gen.S
+++ b/arch/mips/mm/cex-gen.S
@@ -14,17 +14,17 @@
#include <asm/stackframe.h>
/*
- * Game over. Go to the button. Press gently. Swear where allowed by
+ * Game over. Go to the button. Press gently. Swear where allowed by
* legislation.
*/
LEAF(except_vec2_generic)
.set noreorder
.set noat
- .set mips0
+ .set mips0
/*
* This is a very bad place to be. Our cache error
* detection has triggered. If we have write-back data
- * in the cache, we may not be able to recover. As a
+ * in the cache, we may not be able to recover. As a
* first-order desperate measure, turn off KSEG0 cacheing.
*/
mfc0 k0,CP0_CONFIG
diff --git a/arch/mips/mm/cex-oct.S b/arch/mips/mm/cex-oct.S
index 3db8553fcd34..9029092aa740 100644
--- a/arch/mips/mm/cex-oct.S
+++ b/arch/mips/mm/cex-oct.S
@@ -18,7 +18,7 @@
*/
LEAF(except_vec2_octeon)
- .set push
+ .set push
.set mips64r2
.set noreorder
.set noat
@@ -27,19 +27,19 @@
/* due to an errata we need to read the COP0 CacheErr (Dcache)
* before any cache/DRAM access */
- rdhwr k0, $0 /* get core_id */
- PTR_LA k1, cache_err_dcache
- sll k0, k0, 3
+ rdhwr k0, $0 /* get core_id */
+ PTR_LA k1, cache_err_dcache
+ sll k0, k0, 3
PTR_ADDU k1, k0, k1 /* k1 = &cache_err_dcache[core_id] */
- dmfc0 k0, CP0_CACHEERR, 1
- sd k0, (k1)
- dmtc0 $0, CP0_CACHEERR, 1
+ dmfc0 k0, CP0_CACHEERR, 1
+ sd k0, (k1)
+ dmtc0 $0, CP0_CACHEERR, 1
- /* check whether this is a nested exception */
- mfc0 k1, CP0_STATUS
- andi k1, k1, ST0_EXL
- beqz k1, 1f
+ /* check whether this is a nested exception */
+ mfc0 k1, CP0_STATUS
+ andi k1, k1, ST0_EXL
+ beqz k1, 1f
nop
j cache_parity_error_octeon_non_recoverable
nop
@@ -48,22 +48,22 @@
1: j handle_cache_err
nop
- .set pop
+ .set pop
END(except_vec2_octeon)
/* We need to jump to handle_cache_err so that the previous handler
* can fit within 0x80 bytes. We also move from 0xFFFFFFFFAXXXXXXX
- * space (uncached) to the 0xFFFFFFFF8XXXXXXX space (cached). */
+ * space (uncached) to the 0xFFFFFFFF8XXXXXXX space (cached). */
LEAF(handle_cache_err)
- .set push
- .set noreorder
- .set noat
+ .set push
+ .set noreorder
+ .set noat
SAVE_ALL
KMODE
- jal cache_parity_error_octeon_recoverable
+ jal cache_parity_error_octeon_recoverable
nop
- j ret_from_exception
+ j ret_from_exception
nop
.set pop
diff --git a/arch/mips/mm/cex-sb1.S b/arch/mips/mm/cex-sb1.S
index 89c412bc4b64..fe1d887e8d70 100644
--- a/arch/mips/mm/cex-sb1.S
+++ b/arch/mips/mm/cex-sb1.S
@@ -24,9 +24,9 @@
#include <asm/cacheops.h>
#include <asm/sibyte/board.h>
-#define C0_ERRCTL $26 /* CP0: Error info */
-#define C0_CERR_I $27 /* CP0: Icache error */
-#define C0_CERR_D $27,1 /* CP0: Dcache error */
+#define C0_ERRCTL $26 /* CP0: Error info */
+#define C0_CERR_I $27 /* CP0: Icache error */
+#define C0_CERR_D $27,1 /* CP0: Dcache error */
/*
* Based on SiByte sample software cache-err/cerr.S
@@ -88,7 +88,7 @@ attempt_recovery:
/*
* k0 has C0_ERRCTL << 1, which puts 'DC' at bit 31. Any
* Dcache errors we can recover from will take more extensive
- * processing. For now, they are considered "unrecoverable".
+ * processing. For now, they are considered "unrecoverable".
* Note that 'DC' becoming set (outside of ERL mode) will
* cause 'IC' to clear; so if there's an Icache error, we'll
* only find out about it if we recover from this error and
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 3fab2046c8a4..caf92ecb37d6 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -4,7 +4,7 @@
* for more details.
*
* Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
- * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org>
+ * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org>
* swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
*/
@@ -22,6 +22,26 @@
#include <dma-coherence.h>
+int coherentio = 0; /* User defined DMA coherency from command line. */
+EXPORT_SYMBOL_GPL(coherentio);
+int hw_coherentio = 0; /* Actual hardware supported DMA coherency setting. */
+
+static int __init setcoherentio(char *str)
+{
+ coherentio = 1;
+ pr_info("Hardware DMA cache coherency (command line)\n");
+ return 0;
+}
+early_param("coherentio", setcoherentio);
+
+static int __init setnocoherentio(char *str)
+{
+ coherentio = 0;
+ pr_info("Software DMA cache coherency (command line)\n");
+ return 0;
+}
+early_param("nocoherentio", setnocoherentio);
+
static inline struct page *dma_addr_to_page(struct device *dev,
dma_addr_t dma_addr)
{
@@ -115,7 +135,8 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
if (!plat_device_is_coherent(dev)) {
dma_cache_wback_inv((unsigned long) ret, size);
- ret = UNCAC_ADDR(ret);
+ if (!hw_coherentio)
+ ret = UNCAC_ADDR(ret);
}
}
@@ -142,7 +163,7 @@ static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
- if (!plat_device_is_coherent(dev))
+ if (!plat_device_is_coherent(dev) && !hw_coherentio)
addr = CAC_ADDR(addr);
free_pages(addr, get_order(size));
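
The dma-default.c hunks take ownership of the coherentio knob from c-r4k.c, add a hw_coherentio flag for what the hardware actually supports, and skip the UNCAC_ADDR remapping of coherent allocations when the hardware is coherent. The command-line side uses the kernel's standard early_param hook; the general shape of that pattern, with a hypothetical parameter name, is:

#include <linux/init.h>

/* Hedged sketch of the early_param pattern used above;
 * "myfeature" and the flag are hypothetical. */
static int myfeature_enabled;

static int __init set_myfeature(char *str)
{
	/* str holds any text after "myfeature=" on the command line */
	myfeature_enabled = 1;
	return 0;	/* 0 tells the parser the option was consumed */
}
early_param("myfeature", set_myfeature);
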
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index ddcec1e1a0cd..0fead53d1c26 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -52,7 +52,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long writ
#ifdef CONFIG_KPROBES
/*
- * This is to notify the fault handler of the kprobes. The
+ * This is to notify the fault handler of the kprobes. The
* exception code is redundant as it is also carried in REGS,
* but we pass it anyhow.
*/
@@ -216,7 +216,7 @@ bad_area_nosemaphore:
}
no_context:
- /* Are we prepared to handle this kernel fault? */
+ /* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs)) {
current->thread.cp0_baduaddr = address;
return;
diff --git a/arch/mips/mm/gup.c b/arch/mips/mm/gup.c
index dcfd573871c1..d4ea5c9c4a93 100644
--- a/arch/mips/mm/gup.c
+++ b/arch/mips/mm/gup.c
@@ -249,7 +249,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
* @nr_pages: number of pages from start to pin
* @write: whether pages will be written to
* @pages: array that receives pointers to the pages pinned.
- * Should be at least nr_pages long.
+ * Should be at least nr_pages long.
*
* Attempt to pin user pages in memory without taking mm->mmap_sem.
* If not successful, it will fall back to taking the lock and
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index be9acb2b959d..9b973e0af9cb 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -29,6 +29,7 @@
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
+#include <linux/kcore.h>
#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
@@ -66,7 +67,7 @@
/*
* We have up to 8 empty zeroed pages so we can map one of the right colour
- * when needed. This is necessary only on R4000 / R4400 SC and MC versions
+ * when needed. This is necessary only on R4000 / R4400 SC and MC versions
* where we have to avoid VCED / VECI exceptions for good performance at
* any price. Since page is never written to after the initialization we
* don't have to care about aliases on other CPUs.
@@ -77,10 +78,9 @@ EXPORT_SYMBOL_GPL(empty_zero_page);
/*
* Not static inline because used by IP27 special magic initialization code
*/
-unsigned long setup_zero_pages(void)
+void setup_zero_pages(void)
{
- unsigned int order;
- unsigned long size;
+ unsigned int order, i;
struct page *page;
if (cpu_has_vce)
@@ -94,15 +94,10 @@ unsigned long setup_zero_pages(void)
page = virt_to_page((void *)empty_zero_page);
split_page(page, order);
- while (page < virt_to_page((void *)(empty_zero_page + (PAGE_SIZE << order)))) {
- SetPageReserved(page);
- page++;
- }
-
- size = PAGE_SIZE << order;
- zero_page_mask = (size - 1) & PAGE_MASK;
+ for (i = 0; i < (1 << order); i++, page++)
+ mark_page_reserved(page);
- return 1UL << order;
+ zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}
#ifdef CONFIG_MIPS_MT_SMTC
@@ -380,7 +375,7 @@ void __init mem_init(void)
high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
totalram_pages += free_all_bootmem();
- totalram_pages -= setup_zero_pages(); /* Setup zeroed pages. */
+ setup_zero_pages(); /* Setup zeroed pages. */
reservedpages = ram = 0;
for (tmp = 0; tmp < max_low_pfn; tmp++)
@@ -399,12 +394,8 @@ void __init mem_init(void)
SetPageReserved(page);
continue;
}
- ClearPageReserved(page);
- init_page_count(page);
- __free_page(page);
- totalhigh_pages++;
+ free_highmem_page(page);
}
- totalram_pages += totalhigh_pages;
num_physpages += totalhigh_pages;
#endif
@@ -440,11 +431,8 @@ void free_init_pages(const char *what, unsigned long begin, unsigned long end)
struct page *page = pfn_to_page(pfn);
void *addr = phys_to_virt(PFN_PHYS(pfn));
- ClearPageReserved(page);
- init_page_count(page);
memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
- __free_page(page);
- totalram_pages++;
+ free_reserved_page(page);
}
printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}
@@ -452,18 +440,14 @@ void free_init_pages(const char *what, unsigned long begin, unsigned long end)
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
- free_init_pages("initrd memory",
- virt_to_phys((void *)start),
- virt_to_phys((void *)end));
+ free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd");
}
#endif
void __init_refok free_initmem(void)
{
prom_free_prom_memory();
- free_init_pages("unused kernel memory",
- __pa_symbol(&__init_begin),
- __pa_symbol(&__init_end));
+ free_initmem_default(POISON_FREE_INITMEM);
}
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c
index cacfd31e8ec9..7f840bc08abf 100644
--- a/arch/mips/mm/ioremap.c
+++ b/arch/mips/mm/ioremap.c
@@ -22,7 +22,7 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address,
phys_t end;
unsigned long pfn;
pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | __READABLE
- | __WRITEABLE | flags);
+ | __WRITEABLE | flags);
address &= ~PMD_MASK;
end = address + size;
@@ -185,7 +185,7 @@ void __iounmap(const volatile void __iomem *addr)
if (!p)
printk(KERN_ERR "iounmap: bad address %p\n", addr);
- kfree(p);
+ kfree(p);
}
EXPORT_SYMBOL(__ioremap);
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index 8e666c55f4d4..4eb8dcfaf1ce 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -247,6 +247,11 @@ void __cpuinit build_clear_page(void)
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
int i;
+ static atomic_t run_once = ATOMIC_INIT(0);
+
+ if (atomic_xchg(&run_once, 1)) {
+ return;
+ }
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
@@ -271,7 +276,7 @@ void __cpuinit build_clear_page(void)
uasm_i_lui(&buf, AT, 0xa000);
off = cache_line_size ? min(8, pref_bias_clear_store / cache_line_size)
- * cache_line_size : 0;
+ * cache_line_size : 0;
while (off) {
build_clear_pref(&buf, -off);
off -= cache_line_size;
@@ -389,6 +394,11 @@ void __cpuinit build_copy_page(void)
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
int i;
+ static atomic_t run_once = ATOMIC_INIT(0);
+
+ if (atomic_xchg(&run_once, 1)) {
+ return;
+ }
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
@@ -417,13 +427,13 @@ void __cpuinit build_copy_page(void)
uasm_i_lui(&buf, AT, 0xa000);
off = cache_line_size ? min(8, pref_bias_copy_load / cache_line_size) *
- cache_line_size : 0;
+ cache_line_size : 0;
while (off) {
build_copy_load_pref(&buf, -off);
off -= cache_line_size;
}
off = cache_line_size ? min(8, pref_bias_copy_store / cache_line_size) *
- cache_line_size : 0;
+ cache_line_size : 0;
while (off) {
build_copy_store_pref(&buf, -off);
off -= cache_line_size;
diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c
index ee331bbd8f8a..e8adc0069d66 100644
--- a/arch/mips/mm/pgtable-64.c
+++ b/arch/mips/mm/pgtable-64.c
@@ -24,7 +24,7 @@ void pgd_init(unsigned long page)
entry = (unsigned long)invalid_pmd_table;
#endif
- p = (unsigned long *) page;
+ p = (unsigned long *) page;
end = p + PTRS_PER_PGD;
do {
@@ -45,7 +45,7 @@ void pmd_init(unsigned long addr, unsigned long pagetable)
{
unsigned long *p, *end;
- p = (unsigned long *) addr;
+ p = (unsigned long *) addr;
end = p + PTRS_PER_PMD;
do {
diff --git a/arch/mips/mm/sc-ip22.c b/arch/mips/mm/sc-ip22.c
index 1eb708ef75ff..c6aaed934d53 100644
--- a/arch/mips/mm/sc-ip22.c
+++ b/arch/mips/mm/sc-ip22.c
@@ -159,7 +159,7 @@ static inline int __init indy_sc_probe(void)
}
/* XXX Check with wje if the Indy caches can differenciate between
- writeback + invalidate and just invalidate. */
+ writeback + invalidate and just invalidate. */
static struct bcache_ops indy_sc_ops = {
.bc_enable = indy_sc_enable,
.bc_disable = indy_sc_disable,
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index 93d937b4b1ba..df96da7e939b 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -98,10 +98,8 @@ static inline int __init mips_sc_probe(void)
c->scache.flags |= MIPS_CACHE_NOT_PRESENT;
/* Ignore anything but MIPSxx processors */
- if (c->isa_level != MIPS_CPU_ISA_M32R1 &&
- c->isa_level != MIPS_CPU_ISA_M32R2 &&
- c->isa_level != MIPS_CPU_ISA_M64R1 &&
- c->isa_level != MIPS_CPU_ISA_M64R2)
+ if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
+ MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)))
return 0;
/* Does this MIPS32/MIPS64 CPU have a config2 register? */
diff --git a/arch/mips/mm/sc-r5k.c b/arch/mips/mm/sc-r5k.c
index 8d90ff25b123..8bc67720e145 100644
--- a/arch/mips/mm/sc-r5k.c
+++ b/arch/mips/mm/sc-r5k.c
@@ -58,7 +58,7 @@ static void r5k_dma_cache_inv_sc(unsigned long addr, unsigned long size)
static void r5k_sc_enable(void)
{
- unsigned long flags;
+ unsigned long flags;
local_irq_save(flags);
set_c0_config(R5K_CONF_SE);
@@ -68,7 +68,7 @@ static void r5k_sc_enable(void)
static void r5k_sc_disable(void)
{
- unsigned long flags;
+ unsigned long flags;
local_irq_save(flags);
blast_r5000_scache();
diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c
index a63d1ed0827f..4a13c150f31b 100644
--- a/arch/mips/mm/tlb-r3k.c
+++ b/arch/mips/mm/tlb-r3k.c
@@ -51,7 +51,7 @@ void local_flush_tlb_all(void)
#endif
local_irq_save(flags);
- old_ctx = read_c0_entryhi() & ASID_MASK;
+ old_ctx = ASID_MASK(read_c0_entryhi());
write_c0_entrylo0(0);
entry = r3k_have_wired_reg ? read_c0_wired() : 8;
for (; entry < current_cpu_data.tlbsize; entry++) {
@@ -87,13 +87,13 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
#ifdef DEBUG_TLB
printk("[tlbrange<%lu,0x%08lx,0x%08lx>]",
- cpu_context(cpu, mm) & ASID_MASK, start, end);
+ ASID_MASK(cpu_context(cpu, mm)), start, end);
#endif
local_irq_save(flags);
size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
if (size <= current_cpu_data.tlbsize) {
- int oldpid = read_c0_entryhi() & ASID_MASK;
- int newpid = cpu_context(cpu, mm) & ASID_MASK;
+ int oldpid = ASID_MASK(read_c0_entryhi());
+ int newpid = ASID_MASK(cpu_context(cpu, mm));
start &= PAGE_MASK;
end += PAGE_SIZE - 1;
@@ -166,10 +166,10 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
#ifdef DEBUG_TLB
printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);
#endif
- newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK;
+ newpid = ASID_MASK(cpu_context(cpu, vma->vm_mm));
page &= PAGE_MASK;
local_irq_save(flags);
- oldpid = read_c0_entryhi() & ASID_MASK;
+ oldpid = ASID_MASK(read_c0_entryhi());
write_c0_entryhi(page | newpid);
BARRIER;
tlb_probe();
@@ -197,10 +197,10 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
if (current->active_mm != vma->vm_mm)
return;
- pid = read_c0_entryhi() & ASID_MASK;
+ pid = ASID_MASK(read_c0_entryhi());
#ifdef DEBUG_TLB
- if ((pid != (cpu_context(cpu, vma->vm_mm) & ASID_MASK)) || (cpu_context(cpu, vma->vm_mm) == 0)) {
+ if ((pid != ASID_MASK(cpu_context(cpu, vma->vm_mm))) || (cpu_context(cpu, vma->vm_mm) == 0)) {
printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n",
(cpu_context(cpu, vma->vm_mm)), pid);
}
@@ -241,7 +241,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
local_irq_save(flags);
/* Save old context and create impossible VPN2 value */
- old_ctx = read_c0_entryhi() & ASID_MASK;
+ old_ctx = ASID_MASK(read_c0_entryhi());
old_pagemask = read_c0_pagemask();
w = read_c0_wired();
write_c0_wired(w + 1);
@@ -264,7 +264,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
#endif
local_irq_save(flags);
- old_ctx = read_c0_entryhi() & ASID_MASK;
+ old_ctx = ASID_MASK(read_c0_entryhi());
write_c0_entrylo0(entrylo0);
write_c0_entryhi(entryhi);
write_c0_index(wired);
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 2a7c9725b2a3..09653b290d53 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -13,6 +13,7 @@
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
+#include <linux/module.h>
#include <asm/cpu.h>
#include <asm/bootinfo.h>
@@ -94,6 +95,7 @@ void local_flush_tlb_all(void)
FLUSH_ITLB;
EXIT_CRITICAL(flags);
}
+EXPORT_SYMBOL(local_flush_tlb_all);
/* All entries common to a mm share an asid. To effectively flush
these entries, we just bump the asid. */
@@ -285,7 +287,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
ENTER_CRITICAL(flags);
- pid = read_c0_entryhi() & ASID_MASK;
+ pid = ASID_MASK(read_c0_entryhi());
address &= (PAGE_MASK << 1);
write_c0_entryhi(address | pid);
pgdp = pgd_offset(vma->vm_mm, address);
@@ -424,7 +426,7 @@ void __cpuinit tlb_init(void)
write_c0_pagegrain(pg);
}
- /* From this point on the ARC firmware is dead. */
+ /* From this point on the ARC firmware is dead. */
local_flush_tlb_all();
/* Did I tell you that ARC SUCKS? */
diff --git a/arch/mips/mm/tlb-r8k.c b/arch/mips/mm/tlb-r8k.c
index 91c2499f806a..122f9207f49e 100644
--- a/arch/mips/mm/tlb-r8k.c
+++ b/arch/mips/mm/tlb-r8k.c
@@ -195,7 +195,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
if (current->active_mm != vma->vm_mm)
return;
- pid = read_c0_entryhi() & ASID_MASK;
+ pid = ASID_MASK(read_c0_entryhi());
local_irq_save(flags);
address &= PAGE_MASK;
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 1c8ac49ec72c..4d46d3787576 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -5,8 +5,8 @@
*
* Synthesize TLB refill handlers at runtime.
*
- * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
- * Copyright (C) 2005, 2007, 2008, 2009 Maciej W. Rozycki
+ * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
+ * Copyright (C) 2005, 2007, 2008, 2009 Maciej W. Rozycki
* Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
* Copyright (C) 2008, 2009 Cavium Networks, Inc.
* Copyright (C) 2011 MIPS Technologies, Inc.
@@ -29,6 +29,7 @@
#include <linux/init.h>
#include <linux/cache.h>
+#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <asm/war.h>
@@ -212,7 +213,7 @@ static void __cpuinit uasm_bgezl_label(struct uasm_label **l,
/*
* pgtable bits are assigned dynamically depending on processor feature
* and statically based on kernel configuration. This spits out the actual
- * values the kernel is using. Required to make sense from disassembled
+ * values the kernel is using. Required to make sense from disassembled
* TLB exception handlers.
*/
static void output_pgtable_bits_defines(void)
@@ -305,6 +306,78 @@ static struct uasm_reloc relocs[128] __cpuinitdata;
static int check_for_high_segbits __cpuinitdata;
#endif
+static void __cpuinit insn_fixup(unsigned int **start, unsigned int **stop,
+ unsigned int i_const)
+{
+ unsigned int **p;
+
+ for (p = start; p < stop; p++) {
+#ifndef CONFIG_CPU_MICROMIPS
+ unsigned int *ip;
+
+ ip = *p;
+ *ip = (*ip & 0xffff0000) | i_const;
+#else
+ unsigned short *ip;
+
+ ip = ((unsigned short *)((unsigned int)*p - 1));
+ if ((*ip & 0xf000) == 0x4000) {
+ *ip &= 0xfff1;
+ *ip |= (i_const << 1);
+ } else if ((*ip & 0xf000) == 0x6000) {
+ *ip &= 0xfff1;
+ *ip |= ((i_const >> 2) << 1);
+ } else {
+ ip++;
+ *ip = i_const;
+ }
+#endif
+ local_flush_icache_range((unsigned long)ip,
+ (unsigned long)ip + sizeof(*ip));
+ }
+}
+
+#define asid_insn_fixup(section, const) \
+do { \
+ extern unsigned int *__start_ ## section; \
+ extern unsigned int *__stop_ ## section; \
+ insn_fixup(&__start_ ## section, &__stop_ ## section, const); \
+} while(0)
+
+/*
+ * Caller is assumed to flush the caches before the first context switch.
+ */
+static void __cpuinit setup_asid(unsigned int inc, unsigned int mask,
+ unsigned int version_mask,
+ unsigned int first_version)
+{
+ extern asmlinkage void handle_ri_rdhwr_vivt(void);
+ unsigned long *vivt_exc;
+
+#ifdef CONFIG_CPU_MICROMIPS
+ /*
+ * Worst case optimised microMIPS addiu instructions support
+ * only a 3-bit immediate value.
+ */
+ if(inc > 7)
+ panic("Invalid ASID increment value!");
+#endif
+ asid_insn_fixup(__asid_inc, inc);
+ asid_insn_fixup(__asid_mask, mask);
+ asid_insn_fixup(__asid_version_mask, version_mask);
+ asid_insn_fixup(__asid_first_version, first_version);
+
+ /* Patch up the 'handle_ri_rdhwr_vivt' handler. */
+ vivt_exc = (unsigned long *) &handle_ri_rdhwr_vivt;
+#ifdef CONFIG_CPU_MICROMIPS
+ vivt_exc = (unsigned long *)((unsigned long) vivt_exc - 1);
+#endif
+ vivt_exc++;
+ *vivt_exc = (*vivt_exc & ~mask) | mask;
+
+ current_cpu_data.asid_cache = first_version;
+}
+
static int check_for_high_segbits __cpuinitdata;
static unsigned int kscratch_used_mask __cpuinitdata;
@@ -464,8 +537,8 @@ static u32 final_handler[64] __cpuinitdata;
* From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
* 2. A timing hazard exists for the TLBP instruction.
*
- * stalling_instruction
- * TLBP
+ * stalling_instruction
+ * TLBP
*
* The JTLB is being read for the TLBP throughout the stall generated by the
* previous instruction. This is not really correct as the stalling instruction
@@ -476,7 +549,7 @@ static u32 final_handler[64] __cpuinitdata;
* The software work-around is to not allow the instruction preceding the TLBP
* to stall - make it an NOP or some other instruction guaranteed not to stall.
*
- * Errata 2 will not be fixed. This errata is also on the R5000.
+ * Errata 2 will not be fixed. This errata is also on the R5000.
*
* As if we MIPS hackers wouldn't know how to nop pipelines happy ...
*/
@@ -581,6 +654,7 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
case CPU_4KC:
case CPU_4KEC:
case CPU_M14KC:
+ case CPU_M14KEC:
case CPU_SB1:
case CPU_SB1A:
case CPU_4KSC:
@@ -748,7 +822,7 @@ static __cpuinit void build_huge_update_entries(u32 **p,
*/
small_sequence = (HPAGE_SIZE >> 7) < 0x10000;
- /* We can clobber tmp. It isn't used after this.*/
+ /* We can clobber tmp. It isn't used after this.*/
if (!small_sequence)
uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));
@@ -830,12 +904,12 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
/* Clear lower 23 bits of context. */
uasm_i_dins(p, ptr, 0, 0, 23);
- /* 1 0 1 0 1 << 6 xkphys cached */
+ /* 1 0 1 0 1 << 6 xkphys cached */
uasm_i_ori(p, ptr, ptr, 0x540);
uasm_i_drotr(p, ptr, ptr, 11);
}
#elif defined(CONFIG_SMP)
-# ifdef CONFIG_MIPS_MT_SMTC
+# ifdef CONFIG_MIPS_MT_SMTC
/*
* SMTC uses TCBind value as "CPU" index
*/
@@ -955,7 +1029,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
/* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
#ifdef CONFIG_SMP
-#ifdef CONFIG_MIPS_MT_SMTC
+#ifdef CONFIG_MIPS_MT_SMTC
/*
* SMTC uses TCBind value as "CPU" index
*/
@@ -965,7 +1039,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
#else
/*
* smp_processor_id() << 3 is stored in CONTEXT.
- */
+ */
uasm_i_mfc0(p, ptr, C0_CONTEXT);
UASM_i_LA_mostly(p, tmp, pgdc);
uasm_i_srl(p, ptr, ptr, 23);
@@ -1153,7 +1227,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
if (pgd_reg == -1) {
vmalloc_branch_delay_filled = 1;
- /* 1 0 1 0 1 << 6 xkphys cached */
+ /* 1 0 1 0 1 << 6 xkphys cached */
uasm_i_ori(p, ptr, ptr, 0x540);
uasm_i_drotr(p, ptr, ptr, 11);
}
@@ -1171,9 +1245,9 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
uasm_l_vmalloc_done(l, *p);
/*
- * tmp ptr
- * fall-through case = badvaddr *pgd_current
- * vmalloc case = badvaddr swapper_pg_dir
+ * tmp ptr
+ * fall-through case = badvaddr *pgd_current
+ * vmalloc case = badvaddr swapper_pg_dir
*/
if (vmalloc_branch_delay_filled)
@@ -1212,7 +1286,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update);
/*
* The in the LWX case we don't want to do the load in the
- * delay slot. It cannot issue in the same cycle and may be
+ * delay slot. It cannot issue in the same cycle and may be
* speculative and unneeded.
*/
if (use_lwx_insns())
@@ -1457,17 +1531,17 @@ u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
-u32 tlbmiss_handler_setup_pgd[16] __cacheline_aligned;
+u32 tlbmiss_handler_setup_pgd_array[16] __cacheline_aligned;
static void __cpuinit build_r4000_setup_pgd(void)
{
const int a0 = 4;
const int a1 = 5;
- u32 *p = tlbmiss_handler_setup_pgd;
+ u32 *p = tlbmiss_handler_setup_pgd_array;
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
- memset(tlbmiss_handler_setup_pgd, 0, sizeof(tlbmiss_handler_setup_pgd));
+ memset(tlbmiss_handler_setup_pgd_array, 0, sizeof(tlbmiss_handler_setup_pgd_array));
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
@@ -1495,15 +1569,15 @@ static void __cpuinit build_r4000_setup_pgd(void)
uasm_i_jr(&p, 31);
UASM_i_MTC0(&p, a0, 31, pgd_reg);
}
- if (p - tlbmiss_handler_setup_pgd > ARRAY_SIZE(tlbmiss_handler_setup_pgd))
- panic("tlbmiss_handler_setup_pgd space exceeded");
+ if (p - tlbmiss_handler_setup_pgd_array > ARRAY_SIZE(tlbmiss_handler_setup_pgd_array))
+ panic("tlbmiss_handler_setup_pgd_array space exceeded");
uasm_resolve_relocs(relocs, labels);
- pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n",
- (unsigned int)(p - tlbmiss_handler_setup_pgd));
+ pr_debug("Wrote tlbmiss_handler_setup_pgd_array (%u instructions).\n",
+ (unsigned int)(p - tlbmiss_handler_setup_pgd_array));
dump_handler("tlbmiss_handler",
- tlbmiss_handler_setup_pgd,
- ARRAY_SIZE(tlbmiss_handler_setup_pgd));
+ tlbmiss_handler_setup_pgd_array,
+ ARRAY_SIZE(tlbmiss_handler_setup_pgd_array));
}
#endif
@@ -2029,6 +2103,13 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
uasm_l_nopage_tlbl(&l, p);
build_restore_work_registers(&p);
+#ifdef CONFIG_CPU_MICROMIPS
+ if ((unsigned long)tlb_do_page_fault_0 & 1) {
+ uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_0));
+ uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_0));
+ uasm_i_jr(&p, K0);
+ } else
+#endif
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
uasm_i_nop(&p);
@@ -2076,6 +2157,13 @@ static void __cpuinit build_r4000_tlb_store_handler(void)
uasm_l_nopage_tlbs(&l, p);
build_restore_work_registers(&p);
+#ifdef CONFIG_CPU_MICROMIPS
+ if ((unsigned long)tlb_do_page_fault_1 & 1) {
+ uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
+ uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
+ uasm_i_jr(&p, K0);
+ } else
+#endif
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
uasm_i_nop(&p);
@@ -2124,6 +2212,13 @@ static void __cpuinit build_r4000_tlb_modify_handler(void)
uasm_l_nopage_tlbm(&l, p);
build_restore_work_registers(&p);
+#ifdef CONFIG_CPU_MICROMIPS
+ if ((unsigned long)tlb_do_page_fault_1 & 1) {
+ uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
+ uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
+ uasm_i_jr(&p, K0);
+ } else
+#endif
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
uasm_i_nop(&p);
@@ -2161,8 +2256,12 @@ void __cpuinit build_tlb_refill_handler(void)
case CPU_TX3922:
case CPU_TX3927:
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
- build_r3000_tlb_refill_handler();
+ setup_asid(0x40, 0xfc0, 0xf000, ASID_FIRST_VERSION_R3000);
+ if (cpu_has_local_ebase)
+ build_r3000_tlb_refill_handler();
if (!run_once) {
+ if (!cpu_has_local_ebase)
+ build_r3000_tlb_refill_handler();
build_r3000_tlb_load_handler();
build_r3000_tlb_store_handler();
build_r3000_tlb_modify_handler();
@@ -2183,6 +2282,11 @@ void __cpuinit build_tlb_refill_handler(void)
break;
default:
+#ifndef CONFIG_MIPS_MT_SMTC
+ setup_asid(0x1, 0xff, 0xff00, ASID_FIRST_VERSION_R4000);
+#else
+ setup_asid(0x1, smtc_asid_mask, 0xff00, ASID_FIRST_VERSION_R4000);
+#endif
if (!run_once) {
scratch_reg = allocate_kscratch();
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
@@ -2191,9 +2295,12 @@ void __cpuinit build_tlb_refill_handler(void)
build_r4000_tlb_load_handler();
build_r4000_tlb_store_handler();
build_r4000_tlb_modify_handler();
+ if (!cpu_has_local_ebase)
+ build_r4000_tlb_refill_handler();
run_once++;
}
- build_r4000_tlb_refill_handler();
+ if (cpu_has_local_ebase)
+ build_r4000_tlb_refill_handler();
}
}
@@ -2206,7 +2313,7 @@ void __cpuinit flush_tlb_handlers(void)
local_flush_icache_range((unsigned long)handle_tlbm,
(unsigned long)handle_tlbm + sizeof(handle_tlbm));
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
- local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd,
- (unsigned long)tlbmiss_handler_setup_pgd + sizeof(handle_tlbm));
+ local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd_array,
+ (unsigned long)tlbmiss_handler_setup_pgd_array + sizeof(handle_tlbm));
#endif
}
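
Two threads run through the tlbex.c hunks: handler setup becomes per-CPU (setup_asid() patches the ASID constants in place, and cpu_has_local_ebase decides whether each core builds its own refill handler), and the microMIPS fault fallthroughs jump to the C handlers via a register because the target address has bit 0 set (the ISA mode bit) and may not be reachable with a plain j. A hedged sketch of that uasm trampoline, mirroring the three hunks above (buf and handler are hypothetical):

static u32 *emit_jump(u32 *p, unsigned long handler)
{
	if (handler & 1) {		/* bit 0 set: microMIPS target */
		uasm_i_lui(&p, K0, uasm_rel_hi((long)handler));
		uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)handler));
		uasm_i_jr(&p, K0);	/* jr switches ISA mode via bit 0 */
	} else {
		/* j only reaches within the current 256 MB segment */
		uasm_i_j(&p, handler & 0x0fffffff);
	}
	uasm_i_nop(&p);			/* fill the branch delay slot */
	return p;
}
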
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
new file mode 100644
index 000000000000..162ee6d62788
--- /dev/null
+++ b/arch/mips/mm/uasm-micromips.c
@@ -0,0 +1,221 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * A small micro-assembler. It is intentionally kept simple, does only
+ * support a subset of instructions, and does not try to hide pipeline
+ * effects like branch delay slots.
+ *
+ * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
+ * Copyright (C) 2005, 2007 Maciej W. Rozycki
+ * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+
+#include <asm/inst.h>
+#include <asm/elf.h>
+#include <asm/bugs.h>
+#define UASM_ISA _UASM_ISA_MICROMIPS
+#include <asm/uasm.h>
+
+#define RS_MASK 0x1f
+#define RS_SH 16
+#define RT_MASK 0x1f
+#define RT_SH 21
+#define SCIMM_MASK 0x3ff
+#define SCIMM_SH 16
+
+/* This macro sets the non-variable bits of an instruction. */
+#define M(a, b, c, d, e, f) \
+ ((a) << OP_SH \
+ | (b) << RT_SH \
+ | (c) << RS_SH \
+ | (d) << RD_SH \
+ | (e) << RE_SH \
+ | (f) << FUNC_SH)
+
+/* Define these when we are not the ISA the kernel is being compiled with. */
+#ifndef CONFIG_CPU_MICROMIPS
+#define MM_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off)
+#define MM_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off)
+#define MM_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off)
+#define MM_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off)
+#endif
+
+#include "uasm.c"
+
+static struct insn insn_table_MM[] __uasminitdata = {
+ { insn_addu, M(mm_pool32a_op, 0, 0, 0, 0, mm_addu32_op), RT | RS | RD },
+ { insn_addiu, M(mm_addiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
+ { insn_and, M(mm_pool32a_op, 0, 0, 0, 0, mm_and_op), RT | RS | RD },
+ { insn_andi, M(mm_andi32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
+ { insn_beq, M(mm_beq32_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+ { insn_beql, 0, 0 },
+ { insn_bgez, M(mm_pool32i_op, mm_bgez_op, 0, 0, 0, 0), RS | BIMM },
+ { insn_bgezl, 0, 0 },
+ { insn_bltz, M(mm_pool32i_op, mm_bltz_op, 0, 0, 0, 0), RS | BIMM },
+ { insn_bltzl, 0, 0 },
+ { insn_bne, M(mm_bne32_op, 0, 0, 0, 0, 0), RT | RS | BIMM },
+ { insn_cache, M(mm_pool32b_op, 0, 0, mm_cache_func, 0, 0), RT | RS | SIMM },
+ { insn_daddu, 0, 0 },
+ { insn_daddiu, 0, 0 },
+ { insn_dmfc0, 0, 0 },
+ { insn_dmtc0, 0, 0 },
+ { insn_dsll, 0, 0 },
+ { insn_dsll32, 0, 0 },
+ { insn_dsra, 0, 0 },
+ { insn_dsrl, 0, 0 },
+ { insn_dsrl32, 0, 0 },
+ { insn_drotr, 0, 0 },
+ { insn_drotr32, 0, 0 },
+ { insn_dsubu, 0, 0 },
+ { insn_eret, M(mm_pool32a_op, 0, 0, 0, mm_eret_op, mm_pool32axf_op), 0 },
+ { insn_ins, M(mm_pool32a_op, 0, 0, 0, 0, mm_ins_op), RT | RS | RD | RE },
+ { insn_ext, M(mm_pool32a_op, 0, 0, 0, 0, mm_ext_op), RT | RS | RD | RE },
+ { insn_j, M(mm_j32_op, 0, 0, 0, 0, 0), JIMM },
+ { insn_jal, M(mm_jal32_op, 0, 0, 0, 0, 0), JIMM },
+ { insn_jr, M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RS },
+ { insn_ld, 0, 0 },
+ { insn_ll, M(mm_pool32c_op, 0, 0, (mm_ll_func << 1), 0, 0), RS | RT | SIMM },
+ { insn_lld, 0, 0 },
+ { insn_lui, M(mm_pool32i_op, mm_lui_op, 0, 0, 0, 0), RS | SIMM },
+ { insn_lw, M(mm_lw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
+ { insn_mfc0, M(mm_pool32a_op, 0, 0, 0, mm_mfc0_op, mm_pool32axf_op), RT | RS | RD },
+ { insn_mtc0, M(mm_pool32a_op, 0, 0, 0, mm_mtc0_op, mm_pool32axf_op), RT | RS | RD },
+ { insn_or, M(mm_pool32a_op, 0, 0, 0, 0, mm_or32_op), RT | RS | RD },
+ { insn_ori, M(mm_ori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
+ { insn_pref, M(mm_pool32c_op, 0, 0, (mm_pref_func << 1), 0, 0), RT | RS | SIMM },
+ { insn_rfe, 0, 0 },
+ { insn_sc, M(mm_pool32c_op, 0, 0, (mm_sc_func << 1), 0, 0), RT | RS | SIMM },
+ { insn_scd, 0, 0 },
+ { insn_sd, 0, 0 },
+ { insn_sll, M(mm_pool32a_op, 0, 0, 0, 0, mm_sll32_op), RT | RS | RD },
+ { insn_sra, M(mm_pool32a_op, 0, 0, 0, 0, mm_sra_op), RT | RS | RD },
+ { insn_srl, M(mm_pool32a_op, 0, 0, 0, 0, mm_srl32_op), RT | RS | RD },
+ { insn_rotr, M(mm_pool32a_op, 0, 0, 0, 0, mm_rotr_op), RT | RS | RD },
+ { insn_subu, M(mm_pool32a_op, 0, 0, 0, 0, mm_subu32_op), RT | RS | RD },
+ { insn_sw, M(mm_sw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
+ { insn_tlbp, M(mm_pool32a_op, 0, 0, 0, mm_tlbp_op, mm_pool32axf_op), 0 },
+ { insn_tlbr, M(mm_pool32a_op, 0, 0, 0, mm_tlbr_op, mm_pool32axf_op), 0 },
+ { insn_tlbwi, M(mm_pool32a_op, 0, 0, 0, mm_tlbwi_op, mm_pool32axf_op), 0 },
+ { insn_tlbwr, M(mm_pool32a_op, 0, 0, 0, mm_tlbwr_op, mm_pool32axf_op), 0 },
+ { insn_xor, M(mm_pool32a_op, 0, 0, 0, 0, mm_xor32_op), RT | RS | RD },
+ { insn_xori, M(mm_xori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
+ { insn_dins, 0, 0 },
+ { insn_dinsm, 0, 0 },
+ { insn_syscall, M(mm_pool32a_op, 0, 0, 0, mm_syscall_op, mm_pool32axf_op), SCIMM},
+ { insn_bbit0, 0, 0 },
+ { insn_bbit1, 0, 0 },
+ { insn_lwx, 0, 0 },
+ { insn_ldx, 0, 0 },
+ { insn_invalid, 0, 0 }
+};
+
+#undef M
+
+static inline __uasminit u32 build_bimm(s32 arg)
+{
+ WARN(arg > 0xffff || arg < -0x10000,
+ KERN_WARNING "Micro-assembler field overflow\n");
+
+ WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n");
+
+ return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 1) & 0x7fff);
+}
+
+static inline __uasminit u32 build_jimm(u32 arg)
+{
+
+ WARN(arg & ~((JIMM_MASK << 2) | 1),
+ KERN_WARNING "Micro-assembler field overflow\n");
+
+ return (arg >> 1) & JIMM_MASK;
+}
+
+/*
+ * The order of opcode arguments is implicitly left to right,
+ * starting with RS and ending with FUNC or IMM.
+ */
+static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
+{
+ struct insn *ip = NULL;
+ unsigned int i;
+ va_list ap;
+ u32 op;
+
+ for (i = 0; insn_table_MM[i].opcode != insn_invalid; i++)
+ if (insn_table_MM[i].opcode == opc) {
+ ip = &insn_table_MM[i];
+ break;
+ }
+
+ if (!ip || (opc == insn_daddiu && r4k_daddiu_bug()))
+ panic("Unsupported Micro-assembler instruction %d", opc);
+
+ op = ip->match;
+ va_start(ap, opc);
+ if (ip->fields & RS) {
+ if (opc == insn_mfc0 || opc == insn_mtc0)
+ op |= build_rt(va_arg(ap, u32));
+ else
+ op |= build_rs(va_arg(ap, u32));
+ }
+ if (ip->fields & RT) {
+ if (opc == insn_mfc0 || opc == insn_mtc0)
+ op |= build_rs(va_arg(ap, u32));
+ else
+ op |= build_rt(va_arg(ap, u32));
+ }
+ if (ip->fields & RD)
+ op |= build_rd(va_arg(ap, u32));
+ if (ip->fields & RE)
+ op |= build_re(va_arg(ap, u32));
+ if (ip->fields & SIMM)
+ op |= build_simm(va_arg(ap, s32));
+ if (ip->fields & UIMM)
+ op |= build_uimm(va_arg(ap, u32));
+ if (ip->fields & BIMM)
+ op |= build_bimm(va_arg(ap, s32));
+ if (ip->fields & JIMM)
+ op |= build_jimm(va_arg(ap, u32));
+ if (ip->fields & FUNC)
+ op |= build_func(va_arg(ap, u32));
+ if (ip->fields & SET)
+ op |= build_set(va_arg(ap, u32));
+ if (ip->fields & SCIMM)
+ op |= build_scimm(va_arg(ap, u32));
+ va_end(ap);
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ **buf = ((op & 0xffff) << 16) | (op >> 16);
+#else
+ **buf = op;
+#endif
+ (*buf)++;
+}
+
+static inline void __uasminit
+__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
+{
+ long laddr = (long)lab->addr;
+ long raddr = (long)rel->addr;
+
+ switch (rel->type) {
+ case R_MIPS_PC16:
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ *rel->addr |= (build_bimm(laddr - (raddr + 4)) << 16);
+#else
+ *rel->addr |= build_bimm(laddr - (raddr + 4));
+#endif
+ break;
+
+ default:
+ panic("Unsupported Micro-assembler relocation %d",
+ rel->type);
+ }
+}
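
uasm-micromips.c and uasm-mips.c (next) share one skeleton: an opcode table mapping each abstract uasm opcode to its fixed bits (via the M() macro) plus a mask of operand fields, and a build_insn() that looks the opcode up and ORs each requested field into place. Stripped of the kernel plumbing, the encoder reduces to something like this sketch (field bits and shifts are illustrative):

/* Distilled table-driven encoder; widths and shifts are illustrative. */
enum field { F_RS = 1 << 0, F_RT = 1 << 1 };

struct insn {
	int opcode;		/* abstract opcode, -1 terminates the table */
	unsigned int match;	/* fixed instruction bits */
	unsigned int fields;	/* which operand fields to fill in */
};

static unsigned int encode(const struct insn *table, int opc,
			   unsigned int rs, unsigned int rt)
{
	const struct insn *ip;
	unsigned int op;

	for (ip = table; ip->opcode != -1; ip++)
		if (ip->opcode == opc)
			break;

	op = ip->match;			/* sentinel yields 0 if not found */
	if (ip->fields & F_RS)
		op |= (rs & 0x1f) << 21;	/* classic MIPS RS position */
	if (ip->fields & F_RT)
		op |= (rt & 0x1f) << 16;
	return op;
}
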
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
new file mode 100644
index 000000000000..5fcdd8fe3e83
--- /dev/null
+++ b/arch/mips/mm/uasm-mips.c
@@ -0,0 +1,205 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * A small micro-assembler. It is intentionally kept simple, does only
+ * support a subset of instructions, and does not try to hide pipeline
+ * effects like branch delay slots.
+ *
+ * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
+ * Copyright (C) 2005, 2007 Maciej W. Rozycki
+ * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+
+#include <asm/inst.h>
+#include <asm/elf.h>
+#include <asm/bugs.h>
+#define UASM_ISA _UASM_ISA_CLASSIC
+#include <asm/uasm.h>
+
+#define RS_MASK 0x1f
+#define RS_SH 21
+#define RT_MASK 0x1f
+#define RT_SH 16
+#define SCIMM_MASK 0xfffff
+#define SCIMM_SH 6
+
+/* This macro sets the non-variable bits of an instruction. */
+#define M(a, b, c, d, e, f) \
+ ((a) << OP_SH \
+ | (b) << RS_SH \
+ | (c) << RT_SH \
+ | (d) << RD_SH \
+ | (e) << RE_SH \
+ | (f) << FUNC_SH)
+
+/* Define these when we are not the ISA the kernel is being compiled with. */
+#ifdef CONFIG_CPU_MICROMIPS
+#define CL_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off)
+#define CL_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off)
+#define CL_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off)
+#define CL_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off)
+#endif
+
+#include "uasm.c"
+
+static struct insn insn_table[] __uasminitdata = {
+ { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
+ { insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
+ { insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
+ { insn_bbit0, M(lwc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+ { insn_bbit1, M(swc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+ { insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+ { insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+ { insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM },
+ { insn_bgez, M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM },
+ { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM },
+ { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM },
+ { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+ { insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
+ { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE },
+ { insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE },
+ { insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET},
+ { insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET},
+ { insn_drotr32, M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE },
+ { insn_drotr, M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE },
+ { insn_dsll32, M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE },
+ { insn_dsll, M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE },
+ { insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE },
+ { insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE },
+ { insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE },
+ { insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD },
+ { insn_eret, M(cop0_op, cop_op, 0, 0, 0, eret_op), 0 },
+ { insn_ext, M(spec3_op, 0, 0, 0, 0, ext_op), RS | RT | RD | RE },
+ { insn_ins, M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE },
+ { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
+ { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM },
+ { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
+ { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS },
+ { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
+ { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM },
+ { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
+ { insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET},
+ { insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
+ { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
+ { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD },
+ { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 },
+ { insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE },
+ { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE },
+ { insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE },
+ { insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE },
+ { insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD },
+ { insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
+ { insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 },
+ { insn_tlbr, M(cop0_op, cop_op, 0, 0, 0, tlbr_op), 0 },
+ { insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 },
+ { insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 },
+ { insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
+ { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD },
+ { insn_invalid, 0, 0 }
+};
+
+#undef M
+
+static inline __uasminit u32 build_bimm(s32 arg)
+{
+ WARN(arg > 0x1ffff || arg < -0x20000,
+ KERN_WARNING "Micro-assembler field overflow\n");
+
+ WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n");
+
+ return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
+}
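+
+/*
+ * Worked example: a branch back to the instruction just before the
+ * branch itself has arg = -8 (offsets are taken from the delay slot,
+ * i.e. raddr + 4), so build_bimm() returns
+ * (1 << 15) | ((-8 >> 2) & 0x7fff) = 0xfffe, the word offset -2 in
+ * 16-bit two's complement.
+ */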
+
+static inline __uasminit u32 build_jimm(u32 arg)
+{
+ WARN(arg & ~(JIMM_MASK << 2),
+ KERN_WARNING "Micro-assembler field overflow\n");
+
+ return (arg >> 2) & JIMM_MASK;
+}
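+
+/*
+ * E.g. build_jimm(0x2000) yields 0x800: j/jal carry a 26-bit word
+ * index, so callers pass the destination's low 28 bits and the jump
+ * stays within the current 256 MB segment.
+ */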
+
+/*
+ * The order of opcode arguments is implicitly left to right,
+ * starting with RS and ending with FUNC or IMM.
+ */
+static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
+{
+ struct insn *ip = NULL;
+ unsigned int i;
+ va_list ap;
+ u32 op;
+
+ for (i = 0; insn_table[i].opcode != insn_invalid; i++)
+ if (insn_table[i].opcode == opc) {
+ ip = &insn_table[i];
+ break;
+ }
+
+ if (!ip || (opc == insn_daddiu && r4k_daddiu_bug()))
+ panic("Unsupported Micro-assembler instruction %d", opc);
+
+ op = ip->match;
+ va_start(ap, opc);
+ if (ip->fields & RS)
+ op |= build_rs(va_arg(ap, u32));
+ if (ip->fields & RT)
+ op |= build_rt(va_arg(ap, u32));
+ if (ip->fields & RD)
+ op |= build_rd(va_arg(ap, u32));
+ if (ip->fields & RE)
+ op |= build_re(va_arg(ap, u32));
+ if (ip->fields & SIMM)
+ op |= build_simm(va_arg(ap, s32));
+ if (ip->fields & UIMM)
+ op |= build_uimm(va_arg(ap, u32));
+ if (ip->fields & BIMM)
+ op |= build_bimm(va_arg(ap, s32));
+ if (ip->fields & JIMM)
+ op |= build_jimm(va_arg(ap, u32));
+ if (ip->fields & FUNC)
+ op |= build_func(va_arg(ap, u32));
+ if (ip->fields & SET)
+ op |= build_set(va_arg(ap, u32));
+ if (ip->fields & SCIMM)
+ op |= build_scimm(va_arg(ap, u32));
+ va_end(ap);
+
+ **buf = op;
+ (*buf)++;
+}
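+
+/*
+ * Usage sketch: with u32 *p pointing at the output buffer, emitting
+ * "addiu $4, $5, 16" by hand would be
+ *
+ *	build_insn(&p, insn_addiu, 5, 4, 16);
+ *
+ * because insn_addiu's field list is RS | RT | SIMM and the variadic
+ * arguments are consumed in exactly that left-to-right order.
+ */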
+
+static inline void __uasminit
+__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
+{
+ long laddr = (long)lab->addr;
+ long raddr = (long)rel->addr;
+
+ switch (rel->type) {
+ case R_MIPS_PC16:
+ *rel->addr |= build_bimm(laddr - (raddr + 4));
+ break;
+
+ default:
+ panic("Unsupported Micro-assembler relocation %d",
+ rel->type);
+ }
+}
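+
+/*
+ * For example, a label at laddr = raddr + 0x10 (the branch word being
+ * at raddr) resolves to build_bimm(0x10 - 4) = build_bimm(12): a
+ * forward offset of three words from the delay slot.
+ */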
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 39b891056227..7eb5e4355d25 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -7,20 +7,12 @@
* support a subset of instructions, and does not try to hide pipeline
* effects like branch delay slots.
*
- * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
+ * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
* Copyright (C) 2005, 2007 Maciej W. Rozycki
* Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved.
*/
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/init.h>
-
-#include <asm/inst.h>
-#include <asm/elf.h>
-#include <asm/bugs.h>
-#include <asm/uasm.h>
-
enum fields {
RS = 0x001,
RT = 0x002,
@@ -37,10 +29,6 @@ enum fields {
#define OP_MASK 0x3f
#define OP_SH 26
-#define RS_MASK 0x1f
-#define RS_SH 21
-#define RT_MASK 0x1f
-#define RT_SH 16
#define RD_MASK 0x1f
#define RD_SH 11
#define RE_MASK 0x1f
@@ -53,8 +41,6 @@ enum fields {
#define FUNC_SH 0
#define SET_MASK 0x7
#define SET_SH 0
-#define SCIMM_MASK 0xfffff
-#define SCIMM_SH 6
enum opcode {
insn_invalid,
@@ -77,85 +63,6 @@ struct insn {
enum fields fields;
};
-/* This macro sets the non-variable bits of an instruction. */
-#define M(a, b, c, d, e, f) \
- ((a) << OP_SH \
- | (b) << RS_SH \
- | (c) << RT_SH \
- | (d) << RD_SH \
- | (e) << RE_SH \
- | (f) << FUNC_SH)
-
-static struct insn insn_table[] __uasminitdata = {
- { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
- { insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
- { insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
- { insn_bbit0, M(lwc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
- { insn_bbit1, M(swc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
- { insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
- { insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
- { insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM },
- { insn_bgez, M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM },
- { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM },
- { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM },
- { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
- { insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
- { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE },
- { insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE },
- { insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET},
- { insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET},
- { insn_drotr32, M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE },
- { insn_drotr, M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE },
- { insn_dsll32, M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE },
- { insn_dsll, M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE },
- { insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE },
- { insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE },
- { insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE },
- { insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD },
- { insn_eret, M(cop0_op, cop_op, 0, 0, 0, eret_op), 0 },
- { insn_ext, M(spec3_op, 0, 0, 0, 0, ext_op), RS | RT | RD | RE },
- { insn_ins, M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE },
- { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
- { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM },
- { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
- { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS },
- { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
- { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM },
- { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
- { insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET},
- { insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
- { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
- { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD },
- { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 },
- { insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE },
- { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE },
- { insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE },
- { insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE },
- { insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD },
- { insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
- { insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 },
- { insn_tlbr, M(cop0_op, cop_op, 0, 0, 0, tlbr_op), 0 },
- { insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 },
- { insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 },
- { insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
- { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD },
- { insn_invalid, 0, 0 }
-};
-
-#undef M
-
static inline __uasminit u32 build_rs(u32 arg)
{
WARN(arg & ~RS_MASK, KERN_WARNING "Micro-assembler field overflow\n");
@@ -199,24 +106,6 @@ static inline __uasminit u32 build_uimm(u32 arg)
return arg & IMM_MASK;
}
-static inline __uasminit u32 build_bimm(s32 arg)
-{
- WARN(arg > 0x1ffff || arg < -0x20000,
- KERN_WARNING "Micro-assembler field overflow\n");
-
- WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n");
-
- return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
-}
-
-static inline __uasminit u32 build_jimm(u32 arg)
-{
- WARN(arg & ~(JIMM_MASK << 2),
- KERN_WARNING "Micro-assembler field overflow\n");
-
- return (arg >> 2) & JIMM_MASK;
-}
-
static inline __uasminit u32 build_scimm(u32 arg)
{
WARN(arg & ~SCIMM_MASK,
@@ -239,55 +128,7 @@ static inline __uasminit u32 build_set(u32 arg)
return arg & SET_MASK;
}
-/*
- * The order of opcode arguments is implicitly left to right,
- * starting with RS and ending with FUNC or IMM.
- */
-static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
-{
- struct insn *ip = NULL;
- unsigned int i;
- va_list ap;
- u32 op;
-
- for (i = 0; insn_table[i].opcode != insn_invalid; i++)
- if (insn_table[i].opcode == opc) {
- ip = &insn_table[i];
- break;
- }
-
- if (!ip || (opc == insn_daddiu && r4k_daddiu_bug()))
- panic("Unsupported Micro-assembler instruction %d", opc);
-
- op = ip->match;
- va_start(ap, opc);
- if (ip->fields & RS)
- op |= build_rs(va_arg(ap, u32));
- if (ip->fields & RT)
- op |= build_rt(va_arg(ap, u32));
- if (ip->fields & RD)
- op |= build_rd(va_arg(ap, u32));
- if (ip->fields & RE)
- op |= build_re(va_arg(ap, u32));
- if (ip->fields & SIMM)
- op |= build_simm(va_arg(ap, s32));
- if (ip->fields & UIMM)
- op |= build_uimm(va_arg(ap, u32));
- if (ip->fields & BIMM)
- op |= build_bimm(va_arg(ap, s32));
- if (ip->fields & JIMM)
- op |= build_jimm(va_arg(ap, u32));
- if (ip->fields & FUNC)
- op |= build_func(va_arg(ap, u32));
- if (ip->fields & SET)
- op |= build_set(va_arg(ap, u32));
- if (ip->fields & SCIMM)
- op |= build_scimm(va_arg(ap, u32));
- va_end(ap);
-
- **buf = op;
- (*buf)++;
-}
+static void __uasminit build_insn(u32 **buf, enum opcode opc, ...);
#define I_u1u2u3(op) \
Ip_u1u2u3(op) \
@@ -345,7 +186,7 @@ Ip_u2u1msbu3(op) \
} \
UASM_EXPORT_SYMBOL(uasm_i##op);
-#define I_u2u1msbdu3(op) \
+#define I_u2u1msbdu3(op) \
Ip_u2u1msbu3(op) \
{ \
build_insn(buf, insn##op, b, a, d-1, c); \
@@ -445,7 +286,7 @@ I_u3u1u2(_ldx)
#ifdef CONFIG_CPU_CAVIUM_OCTEON
#include <asm/octeon/octeon.h>
-void __uasminit uasm_i_pref(u32 **buf, unsigned int a, signed int b,
+void __uasminit ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b,
unsigned int c)
{
if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5)
@@ -457,21 +298,21 @@ void __uasminit uasm_i_pref(u32 **buf, unsigned int a, signed int b,
else
build_insn(buf, insn_pref, c, a, b);
}
-UASM_EXPORT_SYMBOL(uasm_i_pref);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_i_pref));
#else
I_u2s3u1(_pref)
#endif
/* Handle labels. */
-void __uasminit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid)
+void __uasminit ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr, int lid)
{
(*lab)->addr = addr;
(*lab)->lab = lid;
(*lab)++;
}
-UASM_EXPORT_SYMBOL(uasm_build_label);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_build_label));
-int __uasminit uasm_in_compat_space_p(long addr)
+int __uasminit ISAFUNC(uasm_in_compat_space_p)(long addr)
{
/* Is this address in 32bit compat space? */
#ifdef CONFIG_64BIT
@@ -480,7 +321,7 @@ int __uasminit uasm_in_compat_space_p(long addr)
return 1;
#endif
}
-UASM_EXPORT_SYMBOL(uasm_in_compat_space_p);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_in_compat_space_p));
static int __uasminit uasm_rel_highest(long val)
{
@@ -500,77 +341,66 @@ static int __uasminit uasm_rel_higher(long val)
#endif
}
-int __uasminit uasm_rel_hi(long val)
+int __uasminit ISAFUNC(uasm_rel_hi)(long val)
{
return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
}
-UASM_EXPORT_SYMBOL(uasm_rel_hi);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_hi));
-int __uasminit uasm_rel_lo(long val)
+int __uasminit ISAFUNC(uasm_rel_lo)(long val)
{
return ((val & 0xffff) ^ 0x8000) - 0x8000;
}
-UASM_EXPORT_SYMBOL(uasm_rel_lo);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_lo));
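+
+/*
+ * Sanity check (illustrative): for val = 0x12348abc these return
+ * hi = 0x1235 and lo = -0x7544, chosen so that (hi << 16) + lo == val;
+ * the sign adjustment lets a sign-extending addiu/daddiu rebuild the
+ * exact constant after lui.
+ */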
-void __uasminit UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr)
+void __uasminit ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr)
{
- if (!uasm_in_compat_space_p(addr)) {
- uasm_i_lui(buf, rs, uasm_rel_highest(addr));
+ if (!ISAFUNC(uasm_in_compat_space_p)(addr)) {
+ ISAFUNC(uasm_i_lui)(buf, rs, uasm_rel_highest(addr));
if (uasm_rel_higher(addr))
- uasm_i_daddiu(buf, rs, rs, uasm_rel_higher(addr));
- if (uasm_rel_hi(addr)) {
- uasm_i_dsll(buf, rs, rs, 16);
- uasm_i_daddiu(buf, rs, rs, uasm_rel_hi(addr));
- uasm_i_dsll(buf, rs, rs, 16);
+ ISAFUNC(uasm_i_daddiu)(buf, rs, rs, uasm_rel_higher(addr));
+		if (ISAFUNC(uasm_rel_hi)(addr)) {
+ ISAFUNC(uasm_i_dsll)(buf, rs, rs, 16);
+ ISAFUNC(uasm_i_daddiu)(buf, rs, rs,
+ ISAFUNC(uasm_rel_hi)(addr));
+ ISAFUNC(uasm_i_dsll)(buf, rs, rs, 16);
} else
- uasm_i_dsll32(buf, rs, rs, 0);
+ ISAFUNC(uasm_i_dsll32)(buf, rs, rs, 0);
} else
- uasm_i_lui(buf, rs, uasm_rel_hi(addr));
+		ISAFUNC(uasm_i_lui)(buf, rs, ISAFUNC(uasm_rel_hi)(addr));
}
-UASM_EXPORT_SYMBOL(UASM_i_LA_mostly);
+UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA_mostly));
-void __uasminit UASM_i_LA(u32 **buf, unsigned int rs, long addr)
+void __uasminit ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr)
{
- UASM_i_LA_mostly(buf, rs, addr);
- if (uasm_rel_lo(addr)) {
- if (!uasm_in_compat_space_p(addr))
- uasm_i_daddiu(buf, rs, rs, uasm_rel_lo(addr));
+ ISAFUNC(UASM_i_LA_mostly)(buf, rs, addr);
+	if (ISAFUNC(uasm_rel_lo)(addr)) {
+ if (!ISAFUNC(uasm_in_compat_space_p)(addr))
+ ISAFUNC(uasm_i_daddiu)(buf, rs, rs,
+			ISAFUNC(uasm_rel_lo)(addr));
else
- uasm_i_addiu(buf, rs, rs, uasm_rel_lo(addr));
+ ISAFUNC(uasm_i_addiu)(buf, rs, rs,
+			ISAFUNC(uasm_rel_lo)(addr));
}
}
-UASM_EXPORT_SYMBOL(UASM_i_LA);
+UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA));
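+
+/*
+ * Illustration: an address in 32-bit compat space costs a single lui
+ * here (plus one addiu from ISAFUNC(UASM_i_LA) if the low half is
+ * non-zero); a full 64-bit address is built 16 bits at a time with
+ * lui/daddiu interleaved with dsll shifts, as coded above.
+ */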
/* Handle relocations. */
void __uasminit
-uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid)
+ISAFUNC(uasm_r_mips_pc16)(struct uasm_reloc **rel, u32 *addr, int lid)
{
(*rel)->addr = addr;
(*rel)->type = R_MIPS_PC16;
(*rel)->lab = lid;
(*rel)++;
}
-UASM_EXPORT_SYMBOL(uasm_r_mips_pc16);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_r_mips_pc16));
static inline void __uasminit
-__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
-{
- long laddr = (long)lab->addr;
- long raddr = (long)rel->addr;
-
- switch (rel->type) {
- case R_MIPS_PC16:
- *rel->addr |= build_bimm(laddr - (raddr + 4));
- break;
-
- default:
- panic("Unsupported Micro-assembler relocation %d",
- rel->type);
- }
-}
+__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab);
void __uasminit
-uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
+ISAFUNC(uasm_resolve_relocs)(struct uasm_reloc *rel, struct uasm_label *lab)
{
struct uasm_label *l;
@@ -579,40 +409,40 @@ uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
if (rel->lab == l->lab)
__resolve_relocs(rel, l);
}
-UASM_EXPORT_SYMBOL(uasm_resolve_relocs);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_resolve_relocs));
void __uasminit
-uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off)
+ISAFUNC(uasm_move_relocs)(struct uasm_reloc *rel, u32 *first, u32 *end, long off)
{
for (; rel->lab != UASM_LABEL_INVALID; rel++)
if (rel->addr >= first && rel->addr < end)
rel->addr += off;
}
-UASM_EXPORT_SYMBOL(uasm_move_relocs);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_relocs));
void __uasminit
-uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off)
+ISAFUNC(uasm_move_labels)(struct uasm_label *lab, u32 *first, u32 *end, long off)
{
for (; lab->lab != UASM_LABEL_INVALID; lab++)
if (lab->addr >= first && lab->addr < end)
lab->addr += off;
}
-UASM_EXPORT_SYMBOL(uasm_move_labels);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_labels));
void __uasminit
-uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first,
+ISAFUNC(uasm_copy_handler)(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first,
u32 *end, u32 *target)
{
long off = (long)(target - first);
memcpy(target, first, (end - first) * sizeof(u32));
- uasm_move_relocs(rel, first, end, off);
- uasm_move_labels(lab, first, end, off);
+	ISAFUNC(uasm_move_relocs)(rel, first, end, off);
+	ISAFUNC(uasm_move_labels)(lab, first, end, off);
}
-UASM_EXPORT_SYMBOL(uasm_copy_handler);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_copy_handler));
-int __uasminit uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
+int __uasminit ISAFUNC(uasm_insn_has_bdelay)(struct uasm_reloc *rel, u32 *addr)
{
for (; rel->lab != UASM_LABEL_INVALID; rel++) {
if (rel->addr == addr
@@ -623,88 +453,88 @@ int __uasminit uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
return 0;
}
-UASM_EXPORT_SYMBOL(uasm_insn_has_bdelay);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_insn_has_bdelay));
/* Convenience functions for labeled branches. */
void __uasminit
-uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+ISAFUNC(uasm_il_bltz)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
- uasm_i_bltz(p, reg, 0);
+ ISAFUNC(uasm_i_bltz)(p, reg, 0);
}
-UASM_EXPORT_SYMBOL(uasm_il_bltz);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bltz));
void __uasminit
-uasm_il_b(u32 **p, struct uasm_reloc **r, int lid)
+ISAFUNC(uasm_il_b)(u32 **p, struct uasm_reloc **r, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
- uasm_i_b(p, 0);
+ ISAFUNC(uasm_i_b)(p, 0);
}
-UASM_EXPORT_SYMBOL(uasm_il_b);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_b));
void __uasminit
-uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+ISAFUNC(uasm_il_beqz)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
- uasm_i_beqz(p, reg, 0);
+ ISAFUNC(uasm_i_beqz)(p, reg, 0);
}
-UASM_EXPORT_SYMBOL(uasm_il_beqz);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqz));
void __uasminit
-uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+ISAFUNC(uasm_il_beqzl)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
- uasm_i_beqzl(p, reg, 0);
+ ISAFUNC(uasm_i_beqzl)(p, reg, 0);
}
-UASM_EXPORT_SYMBOL(uasm_il_beqzl);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqzl));
void __uasminit
-uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1,
+ISAFUNC(uasm_il_bne)(u32 **p, struct uasm_reloc **r, unsigned int reg1,
unsigned int reg2, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
- uasm_i_bne(p, reg1, reg2, 0);
+ ISAFUNC(uasm_i_bne)(p, reg1, reg2, 0);
}
-UASM_EXPORT_SYMBOL(uasm_il_bne);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bne));
void __uasminit
-uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+ISAFUNC(uasm_il_bnez)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
- uasm_i_bnez(p, reg, 0);
+ ISAFUNC(uasm_i_bnez)(p, reg, 0);
}
-UASM_EXPORT_SYMBOL(uasm_il_bnez);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bnez));
void __uasminit
-uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+ISAFUNC(uasm_il_bgezl)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
- uasm_i_bgezl(p, reg, 0);
+ ISAFUNC(uasm_i_bgezl)(p, reg, 0);
}
-UASM_EXPORT_SYMBOL(uasm_il_bgezl);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgezl));
void __uasminit
-uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+ISAFUNC(uasm_il_bgez)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
- uasm_i_bgez(p, reg, 0);
+ ISAFUNC(uasm_i_bgez)(p, reg, 0);
}
-UASM_EXPORT_SYMBOL(uasm_il_bgez);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgez));
void __uasminit
-uasm_il_bbit0(u32 **p, struct uasm_reloc **r, unsigned int reg,
+ISAFUNC(uasm_il_bbit0)(u32 **p, struct uasm_reloc **r, unsigned int reg,
unsigned int bit, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
- uasm_i_bbit0(p, reg, bit, 0);
+ ISAFUNC(uasm_i_bbit0)(p, reg, bit, 0);
}
-UASM_EXPORT_SYMBOL(uasm_il_bbit0);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit0));
void __uasminit
-uasm_il_bbit1(u32 **p, struct uasm_reloc **r, unsigned int reg,
+ISAFUNC(uasm_il_bbit1)(u32 **p, struct uasm_reloc **r, unsigned int reg,
unsigned int bit, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
- uasm_i_bbit1(p, reg, bit, 0);
+ ISAFUNC(uasm_i_bbit1)(p, reg, bit, 0);
}
-UASM_EXPORT_SYMBOL(uasm_il_bbit1);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit1));
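+
+/*
+ * Typical flow (a sketch; the label id and the reloc/label arrays are
+ * the caller's, as in the TLB handler builders):
+ *
+ *	ISAFUNC(uasm_il_beqz)(&p, &r, reg, lid);      reloc + "beqz reg, 0"
+ *	...
+ *	ISAFUNC(uasm_build_label)(&l, p, lid);        bind lid to this address
+ *	ISAFUNC(uasm_resolve_relocs)(relocs, labels); patch the branch offset
+ */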