Diffstat (limited to 'arch/mips/mm')
-rw-r--r--  arch/mips/mm/Makefile         |   2
-rw-r--r--  arch/mips/mm/c-octeon.c       |   6
-rw-r--r--  arch/mips/mm/c-r3k.c          |   8
-rw-r--r--  arch/mips/mm/c-r4k.c          |  34
-rw-r--r--  arch/mips/mm/c-tx39.c         |   2
-rw-r--r--  arch/mips/mm/cache.c          |   2
-rw-r--r--  arch/mips/mm/cerr-sb1.c       |   4
-rw-r--r--  arch/mips/mm/cex-sb1.S        |   4
-rw-r--r--  arch/mips/mm/dma-default.c    |   3
-rw-r--r--  arch/mips/mm/fault.c          |  15
-rw-r--r--  arch/mips/mm/mmap.c           |   2
-rw-r--r--  arch/mips/mm/page.c           |  42
-rw-r--r--  arch/mips/mm/sc-ip22.c        |   2
-rw-r--r--  arch/mips/mm/sc-mips.c        |   2
-rw-r--r--  arch/mips/mm/sc-r5k.c         |   2
-rw-r--r--  arch/mips/mm/sc-rm7k.c        |  12
-rw-r--r--  arch/mips/mm/tlb-funcs.S      |  37
-rw-r--r--  arch/mips/mm/tlb-r3k.c        |   2
-rw-r--r--  arch/mips/mm/tlb-r4k.c        |   4
-rw-r--r--  arch/mips/mm/tlb-r8k.c        |   4
-rw-r--r--  arch/mips/mm/tlbex.c          | 327
-rw-r--r--  arch/mips/mm/uasm-micromips.c |  10
-rw-r--r--  arch/mips/mm/uasm-mips.c      |  10
-rw-r--r--  arch/mips/mm/uasm.c           | 106
24 files changed, 355 insertions(+), 287 deletions(-)
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index e87aae1f2e80..7f4f93ab22b7 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -4,7 +4,7 @@
obj-y += cache.o dma-default.o extable.o fault.o \
gup.o init.o mmap.o page.o page-funcs.o \
- tlbex.o tlbex-fault.o uasm-mips.o
+ tlbex.o tlbex-fault.o tlb-funcs.o uasm-mips.o
obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o
obj-$(CONFIG_64BIT) += pgtable-64.o
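
Most of the hunks that follow mechanically delete the __cpuinit/__cpuinitdata/__CPUINIT annotations. A minimal sketch of why this is safe, assuming the <linux/init.h> stubs of the 3.11 cycle (by then the markers had already been reduced to no-ops and placed nothing in a discardable section):

/* Assumed <linux/init.h> state when this cleanup ran: all empty stubs,
 * so removing the annotations changes no section placement at all. */
#define __cpuinit
#define __cpuinitdata
#define __cpuinitconst
#define __CPUINIT
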
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 8557fb552863..a0bcdbb81d41 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -180,7 +180,7 @@ static void octeon_flush_kernel_vmap_range(unsigned long vaddr, int size)
* Probe Octeon's caches
*
*/
-static void __cpuinit probe_octeon(void)
+static void probe_octeon(void)
{
unsigned long icache_size;
unsigned long dcache_size;
@@ -251,7 +251,7 @@ static void __cpuinit probe_octeon(void)
}
}
-static void __cpuinit octeon_cache_error_setup(void)
+static void octeon_cache_error_setup(void)
{
extern char except_vec2_octeon;
set_handler(0x100, &except_vec2_octeon, 0x80);
@@ -261,7 +261,7 @@ static void __cpuinit octeon_cache_error_setup(void)
* Setup the Octeon cache flush routines
*
*/
-void __cpuinit octeon_cache_init(void)
+void octeon_cache_init(void)
{
probe_octeon();
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index 704dc735a59d..2fcde0c8ea02 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -26,7 +26,7 @@
static unsigned long icache_size, dcache_size; /* Size in bytes */
static unsigned long icache_lsize, dcache_lsize; /* Size in bytes */
-unsigned long __cpuinit r3k_cache_size(unsigned long ca_flags)
+unsigned long r3k_cache_size(unsigned long ca_flags)
{
unsigned long flags, status, dummy, size;
volatile unsigned long *p;
@@ -61,7 +61,7 @@ unsigned long __cpuinit r3k_cache_size(unsigned long ca_flags)
return size * sizeof(*p);
}
-unsigned long __cpuinit r3k_cache_lsize(unsigned long ca_flags)
+unsigned long r3k_cache_lsize(unsigned long ca_flags)
{
unsigned long flags, status, lsize, i;
volatile unsigned long *p;
@@ -90,7 +90,7 @@ unsigned long __cpuinit r3k_cache_lsize(unsigned long ca_flags)
return lsize * sizeof(*p);
}
-static void __cpuinit r3k_probe_cache(void)
+static void r3k_probe_cache(void)
{
dcache_size = r3k_cache_size(ST0_ISC);
if (dcache_size)
@@ -312,7 +312,7 @@ static void r3k_dma_cache_wback_inv(unsigned long start, unsigned long size)
r3k_flush_dcache_range(start, start + size);
}
-void __cpuinit r3k_cache_init(void)
+void r3k_cache_init(void)
{
extern void build_clear_page(void);
extern void build_copy_page(void);
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 21813beec7a5..f749f687ee87 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -107,7 +107,7 @@ static inline void r4k_blast_dcache_page_dc64(unsigned long addr)
blast_dcache64_page(addr);
}
-static void __cpuinit r4k_blast_dcache_page_setup(void)
+static void r4k_blast_dcache_page_setup(void)
{
unsigned long dc_lsize = cpu_dcache_line_size();
@@ -123,7 +123,7 @@ static void __cpuinit r4k_blast_dcache_page_setup(void)
static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);
-static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
+static void r4k_blast_dcache_page_indexed_setup(void)
{
unsigned long dc_lsize = cpu_dcache_line_size();
@@ -140,7 +140,7 @@ static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
void (* r4k_blast_dcache)(void);
EXPORT_SYMBOL(r4k_blast_dcache);
-static void __cpuinit r4k_blast_dcache_setup(void)
+static void r4k_blast_dcache_setup(void)
{
unsigned long dc_lsize = cpu_dcache_line_size();
@@ -227,7 +227,7 @@ static inline void tx49_blast_icache32_page_indexed(unsigned long page)
static void (* r4k_blast_icache_page)(unsigned long addr);
-static void __cpuinit r4k_blast_icache_page_setup(void)
+static void r4k_blast_icache_page_setup(void)
{
unsigned long ic_lsize = cpu_icache_line_size();
@@ -244,7 +244,7 @@ static void __cpuinit r4k_blast_icache_page_setup(void)
static void (* r4k_blast_icache_page_indexed)(unsigned long addr);
-static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
+static void r4k_blast_icache_page_indexed_setup(void)
{
unsigned long ic_lsize = cpu_icache_line_size();
@@ -269,7 +269,7 @@ static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
void (* r4k_blast_icache)(void);
EXPORT_SYMBOL(r4k_blast_icache);
-static void __cpuinit r4k_blast_icache_setup(void)
+static void r4k_blast_icache_setup(void)
{
unsigned long ic_lsize = cpu_icache_line_size();
@@ -290,7 +290,7 @@ static void __cpuinit r4k_blast_icache_setup(void)
static void (* r4k_blast_scache_page)(unsigned long addr);
-static void __cpuinit r4k_blast_scache_page_setup(void)
+static void r4k_blast_scache_page_setup(void)
{
unsigned long sc_lsize = cpu_scache_line_size();
@@ -308,7 +308,7 @@ static void __cpuinit r4k_blast_scache_page_setup(void)
static void (* r4k_blast_scache_page_indexed)(unsigned long addr);
-static void __cpuinit r4k_blast_scache_page_indexed_setup(void)
+static void r4k_blast_scache_page_indexed_setup(void)
{
unsigned long sc_lsize = cpu_scache_line_size();
@@ -326,7 +326,7 @@ static void __cpuinit r4k_blast_scache_page_indexed_setup(void)
static void (* r4k_blast_scache)(void);
-static void __cpuinit r4k_blast_scache_setup(void)
+static void r4k_blast_scache_setup(void)
{
unsigned long sc_lsize = cpu_scache_line_size();
@@ -797,11 +797,11 @@ static inline void alias_74k_erratum(struct cpuinfo_mips *c)
}
}
-static char *way_string[] __cpuinitdata = { NULL, "direct mapped", "2-way",
+static char *way_string[] = { NULL, "direct mapped", "2-way",
"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};
-static void __cpuinit probe_pcache(void)
+static void probe_pcache(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config = read_c0_config();
@@ -1119,7 +1119,7 @@ static void __cpuinit probe_pcache(void)
* executes in KSEG1 space or else you will crash and burn badly. You have
* been warned.
*/
-static int __cpuinit probe_scache(void)
+static int probe_scache(void)
{
unsigned long flags, addr, begin, end, pow2;
unsigned int config = read_c0_config();
@@ -1196,7 +1196,7 @@ extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
extern int mips_sc_init(void);
-static void __cpuinit setup_scache(void)
+static void setup_scache(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config = read_c0_config();
@@ -1329,7 +1329,7 @@ static void nxp_pr4450_fixup_config(void)
NXP_BARRIER();
}
-static int __cpuinitdata cca = -1;
+static int cca = -1;
static int __init cca_setup(char *str)
{
@@ -1340,7 +1340,7 @@ static int __init cca_setup(char *str)
early_param("cca", cca_setup);
-static void __cpuinit coherency_setup(void)
+static void coherency_setup(void)
{
if (cca < 0 || cca > 7)
cca = read_c0_config() & CONF_CM_CMASK;
@@ -1380,7 +1380,7 @@ static void __cpuinit coherency_setup(void)
}
}
-static void __cpuinit r4k_cache_error_setup(void)
+static void r4k_cache_error_setup(void)
{
extern char __weak except_vec2_generic;
extern char __weak except_vec2_sb1;
@@ -1398,7 +1398,7 @@ static void __cpuinit r4k_cache_error_setup(void)
}
}
-void __cpuinit r4k_cache_init(void)
+void r4k_cache_init(void)
{
extern void build_clear_page(void);
extern void build_copy_page(void);
diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c
index ba9da270289f..8d909dbbf37f 100644
--- a/arch/mips/mm/c-tx39.c
+++ b/arch/mips/mm/c-tx39.c
@@ -344,7 +344,7 @@ static __init void tx39_probe_cache(void)
}
}
-void __cpuinit tx39_cache_init(void)
+void tx39_cache_init(void)
{
extern void build_clear_page(void);
extern void build_copy_page(void);
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 5aeb3eb0b72f..15f813c303b4 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -182,7 +182,7 @@ static inline void setup_protection_map(void)
}
}
-void __cpuinit cpu_cache_init(void)
+void cpu_cache_init(void)
{
if (cpu_has_3k_cache) {
extern void __weak r3k_cache_init(void);
diff --git a/arch/mips/mm/cerr-sb1.c b/arch/mips/mm/cerr-sb1.c
index 576add33bf5b..ee5c1ff861ae 100644
--- a/arch/mips/mm/cerr-sb1.c
+++ b/arch/mips/mm/cerr-sb1.c
@@ -182,11 +182,7 @@ asmlinkage void sb1_cache_error(void)
#ifdef CONFIG_SIBYTE_BW_TRACE
/* Freeze the trace buffer now */
-#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
- csr_out32(M_BCM1480_SCD_TRACE_CFG_FREEZE, IOADDR(A_SCD_TRACE_CFG));
-#else
csr_out32(M_SCD_TRACE_CFG_FREEZE, IOADDR(A_SCD_TRACE_CFG));
-#endif
printk("Trace buffer frozen\n");
#endif
diff --git a/arch/mips/mm/cex-sb1.S b/arch/mips/mm/cex-sb1.S
index fe1d887e8d70..191cf6e0c725 100644
--- a/arch/mips/mm/cex-sb1.S
+++ b/arch/mips/mm/cex-sb1.S
@@ -49,8 +49,6 @@
* (0x170-0x17f) are used to preserve k0, k1, and ra.
*/
- __CPUINIT
-
LEAF(except_vec2_sb1)
/*
* If this error is recoverable, we need to exit the handler
@@ -142,8 +140,6 @@ unrecoverable:
END(except_vec2_sb1)
- __FINIT
-
LEAF(handle_vec2_sb1)
mfc0 k0,CP0_CONFIG
li k1,~CONF_CM_CMASK
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index caf92ecb37d6..aaccf1c10699 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -246,6 +246,9 @@ static int mips_dma_map_sg(struct device *dev, struct scatterlist *sg,
if (!plat_device_is_coherent(dev))
__dma_sync(sg_page(sg), sg->offset, sg->length,
direction);
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ sg->dma_length = sg->length;
+#endif
sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
sg->offset;
}
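
The dma-default.c hunk fills in sg->dma_length on platforms that select CONFIG_NEED_SG_DMA_LENGTH; those platforms report the mapped length through sg_dma_len(), which reads dma_length rather than length. A minimal consumer sketch, assuming only the standard scatterlist API (dump_mapped_sg is a hypothetical helper):

#include <linux/scatterlist.h>

/* Walk a scatterlist after dma_map_sg(): sg_dma_len() resolves to
 * sg->dma_length when CONFIG_NEED_SG_DMA_LENGTH is set, else to
 * sg->length, so the hunk above keeps both paths consistent. */
static void dump_mapped_sg(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		pr_debug("seg %d: dma %#llx len %u\n", i,
			 (unsigned long long)sg_dma_address(sg),
			 sg_dma_len(sg));
}
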
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 0fead53d1c26..85df1cd8d446 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -5,6 +5,7 @@
*
* Copyright (C) 1995 - 2000 by Ralf Baechle
*/
+#include <linux/context_tracking.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
@@ -32,8 +33,8 @@
* and the problem, and then passes it off to one of the appropriate
* routines.
*/
-asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long write,
- unsigned long address)
+static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
+ unsigned long address)
{
struct vm_area_struct * vma = NULL;
struct task_struct *tsk = current;
@@ -312,3 +313,13 @@ vmalloc_fault:
}
#endif
}
+
+asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
+ unsigned long write, unsigned long address)
+{
+ enum ctx_state prev_state;
+
+ prev_state = exception_enter();
+ __do_page_fault(regs, write, address);
+ exception_exit(prev_state);
+}
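
The fault.c change is the standard context-tracking wrap: the handler body moves into a static __do_page_fault() and the asmlinkage entry point brackets it with exception_enter()/exception_exit() so RCU and full-NOHZ accounting know the CPU has left user mode. The same template, with my_handler/__my_handler as placeholder names:

#include <linux/context_tracking.h>
#include <linux/ptrace.h>

static void __my_handler(struct pt_regs *regs);	/* original handler body */

asmlinkage void my_handler(struct pt_regs *regs)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();	/* leave "user" context state */
	__my_handler(regs);
	exception_exit(prev_state);	/* restore whatever state we came from */
}
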
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index 7e5fe2790d8a..f1baadd56e82 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -158,11 +158,9 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
if (mmap_is_legacy()) {
mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
mm->get_unmapped_area = arch_get_unmapped_area;
- mm->unmap_area = arch_unmap_area;
} else {
mm->mmap_base = mmap_base(random_factor);
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
- mm->unmap_area = arch_unmap_area_topdown;
}
}
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index 4eb8dcfaf1ce..218c2109a55d 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -66,29 +66,29 @@ UASM_L_LA(_copy_pref_both)
UASM_L_LA(_copy_pref_store)
/* We need one branch and therefore one relocation per target label. */
-static struct uasm_label __cpuinitdata labels[5];
-static struct uasm_reloc __cpuinitdata relocs[5];
+static struct uasm_label labels[5];
+static struct uasm_reloc relocs[5];
#define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020)
-static int pref_bias_clear_store __cpuinitdata;
-static int pref_bias_copy_load __cpuinitdata;
-static int pref_bias_copy_store __cpuinitdata;
+static int pref_bias_clear_store;
+static int pref_bias_copy_load;
+static int pref_bias_copy_store;
-static u32 pref_src_mode __cpuinitdata;
-static u32 pref_dst_mode __cpuinitdata;
+static u32 pref_src_mode;
+static u32 pref_dst_mode;
-static int clear_word_size __cpuinitdata;
-static int copy_word_size __cpuinitdata;
+static int clear_word_size;
+static int copy_word_size;
-static int half_clear_loop_size __cpuinitdata;
-static int half_copy_loop_size __cpuinitdata;
+static int half_clear_loop_size;
+static int half_copy_loop_size;
-static int cache_line_size __cpuinitdata;
+static int cache_line_size;
#define cache_line_mask() (cache_line_size - 1)
-static inline void __cpuinit
+static inline void
pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off)
{
if (cpu_has_64bit_gp_regs && DADDI_WAR && r4k_daddiu_bug()) {
@@ -108,7 +108,7 @@ pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off)
}
}
-static void __cpuinit set_prefetch_parameters(void)
+static void set_prefetch_parameters(void)
{
if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg)
clear_word_size = 8;
@@ -199,7 +199,7 @@ static void __cpuinit set_prefetch_parameters(void)
4 * copy_word_size));
}
-static void __cpuinit build_clear_store(u32 **buf, int off)
+static void build_clear_store(u32 **buf, int off)
{
if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg) {
uasm_i_sd(buf, ZERO, off, A0);
@@ -208,7 +208,7 @@ static void __cpuinit build_clear_store(u32 **buf, int off)
}
}
-static inline void __cpuinit build_clear_pref(u32 **buf, int off)
+static inline void build_clear_pref(u32 **buf, int off)
{
if (off & cache_line_mask())
return;
@@ -232,7 +232,7 @@ static inline void __cpuinit build_clear_pref(u32 **buf, int off)
uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
}
- }
+ }
}
extern u32 __clear_page_start;
@@ -240,7 +240,7 @@ extern u32 __clear_page_end;
extern u32 __copy_page_start;
extern u32 __copy_page_end;
-void __cpuinit build_clear_page(void)
+void build_clear_page(void)
{
int off;
u32 *buf = &__clear_page_start;
@@ -333,7 +333,7 @@ void __cpuinit build_clear_page(void)
pr_debug("\t.set pop\n");
}
-static void __cpuinit build_copy_load(u32 **buf, int reg, int off)
+static void build_copy_load(u32 **buf, int reg, int off)
{
if (cpu_has_64bit_gp_regs) {
uasm_i_ld(buf, reg, off, A1);
@@ -342,7 +342,7 @@ static void __cpuinit build_copy_load(u32 **buf, int reg, int off)
}
}
-static void __cpuinit build_copy_store(u32 **buf, int reg, int off)
+static void build_copy_store(u32 **buf, int reg, int off)
{
if (cpu_has_64bit_gp_regs) {
uasm_i_sd(buf, reg, off, A0);
@@ -387,7 +387,7 @@ static inline void build_copy_store_pref(u32 **buf, int off)
}
}
-void __cpuinit build_copy_page(void)
+void build_copy_page(void)
{
int off;
u32 *buf = &__copy_page_start;
diff --git a/arch/mips/mm/sc-ip22.c b/arch/mips/mm/sc-ip22.c
index c6aaed934d53..dc7c5a5214a9 100644
--- a/arch/mips/mm/sc-ip22.c
+++ b/arch/mips/mm/sc-ip22.c
@@ -167,7 +167,7 @@ static struct bcache_ops indy_sc_ops = {
.bc_inv = indy_sc_wback_invalidate
};
-void __cpuinit indy_sc_init(void)
+void indy_sc_init(void)
{
if (indy_sc_probe()) {
indy_sc_enable();
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index df96da7e939b..5d01392e3518 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -132,7 +132,7 @@ static inline int __init mips_sc_probe(void)
return 1;
}
-int __cpuinit mips_sc_init(void)
+int mips_sc_init(void)
{
int found = mips_sc_probe();
if (found) {
diff --git a/arch/mips/mm/sc-r5k.c b/arch/mips/mm/sc-r5k.c
index 8bc67720e145..0216ed6eaa2a 100644
--- a/arch/mips/mm/sc-r5k.c
+++ b/arch/mips/mm/sc-r5k.c
@@ -98,7 +98,7 @@ static struct bcache_ops r5k_sc_ops = {
.bc_inv = r5k_dma_cache_inv_sc
};
-void __cpuinit r5k_sc_init(void)
+void r5k_sc_init(void)
{
if (r5k_sc_probe()) {
r5k_sc_enable();
diff --git a/arch/mips/mm/sc-rm7k.c b/arch/mips/mm/sc-rm7k.c
index 274af3be1442..aaffbba33706 100644
--- a/arch/mips/mm/sc-rm7k.c
+++ b/arch/mips/mm/sc-rm7k.c
@@ -104,7 +104,7 @@ static void blast_rm7k_tcache(void)
/*
* This function is executed in uncached address space.
*/
-static __cpuinit void __rm7k_tc_enable(void)
+static void __rm7k_tc_enable(void)
{
int i;
@@ -117,7 +117,7 @@ static __cpuinit void __rm7k_tc_enable(void)
cache_op(Index_Store_Tag_T, CKSEG0ADDR(i));
}
-static __cpuinit void rm7k_tc_enable(void)
+static void rm7k_tc_enable(void)
{
if (read_c0_config() & RM7K_CONF_TE)
return;
@@ -130,7 +130,7 @@ static __cpuinit void rm7k_tc_enable(void)
/*
* This function is executed in uncached address space.
*/
-static __cpuinit void __rm7k_sc_enable(void)
+static void __rm7k_sc_enable(void)
{
int i;
@@ -143,7 +143,7 @@ static __cpuinit void __rm7k_sc_enable(void)
cache_op(Index_Store_Tag_SD, CKSEG0ADDR(i));
}
-static __cpuinit void rm7k_sc_enable(void)
+static void rm7k_sc_enable(void)
{
if (read_c0_config() & RM7K_CONF_SE)
return;
@@ -184,7 +184,7 @@ static struct bcache_ops rm7k_sc_ops = {
* This is a probing function like the one found in c-r4k.c, we look for the
* wrap around point with different addresses.
*/
-static __cpuinit void __probe_tcache(void)
+static void __probe_tcache(void)
{
unsigned long flags, addr, begin, end, pow2;
@@ -226,7 +226,7 @@ static __cpuinit void __probe_tcache(void)
local_irq_restore(flags);
}
-void __cpuinit rm7k_sc_init(void)
+void rm7k_sc_init(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config = read_c0_config();
diff --git a/arch/mips/mm/tlb-funcs.S b/arch/mips/mm/tlb-funcs.S
new file mode 100644
index 000000000000..30a494db99c2
--- /dev/null
+++ b/arch/mips/mm/tlb-funcs.S
@@ -0,0 +1,37 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Micro-assembler generated tlb handler functions.
+ *
+ * Copyright (C) 2013 Broadcom Corporation.
+ *
+ * Based on mm/page-funcs.c
+ * Copyright (C) 2012 MIPS Technologies, Inc.
+ * Copyright (C) 2012 Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <asm/asm.h>
+#include <asm/regdef.h>
+
+#define FASTPATH_SIZE 128
+
+LEAF(tlbmiss_handler_setup_pgd)
+ .space 16 * 4
+END(tlbmiss_handler_setup_pgd)
+EXPORT(tlbmiss_handler_setup_pgd_end)
+
+LEAF(handle_tlbm)
+ .space FASTPATH_SIZE * 4
+END(handle_tlbm)
+EXPORT(handle_tlbm_end)
+
+LEAF(handle_tlbs)
+ .space FASTPATH_SIZE * 4
+END(handle_tlbs)
+EXPORT(handle_tlbs_end)
+
+LEAF(handle_tlbl)
+ .space FASTPATH_SIZE * 4
+END(handle_tlbl)
+EXPORT(handle_tlbl_end)
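
The .space directives above reserve fixed-size buffers in the kernel image in place of the old C arrays; at boot, uasm writes the generated handlers into them, and the *_end symbols bound both the overflow check and the icache flush. A condensed sketch of the consuming side, mirroring the tlbex.c changes below (the uasm emission itself is elided):

extern u32 handle_tlbm[], handle_tlbm_end[];

static void build_tlbm_sketch(void)
{
	u32 *p = handle_tlbm;

	/* ... uasm_i_*() calls append instructions and advance p ... */

	if (p >= handle_tlbm_end)
		panic("TLB modify handler fastpath space exceeded");

	/* Generated code must be flushed before it can be executed. */
	local_flush_icache_range((unsigned long)handle_tlbm,
				 (unsigned long)handle_tlbm_end);
}
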
diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c
index a63d1ed0827f..9aca10994cd2 100644
--- a/arch/mips/mm/tlb-r3k.c
+++ b/arch/mips/mm/tlb-r3k.c
@@ -276,7 +276,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
}
}
-void __cpuinit tlb_init(void)
+void tlb_init(void)
{
local_flush_tlb_all();
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index c643de4c473a..00b26a67a06d 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -389,7 +389,7 @@ int __init has_transparent_hugepage(void)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-static int __cpuinitdata ntlb;
+static int ntlb;
static int __init set_ntlb(char *str)
{
get_option(&str, &ntlb);
@@ -398,7 +398,7 @@ static int __init set_ntlb(char *str)
__setup("ntlb=", set_ntlb);
-void __cpuinit tlb_init(void)
+void tlb_init(void)
{
/*
* You should never change this register:
diff --git a/arch/mips/mm/tlb-r8k.c b/arch/mips/mm/tlb-r8k.c
index 91c2499f806a..6a99733a4440 100644
--- a/arch/mips/mm/tlb-r8k.c
+++ b/arch/mips/mm/tlb-r8k.c
@@ -213,14 +213,14 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
local_irq_restore(flags);
}
-static void __cpuinit probe_tlb(unsigned long config)
+static void probe_tlb(unsigned long config)
{
struct cpuinfo_mips *c = &current_cpu_data;
c->tlbsize = 3 * 128; /* 3 sets each 128 entries */
}
-void __cpuinit tlb_init(void)
+void tlb_init(void)
{
unsigned int config = read_c0_config();
unsigned long status;
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index afeef93f81a7..556cb4815770 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -136,7 +136,7 @@ static int scratchpad_offset(int i)
* why; it's not an issue caused by the core RTL.
*
*/
-static int __cpuinit m4kc_tlbp_war(void)
+static int m4kc_tlbp_war(void)
{
return (current_cpu_data.processor_id & 0xffff00) ==
(PRID_COMP_MIPS | PRID_IMP_4KC);
@@ -181,11 +181,9 @@ UASM_L_LA(_large_segbits_fault)
UASM_L_LA(_tlb_huge_update)
#endif
-static int __cpuinitdata hazard_instance;
+static int hazard_instance;
-static void __cpuinit uasm_bgezl_hazard(u32 **p,
- struct uasm_reloc **r,
- int instance)
+static void uasm_bgezl_hazard(u32 **p, struct uasm_reloc **r, int instance)
{
switch (instance) {
case 0 ... 7:
@@ -196,9 +194,7 @@ static void __cpuinit uasm_bgezl_hazard(u32 **p,
}
}
-static void __cpuinit uasm_bgezl_label(struct uasm_label **l,
- u32 **p,
- int instance)
+static void uasm_bgezl_label(struct uasm_label **l, u32 **p, int instance)
{
switch (instance) {
case 0 ... 7:
@@ -295,17 +291,28 @@ static inline void dump_handler(const char *symbol, const u32 *handler, int coun
* We deliberately chose a buffer size of 128, so we won't scribble
* over anything important on overflow before we panic.
*/
-static u32 tlb_handler[128] __cpuinitdata;
+static u32 tlb_handler[128];
/* simply assume worst case size for labels and relocs */
-static struct uasm_label labels[128] __cpuinitdata;
-static struct uasm_reloc relocs[128] __cpuinitdata;
+static struct uasm_label labels[128];
+static struct uasm_reloc relocs[128];
-static int check_for_high_segbits __cpuinitdata;
+static int check_for_high_segbits;
-static unsigned int kscratch_used_mask __cpuinitdata;
+static unsigned int kscratch_used_mask;
-static int __cpuinit allocate_kscratch(void)
+static inline int __maybe_unused c0_kscratch(void)
+{
+ switch (current_cpu_type()) {
+ case CPU_XLP:
+ case CPU_XLR:
+ return 22;
+ default:
+ return 31;
+ }
+}
+
+static int allocate_kscratch(void)
{
int r;
unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask;
@@ -322,11 +329,11 @@ static int __cpuinit allocate_kscratch(void)
return r;
}
-static int scratch_reg __cpuinitdata;
-static int pgd_reg __cpuinitdata;
+static int scratch_reg;
+static int pgd_reg;
enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};
-static struct work_registers __cpuinit build_get_work_registers(u32 **p)
+static struct work_registers build_get_work_registers(u32 **p)
{
struct work_registers r;
@@ -334,9 +341,9 @@ static struct work_registers __cpuinit build_get_work_registers(u32 **p)
int smp_processor_id_sel;
int smp_processor_id_shift;
- if (scratch_reg > 0) {
+ if (scratch_reg >= 0) {
/* Save in CPU local C0_KScratch? */
- UASM_i_MTC0(p, 1, 31, scratch_reg);
+ UASM_i_MTC0(p, 1, c0_kscratch(), scratch_reg);
r.r1 = K0;
r.r2 = K1;
r.r3 = 1;
@@ -382,10 +389,10 @@ static struct work_registers __cpuinit build_get_work_registers(u32 **p)
return r;
}
-static void __cpuinit build_restore_work_registers(u32 **p)
+static void build_restore_work_registers(u32 **p)
{
- if (scratch_reg > 0) {
- UASM_i_MFC0(p, 1, 31, scratch_reg);
+ if (scratch_reg >= 0) {
+ UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
return;
}
/* K0 already points to save area, restore $1 and $2 */
@@ -407,7 +414,7 @@ extern unsigned long pgd_current[];
/*
* The R3000 TLB handler is simple.
*/
-static void __cpuinit build_r3000_tlb_refill_handler(void)
+static void build_r3000_tlb_refill_handler(void)
{
long pgdc = (long)pgd_current;
u32 *p;
@@ -452,7 +459,7 @@ static void __cpuinit build_r3000_tlb_refill_handler(void)
* other one.To keep things simple, we first assume linear space,
* then we relocate it to the final handler layout as needed.
*/
-static u32 final_handler[64] __cpuinitdata;
+static u32 final_handler[64];
/*
* Hazards
@@ -476,7 +483,7 @@ static u32 final_handler[64] __cpuinitdata;
*
* As if we MIPS hackers wouldn't know how to nop pipelines happy ...
*/
-static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
+static void __maybe_unused build_tlb_probe_entry(u32 **p)
{
switch (current_cpu_type()) {
/* Found by experiment: R4600 v2.0/R4700 needs this, too. */
@@ -500,9 +507,9 @@ static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
*/
enum tlb_write_entry { tlb_random, tlb_indexed };
-static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
- struct uasm_reloc **r,
- enum tlb_write_entry wmode)
+static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
+ struct uasm_reloc **r,
+ enum tlb_write_entry wmode)
{
void(*tlbw)(u32 **) = NULL;
@@ -636,8 +643,8 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
}
}
-static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
- unsigned int reg)
+static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
+ unsigned int reg)
{
if (cpu_has_rixi) {
UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
@@ -652,11 +659,9 @@ static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
-static __cpuinit void build_restore_pagemask(u32 **p,
- struct uasm_reloc **r,
- unsigned int tmp,
- enum label_id lid,
- int restore_scratch)
+static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
+ unsigned int tmp, enum label_id lid,
+ int restore_scratch)
{
if (restore_scratch) {
/* Reset default page size */
@@ -673,8 +678,8 @@ static __cpuinit void build_restore_pagemask(u32 **p,
uasm_i_mtc0(p, 0, C0_PAGEMASK);
uasm_il_b(p, r, lid);
}
- if (scratch_reg > 0)
- UASM_i_MFC0(p, 1, 31, scratch_reg);
+ if (scratch_reg >= 0)
+ UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
else
UASM_i_LW(p, 1, scratchpad_offset(0), 0);
} else {
@@ -695,12 +700,11 @@ static __cpuinit void build_restore_pagemask(u32 **p,
}
}
-static __cpuinit void build_huge_tlb_write_entry(u32 **p,
- struct uasm_label **l,
- struct uasm_reloc **r,
- unsigned int tmp,
- enum tlb_write_entry wmode,
- int restore_scratch)
+static void build_huge_tlb_write_entry(u32 **p, struct uasm_label **l,
+ struct uasm_reloc **r,
+ unsigned int tmp,
+ enum tlb_write_entry wmode,
+ int restore_scratch)
{
/* Set huge page tlb entry size */
uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
@@ -715,9 +719,9 @@ static __cpuinit void build_huge_tlb_write_entry(u32 **p,
/*
* Check if Huge PTE is present, if so then jump to LABEL.
*/
-static void __cpuinit
+static void
build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
- unsigned int pmd, int lid)
+ unsigned int pmd, int lid)
{
UASM_i_LW(p, tmp, 0, pmd);
if (use_bbit_insns()) {
@@ -728,9 +732,8 @@ build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
}
}
-static __cpuinit void build_huge_update_entries(u32 **p,
- unsigned int pte,
- unsigned int tmp)
+static void build_huge_update_entries(u32 **p, unsigned int pte,
+ unsigned int tmp)
{
int small_sequence;
@@ -760,11 +763,10 @@ static __cpuinit void build_huge_update_entries(u32 **p,
UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
}
-static __cpuinit void build_huge_handler_tail(u32 **p,
- struct uasm_reloc **r,
- struct uasm_label **l,
- unsigned int pte,
- unsigned int ptr)
+static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
+ struct uasm_label **l,
+ unsigned int pte,
+ unsigned int ptr)
{
#ifdef CONFIG_SMP
UASM_i_SC(p, pte, 0, ptr);
@@ -783,7 +785,7 @@ static __cpuinit void build_huge_handler_tail(u32 **p,
* TMP and PTR are scratch.
* TMP will be clobbered, PTR will hold the pmd entry.
*/
-static void __cpuinit
+static void
build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
unsigned int tmp, unsigned int ptr)
{
@@ -817,7 +819,7 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
if (pgd_reg != -1) {
/* pgd is in pgd_reg */
- UASM_i_MFC0(p, ptr, 31, pgd_reg);
+ UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
} else {
/*
* &pgd << 11 stored in CONTEXT [23..63].
@@ -875,7 +877,7 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
* BVADDR is the faulting address, PTR is scratch.
* PTR will hold the pgd for vmalloc.
*/
-static void __cpuinit
+static void
build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
unsigned int bvaddr, unsigned int ptr,
enum vmalloc64_mode mode)
@@ -929,8 +931,8 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
uasm_i_jr(p, ptr);
if (mode == refill_scratch) {
- if (scratch_reg > 0)
- UASM_i_MFC0(p, 1, 31, scratch_reg);
+ if (scratch_reg >= 0)
+ UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
else
UASM_i_LW(p, 1, scratchpad_offset(0), 0);
} else {
@@ -945,7 +947,7 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
* TMP and PTR are scratch.
* TMP will be clobbered, PTR will hold the pgd entry.
*/
-static void __cpuinit __maybe_unused
+static void __maybe_unused
build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
{
long pgdc = (long)pgd_current;
@@ -961,7 +963,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
uasm_i_srl(p, ptr, ptr, 19);
#else
/*
- * smp_processor_id() << 3 is stored in CONTEXT.
+ * smp_processor_id() << 2 is stored in CONTEXT.
*/
uasm_i_mfc0(p, ptr, C0_CONTEXT);
UASM_i_LA_mostly(p, tmp, pgdc);
@@ -980,7 +982,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
#endif /* !CONFIG_64BIT */
-static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
+static void build_adjust_context(u32 **p, unsigned int ctx)
{
unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);
@@ -1006,7 +1008,7 @@ static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
uasm_i_andi(p, ctx, ctx, mask);
}
-static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
+static void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
{
/*
* Bug workaround for the Nevada. It seems as if under certain
@@ -1031,8 +1033,7 @@ static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr
UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
}
-static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
- unsigned int ptep)
+static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
{
/*
* 64bit address support (36bit on a 32bit CPU) in a 32bit
@@ -1093,10 +1094,10 @@ struct mips_huge_tlb_info {
int restore_scratch;
};
-static struct mips_huge_tlb_info __cpuinit
+static struct mips_huge_tlb_info
build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
struct uasm_reloc **r, unsigned int tmp,
- unsigned int ptr, int c0_scratch)
+ unsigned int ptr, int c0_scratch_reg)
{
struct mips_huge_tlb_info rv;
unsigned int even, odd;
@@ -1110,12 +1111,12 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
UASM_i_MFC0(p, tmp, C0_BADVADDR);
if (pgd_reg != -1)
- UASM_i_MFC0(p, ptr, 31, pgd_reg);
+ UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
else
UASM_i_MFC0(p, ptr, C0_CONTEXT);
- if (c0_scratch >= 0)
- UASM_i_MTC0(p, scratch, 31, c0_scratch);
+ if (c0_scratch_reg >= 0)
+ UASM_i_MTC0(p, scratch, c0_kscratch(), c0_scratch_reg);
else
UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
@@ -1130,14 +1131,14 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
}
} else {
if (pgd_reg != -1)
- UASM_i_MFC0(p, ptr, 31, pgd_reg);
+ UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
else
UASM_i_MFC0(p, ptr, C0_CONTEXT);
UASM_i_MFC0(p, tmp, C0_BADVADDR);
- if (c0_scratch >= 0)
- UASM_i_MTC0(p, scratch, 31, c0_scratch);
+ if (c0_scratch_reg >= 0)
+ UASM_i_MTC0(p, scratch, c0_kscratch(), c0_scratch_reg);
else
UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
@@ -1242,8 +1243,8 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
}
UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */
- if (c0_scratch >= 0) {
- UASM_i_MFC0(p, scratch, 31, c0_scratch);
+ if (c0_scratch_reg >= 0) {
+ UASM_i_MFC0(p, scratch, c0_kscratch(), c0_scratch_reg);
build_tlb_write_entry(p, l, r, tlb_random);
uasm_l_leave(l, *p);
rv.restore_scratch = 1;
@@ -1271,7 +1272,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
*/
#define MIPS64_REFILL_INSNS 32
-static void __cpuinit build_r4000_tlb_refill_handler(void)
+static void build_r4000_tlb_refill_handler(void)
{
u32 *p = tlb_handler;
struct uasm_label *l = labels;
@@ -1286,7 +1287,7 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
memset(relocs, 0, sizeof(relocs));
memset(final_handler, 0, sizeof(final_handler));
- if ((scratch_reg > 0 || scratchpad_available()) && use_bbit_insns()) {
+ if ((scratch_reg >= 0 || scratchpad_available()) && use_bbit_insns()) {
htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1,
scratch_reg);
vmalloc_mode = refill_scratch;
@@ -1444,27 +1445,25 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
dump_handler("r4000_tlb_refill", (u32 *)ebase, 64);
}
-/*
- * 128 instructions for the fastpath handler is generous and should
- * never be exceeded.
- */
-#define FASTPATH_SIZE 128
+extern u32 handle_tlbl[], handle_tlbl_end[];
+extern u32 handle_tlbs[], handle_tlbs_end[];
+extern u32 handle_tlbm[], handle_tlbm_end[];
-u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
-u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
-u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
-u32 tlbmiss_handler_setup_pgd_array[16] __cacheline_aligned;
+extern u32 tlbmiss_handler_setup_pgd[], tlbmiss_handler_setup_pgd_end[];
-static void __cpuinit build_r4000_setup_pgd(void)
+static void build_r4000_setup_pgd(void)
{
const int a0 = 4;
const int a1 = 5;
- u32 *p = tlbmiss_handler_setup_pgd_array;
+ u32 *p = tlbmiss_handler_setup_pgd;
+ const int tlbmiss_handler_setup_pgd_size =
+ tlbmiss_handler_setup_pgd_end - tlbmiss_handler_setup_pgd;
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
- memset(tlbmiss_handler_setup_pgd_array, 0, sizeof(tlbmiss_handler_setup_pgd_array));
+ memset(tlbmiss_handler_setup_pgd, 0, tlbmiss_handler_setup_pgd_size *
+ sizeof(tlbmiss_handler_setup_pgd[0]));
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
@@ -1490,21 +1489,21 @@ static void __cpuinit build_r4000_setup_pgd(void)
} else {
/* PGD in c0_KScratch */
uasm_i_jr(&p, 31);
- UASM_i_MTC0(&p, a0, 31, pgd_reg);
+ UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
}
- if (p - tlbmiss_handler_setup_pgd_array > ARRAY_SIZE(tlbmiss_handler_setup_pgd_array))
- panic("tlbmiss_handler_setup_pgd_array space exceeded");
+ if (p >= tlbmiss_handler_setup_pgd_end)
+ panic("tlbmiss_handler_setup_pgd space exceeded");
+
uasm_resolve_relocs(relocs, labels);
- pr_debug("Wrote tlbmiss_handler_setup_pgd_array (%u instructions).\n",
- (unsigned int)(p - tlbmiss_handler_setup_pgd_array));
+ pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n",
+ (unsigned int)(p - tlbmiss_handler_setup_pgd));
- dump_handler("tlbmiss_handler",
- tlbmiss_handler_setup_pgd_array,
- ARRAY_SIZE(tlbmiss_handler_setup_pgd_array));
+ dump_handler("tlbmiss_handler", tlbmiss_handler_setup_pgd,
+ tlbmiss_handler_setup_pgd_size);
}
#endif
-static void __cpuinit
+static void
iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
{
#ifdef CONFIG_SMP
@@ -1524,7 +1523,7 @@ iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
#endif
}
-static void __cpuinit
+static void
iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
unsigned int mode)
{
@@ -1584,7 +1583,7 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
* the page table where this PTE is located, PTE will be re-loaded
* with it's original value.
*/
-static void __cpuinit
+static void
build_pte_present(u32 **p, struct uasm_reloc **r,
int pte, int ptr, int scratch, enum label_id lid)
{
@@ -1612,7 +1611,7 @@ build_pte_present(u32 **p, struct uasm_reloc **r,
}
/* Make PTE valid, store result in PTR. */
-static void __cpuinit
+static void
build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
unsigned int ptr)
{
@@ -1625,7 +1624,7 @@ build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
* Check if PTE can be written to, if not branch to LABEL. Regardless
* restore PTE with value from PTR when done.
*/
-static void __cpuinit
+static void
build_pte_writable(u32 **p, struct uasm_reloc **r,
unsigned int pte, unsigned int ptr, int scratch,
enum label_id lid)
@@ -1645,7 +1644,7 @@ build_pte_writable(u32 **p, struct uasm_reloc **r,
/* Make PTE writable, update software status bits as well, then store
* at PTR.
*/
-static void __cpuinit
+static void
build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
unsigned int ptr)
{
@@ -1659,7 +1658,7 @@ build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
* Check if PTE can be modified, if not branch to LABEL. Regardless
* restore PTE with value from PTR when done.
*/
-static void __cpuinit
+static void
build_pte_modifiable(u32 **p, struct uasm_reloc **r,
unsigned int pte, unsigned int ptr, int scratch,
enum label_id lid)
@@ -1688,7 +1687,7 @@ build_pte_modifiable(u32 **p, struct uasm_reloc **r,
* This places the pte into ENTRYLO0 and writes it with tlbwi.
* Then it returns.
*/
-static void __cpuinit
+static void
build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
{
uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
@@ -1704,7 +1703,7 @@ build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
* may have the probe fail bit set as a result of a trap on a
* kseg2 access, i.e. without refill. Then it returns.
*/
-static void __cpuinit
+static void
build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
struct uasm_reloc **r, unsigned int pte,
unsigned int tmp)
@@ -1722,7 +1721,7 @@ build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
uasm_i_rfe(p); /* branch delay */
}
-static void __cpuinit
+static void
build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
unsigned int ptr)
{
@@ -1742,13 +1741,14 @@ build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
uasm_i_tlbp(p); /* load delay */
}
-static void __cpuinit build_r3000_tlb_load_handler(void)
+static void build_r3000_tlb_load_handler(void)
{
u32 *p = handle_tlbl;
+ const int handle_tlbl_size = handle_tlbl_end - handle_tlbl;
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
- memset(handle_tlbl, 0, sizeof(handle_tlbl));
+ memset(handle_tlbl, 0, handle_tlbl_size * sizeof(handle_tlbl[0]));
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
@@ -1762,23 +1762,24 @@ static void __cpuinit build_r3000_tlb_load_handler(void)
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
uasm_i_nop(&p);
- if ((p - handle_tlbl) > FASTPATH_SIZE)
+ if (p >= handle_tlbl_end)
panic("TLB load handler fastpath space exceeded");
uasm_resolve_relocs(relocs, labels);
pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
(unsigned int)(p - handle_tlbl));
- dump_handler("r3000_tlb_load", handle_tlbl, ARRAY_SIZE(handle_tlbl));
+ dump_handler("r3000_tlb_load", handle_tlbl, handle_tlbl_size);
}
-static void __cpuinit build_r3000_tlb_store_handler(void)
+static void build_r3000_tlb_store_handler(void)
{
u32 *p = handle_tlbs;
+ const int handle_tlbs_size = handle_tlbs_end - handle_tlbs;
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
- memset(handle_tlbs, 0, sizeof(handle_tlbs));
+ memset(handle_tlbs, 0, handle_tlbs_size * sizeof(handle_tlbs[0]));
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
@@ -1792,23 +1793,24 @@ static void __cpuinit build_r3000_tlb_store_handler(void)
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
uasm_i_nop(&p);
- if ((p - handle_tlbs) > FASTPATH_SIZE)
+ if (p >= handle_tlbs_end)
panic("TLB store handler fastpath space exceeded");
uasm_resolve_relocs(relocs, labels);
pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
(unsigned int)(p - handle_tlbs));
- dump_handler("r3000_tlb_store", handle_tlbs, ARRAY_SIZE(handle_tlbs));
+ dump_handler("r3000_tlb_store", handle_tlbs, handle_tlbs_size);
}
-static void __cpuinit build_r3000_tlb_modify_handler(void)
+static void build_r3000_tlb_modify_handler(void)
{
u32 *p = handle_tlbm;
+ const int handle_tlbm_size = handle_tlbm_end - handle_tlbm;
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
- memset(handle_tlbm, 0, sizeof(handle_tlbm));
+ memset(handle_tlbm, 0, handle_tlbm_size * sizeof(handle_tlbm[0]));
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
@@ -1822,21 +1824,21 @@ static void __cpuinit build_r3000_tlb_modify_handler(void)
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
uasm_i_nop(&p);
- if ((p - handle_tlbm) > FASTPATH_SIZE)
+ if (p >= handle_tlbm_end)
panic("TLB modify handler fastpath space exceeded");
uasm_resolve_relocs(relocs, labels);
pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
(unsigned int)(p - handle_tlbm));
- dump_handler("r3000_tlb_modify", handle_tlbm, ARRAY_SIZE(handle_tlbm));
+ dump_handler("r3000_tlb_modify", handle_tlbm, handle_tlbm_size);
}
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
/*
* R4000 style TLB load/store/modify handlers.
*/
-static struct work_registers __cpuinit
+static struct work_registers
build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
struct uasm_reloc **r)
{
@@ -1872,7 +1874,7 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
return wr;
}
-static void __cpuinit
+static void
build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
struct uasm_reloc **r, unsigned int tmp,
unsigned int ptr)
@@ -1890,14 +1892,15 @@ build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
#endif
}
-static void __cpuinit build_r4000_tlb_load_handler(void)
+static void build_r4000_tlb_load_handler(void)
{
u32 *p = handle_tlbl;
+ const int handle_tlbl_size = handle_tlbl_end - handle_tlbl;
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
struct work_registers wr;
- memset(handle_tlbl, 0, sizeof(handle_tlbl));
+ memset(handle_tlbl, 0, handle_tlbl_size * sizeof(handle_tlbl[0]));
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
@@ -1935,6 +1938,19 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
uasm_i_nop(&p);
uasm_i_tlbr(&p);
+
+ switch (current_cpu_type()) {
+ default:
+ if (cpu_has_mips_r2) {
+ uasm_i_ehb(&p);
+
+ case CPU_CAVIUM_OCTEON:
+ case CPU_CAVIUM_OCTEON_PLUS:
+ case CPU_CAVIUM_OCTEON2:
+ break;
+ }
+ }
+
/* Examine entrylo 0 or 1 based on ptr. */
if (use_bbit_insns()) {
uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
@@ -1989,6 +2005,19 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
uasm_i_nop(&p);
uasm_i_tlbr(&p);
+
+ switch (current_cpu_type()) {
+ default:
+ if (cpu_has_mips_r2) {
+ uasm_i_ehb(&p);
+
+ case CPU_CAVIUM_OCTEON:
+ case CPU_CAVIUM_OCTEON_PLUS:
+ case CPU_CAVIUM_OCTEON2:
+ break;
+ }
+ }
+
/* Examine entrylo 0 or 1 based on ptr. */
if (use_bbit_insns()) {
uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
@@ -2036,24 +2065,25 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
uasm_i_nop(&p);
- if ((p - handle_tlbl) > FASTPATH_SIZE)
+ if (p >= handle_tlbl_end)
panic("TLB load handler fastpath space exceeded");
uasm_resolve_relocs(relocs, labels);
pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
(unsigned int)(p - handle_tlbl));
- dump_handler("r4000_tlb_load", handle_tlbl, ARRAY_SIZE(handle_tlbl));
+ dump_handler("r4000_tlb_load", handle_tlbl, handle_tlbl_size);
}
-static void __cpuinit build_r4000_tlb_store_handler(void)
+static void build_r4000_tlb_store_handler(void)
{
u32 *p = handle_tlbs;
+ const int handle_tlbs_size = handle_tlbs_end - handle_tlbs;
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
struct work_registers wr;
- memset(handle_tlbs, 0, sizeof(handle_tlbs));
+ memset(handle_tlbs, 0, handle_tlbs_size * sizeof(handle_tlbs[0]));
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
@@ -2090,24 +2120,25 @@ static void __cpuinit build_r4000_tlb_store_handler(void)
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
uasm_i_nop(&p);
- if ((p - handle_tlbs) > FASTPATH_SIZE)
+ if (p >= handle_tlbs_end)
panic("TLB store handler fastpath space exceeded");
uasm_resolve_relocs(relocs, labels);
pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
(unsigned int)(p - handle_tlbs));
- dump_handler("r4000_tlb_store", handle_tlbs, ARRAY_SIZE(handle_tlbs));
+ dump_handler("r4000_tlb_store", handle_tlbs, handle_tlbs_size);
}
-static void __cpuinit build_r4000_tlb_modify_handler(void)
+static void build_r4000_tlb_modify_handler(void)
{
u32 *p = handle_tlbm;
+ const int handle_tlbm_size = handle_tlbm_end - handle_tlbm;
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
struct work_registers wr;
- memset(handle_tlbm, 0, sizeof(handle_tlbm));
+ memset(handle_tlbm, 0, handle_tlbm_size * sizeof(handle_tlbm[0]));
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
@@ -2145,17 +2176,31 @@ static void __cpuinit build_r4000_tlb_modify_handler(void)
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
uasm_i_nop(&p);
- if ((p - handle_tlbm) > FASTPATH_SIZE)
+ if (p >= handle_tlbm_end)
panic("TLB modify handler fastpath space exceeded");
uasm_resolve_relocs(relocs, labels);
pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
(unsigned int)(p - handle_tlbm));
- dump_handler("r4000_tlb_modify", handle_tlbm, ARRAY_SIZE(handle_tlbm));
+ dump_handler("r4000_tlb_modify", handle_tlbm, handle_tlbm_size);
+}
+
+static void flush_tlb_handlers(void)
+{
+ local_flush_icache_range((unsigned long)handle_tlbl,
+ (unsigned long)handle_tlbl_end);
+ local_flush_icache_range((unsigned long)handle_tlbs,
+ (unsigned long)handle_tlbs_end);
+ local_flush_icache_range((unsigned long)handle_tlbm,
+ (unsigned long)handle_tlbm_end);
+#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
+ local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd,
+ (unsigned long)tlbmiss_handler_setup_pgd_end);
+#endif
}
-void __cpuinit build_tlb_refill_handler(void)
+void build_tlb_refill_handler(void)
{
/*
* The refill handler is generated per-CPU, multi-node systems
@@ -2187,6 +2232,7 @@ void __cpuinit build_tlb_refill_handler(void)
build_r3000_tlb_load_handler();
build_r3000_tlb_store_handler();
build_r3000_tlb_modify_handler();
+ flush_tlb_handlers();
run_once++;
}
#else
@@ -2214,23 +2260,10 @@ void __cpuinit build_tlb_refill_handler(void)
build_r4000_tlb_modify_handler();
if (!cpu_has_local_ebase)
build_r4000_tlb_refill_handler();
+ flush_tlb_handlers();
run_once++;
}
if (cpu_has_local_ebase)
build_r4000_tlb_refill_handler();
}
}
-
-void __cpuinit flush_tlb_handlers(void)
-{
- local_flush_icache_range((unsigned long)handle_tlbl,
- (unsigned long)handle_tlbl + sizeof(handle_tlbl));
- local_flush_icache_range((unsigned long)handle_tlbs,
- (unsigned long)handle_tlbs + sizeof(handle_tlbs));
- local_flush_icache_range((unsigned long)handle_tlbm,
- (unsigned long)handle_tlbm + sizeof(handle_tlbm));
-#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
- local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd_array,
- (unsigned long)tlbmiss_handler_setup_pgd_array + sizeof(handle_tlbm));
-#endif
-}
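
Two tlbex.c details worth noting: c0_kscratch() abstracts the KScratch register number because Netlogic XLP/XLR implement their scratch registers at CP0 register 22 rather than the architectural register 31, and the scratch_reg tests change from > 0 to >= 0 because select 0 is a valid KScratch register, leaving -1 as the only safe "none" encoding. A hypothetical simplification of that allocator contract (the real logic is allocate_kscratch() above):

#include <linux/bitops.h>

/* Returns a KScratch select in 0..7, or -1 when the CPU offers none;
 * hence callers must test >= 0, never > 0. */
static int pick_kscratch(unsigned int avail_mask)
{
	if (!avail_mask)
		return -1;
	return fls(avail_mask) - 1;	/* highest free select; may be 0 */
}
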
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
index 162ee6d62788..060000fa653c 100644
--- a/arch/mips/mm/uasm-micromips.c
+++ b/arch/mips/mm/uasm-micromips.c
@@ -49,7 +49,7 @@
#include "uasm.c"
-static struct insn insn_table_MM[] __uasminitdata = {
+static struct insn insn_table_MM[] = {
{ insn_addu, M(mm_pool32a_op, 0, 0, 0, 0, mm_addu32_op), RT | RS | RD },
{ insn_addiu, M(mm_addiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
{ insn_and, M(mm_pool32a_op, 0, 0, 0, 0, mm_and_op), RT | RS | RD },
@@ -118,7 +118,7 @@ static struct insn insn_table_MM[] __uasminitdata = {
#undef M
-static inline __uasminit u32 build_bimm(s32 arg)
+static inline u32 build_bimm(s32 arg)
{
WARN(arg > 0xffff || arg < -0x10000,
KERN_WARNING "Micro-assembler field overflow\n");
@@ -128,7 +128,7 @@ static inline __uasminit u32 build_bimm(s32 arg)
return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 1) & 0x7fff);
}
-static inline __uasminit u32 build_jimm(u32 arg)
+static inline u32 build_jimm(u32 arg)
{
WARN(arg & ~((JIMM_MASK << 2) | 1),
@@ -141,7 +141,7 @@ static inline __uasminit u32 build_jimm(u32 arg)
* The order of opcode arguments is implicitly left to right,
* starting with RS and ending with FUNC or IMM.
*/
-static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
+static void build_insn(u32 **buf, enum opcode opc, ...)
{
struct insn *ip = NULL;
unsigned int i;
@@ -199,7 +199,7 @@ static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
(*buf)++;
}
-static inline void __uasminit
+static inline void
__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
{
long laddr = (long)lab->addr;
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
index 5fcdd8fe3e83..0c724589854e 100644
--- a/arch/mips/mm/uasm-mips.c
+++ b/arch/mips/mm/uasm-mips.c
@@ -49,7 +49,7 @@
#include "uasm.c"
-static struct insn insn_table[] __uasminitdata = {
+static struct insn insn_table[] = {
{ insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
{ insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
@@ -119,7 +119,7 @@ static struct insn insn_table[] __uasminitdata = {
#undef M
-static inline __uasminit u32 build_bimm(s32 arg)
+static inline u32 build_bimm(s32 arg)
{
WARN(arg > 0x1ffff || arg < -0x20000,
KERN_WARNING "Micro-assembler field overflow\n");
@@ -129,7 +129,7 @@ static inline __uasminit u32 build_bimm(s32 arg)
return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
}
-static inline __uasminit u32 build_jimm(u32 arg)
+static inline u32 build_jimm(u32 arg)
{
WARN(arg & ~(JIMM_MASK << 2),
KERN_WARNING "Micro-assembler field overflow\n");
@@ -141,7 +141,7 @@ static inline __uasminit u32 build_jimm(u32 arg)
* The order of opcode arguments is implicitly left to right,
* starting with RS and ending with FUNC or IMM.
*/
-static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
+static void build_insn(u32 **buf, enum opcode opc, ...)
{
struct insn *ip = NULL;
unsigned int i;
@@ -187,7 +187,7 @@ static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
(*buf)++;
}
-static inline void __uasminit
+static inline void
__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
{
long laddr = (long)lab->addr;
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 7eb5e4355d25..b9d14b6c7f58 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -63,35 +63,35 @@ struct insn {
enum fields fields;
};
-static inline __uasminit u32 build_rs(u32 arg)
+static inline u32 build_rs(u32 arg)
{
WARN(arg & ~RS_MASK, KERN_WARNING "Micro-assembler field overflow\n");
return (arg & RS_MASK) << RS_SH;
}
-static inline __uasminit u32 build_rt(u32 arg)
+static inline u32 build_rt(u32 arg)
{
WARN(arg & ~RT_MASK, KERN_WARNING "Micro-assembler field overflow\n");
return (arg & RT_MASK) << RT_SH;
}
-static inline __uasminit u32 build_rd(u32 arg)
+static inline u32 build_rd(u32 arg)
{
WARN(arg & ~RD_MASK, KERN_WARNING "Micro-assembler field overflow\n");
return (arg & RD_MASK) << RD_SH;
}
-static inline __uasminit u32 build_re(u32 arg)
+static inline u32 build_re(u32 arg)
{
WARN(arg & ~RE_MASK, KERN_WARNING "Micro-assembler field overflow\n");
return (arg & RE_MASK) << RE_SH;
}
-static inline __uasminit u32 build_simm(s32 arg)
+static inline u32 build_simm(s32 arg)
{
WARN(arg > 0x7fff || arg < -0x8000,
KERN_WARNING "Micro-assembler field overflow\n");
@@ -99,14 +99,14 @@ static inline __uasminit u32 build_simm(s32 arg)
return arg & 0xffff;
}
-static inline __uasminit u32 build_uimm(u32 arg)
+static inline u32 build_uimm(u32 arg)
{
WARN(arg & ~IMM_MASK, KERN_WARNING "Micro-assembler field overflow\n");
return arg & IMM_MASK;
}
-static inline __uasminit u32 build_scimm(u32 arg)
+static inline u32 build_scimm(u32 arg)
{
WARN(arg & ~SCIMM_MASK,
KERN_WARNING "Micro-assembler field overflow\n");
@@ -114,21 +114,21 @@ static inline __uasminit u32 build_scimm(u32 arg)
return (arg & SCIMM_MASK) << SCIMM_SH;
}
-static inline __uasminit u32 build_func(u32 arg)
+static inline u32 build_func(u32 arg)
{
WARN(arg & ~FUNC_MASK, KERN_WARNING "Micro-assembler field overflow\n");
return arg & FUNC_MASK;
}
-static inline __uasminit u32 build_set(u32 arg)
+static inline u32 build_set(u32 arg)
{
WARN(arg & ~SET_MASK, KERN_WARNING "Micro-assembler field overflow\n");
return arg & SET_MASK;
}
-static void __uasminit build_insn(u32 **buf, enum opcode opc, ...);
+static void build_insn(u32 **buf, enum opcode opc, ...);
#define I_u1u2u3(op) \
Ip_u1u2u3(op) \
@@ -286,7 +286,7 @@ I_u3u1u2(_ldx)
#ifdef CONFIG_CPU_CAVIUM_OCTEON
#include <asm/octeon/octeon.h>
-void __uasminit ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b,
+void ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b,
unsigned int c)
{
if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5)
@@ -304,7 +304,7 @@ I_u2s3u1(_pref)
#endif
/* Handle labels. */
-void __uasminit ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr, int lid)
+void ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr, int lid)
{
(*lab)->addr = addr;
(*lab)->lab = lid;
@@ -312,7 +312,7 @@ void __uasminit ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr, in
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_build_label));
-int __uasminit ISAFUNC(uasm_in_compat_space_p)(long addr)
+int ISAFUNC(uasm_in_compat_space_p)(long addr)
{
/* Is this address in 32bit compat space? */
#ifdef CONFIG_64BIT
@@ -323,7 +323,7 @@ int __uasminit ISAFUNC(uasm_in_compat_space_p)(long addr)
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_in_compat_space_p));
-static int __uasminit uasm_rel_highest(long val)
+static int uasm_rel_highest(long val)
{
#ifdef CONFIG_64BIT
return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000;
@@ -332,7 +332,7 @@ static int __uasminit uasm_rel_highest(long val)
#endif
}
-static int __uasminit uasm_rel_higher(long val)
+static int uasm_rel_higher(long val)
{
#ifdef CONFIG_64BIT
return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000;
@@ -341,19 +341,19 @@ static int __uasminit uasm_rel_higher(long val)
#endif
}
-int __uasminit ISAFUNC(uasm_rel_hi)(long val)
+int ISAFUNC(uasm_rel_hi)(long val)
{
return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_hi));
-int __uasminit ISAFUNC(uasm_rel_lo)(long val)
+int ISAFUNC(uasm_rel_lo)(long val)
{
return ((val & 0xffff) ^ 0x8000) - 0x8000;
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_lo));
-void __uasminit ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr)
+void ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr)
{
if (!ISAFUNC(uasm_in_compat_space_p)(addr)) {
ISAFUNC(uasm_i_lui)(buf, rs, uasm_rel_highest(addr));
@@ -371,7 +371,7 @@ void __uasminit ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr)
}
UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA_mostly));
-void __uasminit ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr)
+void ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr)
{
ISAFUNC(UASM_i_LA_mostly)(buf, rs, addr);
if (ISAFUNC(uasm_rel_lo(addr))) {
@@ -386,8 +386,7 @@ void __uasminit ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr)
UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA));
/* Handle relocations. */
-void __uasminit
-ISAFUNC(uasm_r_mips_pc16)(struct uasm_reloc **rel, u32 *addr, int lid)
+void ISAFUNC(uasm_r_mips_pc16)(struct uasm_reloc **rel, u32 *addr, int lid)
{
(*rel)->addr = addr;
(*rel)->type = R_MIPS_PC16;
@@ -396,11 +395,11 @@ ISAFUNC(uasm_r_mips_pc16)(struct uasm_reloc **rel, u32 *addr, int lid)
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_r_mips_pc16));
-static inline void __uasminit
-__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab);
+static inline void __resolve_relocs(struct uasm_reloc *rel,
+ struct uasm_label *lab);
-void __uasminit
-ISAFUNC(uasm_resolve_relocs)(struct uasm_reloc *rel, struct uasm_label *lab)
+void ISAFUNC(uasm_resolve_relocs)(struct uasm_reloc *rel,
+ struct uasm_label *lab)
{
struct uasm_label *l;
@@ -411,8 +410,8 @@ ISAFUNC(uasm_resolve_relocs)(struct uasm_reloc *rel, struct uasm_label *lab)
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_resolve_relocs));
-void __uasminit
-ISAFUNC(uasm_move_relocs)(struct uasm_reloc *rel, u32 *first, u32 *end, long off)
+void ISAFUNC(uasm_move_relocs)(struct uasm_reloc *rel, u32 *first, u32 *end,
+ long off)
{
for (; rel->lab != UASM_LABEL_INVALID; rel++)
if (rel->addr >= first && rel->addr < end)
@@ -420,8 +419,8 @@ ISAFUNC(uasm_move_relocs)(struct uasm_reloc *rel, u32 *first, u32 *end, long off
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_relocs));
-void __uasminit
-ISAFUNC(uasm_move_labels)(struct uasm_label *lab, u32 *first, u32 *end, long off)
+void ISAFUNC(uasm_move_labels)(struct uasm_label *lab, u32 *first, u32 *end,
+ long off)
{
for (; lab->lab != UASM_LABEL_INVALID; lab++)
if (lab->addr >= first && lab->addr < end)
@@ -429,9 +428,8 @@ ISAFUNC(uasm_move_labels)(struct uasm_label *lab, u32 *first, u32 *end, long off
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_labels));
-void __uasminit
-ISAFUNC(uasm_copy_handler)(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first,
- u32 *end, u32 *target)
+void ISAFUNC(uasm_copy_handler)(struct uasm_reloc *rel, struct uasm_label *lab,
+ u32 *first, u32 *end, u32 *target)
{
long off = (long)(target - first);
@@ -442,7 +440,7 @@ ISAFUNC(uasm_copy_handler)(struct uasm_reloc *rel, struct uasm_label *lab, u32 *
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_copy_handler));
-int __uasminit ISAFUNC(uasm_insn_has_bdelay)(struct uasm_reloc *rel, u32 *addr)
+int ISAFUNC(uasm_insn_has_bdelay)(struct uasm_reloc *rel, u32 *addr)
{
for (; rel->lab != UASM_LABEL_INVALID; rel++) {
if (rel->addr == addr
@@ -456,83 +454,79 @@ int __uasminit ISAFUNC(uasm_insn_has_bdelay)(struct uasm_reloc *rel, u32 *addr)
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_insn_has_bdelay));
/* Convenience functions for labeled branches. */
-void __uasminit
-ISAFUNC(uasm_il_bltz)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+void ISAFUNC(uasm_il_bltz)(u32 **p, struct uasm_reloc **r, unsigned int reg,
+ int lid)
{
uasm_r_mips_pc16(r, *p, lid);
ISAFUNC(uasm_i_bltz)(p, reg, 0);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bltz));
-void __uasminit
-ISAFUNC(uasm_il_b)(u32 **p, struct uasm_reloc **r, int lid)
+void ISAFUNC(uasm_il_b)(u32 **p, struct uasm_reloc **r, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
ISAFUNC(uasm_i_b)(p, 0);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_b));
-void __uasminit
-ISAFUNC(uasm_il_beqz)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+void ISAFUNC(uasm_il_beqz)(u32 **p, struct uasm_reloc **r, unsigned int reg,
+ int lid)
{
uasm_r_mips_pc16(r, *p, lid);
ISAFUNC(uasm_i_beqz)(p, reg, 0);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqz));
-void __uasminit
-ISAFUNC(uasm_il_beqzl)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+void ISAFUNC(uasm_il_beqzl)(u32 **p, struct uasm_reloc **r, unsigned int reg,
+ int lid)
{
uasm_r_mips_pc16(r, *p, lid);
ISAFUNC(uasm_i_beqzl)(p, reg, 0);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqzl));
-void __uasminit
-ISAFUNC(uasm_il_bne)(u32 **p, struct uasm_reloc **r, unsigned int reg1,
- unsigned int reg2, int lid)
+void ISAFUNC(uasm_il_bne)(u32 **p, struct uasm_reloc **r, unsigned int reg1,
+ unsigned int reg2, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
ISAFUNC(uasm_i_bne)(p, reg1, reg2, 0);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bne));
-void __uasminit
-ISAFUNC(uasm_il_bnez)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+void ISAFUNC(uasm_il_bnez)(u32 **p, struct uasm_reloc **r, unsigned int reg,
+ int lid)
{
uasm_r_mips_pc16(r, *p, lid);
ISAFUNC(uasm_i_bnez)(p, reg, 0);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bnez));
-void __uasminit
-ISAFUNC(uasm_il_bgezl)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+void ISAFUNC(uasm_il_bgezl)(u32 **p, struct uasm_reloc **r, unsigned int reg,
+ int lid)
{
uasm_r_mips_pc16(r, *p, lid);
ISAFUNC(uasm_i_bgezl)(p, reg, 0);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgezl));
-void __uasminit
-ISAFUNC(uasm_il_bgez)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+void ISAFUNC(uasm_il_bgez)(u32 **p, struct uasm_reloc **r, unsigned int reg,
+ int lid)
{
uasm_r_mips_pc16(r, *p, lid);
ISAFUNC(uasm_i_bgez)(p, reg, 0);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgez));
-void __uasminit
-ISAFUNC(uasm_il_bbit0)(u32 **p, struct uasm_reloc **r, unsigned int reg,
- unsigned int bit, int lid)
+void ISAFUNC(uasm_il_bbit0)(u32 **p, struct uasm_reloc **r, unsigned int reg,
+ unsigned int bit, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
ISAFUNC(uasm_i_bbit0)(p, reg, bit, 0);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit0));
-void __uasminit
-ISAFUNC(uasm_il_bbit1)(u32 **p, struct uasm_reloc **r, unsigned int reg,
- unsigned int bit, int lid)
+void ISAFUNC(uasm_il_bbit1)(u32 **p, struct uasm_reloc **r, unsigned int reg,
+ unsigned int bit, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
	ISAFUNC(uasm_i_bbit1)(p, reg, bit, 0);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit1));