From d377732c8c9aac14ccb900b65678558b0fb8f0f3 Mon Sep 17 00:00:00 2001
From: Rafał Miłecki
Date: Thu, 17 Jul 2014 23:26:32 +0200
Subject: Revert "MIPS: Delete unused function add_temporary_entry."
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This reverts commit d7a887a73dec6c387b02a966a71aac767bbd9ce6.

Function add_temporary_entry is needed by bcm47xx to support highmem.
We need to add a temporary entry to check for amount of RAM. The only
change made in this revert was replacing (ENTER|EXIT)_CRITICAL.

Signed-off-by: Rafał Miłecki
Cc: linux-mips@linux-mips.org
Cc: Hauke Mehrtens
Patchwork: https://patchwork.linux-mips.org/patch/7395/
Signed-off-by: Ralf Baechle
---
 arch/mips/include/asm/pgtable-32.h | 10 ++++++++
 arch/mips/mm/tlb-r4k.c             | 47 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 57 insertions(+)
(limited to 'arch')

diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index b4204c179b97..2b1133209bb2 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -18,6 +18,16 @@
 
 #include <asm-generic/pgtable-nopmd.h>
 
+/*
+ * - add_temporary_entry() add a temporary TLB entry. We use TLB entries
+ *	starting at the top and working down. This is for populating the
+ *	TLB before trap_init() puts the TLB miss handler in place. It
+ *	should be used only for entries matching the actual page tables,
+ *	to prevent inconsistencies.
+ */
+extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
+			       unsigned long entryhi, unsigned long pagemask);
+
 /*
  * Basically we have the same two-level (which is the logical three level
  * Linux page table layout folded) page tables as the i386. Some day
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 3914e27456f2..04feeb517010 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -391,6 +391,51 @@ int __init has_transparent_hugepage(void)
 
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE  */
 
+/*
+ * Used for loading TLB entries before trap_init() has started, when we
+ * don't actually want to add a wired entry which remains throughout the
+ * lifetime of the system
+ */
+
+static int temp_tlb_entry __cpuinitdata;
+
+__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
+			       unsigned long entryhi, unsigned long pagemask)
+{
+	int ret = 0;
+	unsigned long flags;
+	unsigned long wired;
+	unsigned long old_pagemask;
+	unsigned long old_ctx;
+
+	local_irq_save(flags);
+	/* Save old context and create impossible VPN2 value */
+	old_ctx = read_c0_entryhi();
+	old_pagemask = read_c0_pagemask();
+	wired = read_c0_wired();
+	if (--temp_tlb_entry < wired) {
+		printk(KERN_WARNING
+		       "No TLB space left for add_temporary_entry\n");
+		ret = -ENOSPC;
+		goto out;
+	}
+
+	write_c0_index(temp_tlb_entry);
+	write_c0_pagemask(pagemask);
+	write_c0_entryhi(entryhi);
+	write_c0_entrylo0(entrylo0);
+	write_c0_entrylo1(entrylo1);
+	mtc0_tlbw_hazard();
+	tlb_write_indexed();
+	tlbw_use_hazard();
+
+	write_c0_entryhi(old_ctx);
+	write_c0_pagemask(old_pagemask);
+out:
+	local_irq_restore(flags);
+	return ret;
+}
+
 static int ntlb;
 static int __init set_ntlb(char *str)
 {
@@ -431,6 +476,8 @@ static void r4k_tlb_configure(void)
 		write_c0_pagegrain(pg);
 	}
 
+	temp_tlb_entry = current_cpu_data.tlbsize - 1;
+
 	/* From this point on the ARC firmware is dead.	 */
 	local_flush_tlb_all();
 
--
cgit v1.2.3
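
For illustration only, a rough sketch of how a platform such as bcm47xx might
use add_temporary_entry() during early boot to map RAM that lies above the
KSEG0/KSEG1 window and probe how much of it is present. The probe window
address, the 256 MB page-pair size, the hand-built EntryLo encoding and the
function/macro names below are assumptions made for this example; none of
them come from this patch.

/*
 * Hypothetical early-boot highmem probe (illustrative only; the window
 * address, page size and EntryLo encoding are assumptions, not code
 * from this patch).
 */
#include <linux/init.h>
#include <asm/pgtable.h>	/* declares add_temporary_entry() per this patch */

/* Assumed virtual window for the temporary mapping (512 MB aligned). */
#define HIGHMEM_PROBE_VBASE	0xc0000000UL

/* Assumed PageMask value selecting a 256 MB page pair. */
#define HIGHMEM_PROBE_PM	0x1fffe000UL

/*
 * Build an EntryLo value by hand: PFN in bits 6 and up, cache attribute 2
 * (uncached) in bits 5:3, plus the Dirty, Valid and Global bits.
 */
#define HIGHMEM_PROBE_ENTRYLO(pa) \
	((((pa) >> 12) << 6) | (2 << 3) | (1 << 2) | (1 << 1) | 1)

static void __init probe_highmem_example(void)
{
	unsigned long pa = 0x80000000UL;	/* RAM beyond KSEG0/KSEG1 */

	/* Map two 256 MB pages of high RAM at the probe window. */
	if (add_temporary_entry(HIGHMEM_PROBE_ENTRYLO(pa),
				HIGHMEM_PROBE_ENTRYLO(pa + (256UL << 20)),
				HIGHMEM_PROBE_VBASE, HIGHMEM_PROBE_PM))
		return;

	/*
	 * The window at HIGHMEM_PROBE_VBASE can now be read and written to
	 * detect how much RAM responds.
	 */
}

Because the helper deliberately does not bump the wired register, such a
mapping disappears at the next TLB flush, which is exactly what a one-off
RAM-size probe wants.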