Diffstat (limited to 'arch/mips/mm')

 arch/mips/mm/init.c       | 51 +++++------------------------
 arch/mips/mm/ioremap.c    |  4 ++--
 arch/mips/mm/ioremap64.c  |  4 ++--
 arch/mips/mm/pgtable-32.c | 10 ----------
 arch/mips/mm/pgtable-64.c |  9 ---------
 arch/mips/mm/pgtable.c    |  8 +++-----
 arch/mips/mm/physaddr.c   |  2 +-
 arch/mips/mm/tlb-r4k.c    | 56 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 8 files changed, 72 insertions(+), 72 deletions(-)
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 4583d1a2a73e..a673d3d68254 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -59,24 +59,16 @@ EXPORT_SYMBOL(zero_page_mask);
 /*
  * Not static inline because used by IP27 special magic initialization code
  */
-void setup_zero_pages(void)
+static void __init setup_zero_pages(void)
 {
-	unsigned int order, i;
-	struct page *page;
+	unsigned int order;
 
 	if (cpu_has_vce)
 		order = 3;
 	else
 		order = 0;
 
-	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
-	if (!empty_zero_page)
-		panic("Oh boy, that early out of memory?");
-
-	page = virt_to_page((void *)empty_zero_page);
-	split_page(page, order);
-	for (i = 0; i < (1 << order); i++, page++)
-		mark_page_reserved(page);
+	empty_zero_page = (unsigned long)memblock_alloc_or_panic(PAGE_SIZE << order, PAGE_SIZE);
 
 	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
 }
@@ -423,17 +415,8 @@ void __init paging_init(void)
 			" %ldk highmem ignored\n",
 			(highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
 		max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
-
-		max_mapnr = max_low_pfn;
-	} else if (highend_pfn) {
-		max_mapnr = highend_pfn;
-	} else {
-		max_mapnr = max_low_pfn;
 	}
-#else
-	max_mapnr = max_low_pfn;
 #endif
-	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
 
 	free_area_init(max_zone_pfns);
 }
@@ -442,26 +425,7 @@ void __init paging_init(void)
 static struct kcore_list kcore_kseg0;
 #endif
 
-static inline void __init mem_init_free_highmem(void)
-{
-#ifdef CONFIG_HIGHMEM
-	unsigned long tmp;
-
-	if (cpu_has_dc_aliases)
-		return;
-
-	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
-		struct page *page = pfn_to_page(tmp);
-
-		if (!memblock_is_memory(PFN_PHYS(tmp)))
-			SetPageReserved(page);
-		else
-			free_highmem_page(page);
-	}
-#endif
-}
-
-void __init mem_init(void)
+void __init arch_mm_preinit(void)
 {
 	/*
 	 * When PFN_PTE_SHIFT is greater than PAGE_SHIFT we won't have enough PTE
@@ -470,9 +434,7 @@ void __init mem_init(void)
 	BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (PFN_PTE_SHIFT > PAGE_SHIFT));
 
 	maar_init();
-	memblock_free_all();
 	setup_zero_pages();	/* Setup zeroed pages.  */
-	mem_init_free_highmem();
 
 #ifdef CONFIG_64BIT
 	if ((unsigned long) &_text > (unsigned long) CKSEG0)
@@ -482,6 +444,11 @@ void __init mem_init(void)
 			0x80000000 - 4, KCORE_TEXT);
 #endif
 }
+#else /* CONFIG_NUMA */
+void __init arch_mm_preinit(void)
+{
+	setup_zero_pages();	/* This comes from node 0 */
+}
 #endif /* !CONFIG_NUMA */
 
 void free_init_pages(const char *what, unsigned long begin, unsigned long end)
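Two things make the init.c hunks hang together: memblock_alloc_or_panic() replaces the open-coded __get_free_pages()/panic()/mark_page_reserved() dance, and the rename of mem_init() to arch_mm_preinit() goes with core mm taking over memblock_free_all() and highmem freeing, which is why both of those calls drop out of the arch code. As a minimal sketch of what the _or_panic variant boils down to (the real helper lives in <linux/memblock.h>; the function name here is invented to avoid clashing with it):

	#include <linux/memblock.h>

	/*
	 * Sketch only.  memblock_alloc() returns zeroed memory, which is why
	 * the old __GFP_ZERO disappears; memblock allocations never handed to
	 * the buddy allocator stay reserved, which is why the
	 * split_page()/mark_page_reserved() loop disappears too.
	 */
	static inline void *zero_pages_alloc_sketch(phys_addr_t size, phys_addr_t align)
	{
		void *ptr = memblock_alloc(size, align);

		if (!ptr)
			panic("%s: Failed to allocate %llu bytes\n",
			      __func__, (unsigned long long)size);
		return ptr;
	}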
diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c
index d8243d61ef32..c6c4576cd4a8 100644
--- a/arch/mips/mm/ioremap.c
+++ b/arch/mips/mm/ioremap.c
@@ -44,9 +44,9 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
  * ioremap_prot gives the caller control over cache coherency attributes (CCA)
  */
 void __iomem *ioremap_prot(phys_addr_t phys_addr, unsigned long size,
-		unsigned long prot_val)
+		pgprot_t prot)
 {
-	unsigned long flags = prot_val & _CACHE_MASK;
+	unsigned long flags = pgprot_val(prot) & _CACHE_MASK;
 	unsigned long offset, pfn, last_pfn;
 	struct vm_struct *area;
 	phys_addr_t last_addr;
diff --git a/arch/mips/mm/ioremap64.c b/arch/mips/mm/ioremap64.c
index 15e7820d6a5f..acc03ba20098 100644
--- a/arch/mips/mm/ioremap64.c
+++ b/arch/mips/mm/ioremap64.c
@@ -3,9 +3,9 @@
 #include <ioremap.h>
 
 void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
-		unsigned long prot_val)
+		pgprot_t prot)
 {
-	unsigned long flags = prot_val & _CACHE_MASK;
+	unsigned long flags = pgprot_val(prot) & _CACHE_MASK;
 	u64 base = (flags == _CACHE_UNCACHED ? IO_BASE : UNCAC_BASE);
 	void __iomem *addr;
 
diff --git a/arch/mips/mm/pgtable-32.c b/arch/mips/mm/pgtable-32.c
index 84dd5136d53a..e2cf2166d5cb 100644
--- a/arch/mips/mm/pgtable-32.c
+++ b/arch/mips/mm/pgtable-32.c
@@ -31,16 +31,6 @@ void pgd_init(void *addr)
 }
 
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE)
-pmd_t mk_pmd(struct page *page, pgprot_t prot)
-{
-	pmd_t pmd;
-
-	pmd_val(pmd) = (page_to_pfn(page) << PFN_PTE_SHIFT) | pgprot_val(prot);
-
-	return pmd;
-}
-
-
 void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 		pmd_t *pmdp, pmd_t pmd)
 {
diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c
index 1e544827dea9..b24f865de357 100644
--- a/arch/mips/mm/pgtable-64.c
+++ b/arch/mips/mm/pgtable-64.c
@@ -90,15 +90,6 @@ void pud_init(void *addr)
 #endif
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-pmd_t mk_pmd(struct page *page, pgprot_t prot)
-{
-	pmd_t pmd;
-
-	pmd_val(pmd) = (page_to_pfn(page) << PFN_PTE_SHIFT) | pgprot_val(prot);
-
-	return pmd;
-}
-
 void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 		pmd_t *pmdp, pmd_t pmd)
 {
diff --git a/arch/mips/mm/pgtable.c b/arch/mips/mm/pgtable.c
index 1506e458040d..10835414819f 100644
--- a/arch/mips/mm/pgtable.c
+++ b/arch/mips/mm/pgtable.c
@@ -10,12 +10,10 @@
 
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	pgd_t *init, *ret = NULL;
-	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM,
-						PGD_TABLE_ORDER);
+	pgd_t *init, *ret;
 
-	if (ptdesc) {
-		ret = ptdesc_address(ptdesc);
+	ret = __pgd_alloc(mm, PGD_TABLE_ORDER);
+	if (ret) {
 		init = pgd_offset(&init_mm, 0UL);
 		pgd_init(ret);
 		memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
diff --git a/arch/mips/mm/physaddr.c b/arch/mips/mm/physaddr.c
index f9b8c85e9843..a6b1bf82057a 100644
--- a/arch/mips/mm/physaddr.c
+++ b/arch/mips/mm/physaddr.c
@@ -30,7 +30,7 @@ static inline bool __debug_virt_addr_valid(unsigned long x)
 phys_addr_t __virt_to_phys(volatile const void *x)
 {
 	WARN(!__debug_virt_addr_valid((unsigned long)x),
-	     "virt_to_phys used for non-linear address: %pK (%pS)\n",
+	     "virt_to_phys used for non-linear address: %p (%pS)\n",
 	     x, x);
 
 	return __virt_to_phys_nodebug(x);
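Both ioremap_prot() implementations now match the generic prototype: the cacheability attribute travels in the distinct pgprot_t type instead of a bare unsigned long, so it can no longer be confused with an arbitrary integer. A hypothetical caller-side sketch (the wrapper name is invented for illustration; _CACHE_UNCACHED is the MIPS CCA bit pattern for uncached):

	#include <linux/io.h>

	/*
	 * Hypothetical wrapper: with the pgprot_t signature the CCA is
	 * wrapped in __pgprot() rather than passed as a raw value.
	 */
	static void __iomem *example_map_uncached(phys_addr_t phys, unsigned long size)
	{
		return ioremap_prot(phys, size, __pgprot(_CACHE_UNCACHED));
	}

Similarly mechanical: pgd_alloc() switches to the generic __pgd_alloc() helper, which hides the pagetable_alloc()/ptdesc_address() handling behind one call, and the physaddr.c hunk drops %pK in favour of plain %p, which already hashes kernel pointers by default.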
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 76f3b9c0a9f0..347126dc010d 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -508,6 +508,60 @@ static int __init set_ntlb(char *str)
 
 __setup("ntlb=", set_ntlb);
 
+/* Initialise all TLB entries with unique values */
+static void r4k_tlb_uniquify(void)
+{
+	int entry = num_wired_entries();
+
+	htw_stop();
+	write_c0_entrylo0(0);
+	write_c0_entrylo1(0);
+
+	while (entry < current_cpu_data.tlbsize) {
+		unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
+		unsigned long asid = 0;
+		int idx;
+
+		/* Skip wired MMID to make ginvt_mmid work */
+		if (cpu_has_mmid)
+			asid = MMID_KERNEL_WIRED + 1;
+
+		/* Check for match before using UNIQUE_ENTRYHI */
+		do {
+			if (cpu_has_mmid) {
+				write_c0_memorymapid(asid);
+				write_c0_entryhi(UNIQUE_ENTRYHI(entry));
+			} else {
+				write_c0_entryhi(UNIQUE_ENTRYHI(entry) | asid);
+			}
+			mtc0_tlbw_hazard();
+			tlb_probe();
+			tlb_probe_hazard();
+			idx = read_c0_index();
+			/* No match or match is on current entry */
+			if (idx < 0 || idx == entry)
+				break;
+			/*
+			 * If we hit a match, we need to try again with
+			 * a different ASID.
+			 */
+			asid++;
+		} while (asid < asid_mask);
+
+		if (idx >= 0 && idx != entry)
+			panic("Unable to uniquify TLB entry %d", idx);
+
+		write_c0_index(entry);
+		mtc0_tlbw_hazard();
+		tlb_write_indexed();
+		entry++;
+	}
+
+	tlbw_use_hazard();
+	htw_start();
+	flush_micro_tlb();
+}
+
 /*
  * Configure TLB (for init or after a CPU has been powered off).
  */
@@ -547,7 +601,7 @@ static void r4k_tlb_configure(void)
 		temp_tlb_entry = current_cpu_data.tlbsize - 1;
 
 		/* From this point on the ARC firmware is dead.	 */
-		local_flush_tlb_all();
+		r4k_tlb_uniquify();
 
 		/* Did I tell you that ARC SUCKS?  */
 	}
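The point of r4k_tlb_uniquify() over a plain local_flush_tlb_all() at configure time: the flush path blindly writes UNIQUE_ENTRYHI(entry) into every slot, so if firmware left a valid entry that happens to match one of those values, an R4k-style MMU ends up with overlapping entries and raises a machine check. The new helper probes before each write and bumps the ASID until nothing else matches. A rough sketch of the uniqueness scheme, not the kernel macro (the real UNIQUE_ENTRYHI() lives in arch/mips/include/asm/tlb.h and, as I understand it, also sets MIPS_ENTRYHI_EHINV on cores with cpu_has_tlbinv):

	/*
	 * Rough sketch: give TLB slot "idx" a VPN2 in kseg0 that no other
	 * slot uses.  The stride is two pages because one EntryHi covers an
	 * even/odd page pair through EntryLo0/EntryLo1.
	 */
	#define UNIQUE_ENTRYHI_SKETCH(idx)	(CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))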