Diffstat (limited to 'arch/x86/mm/pageattr.c')
-rw-r--r--  arch/x86/mm/pageattr.c  253
1 file changed, 144 insertions(+), 109 deletions(-)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 440210a2277d..464d8fc21ce6 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -16,6 +16,7 @@
 #include <asm/sections.h>
 #include <asm/uaccess.h>
 #include <asm/pgalloc.h>
+#include <asm/proto.h>
 
 /*
  * The current flushing context - we pass it instead of 5 arguments:
@@ -26,8 +27,23 @@ struct cpa_data {
 	pgprot_t	mask_clr;
 	int		numpages;
 	int		flushtlb;
+	unsigned long	pfn;
 };
 
+#ifdef CONFIG_X86_64
+
+static inline unsigned long highmap_start_pfn(void)
+{
+	return __pa(_text) >> PAGE_SHIFT;
+}
+
+static inline unsigned long highmap_end_pfn(void)
+{
+	return __pa(round_up((unsigned long)_end, PMD_SIZE)) >> PAGE_SHIFT;
+}
+
+#endif
+
 static inline int
 within(unsigned long addr, unsigned long start, unsigned long end)
 {
@@ -123,29 +139,14 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache)
 	}
 }
 
-#define HIGH_MAP_START	__START_KERNEL_map
-#define HIGH_MAP_END	(__START_KERNEL_map + KERNEL_TEXT_SIZE)
-
-
-/*
- * Converts a virtual address to a X86-64 highmap address
- */
-static unsigned long virt_to_highmap(void *address)
-{
-#ifdef CONFIG_X86_64
-	return __pa((unsigned long)address) + HIGH_MAP_START - phys_base;
-#else
-	return (unsigned long)address;
-#endif
-}
-
 /*
  * Certain areas of memory on x86 require very specific protection flags,
  * for example the BIOS area or kernel text. Callers don't always get this
  * right (again, ioremap() on BIOS memory is not uncommon) so this function
  * checks and fixes these known static required protection bits.
  */
-static inline pgprot_t static_protections(pgprot_t prot, unsigned long address)
+static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
+					  unsigned long pfn)
 {
 	pgprot_t forbidden = __pgprot(0);
 
@@ -153,30 +154,23 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address)
 	 * The BIOS area between 640k and 1Mb needs to be executable for
 	 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
 	 */
-	if (within(__pa(address), BIOS_BEGIN, BIOS_END))
+	if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
 		pgprot_val(forbidden) |= _PAGE_NX;
 
 	/*
 	 * The kernel text needs to be executable for obvious reasons
-	 * Does not cover __inittext since that is gone later on
+	 * Does not cover __inittext since that is gone later on. On
+	 * 64bit we do not enforce !NX on the low mapping
 	 */
 	if (within(address, (unsigned long)_text, (unsigned long)_etext))
 		pgprot_val(forbidden) |= _PAGE_NX;
 
-	/*
-	 * Do the same for the x86-64 high kernel mapping
-	 */
-	if (within(address, virt_to_highmap(_text), virt_to_highmap(_etext)))
-		pgprot_val(forbidden) |= _PAGE_NX;
-
-	/* The .rodata section needs to be read-only */
-	if (within(address, (unsigned long)__start_rodata,
-				(unsigned long)__end_rodata))
-		pgprot_val(forbidden) |= _PAGE_RW;
+
 	/*
-	 * Do the same for the x86-64 high kernel mapping
+	 * The .rodata section needs to be read-only. Using the pfn
+	 * catches all aliases.
 	 */
-	if (within(address, virt_to_highmap(__start_rodata),
-			virt_to_highmap(__end_rodata)))
+	if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
+		   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
 		pgprot_val(forbidden) |= _PAGE_RW;
 
 	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
@@ -253,7 +247,7 @@ static int
 try_preserve_large_page(pte_t *kpte, unsigned long address,
 			struct cpa_data *cpa)
 {
-	unsigned long nextpage_addr, numpages, pmask, psize, flags, addr;
+	unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn;
 	pte_t new_pte, old_pte, *tmp;
 	pgprot_t old_prot, new_prot;
 	int i, do_split = 1;
@@ -275,8 +269,8 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 		break;
 #ifdef CONFIG_X86_64
 	case PG_LEVEL_1G:
-		psize = PMD_PAGE_SIZE;
-		pmask = PMD_PAGE_MASK;
+		psize = PUD_PAGE_SIZE;
+		pmask = PUD_PAGE_MASK;
 		break;
 #endif
 	default:
@@ -301,7 +295,15 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 
 	pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
 	pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);
-	new_prot = static_protections(new_prot, address);
+
+	/*
+	 * old_pte points to the large page base address. So we need
+	 * to add the offset of the virtual address:
+	 */
+	pfn = pte_pfn(old_pte) + ((address & (psize - 1)) >> PAGE_SHIFT);
+	cpa->pfn = pfn;
+
+	new_prot = static_protections(new_prot, address, pfn);
 
 	/*
 	 * We need to check the full range, whether
@@ -309,8 +311,9 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 	 * the pages in the range we try to preserve:
 	 */
 	addr = address + PAGE_SIZE;
-	for (i = 1; i < cpa->numpages; i++, addr += PAGE_SIZE) {
-		pgprot_t chk_prot = static_protections(new_prot, addr);
+	pfn++;
+	for (i = 1; i < cpa->numpages; i++, addr += PAGE_SIZE, pfn++) {
+		pgprot_t chk_prot = static_protections(new_prot, addr, pfn);
 
 		if (pgprot_val(chk_prot) != pgprot_val(new_prot))
 			goto out_unlock;
@@ -505,46 +508,46 @@ out_unlock:
 	return 0;
 }
 
-static int __change_page_attr(unsigned long address, struct cpa_data *cpa)
+static int __change_page_attr(struct cpa_data *cpa, int primary)
 {
+	unsigned long address = cpa->vaddr;
 	int do_split, err;
 	unsigned int level;
-	struct page *kpte_page;
-	pte_t *kpte;
+	pte_t *kpte, old_pte;
 
 repeat:
 	kpte = lookup_address(address, &level);
 	if (!kpte)
-		return -EINVAL;
+		return primary ? -EINVAL : 0;
 
-	kpte_page = virt_to_page(kpte);
-	BUG_ON(PageLRU(kpte_page));
-	BUG_ON(PageCompound(kpte_page));
+	old_pte = *kpte;
+	if (!pte_val(old_pte)) {
+		if (!primary)
+			return 0;
+		printk(KERN_WARNING "CPA: called for zero pte. "
+		       "vaddr = %lx cpa->vaddr = %lx\n", address,
+		       cpa->vaddr);
+		WARN_ON(1);
+		return -EINVAL;
+	}
 
 	if (level == PG_LEVEL_4K) {
-		pte_t new_pte, old_pte = *kpte;
+		pte_t new_pte;
 		pgprot_t new_prot = pte_pgprot(old_pte);
-
-		if(!pte_val(old_pte)) {
-			printk(KERN_WARNING "CPA: called for zero pte. "
-			       "vaddr = %lx cpa->vaddr = %lx\n", address,
-			       cpa->vaddr);
-			WARN_ON(1);
-			return -EINVAL;
-		}
+		unsigned long pfn = pte_pfn(old_pte);
 
 		pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
 		pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);
 
-		new_prot = static_protections(new_prot, address);
+		new_prot = static_protections(new_prot, address, pfn);
 
 		/*
 		 * We need to keep the pfn from the existing PTE,
 		 * after all we're only going to change it's attributes
 		 * not the memory it points to
 		 */
-		new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot));
-
+		new_pte = pfn_pte(pfn, canon_pgprot(new_prot));
+		cpa->pfn = pfn;
+
 		/*
 		 * Do we really change anything ?
 		 */
@@ -581,67 +584,59 @@ repeat:
 	return err;
 }
 
-/**
- * change_page_attr_addr - Change page table attributes in linear mapping
- * @address: Virtual address in linear mapping.
- * @prot: New page table attribute (PAGE_*)
- *
- * Change page attributes of a page in the direct mapping. This is a variant
- * of change_page_attr() that also works on memory holes that do not have
- * mem_map entry (pfn_valid() is false).
- *
- * See change_page_attr() documentation for more details.
- *
- * Modules and drivers should use the set_memory_* APIs instead.
- */
-static int change_page_attr_addr(struct cpa_data *cpa)
+static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);
+
+static int cpa_process_alias(struct cpa_data *cpa)
 {
-	int err;
-	unsigned long address = cpa->vaddr;
+	struct cpa_data alias_cpa;
+	int ret = 0;
 
-#ifdef CONFIG_X86_64
-	unsigned long phys_addr = __pa(address);
+	if (cpa->pfn > max_pfn_mapped)
+		return 0;
 
 	/*
-	 * If we are inside the high mapped kernel range, then we
-	 * fixup the low mapping first. __va() returns the virtual
-	 * address in the linear mapping:
+	 * No need to redo, when the primary call touched the direct
+	 * mapping already:
 	 */
-	if (within(address, HIGH_MAP_START, HIGH_MAP_END))
-		address = (unsigned long) __va(phys_addr);
-#endif
+	if (!within(cpa->vaddr, PAGE_OFFSET,
+		    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {
 
-	err = __change_page_attr(address, cpa);
-	if (err)
-		return err;
+		alias_cpa = *cpa;
+		alias_cpa.vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT);
+
+		ret = __change_page_attr_set_clr(&alias_cpa, 0);
+	}
 
 #ifdef CONFIG_X86_64
+	if (ret)
+		return ret;
+	/*
+	 * No need to redo, when the primary call touched the high
+	 * mapping already:
	 */
+	if (within(cpa->vaddr, (unsigned long) _text, (unsigned long) _end))
+		return 0;
+
 	/*
 	 * If the physical address is inside the kernel map, we need
 	 * to touch the high mapped kernel as well:
 	 */
-	if (within(phys_addr, 0, KERNEL_TEXT_SIZE)) {
-		/*
-		 * Calc the high mapping address. See __phys_addr()
-		 * for the non obvious details.
-		 *
-		 * Note that NX and other required permissions are
-		 * checked in static_protections().
-		 */
-		address = phys_addr + HIGH_MAP_START - phys_base;
+	if (!within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn()))
+		return 0;
 
-		/*
-		 * Our high aliases are imprecise, because we check
-		 * everything between 0 and KERNEL_TEXT_SIZE, so do
-		 * not propagate lookup failures back to users:
-		 */
-		__change_page_attr(address, cpa);
-	}
+	alias_cpa = *cpa;
+	alias_cpa.vaddr =
+		(cpa->pfn << PAGE_SHIFT) + __START_KERNEL_map - phys_base;
+
+	/*
+	 * The high mapping range is imprecise, so ignore the return value.
+	 */
+	__change_page_attr_set_clr(&alias_cpa, 0);
 #endif
-	return err;
+
+	return ret;
 }
 
-static int __change_page_attr_set_clr(struct cpa_data *cpa)
+static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
 {
 	int ret, numpages = cpa->numpages;
@@ -651,10 +646,17 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa)
 		 * preservation check.
 		 */
 		cpa->numpages = numpages;
-		ret = change_page_attr_addr(cpa);
+
+		ret = __change_page_attr(cpa, checkalias);
 		if (ret)
 			return ret;
 
+		if (checkalias) {
+			ret = cpa_process_alias(cpa);
+			if (ret)
+				return ret;
+		}
+
 		/*
 		 * Adjust the number of pages with the result of the
 		 * CPA operation. Either a large page has been
@@ -677,7 +679,7 @@ static int change_page_attr_set_clr(unsigned long addr, int numpages,
 				    pgprot_t mask_set, pgprot_t mask_clr)
 {
 	struct cpa_data cpa;
-	int ret, cache;
+	int ret, cache, checkalias;
 
 	/*
 	 * Check, if we are requested to change a not supported
@@ -688,13 +690,25 @@ static int change_page_attr_set_clr(unsigned long addr, int numpages,
 	if (!pgprot_val(mask_set) && !pgprot_val(mask_clr))
 		return 0;
 
+	/* Ensure we are PAGE_SIZE aligned */
+	if (addr & ~PAGE_MASK) {
+		addr &= PAGE_MASK;
+		/*
+		 * People should not be passing in unaligned addresses:
+		 */
+		WARN_ON_ONCE(1);
+	}
+
 	cpa.vaddr = addr;
 	cpa.numpages = numpages;
 	cpa.mask_set = mask_set;
 	cpa.mask_clr = mask_clr;
 	cpa.flushtlb = 0;
 
-	ret = __change_page_attr_set_clr(&cpa);
+	/* No alias checking for _NX bit modifications */
+	checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;
+
+	ret = __change_page_attr_set_clr(&cpa, checkalias);
 
 	/*
 	 * Check whether we really changed something:
@@ -832,7 +846,7 @@ static int __set_pages_p(struct page *page, int numpages)
 				.mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
 				.mask_clr = __pgprot(0)};
 
-	return __change_page_attr_set_clr(&cpa);
+	return __change_page_attr_set_clr(&cpa, 1);
 }
 
 static int __set_pages_np(struct page *page, int numpages)
@@ -842,7 +856,7 @@ static int __set_pages_np(struct page *page, int numpages)
 				.mask_set = __pgprot(0),
 				.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW)};
 
-	return __change_page_attr_set_clr(&cpa);
+	return __change_page_attr_set_clr(&cpa, 1);
 }
 
 void kernel_map_pages(struct page *page, int numpages, int enable)
@@ -861,8 +875,12 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 		return;
 
 	/*
-	 * The return value is ignored - the calls cannot fail,
-	 * large pages are disabled at boot time:
+	 * The return value is ignored as the calls cannot fail.
+	 * Large pages are kept enabled at boot time, and are
+	 * split up quickly with DEBUG_PAGEALLOC. If a splitup
+	 * fails here (due to temporary memory shortage) no damage
+	 * is done because we just keep the largepage intact up
+	 * to the next attempt when it will likely be split up:
 	 */
 	if (enable)
 		__set_pages_p(page, numpages);
@@ -881,7 +899,24 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 	 */
 	cpa_fill_pool();
 }
-#endif
+
+#ifdef CONFIG_HIBERNATION
+
+bool kernel_page_present(struct page *page)
+{
+	unsigned int level;
+	pte_t *pte;
+
+	if (PageHighMem(page))
+		return false;
+
+	pte = lookup_address((unsigned long)page_address(page), &level);
+	return (pte_val(*pte) & _PAGE_PRESENT);
+}
+
+#endif /* CONFIG_HIBERNATION */
+
+#endif /* CONFIG_DEBUG_PAGEALLOC */
 
 /*
  * The testcases use internal knowledge of the implementation that shouldn't
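Note on the core idea of this change: static_protections() now keys its checks on the physical page frame number (pfn) instead of the virtual address, so one check covers every alias of a page (the direct mapping and the x86-64 high kernel mapping alike), which is also why cpa_process_alias() can simply recompute a vaddr from cpa->pfn for each alias range. Below is a minimal userspace sketch of that pfn-based range check. The within() helper mirrors the one in pageattr.c; everything else (the PAGE_SHIFT and _PAGE_RW values, the protect() function, and the example addresses) is a simplified stand-in invented for the demo, not kernel code.

/*
 * Sketch: why checking the pfn catches all aliases. Two different
 * virtual addresses that map the same physical page share one pfn,
 * so a single pfn range check replaces the old pair of checks on
 * the direct mapping and on the high kernel mapping.
 *
 * Build: cc -o cpa-sketch cpa-sketch.c   (hypothetical file name)
 */
#include <stdio.h>

#define PAGE_SHIFT	12			/* 4K pages, as on x86 */
#define _PAGE_RW	(1UL << 1)		/* illustrative bit value */

/* Same shape as the within() helper in pageattr.c: half-open range */
static int within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}

/* Toy model of the pfn-based .rodata clause in static_protections() */
static unsigned long protect(unsigned long prot, unsigned long pfn,
			     unsigned long rodata_start_pfn,
			     unsigned long rodata_end_pfn)
{
	if (within(pfn, rodata_start_pfn, rodata_end_pfn))
		prot &= ~_PAGE_RW;	/* force the page read-only */
	return prot;
}

int main(void)
{
	/* pretend .rodata occupies physical 0x200000-0x400000 */
	unsigned long start = 0x200000UL >> PAGE_SHIFT;
	unsigned long end   = 0x400000UL >> PAGE_SHIFT;

	/* any alias of physical 0x300000 resolves to this same pfn */
	unsigned long rodata_pfn  = 0x300000UL >> PAGE_SHIFT;
	unsigned long other_pfn   = 0x500000UL >> PAGE_SHIFT;

	printf("inside rodata:  prot = %#lx\n",
	       protect(_PAGE_RW, rodata_pfn, start, end));
	printf("outside rodata: prot = %#lx\n",
	       protect(_PAGE_RW, other_pfn, start, end));
	return 0;
}

The design point the sketch makes explicit: the old virtual-address checks needed a second pass via virt_to_highmap() to cover the x86-64 high mapping, while the pfn comparison is alias-agnostic by construction, which is what lets this patch delete virt_to_highmap() entirely.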