author    Ingo Molnar <mingo@elte.hu>  2009-05-08 12:50:00 +0400
committer Ingo Molnar <mingo@elte.hu>  2009-05-08 12:50:00 +0400
commit    f066a155334642b8a206eec625b1925d88c48aeb
tree      cb12975e60b70d1dae3b7397bab955de78a4d01e
parent    e7c064889606aab3569669078c69b87b2c527e72
parent    33df4db04a79660150e1948e3296eeb451ac121b
download  linux-f066a155334642b8a206eec625b1925d88c48aeb.tar.xz
Merge branch 'x86/urgent' into x86/xen

Conflicts:
	arch/frv/include/asm/pgtable.h
	arch/x86/include/asm/required-features.h
	arch/x86/xen/mmu.c

Merge reason: x86/xen was on a .29 base still, move it to a fresher
branch and pick up Xen fixes as well, plus resolve conflicts

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/gup.c        16
-rw-r--r--  arch/x86/mm/init.c       18
-rw-r--r--  arch/x86/mm/ioremap.c    33
-rw-r--r--  arch/x86/mm/kmmio.c       2
-rw-r--r--  arch/x86/mm/numa_32.c     2
-rw-r--r--  arch/x86/mm/numa_64.c     3
-rw-r--r--  arch/x86/mm/pageattr.c  127
-rw-r--r--  arch/x86/mm/pat.c       191
-rw-r--r--  arch/x86/mm/pgtable.c     3
-rw-r--r--  arch/x86/mm/srat_32.c     2
-rw-r--r--  arch/x86/mm/srat_64.c     7
11 files changed, 176 insertions, 228 deletions
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index be54176e9eb2..6340cef6798a 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -219,6 +219,22 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
return 1;
}
+/**
+ * get_user_pages_fast() - pin user pages in memory
+ * @start: starting user address
+ * @nr_pages: number of pages from start to pin
+ * @write: whether pages will be written to
+ * @pages: array that receives pointers to the pages pinned.
+ * Should be at least nr_pages long.
+ *
+ * Attempt to pin user pages in memory without taking mm->mmap_sem.
+ * If not successful, it will fall back to taking the lock and
+ * calling get_user_pages().
+ *
+ * Returns number of pages pinned. This may be fewer than the number
+ * requested. If nr_pages is 0 or negative, returns 0. If no pages
+ * were pinned, returns -errno.
+ */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages)
{
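
A minimal usage sketch of the get_user_pages_fast() interface documented
above (illustrative only, not part of this commit; pin_user_buffer and
NR_PIN are made-up names):

	#include <linux/mm.h>

	#define NR_PIN 16	/* hypothetical buffer length, in pages */

	/* Pin NR_PIN pages of a user buffer starting at uaddr. */
	static int pin_user_buffer(unsigned long uaddr, struct page **pages)
	{
		/* Lockless fast path; falls back internally to taking
		 * mmap_sem and calling get_user_pages() if needed. */
		int nr = get_user_pages_fast(uaddr, NR_PIN, 1 /* write */, pages);

		if (nr < 0)
			return nr;	/* nothing was pinned */
		return nr;		/* may be fewer than NR_PIN */
	}

Each page the call does pin must eventually be released with put_page().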
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index fd3da1dda1c9..ae4f7b5d7104 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -7,6 +7,7 @@
#include <asm/page.h>
#include <asm/page_types.h>
#include <asm/sections.h>
+#include <asm/setup.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
@@ -304,8 +305,23 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
#endif
#ifdef CONFIG_X86_64
- if (!after_bootmem)
+ if (!after_bootmem && !start) {
+ pud_t *pud;
+ pmd_t *pmd;
+
mmu_cr4_features = read_cr4();
+
+ /*
+ * _brk_end cannot change anymore, but it and _end may be
+ * located on different 2M pages. cleanup_highmap(), however,
+ * can only consider _end when it runs, so destroy any
+ * mappings beyond _brk_end here.
+ */
+ pud = pud_offset(pgd_offset_k(_brk_end), _brk_end);
+ pmd = pmd_offset(pud, _brk_end - 1);
+ while (++pmd <= pmd_offset(pud, (unsigned long)_end - 1))
+ pmd_clear(pmd);
+ }
#endif
__flush_tlb_all();
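
The comment in the hunk above hinges on _brk_end and _end possibly landing
in different 2M (PMD-sized) pages. A standalone sketch of that boundary
test (illustrative arithmetic, not kernel code):

	#define PMD_SIZE_2M	(1UL << 21)	/* 2 MiB, one PMD's coverage on x86_64 */

	/* Two addresses share a PMD entry only if they fall in the
	 * same 2M-aligned region. */
	static int same_2m_page(unsigned long a, unsigned long b)
	{
		return (a & ~(PMD_SIZE_2M - 1)) == (b & ~(PMD_SIZE_2M - 1));
	}

When same_2m_page(_brk_end, _end) is false, PMD entries beyond _brk_end
survive from the early mappings; those are exactly what the pmd_clear()
loop destroys.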
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 0dfa09d69e80..8a450930834f 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -280,15 +280,16 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
return NULL;
area->phys_addr = phys_addr;
vaddr = (unsigned long) area->addr;
- if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
+
+ if (kernel_map_sync_memtype(phys_addr, size, prot_val)) {
free_memtype(phys_addr, phys_addr + size);
free_vm_area(area);
return NULL;
}
- if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
+ if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
free_memtype(phys_addr, phys_addr + size);
- vunmap(area->addr);
+ free_vm_area(area);
return NULL;
}
@@ -374,7 +375,8 @@ static void __iomem *ioremap_default(resource_size_t phys_addr,
* - UC_MINUS for non-WB-able memory with no other conflicting mappings
* - Inherit from confliting mappings otherwise
*/
- err = reserve_memtype(phys_addr, phys_addr + size, -1, &flags);
+ err = reserve_memtype(phys_addr, phys_addr + size,
+ _PAGE_CACHE_WB, &flags);
if (err < 0)
return NULL;
@@ -547,7 +549,7 @@ void __init early_ioremap_reset(void)
}
static void __init __early_set_fixmap(enum fixed_addresses idx,
- unsigned long phys, pgprot_t flags)
+ phys_addr_t phys, pgprot_t flags)
{
unsigned long addr = __fix_to_virt(idx);
pte_t *pte;
@@ -566,7 +568,7 @@ static void __init __early_set_fixmap(enum fixed_addresses idx,
}
static inline void __init early_set_fixmap(enum fixed_addresses idx,
- unsigned long phys, pgprot_t prot)
+ phys_addr_t phys, pgprot_t prot)
{
if (after_paging_init)
__set_fixmap(idx, phys, prot);
@@ -607,9 +609,10 @@ static int __init check_early_ioremap_leak(void)
late_initcall(check_early_ioremap_leak);
static void __init __iomem *
-__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
+__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
{
- unsigned long offset, last_addr;
+ unsigned long offset;
+ resource_size_t last_addr;
unsigned int nrpages;
enum fixed_addresses idx0, idx;
int i, slot;
@@ -625,15 +628,15 @@ __early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
}
if (slot < 0) {
- printk(KERN_INFO "early_iomap(%08lx, %08lx) not found slot\n",
- phys_addr, size);
+ printk(KERN_INFO "early_iomap(%08llx, %08lx) not found slot\n",
+ (u64)phys_addr, size);
WARN_ON(1);
return NULL;
}
if (early_ioremap_debug) {
- printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
- phys_addr, size, slot);
+ printk(KERN_INFO "early_ioremap(%08llx, %08lx) [%d] => ",
+ (u64)phys_addr, size, slot);
dump_stack();
}
@@ -680,13 +683,15 @@ __early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
}
/* Remap an IO device */
-void __init __iomem *early_ioremap(unsigned long phys_addr, unsigned long size)
+void __init __iomem *
+early_ioremap(resource_size_t phys_addr, unsigned long size)
{
return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
}
/* Remap memory */
-void __init __iomem *early_memremap(unsigned long phys_addr, unsigned long size)
+void __init __iomem *
+early_memremap(resource_size_t phys_addr, unsigned long size)
{
return __early_ioremap(phys_addr, size, PAGE_KERNEL);
}
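
With phys_addr widened to resource_size_t, early_ioremap() and
early_memremap() can now map physical addresses above 4GB on 32-bit PAE
kernels. A hedged usage sketch (illustrative; the function name and the
use of the mapping are made up):

	/* Temporarily map one page of firmware data during early boot. */
	void __init peek_early_table(resource_size_t phys)
	{
		void __iomem *p = early_memremap(phys, PAGE_SIZE);

		if (!p)
			return;
		/* ... read fields through p ... */
		early_iounmap(p, PAGE_SIZE);	/* fixmap slots are scarce */
	}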
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index 4f115e00486b..50dc802a1c46 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -87,7 +87,7 @@ static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
{
struct kmmio_probe *p;
list_for_each_entry_rcu(p, &kmmio_probes, list) {
- if (addr >= p->addr && addr <= (p->addr + p->len))
+ if (addr >= p->addr && addr < (p->addr + p->len))
return p;
}
return NULL;
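
The one-character fix above turns the probe lookup into a half-open range
test, [addr, addr + len), so an access at exactly addr + len no longer
matches the probe. A tiny standalone illustration (not kernel code):

	/* Old: a <= start + len matched one byte past the probe.
	 * New: the half-open test rejects it. */
	static int in_probe(unsigned long a, unsigned long start, unsigned long len)
	{
		return a >= start && a < start + len;
	}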
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 3daefa04ace5..d2530062fe00 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -257,7 +257,7 @@ void resume_map_numa_kva(pgd_t *pgd_base)
}
#endif
-static unsigned long calculate_numa_remap_pages(void)
+static __init unsigned long calculate_numa_remap_pages(void)
{
int nid;
unsigned long size, reserve_pages = 0;
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index d73aaa892371..2d05a12029dc 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -188,6 +188,9 @@ void __init setup_node_bootmem(int nodeid, unsigned long start,
const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
int nid;
+ if (!end)
+ return;
+
start = roundup(start, ZONE_ALIGN);
printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid,
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 660cac75ae11..b81b41a0481f 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -931,71 +931,94 @@ int _set_memory_uc(unsigned long addr, int numpages)
int set_memory_uc(unsigned long addr, int numpages)
{
+ int ret;
+
/*
* for now UC MINUS. see comments in ioremap_nocache()
*/
- if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
- _PAGE_CACHE_UC_MINUS, NULL))
- return -EINVAL;
+ ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
+ _PAGE_CACHE_UC_MINUS, NULL);
+ if (ret)
+ goto out_err;
+
+ ret = _set_memory_uc(addr, numpages);
+ if (ret)
+ goto out_free;
- return _set_memory_uc(addr, numpages);
+ return 0;
+
+out_free:
+ free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
+out_err:
+ return ret;
}
EXPORT_SYMBOL(set_memory_uc);
int set_memory_array_uc(unsigned long *addr, int addrinarray)
{
- unsigned long start;
- unsigned long end;
- int i;
+ int i, j;
+ int ret;
+
/*
* for now UC MINUS. see comments in ioremap_nocache()
*/
for (i = 0; i < addrinarray; i++) {
- start = __pa(addr[i]);
- for (end = start + PAGE_SIZE; i < addrinarray - 1; end += PAGE_SIZE) {
- if (end != __pa(addr[i + 1]))
- break;
- i++;
- }
- if (reserve_memtype(start, end, _PAGE_CACHE_UC_MINUS, NULL))
- goto out;
+ ret = reserve_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE,
+ _PAGE_CACHE_UC_MINUS, NULL);
+ if (ret)
+ goto out_free;
}
- return change_page_attr_set(addr, addrinarray,
+ ret = change_page_attr_set(addr, addrinarray,
__pgprot(_PAGE_CACHE_UC_MINUS), 1);
-out:
- for (i = 0; i < addrinarray; i++) {
- unsigned long tmp = __pa(addr[i]);
-
- if (tmp == start)
- break;
- for (end = tmp + PAGE_SIZE; i < addrinarray - 1; end += PAGE_SIZE) {
- if (end != __pa(addr[i + 1]))
- break;
- i++;
- }
- free_memtype(tmp, end);
- }
- return -EINVAL;
+ if (ret)
+ goto out_free;
+
+ return 0;
+
+out_free:
+ for (j = 0; j < i; j++)
+ free_memtype(__pa(addr[j]), __pa(addr[j]) + PAGE_SIZE);
+
+ return ret;
}
EXPORT_SYMBOL(set_memory_array_uc);
int _set_memory_wc(unsigned long addr, int numpages)
{
- return change_page_attr_set(&addr, numpages,
+ int ret;
+ ret = change_page_attr_set(&addr, numpages,
+ __pgprot(_PAGE_CACHE_UC_MINUS), 0);
+
+ if (!ret) {
+ ret = change_page_attr_set(&addr, numpages,
__pgprot(_PAGE_CACHE_WC), 0);
+ }
+ return ret;
}
int set_memory_wc(unsigned long addr, int numpages)
{
+ int ret;
+
if (!pat_enabled)
return set_memory_uc(addr, numpages);
- if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
- _PAGE_CACHE_WC, NULL))
- return -EINVAL;
+ ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
+ _PAGE_CACHE_WC, NULL);
+ if (ret)
+ goto out_err;
+
+ ret = _set_memory_wc(addr, numpages);
+ if (ret)
+ goto out_free;
+
+ return 0;
- return _set_memory_wc(addr, numpages);
+out_free:
+ free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
+out_err:
+ return ret;
}
EXPORT_SYMBOL(set_memory_wc);
@@ -1007,29 +1030,31 @@ int _set_memory_wb(unsigned long addr, int numpages)
int set_memory_wb(unsigned long addr, int numpages)
{
- free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
+ int ret;
+
+ ret = _set_memory_wb(addr, numpages);
+ if (ret)
+ return ret;
- return _set_memory_wb(addr, numpages);
+ free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
+ return 0;
}
EXPORT_SYMBOL(set_memory_wb);
int set_memory_array_wb(unsigned long *addr, int addrinarray)
{
int i;
+ int ret;
- for (i = 0; i < addrinarray; i++) {
- unsigned long start = __pa(addr[i]);
- unsigned long end;
-
- for (end = start + PAGE_SIZE; i < addrinarray - 1; end += PAGE_SIZE) {
- if (end != __pa(addr[i + 1]))
- break;
- i++;
- }
- free_memtype(start, end);
- }
- return change_page_attr_clear(addr, addrinarray,
+ ret = change_page_attr_clear(addr, addrinarray,
__pgprot(_PAGE_CACHE_MASK), 1);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < addrinarray; i++)
+ free_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE);
+
+ return 0;
}
EXPORT_SYMBOL(set_memory_array_wb);
@@ -1122,6 +1147,8 @@ int set_pages_array_wb(struct page **pages, int addrinarray)
retval = cpa_clear_pages_array(pages, addrinarray,
__pgprot(_PAGE_CACHE_MASK));
+ if (retval)
+ return retval;
for (i = 0; i < addrinarray; i++) {
start = (unsigned long)page_address(pages[i]);
@@ -1129,7 +1156,7 @@ int set_pages_array_wb(struct page **pages, int addrinarray)
free_memtype(start, end);
}
- return retval;
+ return 0;
}
EXPORT_SYMBOL(set_pages_array_wb);
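
After this rework, set_memory_uc() and set_memory_wc() reserve the memtype
themselves and release it again if the attribute change fails, while
set_memory_wb() frees the memtype only after a successful attribute
change. Callers therefore just pair the calls; a hedged sketch (buf and
nr_pages are made-up caller state):

	unsigned long addr = (unsigned long)buf;	/* page-aligned buffer */
	int err = set_memory_wc(addr, nr_pages);	/* reserve + set WC */

	if (err)
		return err;	/* helper already cleaned up its reservation */

	/* ... use the buffer through write-combining mappings ... */

	set_memory_wb(addr, nr_pages);	/* restore WB, then free memtype */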
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 640339ee4fb2..e6718bb28065 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -31,7 +31,7 @@
#ifdef CONFIG_X86_PAT
int __read_mostly pat_enabled = 1;
-void __cpuinit pat_disable(const char *reason)
+static inline void pat_disable(const char *reason)
{
pat_enabled = 0;
printk(KERN_INFO "%s\n", reason);
@@ -182,10 +182,10 @@ static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
u8 mtrr_type;
mtrr_type = mtrr_type_lookup(start, end);
- if (mtrr_type == MTRR_TYPE_UNCACHABLE)
- return _PAGE_CACHE_UC;
- if (mtrr_type == MTRR_TYPE_WRCOMB)
- return _PAGE_CACHE_WC;
+ if (mtrr_type != MTRR_TYPE_WRBACK)
+ return _PAGE_CACHE_UC_MINUS;
+
+ return _PAGE_CACHE_WB;
}
return req_type;
@@ -352,23 +352,13 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
return 0;
}
- if (req_type == -1) {
- /*
- * Call mtrr_lookup to get the type hint. This is an
- * optimization for /dev/mem mmap'ers into WB memory (BIOS
- * tools and ACPI tools). Use WB request for WB memory and use
- * UC_MINUS otherwise.
- */
- u8 mtrr_type = mtrr_type_lookup(start, end);
-
- if (mtrr_type == MTRR_TYPE_WRBACK)
- actual_type = _PAGE_CACHE_WB;
- else
- actual_type = _PAGE_CACHE_UC_MINUS;
- } else {
- actual_type = pat_x_mtrr_type(start, end,
- req_type & _PAGE_CACHE_MASK);
- }
+ /*
+ * Call mtrr_lookup to get the type hint. This is an
+ * optimization for /dev/mem mmap'ers into WB memory (BIOS
+ * tools and ACPI tools). Use WB request for WB memory and use
+ * UC_MINUS otherwise.
+ */
+ actual_type = pat_x_mtrr_type(start, end, req_type & _PAGE_CACHE_MASK);
if (new_type)
*new_type = actual_type;
@@ -546,9 +536,7 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t *vma_prot)
{
- u64 offset = ((u64) pfn) << PAGE_SHIFT;
- unsigned long flags = -1;
- int retval;
+ unsigned long flags = _PAGE_CACHE_WB;
if (!range_is_allowed(pfn, size))
return 0;
@@ -576,64 +564,11 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
}
#endif
- /*
- * With O_SYNC, we can only take UC_MINUS mapping. Fail if we cannot.
- *
- * Without O_SYNC, we want to get
- * - WB for WB-able memory and no other conflicting mappings
- * - UC_MINUS for non-WB-able memory with no other conflicting mappings
- * - Inherit from confliting mappings otherwise
- */
- if (flags != -1) {
- retval = reserve_memtype(offset, offset + size, flags, NULL);
- } else {
- retval = reserve_memtype(offset, offset + size, -1, &flags);
- }
-
- if (retval < 0)
- return 0;
-
- if (((pfn < max_low_pfn_mapped) ||
- (pfn >= (1UL<<(32 - PAGE_SHIFT)) && pfn < max_pfn_mapped)) &&
- ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
- free_memtype(offset, offset + size);
- printk(KERN_INFO
- "%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
- current->comm, current->pid,
- cattr_name(flags),
- offset, (unsigned long long)(offset + size));
- return 0;
- }
-
*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
flags);
return 1;
}
-void map_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
-{
- unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);
- u64 addr = (u64)pfn << PAGE_SHIFT;
- unsigned long flags;
-
- reserve_memtype(addr, addr + size, want_flags, &flags);
- if (flags != want_flags) {
- printk(KERN_INFO
- "%s:%d /dev/mem expected mapping type %s for %Lx-%Lx, got %s\n",
- current->comm, current->pid,
- cattr_name(want_flags),
- addr, (unsigned long long)(addr + size),
- cattr_name(flags));
- }
-}
-
-void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
-{
- u64 addr = (u64)pfn << PAGE_SHIFT;
-
- free_memtype(addr, addr + size);
-}
-
/*
* Change the memory type for the physial address range in kernel identity
* mapping space if that range is a part of identity map.
@@ -671,8 +606,8 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
{
int is_ram = 0;
int ret;
- unsigned long flags;
unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
+ unsigned long flags = want_flags;
is_ram = pat_pagerange_is_ram(paddr, paddr + size);
@@ -734,29 +669,28 @@ static void free_pfn_range(u64 paddr, unsigned long size)
*
* If the vma has a linear pfn mapping for the entire range, we get the prot
* from pte and reserve the entire vma range with single reserve_pfn_range call.
- * Otherwise, we reserve the entire vma range, my ging through the PTEs page
- * by page to get physical address and protection.
*/
int track_pfn_vma_copy(struct vm_area_struct *vma)
{
- int retval = 0;
- unsigned long i, j;
resource_size_t paddr;
unsigned long prot;
- unsigned long vma_start = vma->vm_start;
- unsigned long vma_end = vma->vm_end;
- unsigned long vma_size = vma_end - vma_start;
+ unsigned long vma_size = vma->vm_end - vma->vm_start;
pgprot_t pgprot;
if (!pat_enabled)
return 0;
+ /*
+ * For now, only handle remap_pfn_range() vmas where
+ * is_linear_pfn_mapping() == TRUE. Handling of
+ * vm_insert_pfn() is TBD.
+ */
if (is_linear_pfn_mapping(vma)) {
/*
* reserve the whole chunk covered by vma. We need the
* starting address and protection from pte.
*/
- if (follow_phys(vma, vma_start, 0, &prot, &paddr)) {
+ if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
WARN_ON_ONCE(1);
return -EINVAL;
}
@@ -764,28 +698,7 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
}
- /* reserve entire vma page by page, using pfn and prot from pte */
- for (i = 0; i < vma_size; i += PAGE_SIZE) {
- if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
- continue;
-
- pgprot = __pgprot(prot);
- retval = reserve_pfn_range(paddr, PAGE_SIZE, &pgprot, 1);
- if (retval)
- goto cleanup_ret;
- }
return 0;
-
-cleanup_ret:
- /* Reserve error: Cleanup partial reservation and return error */
- for (j = 0; j < i; j += PAGE_SIZE) {
- if (follow_phys(vma, vma_start + j, 0, &prot, &paddr))
- continue;
-
- free_pfn_range(paddr, PAGE_SIZE);
- }
-
- return retval;
}
/*
@@ -795,50 +708,28 @@ cleanup_ret:
* prot is passed in as a parameter for the new mapping. If the vma has a
* linear pfn mapping for the entire range reserve the entire vma range with
* single reserve_pfn_range call.
- * Otherwise, we look t the pfn and size and reserve only the specified range
- * page by page.
- *
- * Note that this function can be called with caller trying to map only a
- * subrange/page inside the vma.
*/
int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
unsigned long pfn, unsigned long size)
{
- int retval = 0;
- unsigned long i, j;
- resource_size_t base_paddr;
resource_size_t paddr;
- unsigned long vma_start = vma->vm_start;
- unsigned long vma_end = vma->vm_end;
- unsigned long vma_size = vma_end - vma_start;
+ unsigned long vma_size = vma->vm_end - vma->vm_start;
if (!pat_enabled)
return 0;
+ /*
+ * For now, only handle remap_pfn_range() vmas where
+ * is_linear_pfn_mapping() == TRUE. Handling of
+ * vm_insert_pfn() is TBD.
+ */
if (is_linear_pfn_mapping(vma)) {
/* reserve the whole chunk starting from vm_pgoff */
paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
return reserve_pfn_range(paddr, vma_size, prot, 0);
}
- /* reserve page by page using pfn and size */
- base_paddr = (resource_size_t)pfn << PAGE_SHIFT;
- for (i = 0; i < size; i += PAGE_SIZE) {
- paddr = base_paddr + i;
- retval = reserve_pfn_range(paddr, PAGE_SIZE, prot, 0);
- if (retval)
- goto cleanup_ret;
- }
return 0;
-
-cleanup_ret:
- /* Reserve error: Cleanup partial reservation and return error */
- for (j = 0; j < i; j += PAGE_SIZE) {
- paddr = base_paddr + j;
- free_pfn_range(paddr, PAGE_SIZE);
- }
-
- return retval;
}
/*
@@ -849,39 +740,23 @@ cleanup_ret:
void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
unsigned long size)
{
- unsigned long i;
resource_size_t paddr;
- unsigned long prot;
- unsigned long vma_start = vma->vm_start;
- unsigned long vma_end = vma->vm_end;
- unsigned long vma_size = vma_end - vma_start;
+ unsigned long vma_size = vma->vm_end - vma->vm_start;
if (!pat_enabled)
return;
+ /*
+ * For now, only handle remap_pfn_range() vmas where
+ * is_linear_pfn_mapping() == TRUE. Handling of
+ * vm_insert_pfn() is TBD.
+ */
if (is_linear_pfn_mapping(vma)) {
/* free the whole chunk starting from vm_pgoff */
paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
free_pfn_range(paddr, vma_size);
return;
}
-
- if (size != 0 && size != vma_size) {
- /* free page by page, using pfn and size */
- paddr = (resource_size_t)pfn << PAGE_SHIFT;
- for (i = 0; i < size; i += PAGE_SIZE) {
- paddr = paddr + i;
- free_pfn_range(paddr, PAGE_SIZE);
- }
- } else {
- /* free entire vma, page by page, using the pfn from pte */
- for (i = 0; i < vma_size; i += PAGE_SIZE) {
- if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
- continue;
-
- free_pfn_range(paddr, PAGE_SIZE);
- }
- }
}
pgprot_t pgprot_writecombine(pgprot_t prot)
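
The track_pfn_vma_copy()/track_pfn_vma_new()/untrack_pfn_vma() paths above
now handle only vmas where is_linear_pfn_mapping() is true, i.e. vmas set
up by a single remap_pfn_range() over the whole range. A sketch of a
driver mmap handler that creates exactly that shape (illustrative;
mydrv_mmap and dev_phys are made-up names):

	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
	{
		unsigned long size = vma->vm_end - vma->vm_start;

		/* One linear remap across the entire vma; vm_pgoff then
		 * encodes the first pfn, which the track/untrack paths
		 * above rely on. */
		return remap_pfn_range(vma, vma->vm_start,
				       dev_phys >> PAGE_SHIFT,
				       size, vma->vm_page_prot);
	}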
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 5b7c7c8464fe..7aa03a5389f5 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -345,7 +345,8 @@ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
fixmaps_set++;
}
-void native_set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
+void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
+ pgprot_t flags)
{
__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}
diff --git a/arch/x86/mm/srat_32.c b/arch/x86/mm/srat_32.c
index 16ae70fc57e7..29a0e37114f8 100644
--- a/arch/x86/mm/srat_32.c
+++ b/arch/x86/mm/srat_32.c
@@ -216,7 +216,7 @@ int __init get_memcfg_from_srat(void)
if (num_memory_chunks == 0) {
printk(KERN_WARNING
- "could not finy any ACPI SRAT memory areas.\n");
+ "could not find any ACPI SRAT memory areas.\n");
goto out_fail;
}
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index c7d272b8574c..01765955baaf 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -28,6 +28,7 @@ int acpi_numa __initdata;
static struct acpi_table_slit *acpi_slit;
static nodemask_t nodes_parsed __initdata;
+static nodemask_t cpu_nodes_parsed __initdata;
static struct bootnode nodes[MAX_NUMNODES] __initdata;
static struct bootnode nodes_add[MAX_NUMNODES];
static int found_add_area __initdata;
@@ -141,6 +142,7 @@ acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
apic_id = pa->apic_id;
apicid_to_node[apic_id] = node;
+ node_set(node, cpu_nodes_parsed);
acpi_numa = 1;
printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n",
pxm, apic_id, node);
@@ -174,6 +176,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
else
apic_id = pa->apic_id;
apicid_to_node[apic_id] = node;
+ node_set(node, cpu_nodes_parsed);
acpi_numa = 1;
printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n",
pxm, apic_id, node);
@@ -358,6 +361,7 @@ static void __init unparse_node(int node)
{
int i;
node_clear(node, nodes_parsed);
+ node_clear(node, cpu_nodes_parsed);
for (i = 0; i < MAX_LOCAL_APIC; i++) {
if (apicid_to_node[i] == node)
apicid_to_node[i] = NUMA_NO_NODE;
@@ -402,7 +406,8 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
return -1;
}
- node_possible_map = nodes_parsed;
+ /* Account for nodes with cpus and no memory */
+ nodes_or(node_possible_map, nodes_parsed, cpu_nodes_parsed);
/* Finally register nodes */
for_each_node_mask(i, node_possible_map)
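
The nodes_or() above makes CPU-only (memoryless) nodes possible as well.
A sketch of the nodemask arithmetic involved (illustrative node numbers):

	nodemask_t mem = NODE_MASK_NONE;	/* nodes with SRAT memory areas */
	nodemask_t cpu = NODE_MASK_NONE;	/* nodes seen via CPU affinity */
	nodemask_t possible;

	node_set(0, mem);			/* node 0: memory + CPUs */
	node_set(1, cpu);			/* node 1: CPUs only */
	nodes_or(possible, mem, cpu);		/* both 0 and 1 become possible */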