author     David S. Miller <davem@davemloft.net>  2008-05-15 11:34:44 +0400
committer  David S. Miller <davem@davemloft.net>  2008-05-15 11:34:44 +0400
commit     63fe46da9c380b3f2bbdf3765044649517cc717c (patch)
tree       9478c1aca1d692b408955aea20c9cd9a37e589c0 /mm
parent     99dd1a2b8347ac2ae802300b7862f6f7bcf17139 (diff)
parent     066b2118976e6e7cc50eed39e2747c75343a23c4 (diff)
download   linux-63fe46da9c380b3f2bbdf3765044649517cc717c.tar.xz
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:

	drivers/net/wireless/iwlwifi/iwl-4965-rs.c
	drivers/net/wireless/rt2x00/rt61pci.c
Diffstat (limited to 'mm')
-rw-r--r--   mm/filemap.c        |  7
-rw-r--r--   mm/memory.c         | 26
-rw-r--r--   mm/memory_hotplug.c | 86
-rw-r--r--   mm/mprotect.c       | 11
-rw-r--r--   mm/page_alloc.c     |  3
-rw-r--r--   mm/pdflush.c        |  4
-rw-r--r--   mm/slub.c           |  2
-rw-r--r--   mm/vmstat.c         |  2
8 files changed, 94 insertions, 47 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 239d36163bbe..1e6a7d34874f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1461,6 +1461,11 @@ page_not_uptodate:
 	 */
 	ClearPageError(page);
 	error = mapping->a_ops->readpage(file, page);
+	if (!error) {
+		wait_on_page_locked(page);
+		if (!PageUptodate(page))
+			error = -EIO;
+	}
 	page_cache_release(page);
 
 	if (!error || error == AOP_TRUNCATED_PAGE)
@@ -1655,7 +1660,7 @@ int should_remove_suid(struct dentry *dentry)
 }
 EXPORT_SYMBOL(should_remove_suid);
 
-int __remove_suid(struct dentry *dentry, int kill)
+static int __remove_suid(struct dentry *dentry, int kill)
 {
 	struct iattr newattrs;
 
diff --git a/mm/memory.c b/mm/memory.c
index bbab1e37055e..fb5608a120ed 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -311,6 +311,21 @@ int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
 	if (!new)
 		return -ENOMEM;
 
+	/*
+	 * Ensure all pte setup (eg. pte page lock and page clearing) are
+	 * visible before the pte is made visible to other CPUs by being
+	 * put into page tables.
+	 *
+	 * The other side of the story is the pointer chasing in the page
+	 * table walking code (when walking the page table without locking;
+	 * ie. most of the time). Fortunately, these data accesses consist
+	 * of a chain of data-dependent loads, meaning most CPUs (alpha
+	 * being the notable exception) will already guarantee loads are
+	 * seen in-order. See the alpha page table accessors for the
+	 * smp_read_barrier_depends() barriers in page table walking code.
+	 */
+	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
+
 	spin_lock(&mm->page_table_lock);
 	if (!pmd_present(*pmd)) {	/* Has another populated it ? */
 		mm->nr_ptes++;
@@ -329,6 +344,8 @@ int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
 	if (!new)
 		return -ENOMEM;
 
+	smp_wmb(); /* See comment in __pte_alloc */
+
 	spin_lock(&init_mm.page_table_lock);
 	if (!pmd_present(*pmd)) {	/* Has another populated it ? */
 		pmd_populate_kernel(&init_mm, pmd, new);
@@ -969,7 +986,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
 		goto no_page_table;
 
 	pmd = pmd_offset(pud, address);
-	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
+	if (pmd_none(*pmd))
 		goto no_page_table;
 
 	if (pmd_huge(*pmd)) {
@@ -978,6 +995,9 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
 		goto out;
 	}
 
+	if (unlikely(pmd_bad(*pmd)))
+		goto no_page_table;
+
 	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
 	if (!ptep)
 		goto out;
@@ -2616,6 +2636,8 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
 	if (!new)
 		return -ENOMEM;
 
+	smp_wmb(); /* See comment in __pte_alloc */
+
 	spin_lock(&mm->page_table_lock);
 	if (pgd_present(*pgd))		/* Another has populated it */
 		pud_free(mm, new);
@@ -2637,6 +2659,8 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
 	if (!new)
 		return -ENOMEM;
 
+	smp_wmb(); /* See comment in __pte_alloc */
+
 	spin_lock(&mm->page_table_lock);
#ifndef __ARCH_HAS_4LEVEL_HACK
 	if (pud_present(*pud))		/* Another has populated it */
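The long comment added to __pte_alloc() above describes an initialise-then-publish pattern: fully set up the new page-table page, issue smp_wmb(), and only then link the page in, so lockless walkers chasing the pointer can never see uninitialised memory. What follows is a minimal userspace sketch of the same pattern using C11 atomics in place of the kernel barriers; all names (slot, published, writer, reader) are hypothetical, and the release store stands in for smp_wmb() plus the populate — this is an illustration, not kernel code.

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	struct node { int value; };

	static struct node slot;			/* the "pte page" being set up */
	static _Atomic(struct node *) published;	/* the "pmd entry" it hangs off */

	static void *writer(void *unused)
	{
		slot.value = 42;	/* initialise the object first... */
		/* ...then publish with release ordering; the fence implied
		 * here plays the role of smp_wmb() in __pte_alloc. */
		atomic_store_explicit(&published, &slot, memory_order_release);
		return NULL;
	}

	static void *reader(void *unused)
	{
		/* A lockless walker: load the pointer, then chase it. The
		 * data-dependent load of p->value is what most CPUs (alpha
		 * excepted) keep ordered without any read barrier. */
		struct node *p = atomic_load_explicit(&published,
						      memory_order_consume);
		if (p)
			printf("walker saw %d\n", p->value);	/* never garbage */
		return NULL;
	}

	int main(void)
	{
		pthread_t w, r;

		pthread_create(&w, NULL, writer, NULL);
		pthread_create(&r, NULL, reader, NULL);
		pthread_join(w, NULL);
		pthread_join(r, NULL);
		return 0;
	}

On the read side the kernel relies on the data dependency alone (plus smp_read_barrier_depends() on alpha); memory_order_consume expresses the same idea, though current compilers promote it to acquire.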
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index b17dca7249f8..833f854eabe5 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -159,21 +159,58 @@ void register_page_bootmem_info_node(struct pglist_data *pgdat)
 }
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
 
+static void grow_zone_span(struct zone *zone, unsigned long start_pfn,
+			   unsigned long end_pfn)
+{
+	unsigned long old_zone_end_pfn;
+
+	zone_span_writelock(zone);
+
+	old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+	if (start_pfn < zone->zone_start_pfn)
+		zone->zone_start_pfn = start_pfn;
+
+	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
+				zone->zone_start_pfn;
+
+	zone_span_writeunlock(zone);
+}
+
+static void grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
+			    unsigned long end_pfn)
+{
+	unsigned long old_pgdat_end_pfn =
+		pgdat->node_start_pfn + pgdat->node_spanned_pages;
+
+	if (start_pfn < pgdat->node_start_pfn)
+		pgdat->node_start_pfn = start_pfn;
+
+	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
+					pgdat->node_start_pfn;
+}
+
 static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
 {
 	struct pglist_data *pgdat = zone->zone_pgdat;
 	int nr_pages = PAGES_PER_SECTION;
 	int nid = pgdat->node_id;
 	int zone_type;
+	unsigned long flags;
 
 	zone_type = zone - pgdat->node_zones;
 	if (!zone->wait_table) {
-		int ret = 0;
+		int ret;
+
 		ret = init_currently_empty_zone(zone, phys_start_pfn,
 						nr_pages, MEMMAP_HOTPLUG);
-		if (ret < 0)
+		if (ret)
 			return ret;
 	}
+	pgdat_resize_lock(zone->zone_pgdat, &flags);
+	grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);
+	grow_pgdat_span(zone->zone_pgdat, phys_start_pfn,
+			phys_start_pfn + nr_pages);
+	pgdat_resize_unlock(zone->zone_pgdat, &flags);
 	memmap_init_zone(nr_pages, nid, zone_type,
 			 phys_start_pfn, MEMMAP_HOTPLUG);
 	return 0;
@@ -299,36 +336,6 @@ int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
 }
 EXPORT_SYMBOL_GPL(__remove_pages);
 
-static void grow_zone_span(struct zone *zone,
-		unsigned long start_pfn, unsigned long end_pfn)
-{
-	unsigned long old_zone_end_pfn;
-
-	zone_span_writelock(zone);
-
-	old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
-	if (start_pfn < zone->zone_start_pfn)
-		zone->zone_start_pfn = start_pfn;
-
-	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
-				zone->zone_start_pfn;
-
-	zone_span_writeunlock(zone);
-}
-
-static void grow_pgdat_span(struct pglist_data *pgdat,
-		unsigned long start_pfn, unsigned long end_pfn)
-{
-	unsigned long old_pgdat_end_pfn =
-		pgdat->node_start_pfn + pgdat->node_spanned_pages;
-
-	if (start_pfn < pgdat->node_start_pfn)
-		pgdat->node_start_pfn = start_pfn;
-
-	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
-					pgdat->node_start_pfn;
-}
-
 void online_page(struct page *page)
 {
 	totalram_pages++;
@@ -367,7 +374,6 @@ static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
 
 int online_pages(unsigned long pfn, unsigned long nr_pages)
 {
-	unsigned long flags;
 	unsigned long onlined_pages = 0;
 	struct zone *zone;
 	int need_zonelists_rebuild = 0;
@@ -395,11 +401,6 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
 	 *   memory_block->state_mutex.
 	 */
 	zone = page_zone(pfn_to_page(pfn));
-	pgdat_resize_lock(zone->zone_pgdat, &flags);
-	grow_zone_span(zone, pfn, pfn + nr_pages);
-	grow_pgdat_span(zone->zone_pgdat, pfn, pfn + nr_pages);
-	pgdat_resize_unlock(zone->zone_pgdat, &flags);
-
 	/*
 	 * If this zone is not populated, then it is not in zonelist.
 	 * This means the page allocator ignores this zone.
@@ -408,8 +409,15 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
 	if (!populated_zone(zone))
 		need_zonelists_rebuild = 1;
 
-	walk_memory_resource(pfn, nr_pages, &onlined_pages,
+	ret = walk_memory_resource(pfn, nr_pages, &onlined_pages,
 		online_pages_range);
+	if (ret) {
+		printk(KERN_DEBUG "online_pages %lx at %lx failed\n",
+			nr_pages, pfn);
+		memory_notify(MEM_CANCEL_ONLINE, &arg);
+		return ret;
+	}
+
 	zone->present_pages += onlined_pages;
 	zone->zone_pgdat->node_present_pages += onlined_pages;
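The __add_zone() hunk above moves grow_zone_span()/grow_pgdat_span(), still under pgdat_resize_lock, in front of memmap_init_zone(), so the zone and node spans already cover a hot-added section by the time its memmap is initialised. Here is a toy model of that ordering invariant, with a pthread mutex standing in for pgdat_resize_lock; all names and types are hypothetical, not kernel code.

	#include <pthread.h>
	#include <stdlib.h>

	struct toy_zone {
		pthread_mutex_t resize_lock;	/* stands in for pgdat_resize_lock */
		unsigned long start, span;	/* zone_start_pfn / spanned_pages  */
	};

	static struct toy_zone zone = {
		.resize_lock = PTHREAD_MUTEX_INITIALIZER,
		.start = 0x1000,
		.span  = 0x100,
	};

	static int in_span(struct toy_zone *z, unsigned long pfn)
	{
		return pfn >= z->start && pfn < z->start + z->span;
	}

	/* Like memmap_init_zone(), this per-page init assumes the span
	 * already covers every page it touches. */
	static void init_pages(struct toy_zone *z, unsigned long pfn,
			       unsigned long n)
	{
		for (unsigned long i = pfn; i < pfn + n; i++)
			if (!in_span(z, i))
				abort();	/* the pre-patch ordering bug */
	}

	static void add_section(struct toy_zone *z, unsigned long pfn,
				unsigned long n)
	{
		unsigned long old_end;

		/* Grow the span first, under the resize lock... */
		pthread_mutex_lock(&z->resize_lock);
		old_end = z->start + z->span;
		if (pfn < z->start)
			z->start = pfn;
		z->span = (old_end > pfn + n ? old_end : pfn + n) - z->start;
		pthread_mutex_unlock(&z->resize_lock);

		/* ...and only then initialise the newly covered pages. */
		init_pages(z, pfn, n);
	}

	int main(void)
	{
		add_section(&zone, 0x1100, 0x80);
		return 0;
	}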
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 4de546899dc1..a5bf31c27375 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -26,6 +26,13 @@
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
 
+#ifndef pgprot_modify
+static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
+{
+	return newprot;
+}
+#endif
+
 static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
 		unsigned long addr, unsigned long end, pgprot_t newprot,
 		int dirty_accountable)
@@ -192,7 +199,9 @@ success:
 	 * held in write mode.
 	 */
 	vma->vm_flags = newflags;
-	vma->vm_page_prot = vm_get_page_prot(newflags);
+	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
+					  vm_get_page_prot(newflags));
+
 	if (vma_wants_writenotify(vma)) {
 		vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
 		dirty_accountable = 1;
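The #ifndef pgprot_modify block added above is the usual arch-override-with-generic-fallback idiom: an architecture that must carry protection bits across a change (on x86, for example, the PAT bits) defines pgprot_modify itself, and every other architecture transparently gets the trivial default in which the new protection simply wins. A generic sketch of the idiom follows, with hypothetical names (arch_combine_prot, ARCH_KEEP_MASK), not the kernel's types.

	#include <stdio.h>

	/* An architecture with bits to preserve provides its own version
	 * in its headers, e.g. (made-up mask):
	 *
	 *	#define arch_combine_prot(old, new) \
	 *		(((old) & ARCH_KEEP_MASK) | (new))
	 *
	 * Generic code defines the fallback only if no override exists.
	 */
	#ifndef arch_combine_prot
	static inline unsigned long arch_combine_prot(unsigned long oldprot,
						      unsigned long newprot)
	{
		return newprot;	/* default: the new protection wins outright */
	}
	#endif

	int main(void)
	{
		/* Without an arch override the old bits are simply dropped. */
		printf("%#lx\n", arch_combine_prot(0x107UL, 0x5UL)); /* 0x5 */
		return 0;
	}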
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index bdd5c432c426..63835579323a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2862,8 +2862,6 @@ __meminit int init_currently_empty_zone(struct zone *zone,
 
 	zone->zone_start_pfn = zone_start_pfn;
 
-	memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn);
-
 	zone_init_free_lists(zone);
 
 	return 0;
@@ -3433,6 +3431,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 		ret = init_currently_empty_zone(zone, zone_start_pfn,
 						size, MEMMAP_EARLY);
 		BUG_ON(ret);
+		memmap_init(size, nid, j, zone_start_pfn);
 		zone_start_pfn += size;
 	}
 }
diff --git a/mm/pdflush.c b/mm/pdflush.c
index 1c96cfc9e040..9d834aa4b979 100644
--- a/mm/pdflush.c
+++ b/mm/pdflush.c
@@ -207,7 +207,6 @@ int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0)
 
 	spin_lock_irqsave(&pdflush_lock, flags);
 	if (list_empty(&pdflush_list)) {
-		spin_unlock_irqrestore(&pdflush_lock, flags);
 		ret = -1;
 	} else {
 		struct pdflush_work *pdf;
@@ -219,8 +218,9 @@ int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0)
 		pdf->fn = fn;
 		pdf->arg0 = arg0;
 		wake_up_process(pdf->who);
-		spin_unlock_irqrestore(&pdflush_lock, flags);
 	}
+	spin_unlock_irqrestore(&pdflush_lock, flags);
+
 	return ret;
 }
 
diff --git a/mm/slub.c b/mm/slub.c
index d379b782fc83..a505a828ef41 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3762,7 +3762,7 @@ static int any_slab_objects(struct kmem_cache *s)
 		if (!n)
 			continue;
 
-		if (atomic_read(&n->total_objects))
+		if (atomic_long_read(&n->total_objects))
 			return 1;
 	}
 	return 0;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 1a32130b958c..db9eabb2c5b3 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -41,7 +41,9 @@ static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
  */
 void all_vm_events(unsigned long *ret)
 {
+	get_online_cpus();
 	sum_vm_events(ret, &cpu_online_map);
+	put_online_cpus();
 }
 EXPORT_SYMBOL_GPL(all_vm_events);
 
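The pdflush.c hunk above folds two per-branch spin_unlock_irqrestore() calls into a single unlock after the if/else, the classic single-exit locking shape that is harder to break when branches are added later. Below is a minimal pthread sketch of the same refactor; the names (dispatch_work, nr_idle, pending_fn) are hypothetical stand-ins for pdflush_operation() and its list, not the kernel code.

	#include <pthread.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static int nr_idle = 1;				/* stands in for pdflush_list */
	static void (*pending_fn)(unsigned long);	/* work handed to a worker   */
	static unsigned long pending_arg;

	static int dispatch_work(void (*fn)(unsigned long), unsigned long arg0)
	{
		int ret = 0;

		pthread_mutex_lock(&lock);
		if (nr_idle == 0) {
			ret = -1;	/* nobody idle: caller falls back */
		} else {
			nr_idle--;	/* claim a worker, queue the call */
			pending_fn = fn;
			pending_arg = arg0;
		}
		pthread_mutex_unlock(&lock);	/* one unlock serves both branches */

		return ret;
	}

	static void flush_fn(unsigned long arg) { (void)arg; }

	int main(void)
	{
		return dispatch_work(flush_fn, 0) ? 1 : 0;
	}

The vmstat.c hunk is the same defensive spirit applied to CPU hotplug: bracketing the per-CPU walk with get_online_cpus()/put_online_cpus() pins the online map so a CPU cannot disappear mid-sum.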