path: root/mm
author     Jeff Garzik <jgarzik@pobox.com>   2005-11-11 13:51:24 +0300
committer  Jeff Garzik <jgarzik@pobox.com>   2005-11-11 13:51:24 +0300
commit     3b621ee5df437d3f332a635ab6421aaa61a7dc2b (patch)
tree       c4a5236cee8eb7418770802313d36a55f1cc0b1e /mm
parent     7211bb9b64f17b23834d91fc3d0c1d78671ee9a8 (diff)
parent     5e04e7fe774794b837e1d3897e6b96ae2d06679a (diff)
download   linux-3b621ee5df437d3f332a635ab6421aaa61a7dc2b.tar.xz
Merge branch 'master'
Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig           |  2
-rw-r--r--  mm/hugetlb.c         |  4
-rw-r--r--  mm/mmap.c            |  4
-rw-r--r--  mm/nommu.c           |  4
-rw-r--r--  mm/page-writeback.c  |  1
-rw-r--r--  mm/page_alloc.c      |  3
-rw-r--r--  mm/readahead.c       | 31
-rw-r--r--  mm/slab.c            | 74
-rw-r--r--  mm/swap.c            |  1
-rw-r--r--  mm/swap_state.c      |  1
-rw-r--r--  mm/swapfile.c        |  2
-rw-r--r--  mm/vmalloc.c         |  4
12 files changed, 68 insertions(+), 63 deletions(-)
diff --git a/mm/Kconfig b/mm/Kconfig
index 1a4473fcb2ca..ae9ce6b73e8a 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -126,9 +126,11 @@ comment "Memory hotplug is currently incompatible with Software Suspend"
# Default to 4 for wider testing, though 8 might be more appropriate.
# ARM's adjust_pte (unused if VIPT) depends on mm-wide page_table_lock.
# PA-RISC's debug spinlock_t is too large for the 32-bit struct page.
+# ARM26 and SPARC32 and PPC64 may use one page for multiple page tables.
#
config SPLIT_PTLOCK_CPUS
int
default "4096" if ARM && !CPU_CACHE_VIPT
default "4096" if PARISC && DEBUG_SPINLOCK && !64BIT
+ default "4096" if ARM26 || SPARC32 || PPC64
default "4"
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index c9b43360fd33..728e9bda12ea 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -103,6 +103,9 @@ static int __init hugetlb_init(void)
unsigned long i;
struct page *page;
+ if (HPAGE_SHIFT == 0)
+ return 0;
+
for (i = 0; i < MAX_NUMNODES; ++i)
INIT_LIST_HEAD(&hugepage_freelists[i]);
@@ -234,7 +237,6 @@ unsigned long hugetlb_total_pages(void)
{
return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}
-EXPORT_SYMBOL(hugetlb_total_pages);
/*
* We cannot handle pagefaults against hugetlb pages at all. They cause
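
Since hugetlb_total_pages() stays in-tree (only its export goes away), here is a quick worked example of the unit conversion it performs; the 2 MB / 4 KB sizes and the pool size below are illustrative, the real values are per-architecture and per-boot.

/* Illustrative arithmetic only: hugetlb_total_pages() reports the huge page
 * pool in units of base pages, i.e. nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE). */
unsigned long hpage_size = 2UL << 20;           /* e.g. 2 MB huge page     */
unsigned long base_size  = 4UL << 10;           /* e.g. 4 KB base page     */
unsigned long nr_huge    = 16;                  /* hypothetical pool size  */
unsigned long total      = nr_huge * (hpage_size / base_size);   /* = 8192 */
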
diff --git a/mm/mmap.c b/mm/mmap.c
index 320dda1778c3..6c997b159600 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -155,10 +155,6 @@ int __vm_enough_memory(long pages, int cap_sys_admin)
return -ENOMEM;
}
-EXPORT_SYMBOL(sysctl_overcommit_memory);
-EXPORT_SYMBOL(sysctl_overcommit_ratio);
-EXPORT_SYMBOL(sysctl_max_map_count);
-EXPORT_SYMBOL(vm_committed_space);
EXPORT_SYMBOL(__vm_enough_memory);
/*
diff --git a/mm/nommu.c b/mm/nommu.c
index d1e076a487cb..6deb6ab3d6ad 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -44,10 +44,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
int heap_stack_gap = 0;
EXPORT_SYMBOL(mem_map);
-EXPORT_SYMBOL(sysctl_max_map_count);
-EXPORT_SYMBOL(sysctl_overcommit_memory);
-EXPORT_SYMBOL(sysctl_overcommit_ratio);
-EXPORT_SYMBOL(vm_committed_space);
EXPORT_SYMBOL(__vm_enough_memory);
/* list of shareable VMAs */
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 0166ea15c9ee..74138c9a22b9 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -750,7 +750,6 @@ int clear_page_dirty_for_io(struct page *page)
}
return TestClearPageDirty(page);
}
-EXPORT_SYMBOL(clear_page_dirty_for_io);
int test_clear_page_writeback(struct page *page)
{
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2dbdd98426fd..987225bdd661 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -64,7 +64,6 @@ long nr_swap_pages;
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = { 256, 32 };
EXPORT_SYMBOL(totalram_pages);
-EXPORT_SYMBOL(nr_swap_pages);
/*
* Used by page_zone() to look up the address of the struct zone whose
@@ -1331,7 +1330,7 @@ void show_free_areas(void)
} else
printk("\n");
- for_each_cpu(cpu) {
+ for_each_online_cpu(cpu) {
struct per_cpu_pageset *pageset;
pageset = zone_pcp(zone, cpu);
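
The for_each_cpu() to for_each_online_cpu() switch narrows the walk from every possible CPU to those actually online, so show_free_areas() no longer reads per-cpu pagesets of processors that are not running. A rough sketch of the pattern is below; `zone' is assumed to come from the surrounding loop and the printk body is elided.

/* Sketch: iterate online CPUs only.  for_each_cpu() of this era walked
 * cpu_possible_map; for_each_online_cpu() restricts itself to cpu_online_map. */
int cpu;

for_each_online_cpu(cpu) {
        struct per_cpu_pageset *pageset = zone_pcp(zone, cpu);
        /* ... print this CPU's hot/cold pageset counters ... */
        (void)pageset;
}
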
diff --git a/mm/readahead.c b/mm/readahead.c
index d0b50034e245..72e7adbb87c7 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -254,7 +254,7 @@ out:
*/
static int
__do_page_cache_readahead(struct address_space *mapping, struct file *filp,
- unsigned long offset, unsigned long nr_to_read)
+ pgoff_t offset, unsigned long nr_to_read)
{
struct inode *inode = mapping->host;
struct page *page;
@@ -274,7 +274,7 @@ __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
*/
read_lock_irq(&mapping->tree_lock);
for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
- unsigned long page_offset = offset + page_idx;
+ pgoff_t page_offset = offset + page_idx;
if (page_offset > end_index)
break;
@@ -311,7 +311,7 @@ out:
* memory at once.
*/
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
- unsigned long offset, unsigned long nr_to_read)
+ pgoff_t offset, unsigned long nr_to_read)
{
int ret = 0;
@@ -368,7 +368,7 @@ static inline int check_ra_success(struct file_ra_state *ra,
* request queues.
*/
int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
- unsigned long offset, unsigned long nr_to_read)
+ pgoff_t offset, unsigned long nr_to_read)
{
if (bdi_read_congested(mapping->backing_dev_info))
return -1;
@@ -385,7 +385,7 @@ int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
*/
static int
blockable_page_cache_readahead(struct address_space *mapping, struct file *filp,
- unsigned long offset, unsigned long nr_to_read,
+ pgoff_t offset, unsigned long nr_to_read,
struct file_ra_state *ra, int block)
{
int actual;
@@ -430,14 +430,27 @@ static int make_ahead_window(struct address_space *mapping, struct file *filp,
return ret;
}
-/*
- * page_cache_readahead is the main function. If performs the adaptive
+/**
+ * page_cache_readahead - generic adaptive readahead
+ * @mapping: address_space which holds the pagecache and I/O vectors
+ * @ra: file_ra_state which holds the readahead state
+ * @filp: passed on to ->readpage() and ->readpages()
+ * @offset: start offset into @mapping, in PAGE_CACHE_SIZE units
+ * @req_size: hint: total size of the read which the caller is performing in
+ * PAGE_CACHE_SIZE units
+ *
+ * page_cache_readahead() is the main function. It performs the adaptive
* readahead window size management and submits the readahead I/O.
+ *
+ * Note that @filp is purely used for passing on to the ->readpage[s]()
+ * handler: it may refer to a different file from @mapping (so we may not use
+ * @filp->f_mapping or @filp->f_dentry->d_inode here).
+ * Also, @ra may not be equal to &@filp->f_ra.
+ *
*/
unsigned long
page_cache_readahead(struct address_space *mapping, struct file_ra_state *ra,
- struct file *filp, unsigned long offset,
- unsigned long req_size)
+ struct file *filp, pgoff_t offset, unsigned long req_size)
{
unsigned long max, newsize;
int sequential;
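
Switching these parameters from unsigned long to pgoff_t documents that they are page-cache indices, not byte offsets. A hedged sketch of the conversion a caller does before handing work to these routines; `mapping', `filp', the byte position `pos' and request length `bytes' are assumed to come from the caller's context.

/* Sketch only: translate a byte range into the page-cache units that
 * page_cache_readahead() and friends now take as pgoff_t.  In this era
 * PAGE_CACHE_SHIFT/PAGE_CACHE_SIZE equal PAGE_SHIFT/PAGE_SIZE. */
pgoff_t       index     = pos >> PAGE_CACHE_SHIFT;
pgoff_t       end_index = (pos + bytes - 1) >> PAGE_CACHE_SHIFT;
unsigned long nr_pages  = end_index - index + 1;

page_cache_readahead(mapping, &filp->f_ra, filp, index, nr_pages);
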
diff --git a/mm/slab.c b/mm/slab.c
index 22bfb0b2ac8b..e291f5e1afbb 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -368,7 +368,7 @@ static inline void kmem_list3_init(struct kmem_list3 *parent)
* manages a cache.
*/
-struct kmem_cache_s {
+struct kmem_cache {
/* 1) per-cpu data, touched during every alloc/free */
struct array_cache *array[NR_CPUS];
unsigned int batchcount;
@@ -1502,6 +1502,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
{
size_t left_over, slab_size, ralign;
kmem_cache_t *cachep = NULL;
+ struct list_head *p;
/*
* Sanity checks... these are all serious usage bugs.
@@ -1516,6 +1517,35 @@ kmem_cache_create (const char *name, size_t size, size_t align,
BUG();
}
+ down(&cache_chain_sem);
+
+ list_for_each(p, &cache_chain) {
+ kmem_cache_t *pc = list_entry(p, kmem_cache_t, next);
+ mm_segment_t old_fs = get_fs();
+ char tmp;
+ int res;
+
+ /*
+ * This happens when the module gets unloaded and doesn't
+ * destroy its slab cache and no-one else reuses the vmalloc
+ * area of the module. Print a warning.
+ */
+ set_fs(KERNEL_DS);
+ res = __get_user(tmp, pc->name);
+ set_fs(old_fs);
+ if (res) {
+ printk("SLAB: cache with size %d has lost its name\n",
+ pc->objsize);
+ continue;
+ }
+
+ if (!strcmp(pc->name,name)) {
+ printk("kmem_cache_create: duplicate cache %s\n", name);
+ dump_stack();
+ goto oops;
+ }
+ }
+
#if DEBUG
WARN_ON(strchr(name, ' ')); /* It confuses parsers */
if ((flags & SLAB_DEBUG_INITIAL) && !ctor) {
@@ -1592,7 +1622,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
/* Get cache's description obj. */
cachep = (kmem_cache_t *) kmem_cache_alloc(&cache_cache, SLAB_KERNEL);
if (!cachep)
- goto opps;
+ goto oops;
memset(cachep, 0, sizeof(kmem_cache_t));
#if DEBUG
@@ -1686,7 +1716,7 @@ next:
printk("kmem_cache_create: couldn't create cache %s.\n", name);
kmem_cache_free(&cache_cache, cachep);
cachep = NULL;
- goto opps;
+ goto oops;
}
slab_size = ALIGN(cachep->num*sizeof(kmem_bufctl_t)
+ sizeof(struct slab), align);
@@ -1781,43 +1811,14 @@ next:
cachep->limit = BOOT_CPUCACHE_ENTRIES;
}
- /* Need the semaphore to access the chain. */
- down(&cache_chain_sem);
- {
- struct list_head *p;
- mm_segment_t old_fs;
-
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- list_for_each(p, &cache_chain) {
- kmem_cache_t *pc = list_entry(p, kmem_cache_t, next);
- char tmp;
- /* This happens when the module gets unloaded and doesn't
- destroy its slab cache and noone else reuses the vmalloc
- area of the module. Print a warning. */
- if (__get_user(tmp,pc->name)) {
- printk("SLAB: cache with size %d has lost its name\n",
- pc->objsize);
- continue;
- }
- if (!strcmp(pc->name,name)) {
- printk("kmem_cache_create: duplicate cache %s\n",name);
- up(&cache_chain_sem);
- unlock_cpu_hotplug();
- BUG();
- }
- }
- set_fs(old_fs);
- }
-
/* cache setup completed, link it into the list */
list_add(&cachep->next, &cache_chain);
- up(&cache_chain_sem);
unlock_cpu_hotplug();
-opps:
+oops:
if (!cachep && (flags & SLAB_PANIC))
panic("kmem_cache_create(): failed to create slab `%s'\n",
name);
+ up(&cache_chain_sem);
return cachep;
}
EXPORT_SYMBOL(kmem_cache_create);
@@ -3262,6 +3263,7 @@ static void drain_array_locked(kmem_cache_t *cachep,
/**
* cache_reap - Reclaim memory from caches.
+ * @unused: unused parameter
*
* Called from workqueue/eventd every few seconds.
* Purpose:
@@ -3278,7 +3280,7 @@ static void cache_reap(void *unused)
if (down_trylock(&cache_chain_sem)) {
/* Give up. Setup the next iteration. */
- schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC + smp_processor_id());
+ schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC);
return;
}
@@ -3347,7 +3349,7 @@ next:
up(&cache_chain_sem);
drain_remote_pages();
/* Setup the next iteration */
- schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC + smp_processor_id());
+ schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC);
}
#ifdef CONFIG_PROC_FS
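
The duplicate-name scan and the __get_user() probe now run with cache_chain_sem held across the whole of kmem_cache_create(), so a second creator with the same name is caught before any allocation happens, and the stray `opps' label becomes `oops'. For context, a hedged sketch of the module pattern that the "lost its name" warning is aimed at; struct my_obj, my_cachep and the init/exit names are hypothetical.

/* Sketch: typical cache lifecycle in a module of this era (six-argument
 * kmem_cache_create() with ctor/dtor).  If my_exit() forgot the destroy,
 * the cache would stay on cache_chain with ->name pointing into the
 * module's freed vmalloc area -- exactly what the probe above detects. */
struct my_obj { int id; };                       /* hypothetical object */
static kmem_cache_t *my_cachep;

static int __init my_init(void)
{
        my_cachep = kmem_cache_create("my_cache", sizeof(struct my_obj),
                                      0, SLAB_HWCACHE_ALIGN, NULL, NULL);
        return my_cachep ? 0 : -ENOMEM;
}

static void __exit my_exit(void)
{
        kmem_cache_destroy(my_cachep);           /* without this, see the warning */
}
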
diff --git a/mm/swap.c b/mm/swap.c
index 154ae13d8b7e..d09cf7f03e76 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -413,7 +413,6 @@ void vm_acct_memory(long pages)
}
preempt_enable();
}
-EXPORT_SYMBOL(vm_acct_memory);
#ifdef CONFIG_HOTPLUG_CPU
static void lru_drain_cache(unsigned int cpu)
diff --git a/mm/swap_state.c b/mm/swap_state.c
index dfd9a46755b8..0df9a57b1de8 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -40,7 +40,6 @@ struct address_space swapper_space = {
.i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
.backing_dev_info = &swap_backing_dev_info,
};
-EXPORT_SYMBOL(swapper_space);
#define INC_CACHE_INFO(x) do { swap_cache_info.x++; } while (0)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 8970c0b74194..edafeace301f 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -36,8 +36,6 @@ unsigned int nr_swapfiles;
long total_swap_pages;
static int swap_overflow;
-EXPORT_SYMBOL(total_swap_pages);
-
static const char Bad_file[] = "Bad swap file entry ";
static const char Unused_file[] = "Unused swap file entry ";
static const char Bad_offset[] = "Bad swap offset entry ";
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 54a90e83cb31..729eb3eec75f 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -457,7 +457,7 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
* @size: allocation size
* @gfp_mask: flags for the page level allocator
* @prot: protection mask for the allocated pages
- * @node node to use for allocation or -1
+ * @node: node to use for allocation or -1
*
* Allocate enough pages to cover @size from the page level
* allocator with @gfp_mask flags. Map them into contiguous
@@ -507,7 +507,7 @@ EXPORT_SYMBOL(vmalloc);
* vmalloc_node - allocate memory on a specific node
*
* @size: allocation size
- * @node; numa node
+ * @node: numa node
*
* Allocate enough pages to cover @size from the page level
* allocator and map them into contiguous kernel virtual space.
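
A short usage sketch for the interface whose kerneldoc is being fixed above; the 64 KB size is arbitrary and the surrounding function is assumed.

/* Sketch: NUMA-aware allocation.  Passing -1 as the node (as the comment
 * says) lets the allocator fall back to any node. */
void *buf = vmalloc_node(64 * 1024, numa_node_id());

if (!buf)
        return -ENOMEM;
/* ... use buf ... */
vfree(buf);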