author      KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>  2009-09-23 03:45:41 +0400
committer   Linus Torvalds <torvalds@linux-foundation.org>      2009-09-23 18:39:41 +0400
commit      2ef43ec772551e975a6ea7cf22b59c84955aadf9 (patch)
tree        9560c13bcc6deb3ee00a60f93730b248029e69bb /fs/proc
parent      d899bf7b55f503ba7d3d07ed27c3a37e270fa7db (diff)
download    linux-2ef43ec772551e975a6ea7cf22b59c84955aadf9.tar.xz
kcore: use usual list for kclist
This patchset is for /proc/kcore.  With this,

 - many per-arch hooks are removed.
 - /proc/kcore will know the really valid physical memory areas.
 - /proc/kcore will be aware of memory hotplug.
 - /proc/kcore will be architecture independent, i.e. if an arch
   supports CONFIG_MMU, it can use /proc/kcore (as long as the arch
   uses the usual memory layout).

This patch:

/proc/kcore uses its own list handling code.  It's better to use the
generic list code.  No change in logic, just cleanup.

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: WANG Cong <xiyou.wangcong@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
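For reference, here is a minimal sketch of the pattern the patch adopts: instead of a hand-rolled singly linked list threaded through a ->next pointer, the element embeds a struct list_head and the generic helpers from <linux/list.h> handle insertion and traversal. The struct kcore_list definition itself lives in include/linux/proc_fs.h and is not part of the fs/proc diff below, so the fields shown and the example_* helpers are illustrative only (kernel-context code, not buildable in userspace and not the exact kernel source).

#include <linux/list.h>
#include <linux/types.h>

struct kcore_list {
	struct list_head list;	/* replaces the old hand-rolled ->next pointer */
	unsigned long addr;
	size_t size;
};

static LIST_HEAD(kclist_head);	/* statically initialized empty list head */

/* append an entry to the tail of the list (illustrative helper) */
static void example_add(struct kcore_list *new, unsigned long addr, size_t size)
{
	new->addr = addr;
	new->size = size;
	list_add_tail(&new->list, &kclist_head);
}

/* walk every entry; the cursor is the containing struct, not the list_head */
static size_t example_total_size(void)
{
	struct kcore_list *m;
	size_t total = 0;

	list_for_each_entry(m, &kclist_head, list)
		total += m->size;
	return total;
}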
Diffstat (limited to 'fs/proc')
-rw-r--r--  fs/proc/kcore.c  12
1 file changed, 6 insertions, 6 deletions
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index 0cf8a24cf6c3..f9327e51ce99 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -20,6 +20,7 @@
#include <linux/init.h>
#include <asm/uaccess.h>
#include <asm/io.h>
+#include <linux/list.h>
#define CORE_STR "CORE"
@@ -57,7 +58,7 @@ struct memelfnote
void *data;
};
-static struct kcore_list *kclist;
+static LIST_HEAD(kclist_head);
static DEFINE_RWLOCK(kclist_lock);
void
@@ -67,8 +68,7 @@ kclist_add(struct kcore_list *new, void *addr, size_t size)
new->size = size;
write_lock(&kclist_lock);
- new->next = kclist;
- kclist = new;
+ list_add_tail(&new->list, &kclist_head);
write_unlock(&kclist_lock);
}
@@ -80,7 +80,7 @@ static size_t get_kcore_size(int *nphdr, size_t *elf_buflen)
*nphdr = 1; /* PT_NOTE */
size = 0;
- for (m=kclist; m; m=m->next) {
+ list_for_each_entry(m, &kclist_head, list) {
try = kc_vaddr_to_offset((size_t)m->addr + m->size);
if (try > size)
size = try;
@@ -192,7 +192,7 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
nhdr->p_align = 0;
/* setup ELF PT_LOAD program header for every area */
- for (m=kclist; m; m=m->next) {
+ list_for_each_entry(m, &kclist_head, list) {
phdr = (struct elf_phdr *) bufp;
bufp += sizeof(struct elf_phdr);
offset += sizeof(struct elf_phdr);
@@ -317,7 +317,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
struct kcore_list *m;
read_lock(&kclist_lock);
- for (m=kclist; m; m=m->next) {
+ list_for_each_entry(m, &kclist_head, list) {
if (start >= m->addr && start < (m->addr+m->size))
break;
}
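One semantic difference is worth keeping in mind when reading the converted loop above (the remainder of read_kcore() lies outside this hunk): with the old "for (m = kclist; m; m = m->next)" walk, m is NULL after the loop when no region matched, whereas after list_for_each_entry() it is not. Any later "not found" test therefore has to compare against the list head rather than NULL. The following is only a sketch of that idiom under that assumption, not code taken from the patch:

	struct kcore_list *m;

	read_lock(&kclist_lock);
	list_for_each_entry(m, &kclist_head, list) {
		if (start >= m->addr && start < (m->addr + m->size))
			break;
	}
	read_unlock(&kclist_lock);

	/*
	 * If no entry matched, the cursor stops on the list head's
	 * (bogus) container, never on NULL, so test against the head.
	 */
	if (&m->list == &kclist_head) {
		/* no kcore_list entry covers 'start' */
	}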