author | Ingo Molnar <mingo@kernel.org> | 2014-05-07 15:15:46 +0400
committer | Ingo Molnar <mingo@kernel.org> | 2014-05-07 15:15:46 +0400
commit | 2fe5de9ce7d57498abc14b375cad2fcf8c3ee6cc (patch)
tree | 9478e8cf470c1d5bdb2d89b57a7e35919ab95e72 /include/linux/vmacache.h
parent | 08f8aeb55d7727d644dbbbbfb798fe937d47751d (diff)
parent | 2b4cfe64dee0d84506b951d81bf55d9891744d25 (diff)
download | linux-2fe5de9ce7d57498abc14b375cad2fcf8c3ee6cc.tar.xz
Merge branch 'sched/urgent' into sched/core, to avoid conflicts
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include/linux/vmacache.h')
-rw-r--r-- | include/linux/vmacache.h | 38
1 file changed, 38 insertions, 0 deletions
```diff
diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h
new file mode 100644
index 000000000000..c3fa0fd43949
--- /dev/null
+++ b/include/linux/vmacache.h
@@ -0,0 +1,38 @@
+#ifndef __LINUX_VMACACHE_H
+#define __LINUX_VMACACHE_H
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+
+/*
+ * Hash based on the page number. Provides a good hit rate for
+ * workloads with good locality and those with random accesses as well.
+ */
+#define VMACACHE_HASH(addr) ((addr >> PAGE_SHIFT) & VMACACHE_MASK)
+
+static inline void vmacache_flush(struct task_struct *tsk)
+{
+	memset(tsk->vmacache, 0, sizeof(tsk->vmacache));
+}
+
+extern void vmacache_flush_all(struct mm_struct *mm);
+extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
+extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
+					    unsigned long addr);
+
+#ifndef CONFIG_MMU
+extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
+						  unsigned long start,
+						  unsigned long end);
+#endif
+
+static inline void vmacache_invalidate(struct mm_struct *mm)
+{
+	mm->vmacache_seqnum++;
+
+	/* deal with overflows */
+	if (unlikely(mm->vmacache_seqnum == 0))
+		vmacache_flush_all(mm);
+}
+
+#endif /* __LINUX_VMACACHE_H */
```
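
For readers skimming the interface: the header exposes a small per-thread cache of recently used VMAs, indexed by VMACACHE_HASH(addr) and kept coherent through a per-mm sequence number that vmacache_invalidate() bumps whenever the VMA set changes (falling back to a full flush via vmacache_flush_all() only when the seqnum wraps to zero). The sketch below shows how a lookup path could consume this API. It is an illustrative example under the assumptions of this series (an mm_rb red-black tree of VMAs and the per-task vmacache fields added elsewhere), not the actual mm/mmap.c code, and find_vma_sketch() is a hypothetical name.

```c
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/vmacache.h>

/*
 * Illustrative only: a find_vma()-style lookup that tries the per-thread
 * cache first and walks the mm's red-black tree only on a miss, then
 * refreshes the cache so the next lookup of the same VMA hits.
 */
static struct vm_area_struct *find_vma_sketch(struct mm_struct *mm,
					      unsigned long addr)
{
	struct vm_area_struct *vma;
	struct rb_node *rb_node;

	/* Fast path: consult the per-thread cache. */
	vma = vmacache_find(mm, addr);
	if (likely(vma))
		return vma;

	/* Slow path: find the first VMA with vm_end > addr in the rbtree. */
	rb_node = mm->mm_rb.rb_node;
	while (rb_node) {
		struct vm_area_struct *tmp;

		tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
		if (tmp->vm_end > addr) {
			vma = tmp;
			if (tmp->vm_start <= addr)
				break;
			rb_node = rb_node->rb_left;
		} else {
			rb_node = rb_node->rb_right;
		}
	}

	/* Cache the result for subsequent lookups of nearby addresses. */
	if (vma)
		vmacache_update(addr, vma);
	return vma;
}
```

The invalidation scheme is the notable design choice: rather than chasing every thread's cached pointers on each unmap, writers only increment mm->vmacache_seqnum, and the lookup side (in mm/vmacache.c, not part of this header) treats a stale per-task copy of that sequence number as a cache miss, discarding the entries lazily.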