author    Johannes Berg <johannes.berg@intel.com>   2021-03-16 01:38:04 +0300
committer Richard Weinberger <richard@nod.at>       2021-06-17 23:04:40 +0300
commit    80f849bf541ef9b633a9c08ac208f9c9afd14eb9 (patch)
tree      4da4171a7d84405ecdbecfc246301eb9e176f9da /arch/um/include
parent    dd3035a21ba7ccaa883d7107d357ad06320d78fc (diff)
download  linux-80f849bf541ef9b633a9c08ac208f9c9afd14eb9.tar.xz
um: implement flush_cache_vmap/flush_cache_vunmap
vmalloc() heavy workloads in UML are extremely slow, due to flushing
the entire kernel VM space (flush_tlb_kernel_vm()) on the first
segfault.

Implement flush_cache_vmap() to avoid that, and while at it also add
flush_cache_vunmap() since it's trivial.

This speeds up my vmalloc() heavy test of copying files out from
/sys/kernel/debug/gcov/ by 30x (from 30s to 1s).

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Acked-By: Anton Ivanov <anton.ivanov@cambridgegreys.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
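For context on why scoping the flush helps, here is an illustrative, hedged sketch (the helper name is made up and this is not code from the patch): generic code such as mm/vmalloc.c calls flush_cache_vmap() on the freshly mapped range, and with the define added below that call expands to flush_tlb_kernel_range() on UML, instead of leaving the work to the first fault, which falls back to flush_tlb_kernel_vm() over the whole kernel VM space.

/*
 * Illustrative sketch only: toy_after_vmap() is a hypothetical caller
 * standing in for the generic vmalloc/vmap path, which invokes
 * flush_cache_vmap() on the range it has just mapped.
 */
#include <asm/cacheflush.h>

static void toy_after_vmap(unsigned long start, unsigned long end)
{
	/* On UML this now expands to flush_tlb_kernel_range(start, end). */
	flush_cache_vmap(start, end);
}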
Diffstat (limited to 'arch/um/include')
-rw-r--r--  arch/um/include/asm/cacheflush.h  9
-rw-r--r--  arch/um/include/asm/tlb.h         2
2 files changed, 10 insertions(+), 1 deletion(-)
diff --git a/arch/um/include/asm/cacheflush.h b/arch/um/include/asm/cacheflush.h
new file mode 100644
index 000000000000..4c9858cd36ec
--- /dev/null
+++ b/arch/um/include/asm/cacheflush.h
@@ -0,0 +1,9 @@
+#ifndef __UM_ASM_CACHEFLUSH_H
+#define __UM_ASM_CACHEFLUSH_H
+
+#include <asm/tlbflush.h>
+#define flush_cache_vmap flush_tlb_kernel_range
+#define flush_cache_vunmap flush_tlb_kernel_range
+
+#include <asm-generic/cacheflush.h>
+#endif /* __UM_ASM_CACHEFLUSH_H */
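Note that the ordering in the new header matters: <asm-generic/cacheflush.h> only supplies no-op fallbacks for hooks an architecture has not already defined, which is why the two #defines come before that include. A paraphrased sketch of the generic header's pattern (not a verbatim copy):

/* Paraphrased from include/asm-generic/cacheflush.h of this era. */
#ifndef flush_cache_vmap
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
}
#endif

#ifndef flush_cache_vunmap
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}
#endif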
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index ff9c62828962..0422467bda5b 100644
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -5,7 +5,7 @@
 #include <linux/mm.h>
 
 #include <asm/tlbflush.h>
-#include <asm-generic/cacheflush.h>
+#include <asm/cacheflush.h>
 #include <asm-generic/tlb.h>
 
 #endif