author    | Thomas Gleixner <tglx@linutronix.de> | 2020-11-18 22:48:44 +0300
committer | Thomas Gleixner <tglx@linutronix.de> | 2020-11-24 16:42:09 +0300
commit    | f3ba3c710ac5a30cd058615a9eb62d2ad95bb782 (patch)
tree      | 1678112f3c1f9793ac5551679cfe2a275a270543 /include/linux/highmem-internal.h
parent    | 5fbda3ecd14a5343644979c98d6eb65b7e7de9d8 (diff)
download  | linux-f3ba3c710ac5a30cd058615a9eb62d2ad95bb782.tar.xz
mm/highmem: Provide kmap_local*
Now that the kmap_atomic index is stored in task_struct, provide a
preemptible variant. On context switch the maps of an outgoing task are
removed and the maps of the incoming task are restored. That's obviously
slow, but highmem is slow anyway.
The kmap_local.*() functions can be invoked from both preemptible and
atomic context. kmap_local sections disable migration to keep the
resulting virtual mapping address valid, but they disable neither
pagefaults nor preemption.
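As a minimal usage sketch (not part of the patch; the helper name and the
memcpy-based copy-out are illustrative assumptions), a kmap_local section
typically looks like this:

#include <linux/highmem.h>
#include <linux/string.h>

/* Copy len bytes (len <= PAGE_SIZE) out of a potentially-highmem page. */
static void example_copy_from_page(struct page *page, void *dst, size_t len)
{
	void *vaddr = kmap_local_page(page);	/* disables migration only */

	/* Preemption and pagefaults stay enabled inside the section. */
	memcpy(dst, vaddr, len);
	kunmap_local(vaddr);			/* pass the returned address */
}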
A wholesale conversion of kmap_atomic() to the fully preemptible variant
is not possible because some usage sites might rely on the implicit
preemption disable for serialization or on the implicit pagefault
disable. That conversion needs to be done on a case-by-case basis.
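One hypothetical example of such a site (the per-CPU counter and the
function are made up for illustration): kmap_atomic() implicitly disables
preemption, which is what makes the non-atomic __this_cpu_add() below
safe; switching this site to kmap_local_page() would silently drop that
serialization:

#include <linux/highmem.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, example_mapped_bytes);

static void example_account_page(struct page *page, size_t len)
{
	void *vaddr = kmap_atomic(page);	/* implies preempt_disable() */

	/*
	 * __this_cpu_add() is only safe with preemption disabled;
	 * kmap_local_page() would not provide that guarantee.
	 */
	__this_cpu_add(example_mapped_bytes, len);
	kunmap_atomic(vaddr);
}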
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20201118204007.468533059@linutronix.de
Diffstat (limited to 'include/linux/highmem-internal.h')
-rw-r--r-- | include/linux/highmem-internal.h | 48
1 file changed, 48 insertions, 0 deletions
diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h
index c5a22177db85..1bbe96dc8be6 100644
--- a/include/linux/highmem-internal.h
+++ b/include/linux/highmem-internal.h
@@ -68,6 +68,26 @@ static inline void kmap_flush_unused(void)
 	__kmap_flush_unused();
 }
 
+static inline void *kmap_local_page(struct page *page)
+{
+	return __kmap_local_page_prot(page, kmap_prot);
+}
+
+static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
+{
+	return __kmap_local_page_prot(page, prot);
+}
+
+static inline void *kmap_local_pfn(unsigned long pfn)
+{
+	return __kmap_local_pfn_prot(pfn, kmap_prot);
+}
+
+static inline void __kunmap_local(void *vaddr)
+{
+	kunmap_local_indexed(vaddr);
+}
+
 static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
 	preempt_disable();
@@ -140,6 +160,28 @@ static inline void kunmap(struct page *page)
 #endif
 }
 
+static inline void *kmap_local_page(struct page *page)
+{
+	return page_address(page);
+}
+
+static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
+{
+	return kmap_local_page(page);
+}
+
+static inline void *kmap_local_pfn(unsigned long pfn)
+{
+	return kmap_local_page(pfn_to_page(pfn));
+}
+
+static inline void __kunmap_local(void *addr)
+{
+#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+	kunmap_flush_on_unmap(addr);
+#endif
+}
+
 static inline void *kmap_atomic(struct page *page)
 {
 	preempt_disable();
@@ -181,4 +223,10 @@ do {								\
 	__kunmap_atomic(__addr);				\
 } while (0)
 
+#define kunmap_local(__addr)					\
+do {								\
+	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
+	__kunmap_local(__addr);					\
+} while (0)
+
 #endif
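A design note on the kunmap_local() wrapper above: the
BUILD_BUG_ON(__same_type(...)) check rejects, at compile time, callers
that pass the mapped struct page instead of the kernel virtual address
returned by kmap_local_page(). A hypothetical caller making that mistake
would fail to build:

#include <linux/highmem.h>

static void example_unmap(struct page *page)
{
	void *vaddr = kmap_local_page(page);

	/* kunmap_local(page); would trip the BUILD_BUG_ON: arg is a page */
	kunmap_local(vaddr);	/* correct: unmap by virtual address */
}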