From f49ae86483c494ddc793d889f6df5ea68d138569 Mon Sep 17 00:00:00 2001 From: Jonathan Cameron Date: Mon, 17 Nov 2025 10:47:54 +0000 Subject: memregion: Drop unused IORES_DESC_* parameter from cpu_cache_invalidate_memregion() The res_desc parameter was originally introduced for documentation purposes, and with the idea that, with HDM-DB, CXL invalidation could be triggered from the device. That has not come to pass, and the continued existence of the option is confusing now that the following patch adds a range which might not be a strict subset of the res_desc. So avoid that confusion by dropping the parameter. Link: https://lore.kernel.org/linux-mm/686eedb25ed02_24471002e@dwillia2-xfh.jf.intel.com.notmuch/ Reviewed-by: Dan Williams Suggested-by: Dan Williams Signed-off-by: Jonathan Cameron Signed-off-by: Conor Dooley --- include/linux/memregion.h | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memregion.h b/include/linux/memregion.h index c01321467789..945646bde825 100644 --- a/include/linux/memregion.h +++ b/include/linux/memregion.h @@ -26,8 +26,7 @@ static inline void memregion_free(int id) /** * cpu_cache_invalidate_memregion - drop any CPU cached data for - * memregions described by @res_desc - * @res_desc: one of the IORES_DESC_* types + * memregion * * Perform cache maintenance after a memory event / operation that * changes the contents of physical memory in a cache-incoherent manner. @@ -46,7 +45,7 @@ static inline void memregion_free(int id) * the cache maintenance. */ #ifdef CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION -int cpu_cache_invalidate_memregion(int res_desc); +int cpu_cache_invalidate_memregion(void); bool cpu_cache_has_invalidate_memregion(void); #else static inline bool cpu_cache_has_invalidate_memregion(void) { @@ -54,7 +53,7 @@ static inline bool cpu_cache_has_invalidate_memregion(void) return false; } -static inline int cpu_cache_invalidate_memregion(int res_desc) +static inline int cpu_cache_invalidate_memregion(void) { WARN_ON_ONCE("CPU cache invalidation required"); return -ENXIO; -- cgit v1.2.3 From b43652d867cf2a5f31b14e3d9a320ad01fca0992 Mon Sep 17 00:00:00 2001 From: Yicong Yang Date: Mon, 17 Nov 2025 10:47:55 +0000 Subject: memregion: Support fine-grained invalidate by cpu_cache_invalidate_memregion() Extend cpu_cache_invalidate_memregion() to support invalidating a particular range of memory by introducing start and length parameters. Control over the type of invalidation performed is left for when use cases turn up; for now everything is Clean and Invalidate. Where the range is unknown, use the provided cpu_cache_invalidate_all() helper, which documents the intent more clearly than passing (0, -1) to cpu_cache_invalidate_memregion().
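As an illustrative sketch (not part of this series' diffs), a caller that knows the physical range backing a region might use the extended interface as follows; example_flush_region() and its struct resource argument are hypothetical:

#include <linux/ioport.h>
#include <linux/memregion.h>

/* Hypothetical caller: flush CPU caches for the PA range backing @res. */
static int example_flush_region(struct resource *res)
{
	if (!cpu_cache_has_invalidate_memregion())
		return -ENXIO;

	return cpu_cache_invalidate_memregion(res->start,
					      resource_size(res));
}

Where no such range is known, cpu_cache_invalidate_all() expresses the intent better than open-coding the (0, -1) arguments.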
Signed-off-by: Yicong Yang Reviewed-by: Dan Williams Acked-by: Davidlohr Bueso Signed-off-by: Jonathan Cameron Signed-off-by: Conor Dooley --- arch/x86/mm/pat/set_memory.c | 2 +- drivers/cxl/core/region.c | 5 ++++- drivers/nvdimm/region.c | 2 +- drivers/nvdimm/region_devs.c | 2 +- include/linux/memregion.h | 13 +++++++++++-- 5 files changed, 18 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c index 4019b17fb65e..292c7202faed 100644 --- a/arch/x86/mm/pat/set_memory.c +++ b/arch/x86/mm/pat/set_memory.c @@ -368,7 +368,7 @@ bool cpu_cache_has_invalidate_memregion(void) } EXPORT_SYMBOL_NS_GPL(cpu_cache_has_invalidate_memregion, "DEVMEM"); -int cpu_cache_invalidate_memregion(void) +int cpu_cache_invalidate_memregion(phys_addr_t start, size_t len) { if (WARN_ON_ONCE(!cpu_cache_has_invalidate_memregion())) return -ENXIO; diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c index d7fa76810f82..410e41cef5d3 100644 --- a/drivers/cxl/core/region.c +++ b/drivers/cxl/core/region.c @@ -228,7 +228,10 @@ static int cxl_region_invalidate_memregion(struct cxl_region *cxlr) return -ENXIO; } - cpu_cache_invalidate_memregion(); + if (!cxlr->params.res) + return -ENXIO; + cpu_cache_invalidate_memregion(cxlr->params.res->start, + resource_size(cxlr->params.res)); return 0; } diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c index c43506448edf..42e982db5b04 100644 --- a/drivers/nvdimm/region.c +++ b/drivers/nvdimm/region.c @@ -110,7 +110,7 @@ static void nd_region_remove(struct device *dev) * here is ok. */ if (cpu_cache_has_invalidate_memregion()) - cpu_cache_invalidate_memregion(); + cpu_cache_invalidate_all(); } static int child_notify(struct device *dev, void *data) diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index 3cdd93d40997..e27fc380f6c0 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -90,7 +90,7 @@ static int nd_region_invalidate_memregion(struct nd_region *nd_region) } } - cpu_cache_invalidate_memregion(); + cpu_cache_invalidate_all(); out: for (i = 0; i < nd_region->ndr_mappings; i++) { struct nd_mapping *nd_mapping = &nd_region->mapping[i]; diff --git a/include/linux/memregion.h b/include/linux/memregion.h index 945646bde825..a55f62cc5266 100644 --- a/include/linux/memregion.h +++ b/include/linux/memregion.h @@ -27,6 +27,9 @@ static inline void memregion_free(int id) /** * cpu_cache_invalidate_memregion - drop any CPU cached data for * memregion + * @start: start physical address of the target memory region. + * @len: length of the target memory region. -1 for all the regions of + * the target type. * * Perform cache maintenance after a memory event / operation that * changes the contents of physical memory in a cache-incoherent manner. @@ -45,7 +48,7 @@ static inline void memregion_free(int id) * the cache maintenance. 
*/ #ifdef CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION -int cpu_cache_invalidate_memregion(void); +int cpu_cache_invalidate_memregion(phys_addr_t start, size_t len); bool cpu_cache_has_invalidate_memregion(void); #else static inline bool cpu_cache_has_invalidate_memregion(void) { @@ -53,10 +56,16 @@ static inline bool cpu_cache_has_invalidate_memregion(void) return false; } -static inline int cpu_cache_invalidate_memregion(void) +static inline int cpu_cache_invalidate_memregion(phys_addr_t start, size_t len) { WARN_ON_ONCE("CPU cache invalidation required"); return -ENXIO; } #endif + +static inline int cpu_cache_invalidate_all(void) +{ + return cpu_cache_invalidate_memregion(0, -1); +} + #endif /* _MEMREGION_H_ */ -- cgit v1.2.3 From c460697d3472d4252917fba9bbc1d1a23eafc124 Mon Sep 17 00:00:00 2001 From: Yicong Yang Date: Mon, 17 Nov 2025 10:47:56 +0000 Subject: lib: Support ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION provides the mechanism for invalidating certain memory regions in a cache-incoherent manner. Currently this is used by the NVDIMM and CXL memory drivers in cases where it is necessary to flush all data from caches by physical address range. The operations in question are effectively memory hotplug, where stale data might otherwise remain in the caches. This is separate from the invalidates done to enable use of non-coherent DMA masters, primarily in terms of when it is needed (not related to DMA mappings) and how deep the flush must push data. The flushes done for non-coherent DMA only need to reach the Point of Coherence of a single host (which is often nearer the CPUs and DMA masters than the physical storage). This operation must push the data out of non-architectural caches (memory-side caches, write buffers etc.) and typically all the way to the memory device. On some architectures these operations are supported by system components that may become available only later in boot, as they are either present on a discoverable bus or described by firmware as an MMIO interface (e.g. in the ACPI DSDT). Provide a framework to handle this case. Architectures can opt in for this support via CONFIG_GENERIC_CPU_CACHE_MAINTENANCE. Add a registration framework: each driver provides an ops structure, and the first op is Write Back and Invalidate by PA Range. The driver may over-invalidate. For systems that can perform this operation asynchronously, an optional completion check operation is also provided. If present, it must be called to ensure that the action has finished. This provides a considerable performance advantage if multiple agents are involved in the maintenance operation. When multiple agents are present in the system, each should register with this framework, and the core code will issue the invalidate to all of them before checking for completion on each. This is done to avoid the need for filtering in the core code, which can become complex when interleaving, potentially across different cache coherency hardware, is in use; it is easier to tell everyone and let those who don't care do nothing.
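As a sketch of the intended driver-facing flow (illustrative only, not part of this patch's diffs), a hypothetical agent might embed and register an ops instance as follows; my_agent and its device-specific helpers are invented names, while the ops structure and registration helpers are those added below:

#include <linux/cache_coherency.h>
#include <linux/container_of.h>
#include <linux/device.h>
#include <linux/errno.h>

struct my_agent {
	struct cache_coherency_ops_inst cci;	/* must be the first member */
	void __iomem *regs;			/* hypothetical MMIO interface */
};

/* Hypothetical device-specific helpers, implemented elsewhere in the driver */
int my_agent_start_flush(struct my_agent *agent, phys_addr_t addr, size_t size);
int my_agent_wait_idle(struct my_agent *agent);

static int my_agent_wbinv(struct cache_coherency_ops_inst *cci,
			  struct cc_inval_params *invp)
{
	struct my_agent *agent = container_of(cci, struct my_agent, cci);

	/* Kick off write back and invalidate of [addr, addr + size) */
	return my_agent_start_flush(agent, invp->addr, invp->size);
}

static int my_agent_done(struct cache_coherency_ops_inst *cci)
{
	struct my_agent *agent = container_of(cci, struct my_agent, cci);

	/* Wait for the asynchronously issued operation to complete */
	return my_agent_wait_idle(agent);
}

static const struct cache_coherency_ops my_agent_ops = {
	.wbinv = my_agent_wbinv,
	.done = my_agent_done,
};

static int my_agent_probe(struct device *dev)
{
	struct my_agent *agent;

	agent = cache_coherency_ops_instance_alloc(&my_agent_ops,
						   struct my_agent, cci);
	if (!agent)
		return -ENOMEM;

	return cache_coherency_ops_instance_register(&agent->cci);
}

Note that the ops instance must sit at offset 0 of the driver structure; the allocation helper below enforces this with a static_assert so that freeing via the embedded kref releases the whole containing structure.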
Signed-off-by: Yicong Yang Co-developed-by: Jonathan Cameron Signed-off-by: Jonathan Cameron Acked-by: Conor Dooley Signed-off-by: Conor Dooley --- include/linux/cache_coherency.h | 61 ++++++++++++++++++ lib/Kconfig | 3 + lib/Makefile | 2 + lib/cache_maint.c | 138 ++++++++++++++++++++++++++++++++++++++++ 4 files changed, 204 insertions(+) create mode 100644 include/linux/cache_coherency.h create mode 100644 lib/cache_maint.c (limited to 'include/linux') diff --git a/include/linux/cache_coherency.h b/include/linux/cache_coherency.h new file mode 100644 index 000000000000..cc81c5733e31 --- /dev/null +++ b/include/linux/cache_coherency.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Cache coherency maintenance operation device drivers + * + * Copyright Huawei 2025 + */ +#ifndef _LINUX_CACHE_COHERENCY_H_ +#define _LINUX_CACHE_COHERENCY_H_ + +#include <linux/kref.h> +#include <linux/list.h> +#include <linux/types.h> + +struct cc_inval_params { + phys_addr_t addr; + size_t size; +}; + +struct cache_coherency_ops_inst; + +struct cache_coherency_ops { + int (*wbinv)(struct cache_coherency_ops_inst *cci, + struct cc_inval_params *invp); + int (*done)(struct cache_coherency_ops_inst *cci); +}; + +struct cache_coherency_ops_inst { + struct kref kref; + struct list_head node; + const struct cache_coherency_ops *ops; +}; + +int cache_coherency_ops_instance_register(struct cache_coherency_ops_inst *cci); +void cache_coherency_ops_instance_unregister(struct cache_coherency_ops_inst *cci); + +struct cache_coherency_ops_inst * +_cache_coherency_ops_instance_alloc(const struct cache_coherency_ops *ops, + size_t size); +/** + * cache_coherency_ops_instance_alloc - Allocate cache coherency ops instance + * @ops: Cache maintenance operations + * @drv_struct: structure that contains the struct cache_coherency_ops_inst + * @member: Name of the struct cache_coherency_ops_inst member in @drv_struct. + * + * This allocates a driver-specific structure and initializes the + * cache_coherency_ops_inst embedded in the drv_struct. Upon success the + * pointer must be freed via cache_coherency_ops_instance_put(). + * + * Returns a &drv_struct * on success, %NULL on error.
+ */ +#define cache_coherency_ops_instance_alloc(ops, drv_struct, member) \ + ({ \ + static_assert(__same_type(struct cache_coherency_ops_inst, \ + ((drv_struct *)NULL)->member)); \ + static_assert(offsetof(drv_struct, member) == 0); \ + (drv_struct *)_cache_coherency_ops_instance_alloc(ops, \ + sizeof(drv_struct)); \ + }) +void cache_coherency_ops_instance_put(struct cache_coherency_ops_inst *cci); + +#endif diff --git a/lib/Kconfig b/lib/Kconfig index c483951b624f..cd8e5844f9bb 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -543,6 +543,9 @@ config MEMREGION config ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION bool +config GENERIC_CPU_CACHE_MAINTENANCE + bool + config ARCH_HAS_MEMREMAP_COMPAT_ALIGN bool diff --git a/lib/Makefile b/lib/Makefile index 392ff808c9b9..eed20c50f358 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -130,6 +130,8 @@ obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o +obj-$(CONFIG_GENERIC_CPU_CACHE_MAINTENANCE) += cache_maint.o + lib-y += logic_pio.o lib-$(CONFIG_INDIRECT_IOMEM) += logic_iomem.o diff --git a/lib/cache_maint.c b/lib/cache_maint.c new file mode 100644 index 000000000000..9256a9ffc34c --- /dev/null +++ b/lib/cache_maint.c @@ -0,0 +1,138 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Generic support for Memory System Cache Maintenance operations. + * + * Coherency maintenance drivers register with this simple framework that will + * iterate over each registered instance to first kick off invalidation and + * then to wait until it is complete. + * + * If no implementations are registered yet cpu_cache_has_invalidate_memregion() + * will return false. If this runs concurrently with unregistration then a + * race exists but this is no worse than the case where the operations instance + * responsible for a given memory region has not yet registered. 
+ */ +#include <linux/cache_coherency.h> +#include <linux/cleanup.h> +#include <linux/container_of.h> +#include <linux/export.h> +#include <linux/kref.h> +#include <linux/list.h> +#include <linux/memregion.h> +#include <linux/rwsem.h> +#include <linux/slab.h> +#include <linux/types.h> + +static LIST_HEAD(cache_ops_instance_list); +static DECLARE_RWSEM(cache_ops_instance_list_lock); + +static void __cache_coherency_ops_instance_free(struct kref *kref) +{ + struct cache_coherency_ops_inst *cci = + container_of(kref, struct cache_coherency_ops_inst, kref); + kfree(cci); +} + +void cache_coherency_ops_instance_put(struct cache_coherency_ops_inst *cci) +{ + kref_put(&cci->kref, __cache_coherency_ops_instance_free); +} +EXPORT_SYMBOL_GPL(cache_coherency_ops_instance_put); + +static int cache_inval_one(struct cache_coherency_ops_inst *cci, void *data) +{ + if (!cci->ops) + return -EINVAL; + + return cci->ops->wbinv(cci, data); +} + +static int cache_inval_done_one(struct cache_coherency_ops_inst *cci) +{ + if (!cci->ops) + return -EINVAL; + + if (!cci->ops->done) + return 0; + + return cci->ops->done(cci); +} + +static int cache_invalidate_memregion(phys_addr_t addr, size_t size) +{ + int ret; + struct cache_coherency_ops_inst *cci; + struct cc_inval_params params = { + .addr = addr, + .size = size, + }; + + guard(rwsem_read)(&cache_ops_instance_list_lock); + list_for_each_entry(cci, &cache_ops_instance_list, node) { + ret = cache_inval_one(cci, &params); + if (ret) + return ret; + } + list_for_each_entry(cci, &cache_ops_instance_list, node) { + ret = cache_inval_done_one(cci); + if (ret) + return ret; + } + + return 0; +} + +struct cache_coherency_ops_inst * +_cache_coherency_ops_instance_alloc(const struct cache_coherency_ops *ops, + size_t size) +{ + struct cache_coherency_ops_inst *cci; + + if (!ops || !ops->wbinv) + return NULL; + + cci = kzalloc(size, GFP_KERNEL); + if (!cci) + return NULL; + + cci->ops = ops; + INIT_LIST_HEAD(&cci->node); + kref_init(&cci->kref); + + return cci; +} +EXPORT_SYMBOL_NS_GPL(_cache_coherency_ops_instance_alloc, "CACHE_COHERENCY"); + +int cache_coherency_ops_instance_register(struct cache_coherency_ops_inst *cci) +{ + guard(rwsem_write)(&cache_ops_instance_list_lock); + list_add(&cci->node, &cache_ops_instance_list); + + return 0; +} +EXPORT_SYMBOL_NS_GPL(cache_coherency_ops_instance_register, "CACHE_COHERENCY"); + +void cache_coherency_ops_instance_unregister(struct cache_coherency_ops_inst *cci) +{ + guard(rwsem_write)(&cache_ops_instance_list_lock); + list_del(&cci->node); +} +EXPORT_SYMBOL_NS_GPL(cache_coherency_ops_instance_unregister, "CACHE_COHERENCY"); + +int cpu_cache_invalidate_memregion(phys_addr_t start, size_t len) +{ + return cache_invalidate_memregion(start, len); +} +EXPORT_SYMBOL_NS_GPL(cpu_cache_invalidate_memregion, "DEVMEM"); + +/* + * Used for optimization / debug purposes only, as removal can race. + * + * Machines that do not support invalidation, e.g. VMs, will not have any + * operations instance to register and so this will always return false. + */ +bool cpu_cache_has_invalidate_memregion(void) +{ + guard(rwsem_read)(&cache_ops_instance_list_lock); + return !list_empty(&cache_ops_instance_list); +} +EXPORT_SYMBOL_NS_GPL(cpu_cache_has_invalidate_memregion, "DEVMEM"); -- cgit v1.2.3
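For completeness of the illustrative sketch shown earlier in this series (names again hypothetical), teardown for such an agent would unregister the instance and then drop the allocation reference:

static void my_agent_remove(struct my_agent *agent)
{
	/* Stop receiving wbinv/done callbacks... */
	cache_coherency_ops_instance_unregister(&agent->cci);
	/* ...then drop the reference taken at allocation time */
	cache_coherency_ops_instance_put(&agent->cci);
}

Once at least one instance is registered, cpu_cache_has_invalidate_memregion() above reports true and cpu_cache_invalidate_memregion() fans each request out to every registered agent before checking them all for completion.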