Diffstat (limited to 'io_uring/alloc_cache.h')
 io_uring/alloc_cache.h | 60 +++++++++++++++++++++++++++++++++++-------------------------
 1 file changed, 35 insertions(+), 25 deletions(-)
diff --git a/io_uring/alloc_cache.h b/io_uring/alloc_cache.h
index b7a38a2069cf..0dd17d8ba93a 100644
--- a/io_uring/alloc_cache.h
+++ b/io_uring/alloc_cache.h
@@ -1,11 +1,30 @@
#ifndef IOU_ALLOC_CACHE_H
#define IOU_ALLOC_CACHE_H
+#include <linux/io_uring_types.h>
+
/*
* Don't allow the cache to grow beyond this size.
*/
#define IO_ALLOC_CACHE_MAX 128
+void io_alloc_cache_free(struct io_alloc_cache *cache,
+ void (*free)(const void *));
+bool io_alloc_cache_init(struct io_alloc_cache *cache,
+ unsigned max_nr, unsigned int size,
+ unsigned int init_bytes);
+
+void *io_cache_alloc_new(struct io_alloc_cache *cache, gfp_t gfp);
+
+static inline void io_alloc_cache_kasan(struct iovec **iov, int *nr)
+{
+ if (IS_ENABLED(CONFIG_KASAN)) {
+ kfree(*iov);
+ *iov = NULL;
+ *nr = 0;
+ }
+}
+
static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
void *entry)
{
@@ -23,39 +42,30 @@ static inline void *io_alloc_cache_get(struct io_alloc_cache *cache)
if (cache->nr_cached) {
void *entry = cache->entries[--cache->nr_cached];
+ /*
+ * If KASAN is enabled, always clear the initial bytes that
+ * must be zeroed post alloc, in case any of them overlap
+ * with KASAN storage.
+ */
+#if defined(CONFIG_KASAN)
kasan_mempool_unpoison_object(entry, cache->elem_size);
+ if (cache->init_clear)
+ memset(entry, 0, cache->init_clear);
+#endif
return entry;
}
return NULL;
}
-/* returns false if the cache was initialized properly */
-static inline bool io_alloc_cache_init(struct io_alloc_cache *cache,
- unsigned max_nr, size_t size)
+static inline void *io_cache_alloc(struct io_alloc_cache *cache, gfp_t gfp)
{
- cache->entries = kvmalloc_array(max_nr, sizeof(void *), GFP_KERNEL);
- if (cache->entries) {
- cache->nr_cached = 0;
- cache->max_cached = max_nr;
- cache->elem_size = size;
- return false;
- }
- return true;
-}
-
-static inline void io_alloc_cache_free(struct io_alloc_cache *cache,
- void (*free)(const void *))
-{
- void *entry;
+ void *obj;
- if (!cache->entries)
- return;
-
- while ((entry = io_alloc_cache_get(cache)) != NULL)
- free(entry);
-
- kvfree(cache->entries);
- cache->entries = NULL;
+ obj = io_alloc_cache_get(cache);
+ if (obj)
+ return obj;
+ return io_cache_alloc_new(cache, gfp);
}
+
#endif
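
For context, a caller-side sketch of the reworked API, assuming a kernel build
context. The struct my_cached_obj type and the my_* names are hypothetical, and
io_cache_alloc_new() now lives in alloc_cache.c (its body is not part of this
diff); only the io_alloc_cache_*/io_cache_alloc calls themselves come from the
patch:

	#include <linux/slab.h>
	#include <linux/uio.h>
	#include "alloc_cache.h"

	struct my_cached_obj {		/* hypothetical cached type */
		struct iovec *iov;	/* separately allocated, recyclable */
		int iov_nr;
	};

	static struct io_alloc_cache my_cache;

	static int my_cache_setup(void)
	{
		/*
		 * init_bytes is the new argument: the number of leading bytes
		 * that must read as zero after an allocation. Per the comment
		 * added in io_alloc_cache_get(), cache hits re-clear them
		 * under KASAN, and io_cache_alloc_new() is expected to clear
		 * them on a fresh allocation. We clear the whole object here
		 * for simplicity. The return convention is unchanged from the
		 * old inline (whose comment this patch drops): true means
		 * failure.
		 */
		if (io_alloc_cache_init(&my_cache, IO_ALLOC_CACHE_MAX,
					sizeof(struct my_cached_obj),
					sizeof(struct my_cached_obj)))
			return -ENOMEM;
		return 0;
	}

	static struct my_cached_obj *my_obj_get(void)
	{
		/* Pops a cached entry if one exists, else falls back to
		 * io_cache_alloc_new() for a fresh allocation. */
		return io_cache_alloc(&my_cache, GFP_KERNEL);
	}

	static void my_obj_destroy(const void *entry)
	{
		struct my_cached_obj *obj = (struct my_cached_obj *)entry;

		kfree(obj->iov);
		kfree(obj);
	}

	static void my_cache_teardown(void)
	{
		/* Drains the cache through the callback, then frees the
		 * backing entries array. */
		io_alloc_cache_free(&my_cache, my_obj_destroy);
	}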
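
And the recycle path, showing where the new io_alloc_cache_kasan() helper
slots in. Again only a sketch: it is called before io_alloc_cache_put(),
since a successful put is expected to poison the object under KASAN (the
unchanged io_alloc_cache_put() body is not shown in this diff):

	static void my_obj_put(struct my_cached_obj *obj)
	{
		/*
		 * Under KASAN, free the iovec and zero the pointer and count
		 * rather than recycling them, so any later use of a stale
		 * iovec is caught as a use-after-free; without KASAN this is
		 * a no-op and the iovec is reused along with the object.
		 */
		io_alloc_cache_kasan(&obj->iov, &obj->iov_nr);
		if (!io_alloc_cache_put(&my_cache, obj))
			my_obj_destroy(obj);	/* cache full: free it here */
	}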