#ifndef IOU_ALLOC_CACHE_H
#define IOU_ALLOC_CACHE_H

#include <linux/io_uring_types.h>

/*
 * Don't allow the cache to grow beyond this size.
 */
#define IO_ALLOC_CACHE_MAX	128

void io_alloc_cache_free(struct io_alloc_cache *cache,
			 void (*free)(const void *));
bool io_alloc_cache_init(struct io_alloc_cache *cache,
			 unsigned max_nr, unsigned int size,
			 unsigned int init_bytes);

void *io_cache_alloc_new(struct io_alloc_cache *cache, gfp_t gfp);
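
/*
 * Drop a cached iovec when KASAN is enabled: the iovec is freed rather
 * than recycled, and the count reset, so KASAN can flag any stale access
 * to it. Without KASAN this is a no-op and the iovec stays cached.
 */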
static inline void io_alloc_cache_kasan(struct iovec **iov, int *nr)
{
	if (IS_ENABLED(CONFIG_KASAN)) {
		kfree(*iov);
		*iov = NULL;
		*nr = 0;
	}
}
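
/*
 * Try to return an entry to the cache. Returns true if the entry was
 * stashed (and poisoned for KASAN), false if the cache is full or the
 * object failed KASAN poisoning; on false the caller must free it.
 */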
static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
				      void *entry)
{
	if (cache->nr_cached < cache->max_cached) {
		if (!kasan_mempool_poison_object(entry))
			return false;
		cache->entries[cache->nr_cached++] = entry;
		return true;
	}
	return false;
}
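
/*
 * Pop an entry from the cache, or return NULL if it's empty. Under
 * KASAN the entry is unpoisoned before use, and the first init_clear
 * bytes are cleared again in case KASAN storage overlapped them while
 * the entry sat in the cache.
 */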
static inline void *io_alloc_cache_get(struct io_alloc_cache *cache)
{
	if (cache->nr_cached) {
		void *entry = cache->entries[--cache->nr_cached];

		/*
		 * If KASAN is enabled, always clear the initial bytes that
		 * must be zeroed post alloc, in case any of them overlap
		 * with KASAN storage.
		 */
#if defined(CONFIG_KASAN)
		kasan_mempool_unpoison_object(entry, cache->elem_size);
		if (cache->init_clear)
			memset(entry, 0, cache->init_clear);
#endif
		return entry;
	}
	return NULL;
}
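
/*
 * Allocate an object, preferring a cached entry over a fresh
 * allocation. Falls back to io_cache_alloc_new() with the given gfp
 * flags when the cache is empty.
 */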
static inline void *io_cache_alloc(struct io_alloc_cache *cache, gfp_t gfp)
{
	void *obj;

	obj = io_alloc_cache_get(cache);
	if (obj)
		return obj;
	return io_cache_alloc_new(cache, gfp);
}

#endif