/* SPDX-License-Identifier: GPL-2.0 */
/*
* Kernel Electric-Fence (KFENCE). Public interface for allocator and fault
* handler integration. For more info see Documentation/dev-tools/kfence.rst.
*
* Copyright (C) 2020, Google LLC.
*/

#ifndef _LINUX_KFENCE_H
#define _LINUX_KFENCE_H

#include <linux/mm.h>
#include <linux/types.h>

#ifdef CONFIG_KFENCE

/*
* We allocate an even number of pages, as it simplifies calculations to map
* address to metadata indices; effectively, the very first page serves as an
* extended guard page, but otherwise has no special purpose.
*/
#define KFENCE_POOL_SIZE ((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE)
extern char *__kfence_pool;
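
/*
 * For example, with 4 KiB pages and CONFIG_KFENCE_NUM_OBJECTS=255 (the
 * default), the pool spans (255 + 1) * 2 = 512 pages, i.e. 2 MiB, with object
 * pages and guard pages alternating throughout.
 */
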
#ifdef CONFIG_KFENCE_STATIC_KEYS
#include <linux/static_key.h>
DECLARE_STATIC_KEY_FALSE(kfence_allocation_key);
#else
#include <linux/atomic.h>
extern atomic_t kfence_allocation_gate;
#endif

/**
* is_kfence_address() - check if an address belongs to KFENCE pool
* @addr: address to check
*
* Return: true or false depending on whether the address is within the KFENCE
* object range.
*
 * KFENCE objects live in a separate page range and are not to be intermixed
 * with regular heap objects (e.g. KFENCE objects must never be added to the
 * allocator freelists). Failing to take this into account will result in heap
 * corruption; is_kfence_address() must therefore be used to check whether an
 * object requires specific handling.
*
* Note: This function may be used in fast-paths, and is performance critical.
* Future changes should take this into account; for instance, we want to avoid
* introducing another load and therefore need to keep KFENCE_POOL_SIZE a
* constant (until immediate patching support is added to the kernel).
*/
static __always_inline bool is_kfence_address(const void *addr)
{
	/*
	 * The non-NULL check is required in case the __kfence_pool pointer was
	 * never initialized; keep it in the slow-path after the range-check.
	 * Note that the subtraction is evaluated as unsigned: addresses below
	 * __kfence_pool wrap to large values and correctly fail the check.
	 */
return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && addr);
}

/**
* kfence_alloc_pool() - allocate the KFENCE pool via memblock
*/
void __init kfence_alloc_pool(void);

/**
* kfence_init() - perform KFENCE initialization at boot time
*
* Requires that kfence_alloc_pool() was called before. This sets up the
* allocation gate timer, and requires that workqueues are available.
*/
void __init kfence_init(void);
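
/*
 * A sketch of the required boot ordering (illustrative only; the actual call
 * sites live in init/main.c):
 *
 *	mm_init();
 *		-> kfence_alloc_pool();	// while memblock is still usable
 *	...
 *	workqueue_init_early();
 *	...
 *	kfence_init();			// workqueues are now available
 */
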
/**
* kfence_shutdown_cache() - handle shutdown_cache() for KFENCE objects
* @s: cache being shut down
*
* Before shutting down a cache, one must ensure there are no remaining objects
* allocated from it. Because KFENCE objects are not referenced from the cache
* directly, we need to check them here.
*
 * Note that shutdown_cache() is internal to SL*B, and kmem_cache_destroy()
 * has no way to signal failure if allocated objects still exist: it prints an
 * error message and simply aborts destruction of the cache, leaking memory.
 *
 * If the only such objects are KFENCE objects, we will not leak the entire
 * cache, but instead try to provide more useful debug info by making allocated
 * objects "zombie allocations". Objects may then still be used or freed (which
 * is handled gracefully), but any use results in KFENCE error reports that
 * include stack traces of the user of the object, the original allocation
 * site, and the caller of shutdown_cache().
*/
void kfence_shutdown_cache(struct kmem_cache *s);
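
/*
 * Sketch of the intended call site (illustrative; the hook belongs in the
 * allocator's cache-shutdown path):
 *
 *	static int shutdown_cache(struct kmem_cache *s)
 *	{
 *		...
 *		kfence_shutdown_cache(s);
 *		...
 *	}
 */
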
/*
* Allocate a KFENCE object. Allocators must not call this function directly,
* use kfence_alloc() instead.
*/
void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags);

/**
* kfence_alloc() - allocate a KFENCE object with a low probability
* @s: struct kmem_cache with object requirements
 * @size: exact size of the object to allocate (can be less than @s->size,
 *	e.g. for kmalloc caches)
* @flags: GFP flags
*
* Return:
* * NULL - must proceed with allocating as usual,
* * non-NULL - pointer to a KFENCE object.
*
* kfence_alloc() should be inserted into the heap allocation fast path,
* allowing it to transparently return KFENCE-allocated objects with a low
* probability using a static branch (the probability is controlled by the
* kfence.sample_interval boot parameter).
*/
static __always_inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
{
#ifdef CONFIG_KFENCE_STATIC_KEYS
if (static_branch_unlikely(&kfence_allocation_key))
#else
if (unlikely(!atomic_read(&kfence_allocation_gate)))
#endif
return __kfence_alloc(s, size, flags);
return NULL;
}
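
/*
 * Example integration into an allocator's fast path (a sketch; the
 * surrounding alloc function and its fallback are hypothetical):
 *
 *	void *obj = kfence_alloc(s, size, flags);
 *
 *	if (unlikely(obj))
 *		return obj;	// KFENCE-guarded allocation
 *	// ... otherwise continue on the regular fast path ...
 */
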
/**
* kfence_ksize() - get actual amount of memory allocated for a KFENCE object
* @addr: pointer to a heap object
*
* Return:
* * 0 - not a KFENCE object, must call __ksize() instead,
* * non-0 - this many bytes can be accessed without causing a memory error.
*
* kfence_ksize() returns the number of bytes requested for a KFENCE object at
* allocation time. This number may be less than the object size of the
* corresponding struct kmem_cache.
*/
size_t kfence_ksize(const void *addr);
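
/*
 * Example (sketch): a ksize() implementation dispatches to KFENCE first and
 * falls back to the allocator's __ksize() for regular objects:
 *
 *	size_t size = kfence_ksize(object);
 *
 *	if (!size)
 *		size = __ksize(object);
 */
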
/**
* kfence_object_start() - find the beginning of a KFENCE object
* @addr: address within a KFENCE-allocated object
*
* Return: address of the beginning of the object.
*
* SL[AU]B-allocated objects are laid out within a page one by one, so it is
* easy to calculate the beginning of an object given a pointer inside it and
* the object size. The same is not true for KFENCE, which places a single
* object at either end of the page. This helper function is used to find the
* beginning of a KFENCE-allocated object.
*/
void *kfence_object_start(const void *addr);
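
/*
 * Example (sketch): resolving an object's base address from an interior
 * pointer; the layout-based fallback named here is hypothetical:
 *
 *	void *base = is_kfence_address(ptr) ? kfence_object_start(ptr)
 *					    : slab_object_base(s, ptr);
 */
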
/**
* __kfence_free() - release a KFENCE heap object to KFENCE pool
* @addr: object to be freed
*
* Requires: is_kfence_address(addr)
*
* Release a KFENCE object and mark it as freed.
*/
void __kfence_free(void *addr);

/**
* kfence_free() - try to release an arbitrary heap object to KFENCE pool
* @addr: object to be freed
*
* Return:
* * false - object doesn't belong to KFENCE pool and was ignored,
* * true - object was released to KFENCE pool.
*
* Release a KFENCE object and mark it as freed. May be called on any object,
* even non-KFENCE objects, to simplify integration of the hooks into the
* allocator's free codepath. The allocator must check the return value to
* determine if it was a KFENCE object or not.
*/
static __always_inline __must_check bool kfence_free(void *addr)
{
if (!is_kfence_address(addr))
return false;
__kfence_free(addr);
return true;
}
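
/*
 * Example integration into an allocator's free path (sketch; cache_free() and
 * cache_free_slow() are hypothetical):
 *
 *	void cache_free(struct kmem_cache *s, void *obj)
 *	{
 *		if (kfence_free(obj))
 *			return;			// released to the KFENCE pool
 *		cache_free_slow(s, obj);	// regular freelist path
 *	}
 */
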
/**
* kfence_handle_page_fault() - perform page fault handling for KFENCE pages
* @addr: faulting address
* @is_write: is access a write
 * @regs: current struct pt_regs (can be NULL; if provided, the report shows
 *	the full stack trace)
*
* Return:
* * false - address outside KFENCE pool,
* * true - page fault handled by KFENCE, no additional handling required.
*
* A page fault inside KFENCE pool indicates a memory error, such as an
* out-of-bounds access, a use-after-free or an invalid memory access. In these
* cases KFENCE prints an error message and marks the offending page as
* present, so that the kernel can proceed.
*/
bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs);
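
/*
 * Example arch integration (sketch; the fault-handler structure varies by
 * architecture). In the kernel-mode fault path, before treating the access as
 * fatal:
 *
 *	if (kfence_handle_page_fault(addr, is_write, regs))
 *		return;	// report printed, page marked present, access retried
 */
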
#else /* CONFIG_KFENCE */

static inline bool is_kfence_address(const void *addr) { return false; }
static inline void kfence_alloc_pool(void) { }
static inline void kfence_init(void) { }
static inline void kfence_shutdown_cache(struct kmem_cache *s) { }
static inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) { return NULL; }
static inline size_t kfence_ksize(const void *addr) { return 0; }
static inline void *kfence_object_start(const void *addr) { return NULL; }
static inline void __kfence_free(void *addr) { }
static inline bool __must_check kfence_free(void *addr) { return false; }
static inline bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write,
struct pt_regs *regs)
{
return false;
}

#endif

#endif /* _LINUX_KFENCE_H */