| author | Linus Torvalds <torvalds@linux-foundation.org> | 2021-03-31 20:14:55 +0300 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2021-03-31 20:14:55 +0300 |
| commit | d19cc4bfbff1ae72c3505a00fb8ce0d3fa519e6c (patch) | |
| tree | a667a3358c76a97e2be749bbcaaaebcada289a1c /kernel | |
| parent | 39192106d4efd482f96a0be8b7aaae7ec150d9ee (diff) | |
| parent | 59300b36f85f254260c81d9dd09195fa49eb0f98 (diff) | |
| download | linux-d19cc4bfbff1ae72c3505a00fb8ce0d3fa519e6c.tar.xz | |
Merge tag 'trace-v5.12-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull ftrace fix from Steven Rostedt:
"Add check of order < 0 before calling free_pages()
The function addresses that are traced by ftrace are stored in pages,
and the size is held in a variable. If there's some error in creating
them, the allocated ones will be freed. In this case, it is possible
that the order of pages to be freed may end up being negative due to a
size of zero passed to get_count_order(), and then that negative
number will cause free_pages() to free a very large section.
Make sure that does not happen"
* tag 'trace-v5.12-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
ftrace: Check if pages were allocated before calling free_pages()
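For context, here is a minimal userspace sketch of the failure mode the fix guards against: when a page group ends up with a size of zero, get_count_order(0) returns -1, and since free_pages() takes its order argument as an unsigned int, a negative order would wrap to a huge value. The fls_bit() helper and the printf() calls below are stand-ins for the kernel's fls() and free_pages(); this mirrors the classic get_count_order() logic rather than the exact kernel source.

```c
#include <stdio.h>

/* Stand-in for the kernel's fls(): index of the highest set bit, 1-based. */
static int fls_bit(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

/*
 * Sketch of get_count_order(): smallest order such that
 * (1 << order) >= count. For count == 0 it returns -1.
 */
static int get_count_order(unsigned int count)
{
	int order = fls_bit(count) - 1;

	if (count & (count - 1))
		order++;
	return order;
}

int main(void)
{
	/* pg->size == 0 corresponds to a group with no allocated records. */
	int order = get_count_order(0);

	printf("get_count_order(0) = %d\n", order);	/* prints -1 */

	/*
	 * This is the guard the patch adds: only call free_pages() when the
	 * order is non-negative, i.e. when pages were actually allocated.
	 */
	if (order >= 0)
		printf("would call free_pages(addr, %d)\n", order);
	else
		printf("skipping free_pages(): nothing was allocated\n");

	return 0;
}
```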
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/trace/ftrace.c | 9 |
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b7e29db127fa..3ba52d4e1314 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3231,7 +3231,8 @@ ftrace_allocate_pages(unsigned long num_to_init)
 	pg = start_pg;
 	while (pg) {
 		order = get_count_order(pg->size / ENTRIES_PER_PAGE);
-		free_pages((unsigned long)pg->records, order);
+		if (order >= 0)
+			free_pages((unsigned long)pg->records, order);
 		start_pg = pg->next;
 		kfree(pg);
 		pg = start_pg;
@@ -6451,7 +6452,8 @@ void ftrace_release_mod(struct module *mod)
 		clear_mod_from_hashes(pg);

 		order = get_count_order(pg->size / ENTRIES_PER_PAGE);
-		free_pages((unsigned long)pg->records, order);
+		if (order >= 0)
+			free_pages((unsigned long)pg->records, order);
 		tmp_page = pg->next;
 		kfree(pg);
 		ftrace_number_of_pages -= 1 << order;
@@ -6811,7 +6813,8 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
 		if (!pg->index) {
 			*last_pg = pg->next;
 			order = get_count_order(pg->size / ENTRIES_PER_PAGE);
-			free_pages((unsigned long)pg->records, order);
+			if (order >= 0)
+				free_pages((unsigned long)pg->records, order);
 			ftrace_number_of_pages -= 1 << order;
 			ftrace_number_of_groups--;
 			kfree(pg);