author    Kirill A. Shutemov <kirill.shutemov@linux.intel.com>    2024-10-16 14:14:55 +0300
committer Ingo Molnar <mingo@kernel.org>    2025-03-19 13:12:29 +0300
commit    f666c92090a41ac5524dade63ff96b3adcf8c2ab (patch)
tree      59f38aab723df4180daf50d1ab224bc0ee2c4132
parent    775d37d8f01ea1349be8bd7cc8651b51814c48df (diff)
download  linux-f666c92090a41ac5524dade63ff96b3adcf8c2ab.tar.xz
x86/mm/ident_map: Fix theoretical virtual address overflow to zero
The current calculation of the 'next' virtual address in the page
table initialization functions in arch/x86/mm/ident_map.c doesn't
protect against wrapping to zero.

This is a theoretical issue that cannot happen currently: the
problematic case is possible only if the user sets a high enough
x86_mapping_info::offset value - which no current code in the
upstream kernel does.

( The wrapping to zero only occurs if the top PGD entry is accessed.
  There are no such users upstream. Only hibernate_64.c uses
  x86_mapping_info::offset, and it operates on the direct mapping
  range, which is not the top PGD entry. )

Should such an overflow happen, it can result in page table
corruption and a hang.

To future-proof this code, replace the manual 'next' calculation with
p?d_addr_end(), which handles wrapping correctly.

[ Backporter's note: there's no need to backport this patch. ]

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Kai Huang <kai.huang@intel.com>
Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: https://lore.kernel.org/r/20241016111458.846228-2-kirill.shutemov@linux.intel.com
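To make the failure mode concrete, here is a minimal userspace sketch
(not kernel code) that reproduces only the arithmetic of the old
'next' calculation. The constants mirror x86-64 PUD geometry, and the
chosen address sits in the topmost PUD-sized slot, i.e. the top PGD
entry case described above:

/*
 * Minimal userspace sketch of the theoretical overflow, assuming
 * x86-64 PUD geometry (PUD_SHIFT = 30). Not kernel code; it only
 * reproduces the arithmetic of the old 'next' calculation.
 */
#include <stdio.h>

#define PUD_SHIFT	30
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE - 1))

int main(void)
{
	/* An address inside the topmost PUD-sized slot of the 64-bit space. */
	unsigned long addr = 0xffffffffc0000000UL;
	unsigned long end  = 0xffffffffffffffffUL;

	/* Old calculation: the addition wraps to 0 ... */
	unsigned long next = (addr & PUD_MASK) + PUD_SIZE;

	/* ... and the clamp never fires, because 0 > end is false. */
	if (next > end)
		next = end;

	printf("next = %#lx\n", next);	/* prints: next = 0 */
	return 0;
}

Since the mapping loops advance with 'addr = next', a zero 'next'
restarts the walk from the bottom of the address space, which is how
the corruption and hang could arise.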
-rw-r--r--    arch/x86/mm/ident_map.c    14
1 file changed, 3 insertions(+), 11 deletions(-)
diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
index 5ab7bd2f1983..bd5d101c5c37 100644
--- a/arch/x86/mm/ident_map.c
+++ b/arch/x86/mm/ident_map.c
@@ -101,9 +101,7 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
 		pmd_t *pmd;
 		bool use_gbpage;
 
-		next = (addr & PUD_MASK) + PUD_SIZE;
-		if (next > end)
-			next = end;
+		next = pud_addr_end(addr, end);
 
 		/* if this is already a gbpage, this portion is already mapped */
 		if (pud_leaf(*pud))
@@ -154,10 +152,7 @@ static int ident_p4d_init(struct x86_mapping_info *info, p4d_t *p4d_page,
 		p4d_t *p4d = p4d_page + p4d_index(addr);
 		pud_t *pud;
 
-		next = (addr & P4D_MASK) + P4D_SIZE;
-		if (next > end)
-			next = end;
-
+		next = p4d_addr_end(addr, end);
 		if (p4d_present(*p4d)) {
 			pud = pud_offset(p4d, 0);
 			result = ident_pud_init(info, pud, addr, next);
@@ -199,10 +194,7 @@ int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
 		pgd_t *pgd = pgd_page + pgd_index(addr);
 		p4d_t *p4d;
 
-		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
-		if (next > end)
-			next = end;
-
+		next = pgd_addr_end(addr, end);
 		if (pgd_present(*pgd)) {
 			p4d = p4d_offset(pgd, 0);
 			result = ident_p4d_init(info, p4d, addr, next);
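For reference, the wrap-safe boundary computation the patch switches
to follows the generic p?d_addr_end() pattern from
include/linux/pgtable.h. A sketch of the PUD variant, modeled on the
generic definition (not a verbatim copy of the upstream macro):

/*
 * Wrap-safe boundary computation, modeled on the generic
 * p?d_addr_end() macros. If the addition wraps, __boundary is 0, so
 * __boundary - 1 underflows to ULONG_MAX, the comparison fails, and
 * 'end' is returned instead of the wrapped boundary.
 */
#define pud_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);		\
})

With addr = 0xffffffffc0000000 and end = ~0UL, __boundary wraps to 0,
__boundary - 1 becomes ULONG_MAX, and the macro returns 'end', so the
mapping loop terminates at the top of the address space instead of
wrapping around to zero.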