summaryrefslogtreecommitdiff
path: root/arch/s390/include
diff options
context:
space:
mode:
authorVasily Gorbik <gor@linux.ibm.com>2026-03-13 02:13:51 +0300
committerVasily Gorbik <gor@linux.ibm.com>2026-03-13 02:13:51 +0300
commit7bbf70131f937ee5871e22d159ed2fc66795722f (patch)
tree664b94ea2461fbb5e779462f1266c67d7b20b978 /arch/s390/include
parent92ae0c1efc6c7b9c3299a6c954bca2a928cb9f60 (diff)
parent07c4e7a6f6b1d1ac871ae93c203b20144b709ec5 (diff)
downloadlinux-7bbf70131f937ee5871e22d159ed2fc66795722f.tar.xz
Merge branch 'page-table-check-support' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux into features
Add s390 support for CONFIG_PAGE_TABLE_CHECK. * 'page-table-check-support' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: s390: Enable page table check for debug_defconfig s390/pgtable: Add s390 support for page table check s390/pgtable: Use set_pmd_bit() to invalidate PMD entry mm/page_table_check: Pass mm_struct to pxx_user_accessible_page() Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Diffstat (limited to 'arch/s390/include')
-rw-r--r--arch/s390/include/asm/pgtable.h60
1 file changed, 53 insertions, 7 deletions
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 1c3c3be93be9..67f5df20a57e 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -16,8 +16,10 @@
#include <linux/mm_types.h>
#include <linux/cpufeature.h>
#include <linux/page-flags.h>
+#include <linux/page_table_check.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
+#include <linux/mmap_lock.h>
#include <asm/ctlreg.h>
#include <asm/bug.h>
#include <asm/page.h>
@@ -1190,6 +1192,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
/* At this point the reference through the mapping is still present */
if (mm_is_protected(mm) && pte_present(res))
WARN_ON_ONCE(uv_convert_from_secure_pte(res));
+ page_table_check_pte_clear(mm, addr, res);
return res;
}
@@ -1208,6 +1211,7 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
/* At this point the reference through the mapping is still present */
if (mm_is_protected(vma->vm_mm) && pte_present(res))
WARN_ON_ONCE(uv_convert_from_secure_pte(res));
+ page_table_check_pte_clear(vma->vm_mm, addr, res);
return res;
}
@@ -1231,6 +1235,9 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
} else {
res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}
+
+ page_table_check_pte_clear(mm, addr, res);
+
/* Nothing to do */
if (!mm_is_protected(mm) || !pte_present(res))
return res;
@@ -1327,6 +1334,7 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
{
if (pte_present(entry))
entry = clear_pte_bit(entry, __pgprot(_PAGE_UNUSED));
+ page_table_check_ptes_set(mm, addr, ptep, entry, nr);
for (;;) {
set_pte(ptep, entry);
if (--nr == 0)
@@ -1703,6 +1711,7 @@ static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t entry)
{
+ page_table_check_pmd_set(mm, addr, pmdp, entry);
set_pmd(pmdp, entry);
}
@@ -1717,7 +1726,11 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd)
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp)
{
- return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
+ pmd_t pmd;
+
+ pmd = pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
+ page_table_check_pmd_clear(mm, addr, pmd);
+ return pmd;
}
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
@@ -1725,12 +1738,17 @@ static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
unsigned long addr,
pmd_t *pmdp, int full)
{
+ pmd_t pmd;
+
if (full) {
- pmd_t pmd = *pmdp;
+ pmd = *pmdp;
set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
+ page_table_check_pmd_clear(vma->vm_mm, addr, pmd);
return pmd;
}
- return pmdp_xchg_lazy(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
+ pmd = pmdp_xchg_lazy(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
+ page_table_check_pmd_clear(vma->vm_mm, addr, pmd);
+ return pmd;
}
#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
@@ -1744,11 +1762,16 @@ static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmdp)
{
- pmd_t pmd;
+ pmd_t pmd = *pmdp;
- VM_WARN_ON_ONCE(!pmd_present(*pmdp));
- pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
- return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
+ VM_WARN_ON_ONCE(!pmd_present(pmd));
+ pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
+#ifdef CONFIG_PAGE_TABLE_CHECK
+ pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_READ));
+#endif
+ page_table_check_pmd_set(vma->vm_mm, addr, pmdp, pmd);
+ pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
+ return pmd;
}
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
@@ -1783,6 +1806,29 @@ static inline int has_transparent_hugepage(void)
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#ifdef CONFIG_PAGE_TABLE_CHECK
+static inline bool pte_user_accessible_page(struct mm_struct *mm, unsigned long addr, pte_t pte)
+{
+ VM_BUG_ON(mm == &init_mm);
+
+ return pte_present(pte);
+}
+
+static inline bool pmd_user_accessible_page(struct mm_struct *mm, unsigned long addr, pmd_t pmd)
+{
+ VM_BUG_ON(mm == &init_mm);
+
+ return pmd_leaf(pmd) && (pmd_val(pmd) & _SEGMENT_ENTRY_READ);
+}
+
+static inline bool pud_user_accessible_page(struct mm_struct *mm, unsigned long addr, pud_t pud)
+{
+ VM_BUG_ON(mm == &init_mm);
+
+ return pud_leaf(pud);
+}
+#endif
+
/*
* 64 bit swap entry format:
* A page-table entry has some bits we have to treat in a special way.