author | Gerald Schaefer <geraldsc@de.ibm.com> | 2007-02-05 23:18:17 +0300
---|---|---
committer | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2007-02-05 23:18:17 +0300
commit | c1821c2e9711adc3cd298a16b7237c92a2cee78d (patch) |
tree | 9155b089db35a37d95863125ea4c5f918bd7801b /include/asm-s390/pgalloc.h |
parent | 86aa9fc2456d8a662f299a70bdb70987209170f0 (diff) |
download | linux-c1821c2e9711adc3cd298a16b7237c92a2cee78d.tar.xz |
[S390] noexec protection
This provides noexec protection on s390 hardware. Our hardware does
not have any bits left in the pte for a hardware noexec bit, so this
is a different approach, using shadow page tables and a special
addressing mode that allows separate address spaces for code and data.
As a special feature of our "secondary-space" addressing mode, separate
page tables can be specified for the translation of data addresses
(storage operands) and instruction addresses. The shadow page table is
used for the instruction addresses and the standard page table for the
data addresses.
The shadow page table is linked to the standard page table by a pointer
in page->lru.next of the struct page corresponding to the page that
contains the standard page table (since page->private is not really
private with the pte_lock and the page table pages are not in the LRU
list).
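The diff below uses helpers such as get_shadow_pte() and get_shadow_pgd(), which this patch defines elsewhere (in include/asm-s390/pgtable.h). A minimal sketch of what such a lookup can look like, assuming the shadow pointer is stored in page->lru.next as described above; this is an illustration, not the patch's exact code:

```c
#include <linux/mm.h>

/*
 * Sketch only: given a pointer into a standard pte table, find the
 * matching entry in the shadow table by following page->lru.next of
 * the page that holds the table.  Assumes the s390_noexec flag added
 * by this patch is visible here; the real helper lives in
 * include/asm-s390/pgtable.h in this series.
 */
static inline pte_t *get_shadow_pte(pte_t *ptep)
{
        unsigned long offset = (unsigned long) ptep & ~PAGE_MASK;
        struct page *page = virt_to_page(ptep);

        if (!s390_noexec)       /* no shadow tables without noexec */
                return NULL;
        /* the shadow entry sits at the same offset in its own page */
        return (pte_t *) ((unsigned long) page->lru.next + offset);
}
```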
Depending on the software bits of a pte, it is either inserted into
both page tables or just into the standard (data) page table. Pages of
a vma that does not have the VM_EXEC bit set get mapped only in the
data address space. Any attempt to execute code on such a page causes
a page translation exception. The standard reaction to this is a SIGSEGV
with two exceptions: the two system call opcodes 0x0a77 (sys_sigreturn)
and 0x0aad (sys_rt_sigreturn) are allowed. They are stored by the
kernel to the signal stack frame. Unfortunately, the signal return
mechanism cannot be modified to use an SA_RESTORER because the
exception unwinding code depends on the system call opcode stored
behind the signal stack frame.
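As an illustration of that special case, the fault handler has to recognize the two opcodes at the faulting instruction address before falling back to SIGSEGV. A hedged sketch with a hypothetical helper name (the actual handling is in arch/s390/mm/fault.c):

```c
#include <asm/ptrace.h>
#include <asm/uaccess.h>

/*
 * Hypothetical sketch: on an execute fault, check whether the
 * would-be instruction is one of the two sigreturn trampoline
 * opcodes that the kernel put on the signal stack frame.  The frame
 * is readable as data, so get_user() can fetch it through the
 * (standard) data page table even though execution faulted.
 */
static int is_sigreturn_trampoline(struct pt_regs *regs)
{
        __u16 opcode;

        if (get_user(opcode, (__u16 __user *)
                     (regs->psw.addr & PSW_ADDR_INSN)))
                return 0;
        /* 0x0a77 = svc 119 (sys_sigreturn), 0x0aad = svc 173 (sys_rt_sigreturn) */
        return opcode == 0x0a77 || opcode == 0x0aad;
}
```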
This feature requires that user space is executed in secondary-space
mode and the kernel in home-space mode, which means that the addressing
modes need to be switched and that the noexec protection only works
for user space.
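The split itself is expressed through the PSW address-space-control (ASC) bits. Purely for orientation, a sketch using the existing defines from include/asm-s390/ptrace.h (the patch's actual per-mode PSW masks carry more bits than shown here):

```c
#include <asm/ptrace.h>

/*
 * Illustration only: the ASC bits of the PSW select which set of
 * translation tables an address is resolved with.  User space runs
 * with the secondary-space ASC (data via the standard tables,
 * instructions via the shadow tables), the kernel with the
 * home-space ASC.
 */
static const unsigned long user_asc_bits   = PSW_ASC_SECONDARY;
static const unsigned long kernel_asc_bits = PSW_ASC_HOME;
```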
After switching the addressing modes, we can no longer use the
mvcp/mvcs instructions to copy between kernel and user space. A new
mvcos instruction has been added to the z9 EC/BC hardware which allows
copying between arbitrary address spaces, but on older hardware the
page tables need to be walked manually.
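A minimal sketch of that manual walk, loosely modelled on what the companion patches add as arch/s390/lib/uaccess_pt.c; locking, fault handling, and error retries are omitted, and the function name is illustrative:

```c
#include <linux/mm.h>
#include <asm/pgtable.h>

/*
 * Sketch only: copy from user space by resolving the user address
 * through the page tables by hand and copying via the kernel's
 * identity mapping of the same physical page.  Returns the number
 * of bytes NOT copied, following the uaccess convention.
 */
static size_t copy_from_user_pt_sketch(struct mm_struct *mm, size_t n,
                                       void *to, const void __user *from)
{
        unsigned long uaddr = (unsigned long) from;
        size_t done = 0;

        while (done < n) {
                unsigned long offset = uaddr & ~PAGE_MASK;
                size_t chunk = min_t(size_t, n - done, PAGE_SIZE - offset);
                pgd_t *pgd = pgd_offset(mm, uaddr);
                pmd_t *pmd;
                pte_t *pte;

                if (pgd_none(*pgd))
                        break;  /* real code would fault the page in */
                pmd = pmd_offset(pgd, uaddr);
                if (pmd_none(*pmd))
                        break;
                pte = pte_offset_map(pmd, uaddr);
                if (!pte_present(*pte))
                        break;
                /* copy through the kernel mapping of the physical page */
                memcpy(to, (void *) ((pte_pfn(*pte) << PAGE_SHIFT) + offset),
                       chunk);
                done += chunk;
                uaddr += chunk;
                to += chunk;
        }
        return n - done;
}
```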
Signed-off-by: Gerald Schaefer <geraldsc@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'include/asm-s390/pgalloc.h')
-rw-r--r-- | include/asm-s390/pgalloc.h | 85
1 file changed, 80 insertions(+), 5 deletions(-)
```diff
diff --git a/include/asm-s390/pgalloc.h b/include/asm-s390/pgalloc.h
index 0707a7e2fc16..56c8a6c80e2e 100644
--- a/include/asm-s390/pgalloc.h
+++ b/include/asm-s390/pgalloc.h
@@ -47,6 +47,17 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 
 	if (!pgd)
 		return NULL;
+	if (s390_noexec) {
+		pgd_t *shadow_pgd = (pgd_t *)
+			__get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
+		struct page *page = virt_to_page(pgd);
+
+		if (!shadow_pgd) {
+			free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
+			return NULL;
+		}
+		page->lru.next = (void *) shadow_pgd;
+	}
 	for (i = 0; i < PTRS_PER_PGD; i++)
 #ifndef __s390x__
 		pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE));
@@ -58,6 +69,10 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 
 static inline void pgd_free(pgd_t *pgd)
 {
+	pgd_t *shadow_pgd = get_shadow_pgd(pgd);
+
+	if (shadow_pgd)
+		free_pages((unsigned long) shadow_pgd, PGD_ALLOC_ORDER);
 	free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
 }
 
@@ -71,6 +86,7 @@ static inline void pgd_free(pgd_t *pgd)
 #define pmd_free(x)			do { } while (0)
 #define __pmd_free_tlb(tlb,x)		do { } while (0)
 #define pgd_populate(mm, pmd, pte)	BUG()
+#define pgd_populate_kernel(mm, pmd, pte)	BUG()
 #else /* __s390x__ */
 static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
 {
@@ -79,6 +95,17 @@ static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
 
 	if (!pmd)
 		return NULL;
+	if (s390_noexec) {
+		pmd_t *shadow_pmd = (pmd_t *)
+			__get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
+		struct page *page = virt_to_page(pmd);
+
+		if (!shadow_pmd) {
+			free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
+			return NULL;
+		}
+		page->lru.next = (void *) shadow_pmd;
+	}
 	for (i=0; i < PTRS_PER_PMD; i++)
 		pmd_clear(pmd + i);
 	return pmd;
@@ -86,6 +113,10 @@ static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
 
 static inline void pmd_free (pmd_t *pmd)
 {
+	pmd_t *shadow_pmd = get_shadow_pmd(pmd);
+
+	if (shadow_pmd)
+		free_pages((unsigned long) shadow_pmd, PMD_ALLOC_ORDER);
 	free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
 }
 
@@ -95,11 +126,22 @@ static inline void pmd_free (pmd_t *pmd)
 		pmd_free(pmd);					\
 	} while (0)
 
-static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+static inline void
+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
 {
 	pgd_val(*pgd) = _PGD_ENTRY | __pa(pmd);
 }
 
+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+{
+	pgd_t *shadow_pgd = get_shadow_pgd(pgd);
+	pmd_t *shadow_pmd = get_shadow_pmd(pmd);
+
+	if (shadow_pgd && shadow_pmd)
+		pgd_populate_kernel(mm, shadow_pgd, shadow_pmd);
+	pgd_populate_kernel(mm, pgd, pmd);
+}
+
 #endif /* __s390x__ */
 
 static inline void
@@ -119,7 +161,13 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
 static inline void
 pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
 {
-	pmd_populate_kernel(mm, pmd, (pte_t *)page_to_phys(page));
+	pte_t *pte = (pte_t *)page_to_phys(page);
+	pmd_t *shadow_pmd = get_shadow_pmd(pmd);
+	pte_t *shadow_pte = get_shadow_pte(pte);
+
+	pmd_populate_kernel(mm, pmd, pte);
+	if (shadow_pmd && shadow_pte)
+		pmd_populate_kernel(mm, shadow_pmd, shadow_pte);
 }
 
 /*
@@ -133,6 +181,17 @@ pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr)
 
 	if (!pte)
 		return NULL;
+	if (s390_noexec) {
+		pte_t *shadow_pte = (pte_t *)
+			__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+		struct page *page = virt_to_page(pte);
+
+		if (!shadow_pte) {
+			free_page((unsigned long) pte);
+			return NULL;
+		}
+		page->lru.next = (void *) shadow_pte;
+	}
 	for (i=0; i < PTRS_PER_PTE; i++) {
 		pte_clear(mm, vmaddr, pte + i);
 		vmaddr += PAGE_SIZE;
@@ -151,14 +210,30 @@ pte_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
 
 static inline void pte_free_kernel(pte_t *pte)
 {
-	free_page((unsigned long) pte);
+	pte_t *shadow_pte = get_shadow_pte(pte);
+
+	if (shadow_pte)
+		free_page((unsigned long) shadow_pte);
+	free_page((unsigned long) pte);
 }
 
 static inline void pte_free(struct page *pte)
 {
-	__free_page(pte);
+	struct page *shadow_page = get_shadow_page(pte);
+
+	if (shadow_page)
+		__free_page(shadow_page);
+	__free_page(pte);
 }
 
-#define __pte_free_tlb(tlb,pte) tlb_remove_page(tlb,pte)
+#define __pte_free_tlb(tlb, pte)					\
+({									\
+	struct mmu_gather *__tlb = (tlb);				\
+	struct page *__pte = (pte);					\
+	struct page *shadow_page = get_shadow_page(__pte);		\
+	if (shadow_page)						\
+		tlb_remove_page(__tlb, shadow_page);			\
+	tlb_remove_page(__tlb, __pte);					\
+})
 
 #endif /* _S390_PGALLOC_H */
```