author    | Heiko Carstens <heiko.carstens@de.ibm.com>  | 2007-02-05 23:18:41 +0300
committer | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2007-02-05 23:18:41 +0300
commit    | 162e006ef59266b9ebf34e3d15ca1f3d9ee956d7 (patch)
tree      | d7f1b61fbe822e71867bd04ee4ee7f3f1cd20842 /arch/s390
parent    | ab14de6c37fae22911ba99f4171613e6d758050b (diff)
download  | linux-162e006ef59266b9ebf34e3d15ca1f3d9ee956d7.tar.xz
[S390] Mark kernel text section read-only.
Set the read-only flag in the page table entries for the kernel image text
section. This will catch all instruction-caused corruptions within the
text section.

Instruction replacement via kprobes still works, since it now bypasses
dynamic address translation.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
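For context: the write protection itself is done by setup_ro_region() (see the diff below), which installs a read-only pte for every page between _stext and _eshared, rounding the start down with PAGE_MASK and the end up with PFN_ALIGN. The following user-space program is only an analogy, not the kernel's code: it uses mmap()/mprotect() as a stand-in for the pte manipulation, and the buffer and section bounds are made up for the demonstration.

/*
 * Analogy only: user-space mprotect() standing in for the kernel's pte
 * updates in setup_ro_region(). The mapping and the demo bounds are
 * hypothetical; the real patch protects [_stext, _eshared).
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	unsigned long page_mask = ~((unsigned long)page_size - 1);

	/* Pretend this buffer is the kernel image text section. */
	size_t len = 3 * page_size;
	unsigned char *image = mmap(NULL, len, PROT_READ | PROT_WRITE,
				    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (image == MAP_FAILED)
		return 1;
	memset(image, 0x07, len);	/* fill with dummy "instructions" */

	/* Hypothetical section bounds somewhere inside the mapping. */
	unsigned long start = (unsigned long)image + 100;	/* like _stext */
	unsigned long end = (unsigned long)image + len - 50;	/* like _eshared */

	/* Same rounding as the patch: start down, end up to a page. */
	unsigned long address = start & page_mask;		/* & PAGE_MASK */
	unsigned long aligned_end = (end + page_size - 1) & page_mask; /* PFN_ALIGN */

	if (mprotect((void *)address, aligned_end - address, PROT_READ))
		return 1;

	printf("write protected: %#lx - %#lx\n", address, aligned_end - 1);
	/* A stray store such as image[200] = 0 would now fault instead of
	 * silently corrupting the "text" - the class of bug the patch catches. */
	munmap(image, len);
	return 0;
}

The kernel does the per-page equivalent by rewriting each pte without write permission, so a wild store into the text section produces a protection fault rather than silent corruption.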
Diffstat (limited to 'arch/s390')
-rw-r--r-- | arch/s390/kernel/early.c       |  1
-rw-r--r-- | arch/s390/kernel/kprobes.c     | 27
-rw-r--r-- | arch/s390/kernel/vmlinux.lds.S |  3
-rw-r--r-- | arch/s390/mm/init.c            |  9
4 files changed, 29 insertions, 11 deletions
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 40dd47970a33..e518dd53eff5 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -27,7 +27,6 @@
 #define DEFSYS_CMD_SIZE		96
 #define SAVESYS_CMD_SIZE	32
 
-extern int _eshared;
 char kernel_nss_name[NSS_NAME_SIZE + 1];
 
 #ifdef CONFIG_SHARED_KERNEL
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index b2e1dc89a8c6..a466bab6677e 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -155,15 +155,34 @@ void __kprobes get_instruction_type(struct arch_specific_insn *ainsn)
 
 static int __kprobes swap_instruction(void *aref)
 {
 	struct ins_replace_args *args = aref;
+	u32 *addr;
+	u32 instr;
 	int err = -EFAULT;
 
+	/*
+	 * Text segment is read-only, hence we use stura to bypass dynamic
+	 * address translation to exchange the instruction. Since stura
+	 * always operates on four bytes, but we only want to exchange two
+	 * bytes do some calculations to get things right. In addition we
+	 * shall not cross any page boundaries (vmalloc area!) when writing
+	 * the new instruction.
+	 */
+	addr = (u32 *)ALIGN((unsigned long)args->ptr, 4);
+	if ((unsigned long)args->ptr & 2)
+		instr = ((*addr) & 0xffff0000) | args->new;
+	else
+		instr = ((*addr) & 0x0000ffff) | args->new << 16;
+
 	asm volatile(
-		"0: mvc  0(2,%2),0(%3)\n"
-		"1: la   %0,0\n"
+		"	lra	%1,0(%1)\n"
+		"0:	stura	%2,%1\n"
+		"1:	la	%0,0\n"
 		"2:\n"
 		EX_TABLE(0b,2b)
-		: "+d" (err), "=m" (*args->ptr)
-		: "a" (args->ptr), "a" (&args->new), "m" (args->new));
+		: "+d" (err)
+		: "a" (addr), "d" (instr)
+		: "memory", "cc");
+
 	return err;
 }
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 8fedb1f9fc97..a48907392522 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -35,9 +35,10 @@ SECTIONS
 
 #ifdef CONFIG_SHARED_KERNEL
   . = ALIGN(1048576);		/* VM shared segments are 1MB aligned */
+#endif
 
+  . = ALIGN(4096);
   _eshared = .;			/* End of shareable data */
-#endif
 
   . = ALIGN(16);		/* Exception table */
   __start___ex_table = .;
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 162a338a5575..b3e7c45efb63 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -26,7 +26,6 @@
 #include <linux/pfn.h>
 #include <linux/poison.h>
 #include <linux/initrd.h>
-
 #include <asm/processor.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
@@ -96,8 +95,8 @@ static void __init setup_ro_region(void)
 	pte_t new_pte;
 	unsigned long address, end;
 
-	address = ((unsigned long)&__start_rodata) & PAGE_MASK;
-	end = PFN_ALIGN((unsigned long)&__end_rodata);
+	address = ((unsigned long)&_stext) & PAGE_MASK;
+	end = PFN_ALIGN((unsigned long)&_eshared);
 
 	for (; address < end; address += PAGE_SIZE) {
 		pgd = pgd_offset_k(address);
@@ -173,8 +172,8 @@ void __init mem_init(void)
 	       datasize >>10,
 	       initsize >> 10);
 	printk("Write protected kernel read-only data: %#lx - %#lx\n",
-	       (unsigned long)&__start_rodata,
-	       PFN_ALIGN((unsigned long)&__end_rodata) - 1);
+	       (unsigned long)&_stext,
+	       PFN_ALIGN((unsigned long)&_eshared) - 1);
 }
 
 void free_initmem(void)
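The subtle part of the patch is swap_instruction(): since the text pages are now read-only, the two-byte breakpoint is written with stura, a privileged store that bypasses dynamic address translation and always stores a full four-byte word, so the new opcode has to be merged into the containing word first. Below is a minimal host-side sketch of just that halfword-merge arithmetic. It is an illustration, not the kernel code: it assumes the containing word is found by rounding the instruction address down to a four-byte boundary, it ignores the privileged store and the s390 big-endian memory layout, and merge_halfword() plus the sample values are hypothetical.

/*
 * Sketch of the halfword merge used when a 2-byte instruction must be
 * written with a store that only operates on aligned 4-byte words.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t merge_halfword(uint32_t old_word, unsigned long insn_addr,
			       uint16_t new_insn)
{
	/* The 2-byte instruction sits either in the upper or the lower
	 * halfword of the aligned word, depending on bit 1 of its address
	 * (big-endian view: offset 0 -> upper halfword). */
	if (insn_addr & 2)
		return (old_word & 0xffff0000u) | new_insn;			/* lower half */
	else
		return (old_word & 0x0000ffffu) | ((uint32_t)new_insn << 16);	/* upper half */
}

int main(void)
{
	unsigned long addr = 0x10002;		/* hypothetical instruction address */
	unsigned long word_addr = addr & ~3UL;	/* containing aligned 4-byte word */
	uint32_t old_word = 0xa7f40000u;	/* hypothetical original word contents */
	uint16_t breakpoint = 0x0002;		/* s390 kprobes breakpoint opcode */

	printf("store %#x at %#lx\n",
	       (unsigned int)merge_halfword(old_word, addr, breakpoint), word_addr);
	return 0;
}

Staying within one aligned word also matters because the store goes to the real address: in the vmalloc area, virtually adjacent pages need not be physically contiguous, which is the page-boundary caveat spelled out in the comment in swap_instruction().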