/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_32_KUP_H
#define _ASM_POWERPC_BOOK3S_32_KUP_H

#include <asm/book3s/32/mmu-hash.h>

#ifdef __ASSEMBLY__
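
/*
 * Write the segment register value in gpr1 into CTR consecutive user
 * segments starting at the effective address in gpr2. Each iteration
 * steps the VSID to the next segment's VSID (+0x111), masks off VSID
 * overflow and advances to the next 256MB segment. The final isync
 * provides the context synchronisation required after mtsrin.
 */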
.macro kuep_update_sr	gpr1, gpr2	/* NEVER use r0 as gpr2 due to addis */
101:	mtsrin	\gpr1, \gpr2
	addi	\gpr1, \gpr1, 0x111		/* next VSID */
	rlwinm	\gpr1, \gpr1, 0, 0xf0ffffff	/* clear VSID overflow */
	addis	\gpr2, \gpr2, 0x1000		/* address of next segment */
	bdnz	101b
	isync
.endm
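
/*
 * KUEP lock: set the No-execute (N) bit in every user segment register
 * so that userspace code cannot be executed while running in the kernel.
 */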
.macro kuep_lock	gpr1, gpr2
#ifdef CONFIG_PPC_KUEP
	li	\gpr1, NUM_USER_SEGMENTS
	li	\gpr2, 0
	mtctr	\gpr1
	mfsrin	\gpr1, \gpr2
	oris	\gpr1, \gpr1, SR_NX@h	/* set Nx */
	kuep_update_sr \gpr1, \gpr2
#endif
.endm
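
/*
 * KUEP unlock: clear the No-execute (N) bit in every user segment
 * register again before returning to userspace.
 */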
.macro kuep_unlock	gpr1, gpr2
#ifdef CONFIG_PPC_KUEP
	li	\gpr1, NUM_USER_SEGMENTS
	li	\gpr2, 0
	mtctr	\gpr1
	mfsrin	\gpr1, \gpr2
	rlwinm	\gpr1, \gpr1, 0, ~SR_NX	/* Clear Nx */
	kuep_update_sr \gpr1, \gpr2
#endif
.endm

#ifdef CONFIG_PPC_KUAP
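
/*
 * Same walk as kuep_update_sr, but bounded by the end effective address
 * in gpr3 rather than by the CTR count, so only the segments covering a
 * given user access window are rewritten.
 */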
.macro kuap_update_sr	gpr1, gpr2, gpr3	/* NEVER use r0 as gpr2 due to addis */
101:	mtsrin	\gpr1, \gpr2
	addi	\gpr1, \gpr1, 0x111		/* next VSID */
	rlwinm	\gpr1, \gpr1, 0, 0xf0ffffff	/* clear VSID overflow */
	addis	\gpr2, \gpr2, 0x1000		/* address of next segment */
	cmplw	\gpr2, \gpr3
	blt-	101b
	isync
.endm
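
/*
 * On kernel entry: save the currently open user access window
 * (thread.kuap) into the exception frame and, if one was open, clear it
 * and set the Ks (supervisor key) bit back on the affected segments so
 * kernel accesses to userspace are blocked again.
 */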
.macro kuap_save_and_lock	sp, thread, gpr1, gpr2, gpr3
	lwz	\gpr2, KUAP(\thread)
	rlwinm.	\gpr3, \gpr2, 28, 0xf0000000
	stw	\gpr2, STACK_REGS_KUAP(\sp)
	beq+	102f
	li	\gpr1, 0
	stw	\gpr1, KUAP(\thread)
	mfsrin	\gpr1, \gpr2
	oris	\gpr1, \gpr1, SR_KS@h	/* set Ks */
	kuap_update_sr	\gpr1, \gpr2, \gpr3
102:
.endm
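
/*
 * On exception exit: restore thread.kuap from the exception frame and,
 * if an access window was open when the exception was taken, clear the
 * Ks bit on the covered segments to re-open that window.
 */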
.macro kuap_restore	sp, current, gpr1, gpr2, gpr3
	lwz	\gpr2, STACK_REGS_KUAP(\sp)
	rlwinm.	\gpr3, \gpr2, 28, 0xf0000000
	stw	\gpr2, THREAD + KUAP(\current)
	beq+	102f
	mfsrin	\gpr1, \gpr2
	rlwinm	\gpr1, \gpr1, 0, ~SR_KS	/* Clear Ks */
	kuap_update_sr	\gpr1, \gpr2, \gpr3
102:
.endm
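
/*
 * Debug helper: trap (reported as a WARN, once) if thread.kuap is not
 * zero, i.e. if a user access window was left open.
 */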
.macro kuap_check	current, gpr
#ifdef CONFIG_PPC_KUAP_DEBUG
	lwz	\gpr, THREAD + KUAP(\current)
999:	twnei	\gpr, 0
	EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
#endif
.endm

#endif /* CONFIG_PPC_KUAP */

#else /* !__ASSEMBLY__ */

#ifdef CONFIG_PPC_KUAP

#include <linux/sched.h>
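
/*
 * Apply the segment register value 'sr' to every 256MB segment covering
 * [addr, end), adjusting the VSID for each successive segment.
 */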
static inline void kuap_update_sr(u32 sr, u32 addr, u32 end)
{
	addr &= 0xf0000000;	/* align addr to start of segment */
	barrier();	/* make sure thread.kuap is updated before playing with SRs */
	while (addr < end) {
		mtsrin(sr, addr);
		sr += 0x111;		/* next VSID */
		sr &= 0xf0ffffff;	/* clear VSID overflow */
		addr += 0x10000000;	/* address of next segment */
	}
	isync();	/* Context sync required after mtsrin() */
}
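
/*
 * Open a user access window: record it in thread.kuap (start segment in
 * the top nibble, last segment + 1 in the bottom nibble) and clear the
 * Ks bit on the segments covering [to, to + size) so the kernel may
 * write to that range. 'from' is ignored: only write accesses are
 * protected here (see bad_kuap_fault() below).
 */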
static inline void allow_user_access(void __user *to, const void __user *from, u32 size)
{
	u32 addr, end;

	if (__builtin_constant_p(to) && to == NULL)
		return;

	addr = (__force u32)to;

	if (!addr || addr >= TASK_SIZE || !size)
		return;

	end = min(addr + size, TASK_SIZE);

	current->thread.kuap = (addr & 0xf0000000) | ((((end - 1) >> 28) + 1) & 0xf);
	kuap_update_sr(mfsrin(addr) & ~SR_KS, addr, end);	/* Clear Ks */
}
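
/*
 * Close the user access window: clear thread.kuap and set the Ks bit
 * back on the segments that allow_user_access() opened.
 */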
static inline void prevent_user_access(void __user *to, const void __user *from, u32 size)
{
	u32 addr = (__force u32)to;
	u32 end = min(addr + size, TASK_SIZE);

	if (!addr || addr >= TASK_SIZE || !size)
		return;

	current->thread.kuap = 0;
	kuap_update_sr(mfsrin(addr) | SR_KS, addr, end);	/* set Ks */
}
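
/*
 * Fault handler helper: a read fault can never be caused by this KUAP
 * implementation since only writes are protected; for a write fault,
 * warn and report a KUAP fault if no access window was open.
 */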
static inline bool bad_kuap_fault(struct pt_regs *regs, bool is_write)
{
	if (!is_write)
		return false;

	return WARN(!regs->kuap, "Bug: write fault blocked by segment registers !");
}

#endif /* CONFIG_PPC_KUAP */

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_32_KUP_H */