author    Linus Torvalds <torvalds@linux-foundation.org>  2023-04-30 21:43:31 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2023-04-30 21:43:31 +0300
commit    10de638d8ea57ebab4231ea077bed01d9bade775 (patch)
tree      f9de1b131e1a94c1cbe051e55cda1ba9b9418ee9 /arch
parent    d55571c0084465f1f7e1e29f22bd910d366a6e1d (diff)
parent    2a405f6bb3a5b2baaa74dfc5aaa0e1b99145bd1b (diff)
download  linux-10de638d8ea57ebab4231ea077bed01d9bade775.tar.xz
Merge tag 's390-6.4-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Vasily Gorbik:

 - Add support for stackleak feature. Also allow specifying
   architecture-specific stackleak poison function to enable faster
   implementation. On s390, the mvc-based implementation helps decrease
   typical overhead from a factor of 3 to just 25%

 - Convert all assembler files to use SYM* style macros, deprecating the
   ENTRY() macro and other annotations. Select ARCH_USE_SYM_ANNOTATIONS

 - Improve KASLR to also randomize module and special amode31 code base
   load addresses

 - Rework decompressor memory tracking to support memory holes and
   improve error handling

 - Add support for protected virtualization AP binding

 - Add support for set_direct_map() calls

 - Implement set_memory_rox() and noexec module_alloc()

 - Remove obsolete overriding of mem*() functions for KASAN

 - Rework kexec/kdump to avoid using nodat_stack to call purgatory

 - Convert the rest of the s390 code to use flexible-array member
   instead of a zero-length array

 - Clean up uaccess inline asm

 - Enable ARCH_HAS_MEMBARRIER_SYNC_CORE

 - Convert to using CONFIG_FUNCTION_ALIGNMENT and enable
   DEBUG_FORCE_FUNCTION_ALIGN_64B

 - Resolve last_break in userspace fault reports

 - Simplify one-level sysctl registration

 - Clean up branch prediction handling

 - Rework CPU counter facility to retrieve available counter sets just
   once

 - Other various small fixes and improvements all over the code

* tag 's390-6.4-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (118 commits)
  s390/stackleak: provide fast __stackleak_poison() implementation
  stackleak: allow to specify arch specific stackleak poison function
  s390: select ARCH_USE_SYM_ANNOTATIONS
  s390/mm: use VM_FLUSH_RESET_PERMS in module_alloc()
  s390: wire up memfd_secret system call
  s390/mm: enable ARCH_HAS_SET_DIRECT_MAP
  s390/mm: use BIT macro to generate SET_MEMORY bit masks
  s390/relocate_kernel: adjust indentation
  s390/relocate_kernel: use SYM* macros instead of ENTRY(), etc.
  s390/entry: use SYM* macros instead of ENTRY(), etc.
  s390/purgatory: use SYM* macros instead of ENTRY(), etc.
  s390/kprobes: use SYM* macros instead of ENTRY(), etc.
  s390/reipl: use SYM* macros instead of ENTRY(), etc.
  s390/head64: use SYM* macros instead of ENTRY(), etc.
  s390/earlypgm: use SYM* macros instead of ENTRY(), etc.
  s390/mcount: use SYM* macros instead of ENTRY(), etc.
  s390/crc32le: use SYM* macros instead of ENTRY(), etc.
  s390/crc32be: use SYM* macros instead of ENTRY(), etc.
  s390/crypto,chacha: use SYM* macros instead of ENTRY(), etc.
  s390/amode31: use SYM* macros instead of ENTRY(), etc.
  ...
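A side note on the flexible-array conversion listed above: the change is mechanical, replacing the old GNU zero-length array idiom with a C99 flexible-array member. A minimal compilable sketch with a hypothetical struct, not an excerpt from the patch:

	#include <stdio.h>

	struct old_rec {
		unsigned int len;
		unsigned char data[0];	/* GNU zero-length array (deprecated) */
	};

	struct new_rec {
		unsigned int len;
		unsigned char data[];	/* C99 flexible-array member */
	};

	int main(void)
	{
		/* Both structs occupy only the fixed header; trailing data is
		 * allocated separately, e.g. malloc(sizeof(struct new_rec) + n). */
		printf("%zu %zu\n", sizeof(struct old_rec), sizeof(struct new_rec));
		return 0;
	}

The flexible-array form lets compilers and fortified memcpy() diagnose out-of-bounds accesses that the [0] idiom hides.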
Diffstat (limited to 'arch')
-rw-r--r--  arch/s390/Kconfig  10
-rw-r--r--  arch/s390/appldata/appldata_base.c  32
-rw-r--r--  arch/s390/boot/Makefile  2
-rw-r--r--  arch/s390/boot/boot.h  42
-rwxr-xr-x  arch/s390/boot/install.sh  8
-rw-r--r--  arch/s390/boot/ipl_parm.c  6
-rw-r--r--  arch/s390/boot/ipl_report.c  106
-rw-r--r--  arch/s390/boot/kaslr.c  171
-rw-r--r--  arch/s390/boot/mem_detect.c  191
-rw-r--r--  arch/s390/boot/pgm_check_info.c  7
-rw-r--r--  arch/s390/boot/physmem_info.c  328
-rw-r--r--  arch/s390/boot/startup.c  129
-rw-r--r--  arch/s390/boot/vmem.c  284
-rw-r--r--  arch/s390/boot/vmlinux.lds.S  2
-rw-r--r--  arch/s390/crypto/chacha-s390.S  47
-rw-r--r--  arch/s390/crypto/crc32be-vx.S  17
-rw-r--r--  arch/s390/crypto/crc32le-vx.S  30
-rw-r--r--  arch/s390/include/asm/ap.h  152
-rw-r--r--  arch/s390/include/asm/checksum.h  10
-rw-r--r--  arch/s390/include/asm/diag.h  2
-rw-r--r--  arch/s390/include/asm/entry-common.h  5
-rw-r--r--  arch/s390/include/asm/fcx.h  2
-rw-r--r--  arch/s390/include/asm/kasan.h  31
-rw-r--r--  arch/s390/include/asm/linkage.h  2
-rw-r--r--  arch/s390/include/asm/mem_detect.h  117
-rw-r--r--  arch/s390/include/asm/nospec-insn.h  3
-rw-r--r--  arch/s390/include/asm/perf_event.h  2
-rw-r--r--  arch/s390/include/asm/pgtable.h  2
-rw-r--r--  arch/s390/include/asm/physmem_info.h  171
-rw-r--r--  arch/s390/include/asm/processor.h  46
-rw-r--r--  arch/s390/include/asm/set_memory.h  36
-rw-r--r--  arch/s390/include/asm/setup.h  20
-rw-r--r--  arch/s390/include/asm/stacktrace.h  52
-rw-r--r--  arch/s390/include/asm/string.h  15
-rw-r--r--  arch/s390/include/asm/thread_info.h  10
-rw-r--r--  arch/s390/kernel/debug.c  14
-rw-r--r--  arch/s390/kernel/dumpstack.c  46
-rw-r--r--  arch/s390/kernel/early.c  23
-rw-r--r--  arch/s390/kernel/earlypgm.S  4
-rw-r--r--  arch/s390/kernel/entry.S  152
-rw-r--r--  arch/s390/kernel/ftrace.c  22
-rw-r--r--  arch/s390/kernel/head64.S  14
-rw-r--r--  arch/s390/kernel/ipl.c  7
-rw-r--r--  arch/s390/kernel/kprobes.c  2
-rw-r--r--  arch/s390/kernel/kprobes_insn_page.S  4
-rw-r--r--  arch/s390/kernel/machine_kexec.c  56
-rw-r--r--  arch/s390/kernel/mcount.S  26
-rw-r--r--  arch/s390/kernel/module.c  29
-rw-r--r--  arch/s390/kernel/perf_cpum_cf.c  206
-rw-r--r--  arch/s390/kernel/perf_cpum_sf.c  14
-rw-r--r--  arch/s390/kernel/process.c  10
-rw-r--r--  arch/s390/kernel/processor.c  18
-rw-r--r--  arch/s390/kernel/reipl.S  10
-rw-r--r--  arch/s390/kernel/relocate_kernel.S  96
-rw-r--r--  arch/s390/kernel/setup.c  152
-rw-r--r--  arch/s390/kernel/smp.c  11
-rw-r--r--  arch/s390/kernel/syscalls/syscall.tbl  2
-rw-r--r--  arch/s390/kernel/text_amode31.S  75
-rw-r--r--  arch/s390/kernel/topology.c  12
-rw-r--r--  arch/s390/kernel/vdso32/vdso_user_wrapper.S  3
-rw-r--r--  arch/s390/kernel/vdso64/vdso_user_wrapper.S  5
-rw-r--r--  arch/s390/kernel/vmlinux.lds.S  10
-rw-r--r--  arch/s390/lib/mem.S  28
-rw-r--r--  arch/s390/lib/uaccess.c  137
-rw-r--r--  arch/s390/mm/Makefile  3
-rw-r--r--  arch/s390/mm/cmm.c  12
-rw-r--r--  arch/s390/mm/init.c  5
-rw-r--r--  arch/s390/mm/kasan_init.c  301
-rw-r--r--  arch/s390/mm/pageattr.c  94
-rw-r--r--  arch/s390/mm/pgalloc.c  20
-rw-r--r--  arch/s390/mm/vmem.c  35
-rw-r--r--  arch/s390/pci/pci.c  23
-rw-r--r--  arch/s390/pci/pci_bus.c  11
-rw-r--r--  arch/s390/purgatory/head.S  62
-rw-r--r--  arch/s390/purgatory/kexec-purgatory.S  14
75 files changed, 1971 insertions, 1887 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 61d778397720..beb62f744c61 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -26,10 +26,6 @@ config GENERIC_BUG
config GENERIC_BUG_RELATIVE_POINTERS
def_bool y
-config GENERIC_CSUM
- bool
- default y if KASAN
-
config GENERIC_LOCKBREAK
def_bool y if PREEMPTION
@@ -76,10 +72,12 @@ config S390
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_GIGANTIC_PAGE
select ARCH_HAS_KCOV
+ select ARCH_HAS_MEMBARRIER_SYNC_CORE
select ARCH_HAS_MEM_ENCRYPT
select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_SCALED_CPUTIME
+ select ARCH_HAS_SET_DIRECT_MAP
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_HAS_STRICT_MODULE_RWX
@@ -123,6 +121,7 @@ config S390
select ARCH_SUPPORTS_PER_VMA_LOCK
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF
+ select ARCH_USE_SYM_ANNOTATIONS
select ARCH_WANTS_DYNAMIC_TASK_STRUCT
select ARCH_WANTS_NO_INSTR
select ARCH_WANT_DEFAULT_BPF_JIT
@@ -132,6 +131,8 @@ config S390
select CLONE_BACKWARDS2
select DMA_OPS if PCI
select DYNAMIC_FTRACE if FUNCTION_TRACER
+ select FUNCTION_ALIGNMENT_8B if CC_IS_GCC
+ select FUNCTION_ALIGNMENT_16B if !CC_IS_GCC
select GCC12_NO_ARRAY_BOUNDS
select GENERIC_ALLOCATOR
select GENERIC_CPU_AUTOPROBE
@@ -153,6 +154,7 @@ config S390
select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_SOFT_DIRTY
+ select HAVE_ARCH_STACKLEAK
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select HAVE_ARCH_VMAP_STACK
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index c0fd29133f27..b07b0610950e 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -66,16 +66,6 @@ static struct ctl_table appldata_table[] = {
{ },
};
-static struct ctl_table appldata_dir_table[] = {
- {
- .procname = appldata_proc_name,
- .maxlen = 0,
- .mode = S_IRUGO | S_IXUGO,
- .child = appldata_table,
- },
- { },
-};
-
/*
* Timer
*/
@@ -291,7 +281,7 @@ appldata_generic_handler(struct ctl_table *ctl, int write,
mutex_lock(&appldata_ops_mutex);
list_for_each(lh, &appldata_ops_list) {
tmp_ops = list_entry(lh, struct appldata_ops, list);
- if (&tmp_ops->ctl_table[2] == ctl) {
+ if (&tmp_ops->ctl_table[0] == ctl) {
found = 1;
}
}
@@ -361,7 +351,8 @@ int appldata_register_ops(struct appldata_ops *ops)
if (ops->size > APPLDATA_MAX_REC_SIZE)
return -EINVAL;
- ops->ctl_table = kcalloc(4, sizeof(struct ctl_table), GFP_KERNEL);
+ /* The last entry must be an empty one */
+ ops->ctl_table = kcalloc(2, sizeof(struct ctl_table), GFP_KERNEL);
if (!ops->ctl_table)
return -ENOMEM;
@@ -369,17 +360,12 @@ int appldata_register_ops(struct appldata_ops *ops)
list_add(&ops->list, &appldata_ops_list);
mutex_unlock(&appldata_ops_mutex);
- ops->ctl_table[0].procname = appldata_proc_name;
- ops->ctl_table[0].maxlen = 0;
- ops->ctl_table[0].mode = S_IRUGO | S_IXUGO;
- ops->ctl_table[0].child = &ops->ctl_table[2];
-
- ops->ctl_table[2].procname = ops->name;
- ops->ctl_table[2].mode = S_IRUGO | S_IWUSR;
- ops->ctl_table[2].proc_handler = appldata_generic_handler;
- ops->ctl_table[2].data = ops;
+ ops->ctl_table[0].procname = ops->name;
+ ops->ctl_table[0].mode = S_IRUGO | S_IWUSR;
+ ops->ctl_table[0].proc_handler = appldata_generic_handler;
+ ops->ctl_table[0].data = ops;
- ops->sysctl_header = register_sysctl_table(ops->ctl_table);
+ ops->sysctl_header = register_sysctl(appldata_proc_name, ops->ctl_table);
if (!ops->sysctl_header)
goto out;
return 0;
@@ -422,7 +408,7 @@ static int __init appldata_init(void)
appldata_wq = alloc_ordered_workqueue("appldata", 0);
if (!appldata_wq)
return -ENOMEM;
- appldata_sysctl_header = register_sysctl_table(appldata_dir_table);
+ appldata_sysctl_header = register_sysctl(appldata_proc_name, appldata_table);
return 0;
}
diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile
index cebd4ca16916..c7c81e5f9218 100644
--- a/arch/s390/boot/Makefile
+++ b/arch/s390/boot/Makefile
@@ -35,7 +35,7 @@ endif
CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char
-obj-y := head.o als.o startup.o mem_detect.o ipl_parm.o ipl_report.o vmem.o
+obj-y := head.o als.o startup.o physmem_info.o ipl_parm.o ipl_report.o vmem.o
obj-y += string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o
obj-y += version.o pgm_check_info.o ctype.o ipl_data.o machine_kexec_reloc.o
obj-$(findstring y, $(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) $(CONFIG_PGSTE)) += uv.o
diff --git a/arch/s390/boot/boot.h b/arch/s390/boot/boot.h
index 58ce701d6110..222c6886acf6 100644
--- a/arch/s390/boot/boot.h
+++ b/arch/s390/boot/boot.h
@@ -8,6 +8,8 @@
#ifndef __ASSEMBLY__
+#include <asm/physmem_info.h>
+
struct machine_info {
unsigned char has_edat1 : 1;
unsigned char has_edat2 : 1;
@@ -30,24 +32,46 @@ struct vmlinux_info {
unsigned long init_mm_off;
unsigned long swapper_pg_dir_off;
unsigned long invalid_pg_dir_off;
+#ifdef CONFIG_KASAN
+ unsigned long kasan_early_shadow_page_off;
+ unsigned long kasan_early_shadow_pte_off;
+ unsigned long kasan_early_shadow_pmd_off;
+ unsigned long kasan_early_shadow_pud_off;
+ unsigned long kasan_early_shadow_p4d_off;
+#endif
};
void startup_kernel(void);
-unsigned long detect_memory(unsigned long *safe_addr);
-void mem_detect_set_usable_limit(unsigned long limit);
+unsigned long detect_max_physmem_end(void);
+void detect_physmem_online_ranges(unsigned long max_physmem_end);
+void physmem_set_usable_limit(unsigned long limit);
+void physmem_reserve(enum reserved_range_type type, unsigned long addr, unsigned long size);
+void physmem_free(enum reserved_range_type type);
+/* for continuous/multiple allocations per type */
+unsigned long physmem_alloc_top_down(enum reserved_range_type type, unsigned long size,
+ unsigned long align);
+/* for single allocations, 1 per type */
+unsigned long physmem_alloc_range(enum reserved_range_type type, unsigned long size,
+ unsigned long align, unsigned long min, unsigned long max,
+ bool die_on_oom);
+unsigned long get_physmem_alloc_pos(void);
+bool ipl_report_certs_intersects(unsigned long addr, unsigned long size,
+ unsigned long *intersection_start);
bool is_ipl_block_dump(void);
void store_ipl_parmblock(void);
-unsigned long read_ipl_report(unsigned long safe_addr);
+int read_ipl_report(void);
+void save_ipl_cert_comp_list(void);
void setup_boot_command_line(void);
void parse_boot_command_line(void);
void verify_facilities(void);
void print_missing_facilities(void);
void sclp_early_setup_buffer(void);
void print_pgm_check_info(void);
-unsigned long get_random_base(unsigned long safe_addr);
+unsigned long randomize_within_range(unsigned long size, unsigned long align,
+ unsigned long min, unsigned long max);
void setup_vmem(unsigned long asce_limit);
-unsigned long vmem_estimate_memory_needs(unsigned long online_mem_total);
void __printf(1, 2) decompressor_printk(const char *fmt, ...);
+void print_stacktrace(unsigned long sp);
void error(char *m);
extern struct machine_info machine;
@@ -57,12 +81,11 @@ extern const char kernel_version[];
extern unsigned long memory_limit;
extern unsigned long vmalloc_size;
extern int vmalloc_size_set;
-extern int kaslr_enabled;
extern char __boot_data_start[], __boot_data_end[];
extern char __boot_data_preserved_start[], __boot_data_preserved_end[];
extern char _decompressor_syms_start[], _decompressor_syms_end[];
extern char _stack_start[], _stack_end[];
-extern char _end[];
+extern char _end[], _decompressor_end[];
extern unsigned char _compressed_start[];
extern unsigned char _compressed_end[];
extern struct vmlinux_info _vmlinux_info;
@@ -70,5 +93,10 @@ extern struct vmlinux_info _vmlinux_info;
#define __abs_lowcore_pa(x) (((unsigned long)(x) - __abs_lowcore) % sizeof(struct lowcore))
+static inline bool intersects(unsigned long addr0, unsigned long size0,
+ unsigned long addr1, unsigned long size1)
+{
+ return addr0 + size0 > addr1 && addr1 + size1 > addr0;
+}
#endif /* __ASSEMBLY__ */
#endif /* BOOT_BOOT_H */
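The intersects() helper added to boot.h above is the standard overlap test for two half-open [addr, addr+size) ranges: they overlap iff each range starts before the other ends. A small standalone sketch of the same predicate, with hypothetical test values that are not from the patch:

	#include <stdbool.h>
	#include <stdio.h>

	/* Same predicate as the boot.h helper above. */
	static bool intersects(unsigned long addr0, unsigned long size0,
			       unsigned long addr1, unsigned long size1)
	{
		return addr0 + size0 > addr1 && addr1 + size1 > addr0;
	}

	int main(void)
	{
		/* Overlapping ranges: prints 1. */
		printf("%d\n", intersects(0x1000, 0x1000, 0x1800, 0x100));
		/* Merely adjacent ranges do not intersect: prints 0. */
		printf("%d\n", intersects(0x1000, 0x1000, 0x2000, 0x100));
		return 0;
	}

Because the ranges are half-open, a range ending exactly where another begins is not counted as an intersection, which is what the reserve/alloc bookkeeping above relies on.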
diff --git a/arch/s390/boot/install.sh b/arch/s390/boot/install.sh
index 616ba1660f08..a13dd2f2aa1c 100755
--- a/arch/s390/boot/install.sh
+++ b/arch/s390/boot/install.sh
@@ -17,8 +17,8 @@
echo "Warning: '${INSTALLKERNEL}' command not available - additional " \
"bootloader config required" >&2
-if [ -f $4/vmlinuz-$1 ]; then mv $4/vmlinuz-$1 $4/vmlinuz-$1.old; fi
-if [ -f $4/System.map-$1 ]; then mv $4/System.map-$1 $4/System.map-$1.old; fi
+if [ -f "$4/vmlinuz-$1" ]; then mv -- "$4/vmlinuz-$1" "$4/vmlinuz-$1.old"; fi
+if [ -f "$4/System.map-$1" ]; then mv -- "$4/System.map-$1" "$4/System.map-$1.old"; fi
-cat $2 > $4/vmlinuz-$1
-cp $3 $4/System.map-$1
+cat -- "$2" > "$4/vmlinuz-$1"
+cp -- "$3" "$4/System.map-$1"
diff --git a/arch/s390/boot/ipl_parm.c b/arch/s390/boot/ipl_parm.c
index c1f8f7999fed..8753cb0339e5 100644
--- a/arch/s390/boot/ipl_parm.c
+++ b/arch/s390/boot/ipl_parm.c
@@ -24,11 +24,11 @@ int __bootdata(noexec_disabled);
unsigned int __bootdata_preserved(zlib_dfltcc_support) = ZLIB_DFLTCC_FULL;
struct ipl_parameter_block __bootdata_preserved(ipl_block);
int __bootdata_preserved(ipl_block_valid);
+int __bootdata_preserved(__kaslr_enabled);
unsigned long vmalloc_size = VMALLOC_DEFAULT_SIZE;
unsigned long memory_limit;
int vmalloc_size_set;
-int kaslr_enabled;
static inline int __diag308(unsigned long subcode, void *addr)
{
@@ -264,7 +264,7 @@ void parse_boot_command_line(void)
char *args;
int rc;
- kaslr_enabled = IS_ENABLED(CONFIG_RANDOMIZE_BASE);
+ __kaslr_enabled = IS_ENABLED(CONFIG_RANDOMIZE_BASE);
args = strcpy(command_line_buf, early_command_line);
while (*args) {
args = next_arg(args, &param, &val);
@@ -300,7 +300,7 @@ void parse_boot_command_line(void)
modify_fac_list(val);
if (!strcmp(param, "nokaslr"))
- kaslr_enabled = 0;
+ __kaslr_enabled = 0;
#if IS_ENABLED(CONFIG_KVM)
if (!strcmp(param, "prot_virt")) {
diff --git a/arch/s390/boot/ipl_report.c b/arch/s390/boot/ipl_report.c
index 74b5cd264862..1803035e68d2 100644
--- a/arch/s390/boot/ipl_report.c
+++ b/arch/s390/boot/ipl_report.c
@@ -5,6 +5,7 @@
#include <asm/sclp.h>
#include <asm/sections.h>
#include <asm/boot_data.h>
+#include <asm/physmem_info.h>
#include <uapi/asm/ipl.h>
#include "boot.h"
@@ -16,20 +17,16 @@ unsigned long __bootdata_preserved(ipl_cert_list_size);
unsigned long __bootdata(early_ipl_comp_list_addr);
unsigned long __bootdata(early_ipl_comp_list_size);
+static struct ipl_rb_certificates *certs;
+static struct ipl_rb_components *comps;
+static bool ipl_report_needs_saving;
+
#define for_each_rb_entry(entry, rb) \
for (entry = rb->entries; \
(void *) entry + sizeof(*entry) <= (void *) rb + rb->len; \
entry++)
-static inline bool intersects(unsigned long addr0, unsigned long size0,
- unsigned long addr1, unsigned long size1)
-{
- return addr0 + size0 > addr1 && addr1 + size1 > addr0;
-}
-
-static unsigned long find_bootdata_space(struct ipl_rb_components *comps,
- struct ipl_rb_certificates *certs,
- unsigned long safe_addr)
+static unsigned long get_cert_comp_list_size(void)
{
struct ipl_rb_certificate_entry *cert;
struct ipl_rb_component_entry *comp;
@@ -44,44 +41,27 @@ static unsigned long find_bootdata_space(struct ipl_rb_components *comps,
ipl_cert_list_size = 0;
for_each_rb_entry(cert, certs)
ipl_cert_list_size += sizeof(unsigned int) + cert->len;
- size = ipl_cert_list_size + early_ipl_comp_list_size;
+ return ipl_cert_list_size + early_ipl_comp_list_size;
+}
- /*
- * Start from safe_addr to find a free memory area large
- * enough for the IPL report boot data. This area is used
- * for ipl_cert_list_addr/ipl_cert_list_size and
- * early_ipl_comp_list_addr/early_ipl_comp_list_size. It must
- * not overlap with any component or any certificate.
- */
-repeat:
- if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_data.start && initrd_data.size &&
- intersects(initrd_data.start, initrd_data.size, safe_addr, size))
- safe_addr = initrd_data.start + initrd_data.size;
- if (intersects(safe_addr, size, (unsigned long)comps, comps->len)) {
- safe_addr = (unsigned long)comps + comps->len;
- goto repeat;
- }
- for_each_rb_entry(comp, comps)
- if (intersects(safe_addr, size, comp->addr, comp->len)) {
- safe_addr = comp->addr + comp->len;
- goto repeat;
+bool ipl_report_certs_intersects(unsigned long addr, unsigned long size,
+ unsigned long *intersection_start)
+{
+ struct ipl_rb_certificate_entry *cert;
+
+ if (!ipl_report_needs_saving)
+ return false;
+
+ for_each_rb_entry(cert, certs) {
+ if (intersects(addr, size, cert->addr, cert->len)) {
+ *intersection_start = cert->addr;
+ return true;
}
- if (intersects(safe_addr, size, (unsigned long)certs, certs->len)) {
- safe_addr = (unsigned long)certs + certs->len;
- goto repeat;
}
- for_each_rb_entry(cert, certs)
- if (intersects(safe_addr, size, cert->addr, cert->len)) {
- safe_addr = cert->addr + cert->len;
- goto repeat;
- }
- early_ipl_comp_list_addr = safe_addr;
- ipl_cert_list_addr = safe_addr + early_ipl_comp_list_size;
-
- return safe_addr + size;
+ return false;
}
-static void copy_components_bootdata(struct ipl_rb_components *comps)
+static void copy_components_bootdata(void)
{
struct ipl_rb_component_entry *comp, *ptr;
@@ -90,7 +70,7 @@ static void copy_components_bootdata(struct ipl_rb_components *comps)
memcpy(ptr++, comp, sizeof(*ptr));
}
-static void copy_certificates_bootdata(struct ipl_rb_certificates *certs)
+static void copy_certificates_bootdata(void)
{
struct ipl_rb_certificate_entry *cert;
void *ptr;
@@ -104,10 +84,8 @@ static void copy_certificates_bootdata(struct ipl_rb_certificates *certs)
}
}
-unsigned long read_ipl_report(unsigned long safe_addr)
+int read_ipl_report(void)
{
- struct ipl_rb_certificates *certs;
- struct ipl_rb_components *comps;
struct ipl_pl_hdr *pl_hdr;
struct ipl_rl_hdr *rl_hdr;
struct ipl_rb_hdr *rb_hdr;
@@ -120,7 +98,7 @@ unsigned long read_ipl_report(unsigned long safe_addr)
*/
if (!ipl_block_valid ||
!(ipl_block.hdr.flags & IPL_PL_FLAG_IPLSR))
- return safe_addr;
+ return -1;
ipl_secure_flag = !!(ipl_block.hdr.flags & IPL_PL_FLAG_SIPL);
/*
* There is an IPL report, to find it load the pointer to the
@@ -158,16 +136,30 @@ unsigned long read_ipl_report(unsigned long safe_addr)
* With either the component list or the certificate list
* missing the kernel will stay ignorant of secure IPL.
*/
- if (!comps || !certs)
- return safe_addr;
+ if (!comps || !certs) {
+ certs = NULL;
+ return -1;
+ }
- /*
- * Copy component and certificate list to a safe area
- * where the decompressed kernel can find them.
- */
- safe_addr = find_bootdata_space(comps, certs, safe_addr);
- copy_components_bootdata(comps);
- copy_certificates_bootdata(certs);
+ ipl_report_needs_saving = true;
+ physmem_reserve(RR_IPLREPORT, (unsigned long)pl_hdr,
+ (unsigned long)rl_end - (unsigned long)pl_hdr);
+ return 0;
+}
+
+void save_ipl_cert_comp_list(void)
+{
+ unsigned long size;
+
+ if (!ipl_report_needs_saving)
+ return;
+
+ size = get_cert_comp_list_size();
+ early_ipl_comp_list_addr = physmem_alloc_top_down(RR_CERT_COMP_LIST, size, sizeof(int));
+ ipl_cert_list_addr = early_ipl_comp_list_addr + early_ipl_comp_list_size;
- return safe_addr;
+ copy_components_bootdata();
+ copy_certificates_bootdata();
+ physmem_free(RR_IPLREPORT);
+ ipl_report_needs_saving = false;
}
diff --git a/arch/s390/boot/kaslr.c b/arch/s390/boot/kaslr.c
index 3e3d846400b4..90602101e2ae 100644
--- a/arch/s390/boot/kaslr.c
+++ b/arch/s390/boot/kaslr.c
@@ -3,7 +3,7 @@
* Copyright IBM Corp. 2019
*/
#include <linux/pgtable.h>
-#include <asm/mem_detect.h>
+#include <asm/physmem_info.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/sclp.h>
@@ -91,113 +91,108 @@ static int get_random(unsigned long limit, unsigned long *value)
return 0;
}
-/*
- * To randomize kernel base address we have to consider several facts:
- * 1. physical online memory might not be continuous and have holes. mem_detect
- * info contains list of online memory ranges we should consider.
- * 2. we have several memory regions which are occupied and we should not
- * overlap and destroy them. Currently safe_addr tells us the border below
- * which all those occupied regions are. We are safe to use anything above
- * safe_addr.
- * 3. the upper limit might apply as well, even if memory above that limit is
- * online. Currently those limitations are:
- * 3.1. Limit set by "mem=" kernel command line option
- * 3.2. memory reserved at the end for kasan initialization.
- * 4. kernel base address must be aligned to THREAD_SIZE (kernel stack size).
- * Which is required for CONFIG_CHECK_STACK. Currently THREAD_SIZE is 4 pages
- * (16 pages when the kernel is built with kasan enabled)
- * Assumptions:
- * 1. kernel size (including .bss size) and upper memory limit are page aligned.
- * 2. mem_detect memory region start is THREAD_SIZE aligned / end is PAGE_SIZE
- * aligned (in practice memory configurations granularity on z/VM and LPAR
- * is 1mb).
- *
- * To guarantee uniform distribution of kernel base address among all suitable
- * addresses we generate random value just once. For that we need to build a
- * continuous range in which every value would be suitable. We can build this
- * range by simply counting all suitable addresses (let's call them positions)
- * which would be valid as kernel base address. To count positions we iterate
- * over online memory ranges. For each range which is big enough for the
- * kernel image we count all suitable addresses we can put the kernel image at
- * that is
- * (end - start - kernel_size) / THREAD_SIZE + 1
- * Two functions count_valid_kernel_positions and position_to_address help
- * to count positions in memory range given and then convert position back
- * to address.
- */
-static unsigned long count_valid_kernel_positions(unsigned long kernel_size,
- unsigned long _min,
- unsigned long _max)
+static void sort_reserved_ranges(struct reserved_range *res, unsigned long size)
{
- unsigned long start, end, pos = 0;
- int i;
-
- for_each_mem_detect_usable_block(i, &start, &end) {
- if (_min >= end)
- continue;
- if (start >= _max)
- break;
- start = max(_min, start);
- end = min(_max, end);
- if (end - start < kernel_size)
- continue;
- pos += (end - start - kernel_size) / THREAD_SIZE + 1;
+ struct reserved_range tmp;
+ int i, j;
+
+ for (i = 1; i < size; i++) {
+ tmp = res[i];
+ for (j = i - 1; j >= 0 && res[j].start > tmp.start; j--)
+ res[j + 1] = res[j];
+ res[j + 1] = tmp;
}
-
- return pos;
}
-static unsigned long position_to_address(unsigned long pos, unsigned long kernel_size,
- unsigned long _min, unsigned long _max)
+static unsigned long iterate_valid_positions(unsigned long size, unsigned long align,
+ unsigned long _min, unsigned long _max,
+ struct reserved_range *res, size_t res_count,
+ bool pos_count, unsigned long find_pos)
{
- unsigned long start, end;
+ unsigned long start, end, tmp_end, range_pos, pos = 0;
+ struct reserved_range *res_end = res + res_count;
+ struct reserved_range *skip_res;
int i;
- for_each_mem_detect_usable_block(i, &start, &end) {
+ align = max(align, 8UL);
+ _min = round_up(_min, align);
+ for_each_physmem_usable_range(i, &start, &end) {
if (_min >= end)
continue;
+ start = round_up(start, align);
if (start >= _max)
break;
start = max(_min, start);
end = min(_max, end);
- if (end - start < kernel_size)
- continue;
- if ((end - start - kernel_size) / THREAD_SIZE + 1 >= pos)
- return start + (pos - 1) * THREAD_SIZE;
- pos -= (end - start - kernel_size) / THREAD_SIZE + 1;
+
+ while (start + size <= end) {
+ /* skip reserved ranges below the start */
+ while (res && res->end <= start) {
+ res++;
+ if (res >= res_end)
+ res = NULL;
+ }
+ skip_res = NULL;
+ tmp_end = end;
+ /* has intersecting reserved range */
+ if (res && res->start < end) {
+ skip_res = res;
+ tmp_end = res->start;
+ }
+ if (start + size <= tmp_end) {
+ range_pos = (tmp_end - start - size) / align + 1;
+ if (pos_count) {
+ pos += range_pos;
+ } else {
+ if (range_pos >= find_pos)
+ return start + (find_pos - 1) * align;
+ find_pos -= range_pos;
+ }
+ }
+ if (!skip_res)
+ break;
+ start = round_up(skip_res->end, align);
+ }
}
- return 0;
+ return pos_count ? pos : 0;
}
-unsigned long get_random_base(unsigned long safe_addr)
+/*
+ * Two types of decompressor memory allocations/reserves are considered
+ * differently.
+ *
+ * "Static" or "single" allocations are done via physmem_alloc_range() and
+ * physmem_reserve(), and they are listed in physmem_info.reserved[]. Each
+ * type of "static" allocation can only have one allocation per type and
+ * cannot have chains.
+ *
+ * On the other hand, "dynamic" or "repetitive" allocations are done via
+ * physmem_alloc_top_down(). These allocations are tightly packed together
+ * top down from the end of online memory. physmem_alloc_pos represents
+ * current position where those allocations start.
+ *
+ * Functions randomize_within_range() and iterate_valid_positions()
+ * only consider "dynamic" allocations by never looking above
+ * physmem_alloc_pos. "Static" allocations, however, are explicitly
+ * considered by checking the "res" (reserves) array. The first
+ * reserved_range of a "dynamic" allocation may also be checked along the
+ * way, but it will always be above the maximum value anyway.
+ */
+unsigned long randomize_within_range(unsigned long size, unsigned long align,
+ unsigned long min, unsigned long max)
{
- unsigned long usable_total = get_mem_detect_usable_total();
- unsigned long memory_limit = get_mem_detect_end();
- unsigned long base_pos, max_pos, kernel_size;
- int i;
-
- /*
- * Avoid putting kernel in the end of physical memory
- * which vmem and kasan code will use for shadow memory and
- * pgtable mapping allocations.
- */
- memory_limit -= kasan_estimate_memory_needs(usable_total);
- memory_limit -= vmem_estimate_memory_needs(usable_total);
+ struct reserved_range res[RR_MAX];
+ unsigned long max_pos, pos;
- safe_addr = ALIGN(safe_addr, THREAD_SIZE);
- kernel_size = vmlinux.image_size + vmlinux.bss_size;
- if (safe_addr + kernel_size > memory_limit)
- return 0;
+ memcpy(res, physmem_info.reserved, sizeof(res));
+ sort_reserved_ranges(res, ARRAY_SIZE(res));
+ max = min(max, get_physmem_alloc_pos());
- max_pos = count_valid_kernel_positions(kernel_size, safe_addr, memory_limit);
- if (!max_pos) {
- sclp_early_printk("KASLR disabled: not enough memory\n");
+ max_pos = iterate_valid_positions(size, align, min, max, res, ARRAY_SIZE(res), true, 0);
+ if (!max_pos)
return 0;
- }
-
- /* we need a value in the range [1, base_pos] inclusive */
- if (get_random(max_pos, &base_pos))
+ if (get_random(max_pos, &pos))
return 0;
- return position_to_address(base_pos + 1, kernel_size, safe_addr, memory_limit);
+ return iterate_valid_positions(size, align, min, max, res, ARRAY_SIZE(res), false, pos + 1);
}
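The reworked kaslr.c above preserves KASLR's uniformity guarantee the same way the old code did: one pass counts every valid aligned position across all usable ranges (the pos_count mode of iterate_valid_positions()), a single random index is drawn, and a second pass maps that index back to an address. A simplified standalone model of the two-pass technique, with toy ranges and without the reserved-range skipping of the real code:

	#include <stdio.h>
	#include <stdlib.h>

	struct range { unsigned long start, end; };

	/* One pass: either count all aligned positions where `size` fits,
	 * or resolve 1-based position index `find_pos` back to an address. */
	static unsigned long iterate(const struct range *r, int n,
				     unsigned long size, unsigned long align,
				     int count_only, unsigned long find_pos)
	{
		unsigned long pos = 0;
		int i;

		for (i = 0; i < n; i++) {
			unsigned long start = (r[i].start + align - 1) & ~(align - 1);
			unsigned long range_pos;

			if (start + size > r[i].end)
				continue;
			range_pos = (r[i].end - start - size) / align + 1;
			if (count_only)
				pos += range_pos;
			else if (find_pos <= range_pos)
				return start + (find_pos - 1) * align;
			else
				find_pos -= range_pos;
		}
		return count_only ? pos : 0;
	}

	int main(void)
	{
		struct range mem[] = { { 0x100000, 0x400000 }, { 0x800000, 0xa00000 } };
		unsigned long size = 0x100000, align = 0x10000, max_pos, pick;

		max_pos = iterate(mem, 2, size, align, 1, 0);	/* pass 1: count */
		if (!max_pos)
			return 1;
		pick = (unsigned long)rand() % max_pos + 1;	/* uniform in [1, max_pos] */
		printf("base 0x%lx out of %lu candidates\n",
		       iterate(mem, 2, size, align, 0, pick), max_pos);	/* pass 2: resolve */
		return 0;
	}

Counting first and drawing the random value once is what makes every candidate address equally likely, even when usable memory has holes of different sizes.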
diff --git a/arch/s390/boot/mem_detect.c b/arch/s390/boot/mem_detect.c
deleted file mode 100644
index 35f4ba11f7fd..000000000000
--- a/arch/s390/boot/mem_detect.c
+++ /dev/null
@@ -1,191 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <asm/setup.h>
-#include <asm/processor.h>
-#include <asm/sclp.h>
-#include <asm/sections.h>
-#include <asm/mem_detect.h>
-#include <asm/sparsemem.h>
-#include "decompressor.h"
-#include "boot.h"
-
-struct mem_detect_info __bootdata(mem_detect);
-
-/* up to 256 storage elements, 1020 subincrements each */
-#define ENTRIES_EXTENDED_MAX \
- (256 * (1020 / 2) * sizeof(struct mem_detect_block))
-
-static struct mem_detect_block *__get_mem_detect_block_ptr(u32 n)
-{
- if (n < MEM_INLINED_ENTRIES)
- return &mem_detect.entries[n];
- return &mem_detect.entries_extended[n - MEM_INLINED_ENTRIES];
-}
-
-/*
- * sequential calls to add_mem_detect_block with adjacent memory areas
- * are merged together into single memory block.
- */
-void add_mem_detect_block(u64 start, u64 end)
-{
- struct mem_detect_block *block;
-
- if (mem_detect.count) {
- block = __get_mem_detect_block_ptr(mem_detect.count - 1);
- if (block->end == start) {
- block->end = end;
- return;
- }
- }
-
- block = __get_mem_detect_block_ptr(mem_detect.count);
- block->start = start;
- block->end = end;
- mem_detect.count++;
-}
-
-static int __diag260(unsigned long rx1, unsigned long rx2)
-{
- unsigned long reg1, reg2, ry;
- union register_pair rx;
- psw_t old;
- int rc;
-
- rx.even = rx1;
- rx.odd = rx2;
- ry = 0x10; /* storage configuration */
- rc = -1; /* fail */
- asm volatile(
- " mvc 0(16,%[psw_old]),0(%[psw_pgm])\n"
- " epsw %[reg1],%[reg2]\n"
- " st %[reg1],0(%[psw_pgm])\n"
- " st %[reg2],4(%[psw_pgm])\n"
- " larl %[reg1],1f\n"
- " stg %[reg1],8(%[psw_pgm])\n"
- " diag %[rx],%[ry],0x260\n"
- " ipm %[rc]\n"
- " srl %[rc],28\n"
- "1: mvc 0(16,%[psw_pgm]),0(%[psw_old])\n"
- : [reg1] "=&d" (reg1),
- [reg2] "=&a" (reg2),
- [rc] "+&d" (rc),
- [ry] "+&d" (ry),
- "+Q" (S390_lowcore.program_new_psw),
- "=Q" (old)
- : [rx] "d" (rx.pair),
- [psw_old] "a" (&old),
- [psw_pgm] "a" (&S390_lowcore.program_new_psw)
- : "cc", "memory");
- return rc == 0 ? ry : -1;
-}
-
-static int diag260(void)
-{
- int rc, i;
-
- struct {
- unsigned long start;
- unsigned long end;
- } storage_extents[8] __aligned(16); /* VM supports up to 8 extends */
-
- memset(storage_extents, 0, sizeof(storage_extents));
- rc = __diag260((unsigned long)storage_extents, sizeof(storage_extents));
- if (rc == -1)
- return -1;
-
- for (i = 0; i < min_t(int, rc, ARRAY_SIZE(storage_extents)); i++)
- add_mem_detect_block(storage_extents[i].start, storage_extents[i].end + 1);
- return 0;
-}
-
-static int tprot(unsigned long addr)
-{
- unsigned long reg1, reg2;
- int rc = -EFAULT;
- psw_t old;
-
- asm volatile(
- " mvc 0(16,%[psw_old]),0(%[psw_pgm])\n"
- " epsw %[reg1],%[reg2]\n"
- " st %[reg1],0(%[psw_pgm])\n"
- " st %[reg2],4(%[psw_pgm])\n"
- " larl %[reg1],1f\n"
- " stg %[reg1],8(%[psw_pgm])\n"
- " tprot 0(%[addr]),0\n"
- " ipm %[rc]\n"
- " srl %[rc],28\n"
- "1: mvc 0(16,%[psw_pgm]),0(%[psw_old])\n"
- : [reg1] "=&d" (reg1),
- [reg2] "=&a" (reg2),
- [rc] "+&d" (rc),
- "=Q" (S390_lowcore.program_new_psw.addr),
- "=Q" (old)
- : [psw_old] "a" (&old),
- [psw_pgm] "a" (&S390_lowcore.program_new_psw),
- [addr] "a" (addr)
- : "cc", "memory");
- return rc;
-}
-
-static unsigned long search_mem_end(void)
-{
- unsigned long range = 1 << (MAX_PHYSMEM_BITS - 20); /* in 1MB blocks */
- unsigned long offset = 0;
- unsigned long pivot;
-
- while (range > 1) {
- range >>= 1;
- pivot = offset + range;
- if (!tprot(pivot << 20))
- offset = pivot;
- }
- return (offset + 1) << 20;
-}
-
-unsigned long detect_memory(unsigned long *safe_addr)
-{
- unsigned long max_physmem_end = 0;
-
- sclp_early_get_memsize(&max_physmem_end);
- mem_detect.entries_extended = (struct mem_detect_block *)ALIGN(*safe_addr, sizeof(u64));
-
- if (!sclp_early_read_storage_info()) {
- mem_detect.info_source = MEM_DETECT_SCLP_STOR_INFO;
- } else if (!diag260()) {
- mem_detect.info_source = MEM_DETECT_DIAG260;
- max_physmem_end = max_physmem_end ?: get_mem_detect_end();
- } else if (max_physmem_end) {
- add_mem_detect_block(0, max_physmem_end);
- mem_detect.info_source = MEM_DETECT_SCLP_READ_INFO;
- } else {
- max_physmem_end = search_mem_end();
- add_mem_detect_block(0, max_physmem_end);
- mem_detect.info_source = MEM_DETECT_BIN_SEARCH;
- }
-
- if (mem_detect.count > MEM_INLINED_ENTRIES) {
- *safe_addr += (mem_detect.count - MEM_INLINED_ENTRIES) *
- sizeof(struct mem_detect_block);
- }
-
- return max_physmem_end;
-}
-
-void mem_detect_set_usable_limit(unsigned long limit)
-{
- struct mem_detect_block *block;
- int i;
-
- /* make sure mem_detect.usable ends up within online memory block */
- for (i = 0; i < mem_detect.count; i++) {
- block = __get_mem_detect_block_ptr(i);
- if (block->start >= limit)
- break;
- if (block->end >= limit) {
- mem_detect.usable = limit;
- break;
- }
- mem_detect.usable = block->end;
- }
-}
diff --git a/arch/s390/boot/pgm_check_info.c b/arch/s390/boot/pgm_check_info.c
index c2a1defc79da..97244cd7a206 100644
--- a/arch/s390/boot/pgm_check_info.c
+++ b/arch/s390/boot/pgm_check_info.c
@@ -123,11 +123,10 @@ out:
sclp_early_printk(buf);
}
-static noinline void print_stacktrace(void)
+void print_stacktrace(unsigned long sp)
{
struct stack_info boot_stack = { STACK_TYPE_TASK, (unsigned long)_stack_start,
(unsigned long)_stack_end };
- unsigned long sp = S390_lowcore.gpregs_save_area[15];
bool first = true;
decompressor_printk("Call Trace:\n");
@@ -154,7 +153,7 @@ void print_pgm_check_info(void)
decompressor_printk("Kernel command line: %s\n", early_command_line);
decompressor_printk("Kernel fault: interruption code %04x ilc:%x\n",
S390_lowcore.pgm_code, S390_lowcore.pgm_ilc >> 1);
- if (kaslr_enabled)
+ if (kaslr_enabled())
decompressor_printk("Kernel random base: %lx\n", __kaslr_offset);
decompressor_printk("PSW : %016lx %016lx (%pS)\n",
S390_lowcore.psw_save_area.mask,
@@ -173,7 +172,7 @@ void print_pgm_check_info(void)
gpregs[8], gpregs[9], gpregs[10], gpregs[11]);
decompressor_printk(" %016lx %016lx %016lx %016lx\n",
gpregs[12], gpregs[13], gpregs[14], gpregs[15]);
- print_stacktrace();
+ print_stacktrace(S390_lowcore.gpregs_save_area[15]);
decompressor_printk("Last Breaking-Event-Address:\n");
decompressor_printk(" [<%016lx>] %pS\n", (unsigned long)S390_lowcore.pgm_last_break,
(void *)S390_lowcore.pgm_last_break);
diff --git a/arch/s390/boot/physmem_info.c b/arch/s390/boot/physmem_info.c
new file mode 100644
index 000000000000..0cf79826eef9
--- /dev/null
+++ b/arch/s390/boot/physmem_info.c
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/processor.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <asm/physmem_info.h>
+#include <asm/stacktrace.h>
+#include <asm/boot_data.h>
+#include <asm/sparsemem.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/sclp.h>
+#include <asm/uv.h>
+#include "decompressor.h"
+#include "boot.h"
+
+struct physmem_info __bootdata(physmem_info);
+static unsigned int physmem_alloc_ranges;
+static unsigned long physmem_alloc_pos;
+
+/* up to 256 storage elements, 1020 subincrements each */
+#define ENTRIES_EXTENDED_MAX \
+ (256 * (1020 / 2) * sizeof(struct physmem_range))
+
+static struct physmem_range *__get_physmem_range_ptr(u32 n)
+{
+ if (n < MEM_INLINED_ENTRIES)
+ return &physmem_info.online[n];
+ if (unlikely(!physmem_info.online_extended)) {
+ physmem_info.online_extended = (struct physmem_range *)physmem_alloc_range(
+ RR_MEM_DETECT_EXTENDED, ENTRIES_EXTENDED_MAX, sizeof(long), 0,
+ physmem_alloc_pos, true);
+ }
+ return &physmem_info.online_extended[n - MEM_INLINED_ENTRIES];
+}
+
+/*
+ * sequential calls to add_physmem_online_range with adjacent memory ranges
+ * are merged together into single memory range.
+ */
+void add_physmem_online_range(u64 start, u64 end)
+{
+ struct physmem_range *range;
+
+ if (physmem_info.range_count) {
+ range = __get_physmem_range_ptr(physmem_info.range_count - 1);
+ if (range->end == start) {
+ range->end = end;
+ return;
+ }
+ }
+
+ range = __get_physmem_range_ptr(physmem_info.range_count);
+ range->start = start;
+ range->end = end;
+ physmem_info.range_count++;
+}
+
+static int __diag260(unsigned long rx1, unsigned long rx2)
+{
+ unsigned long reg1, reg2, ry;
+ union register_pair rx;
+ psw_t old;
+ int rc;
+
+ rx.even = rx1;
+ rx.odd = rx2;
+ ry = 0x10; /* storage configuration */
+ rc = -1; /* fail */
+ asm volatile(
+ " mvc 0(16,%[psw_old]),0(%[psw_pgm])\n"
+ " epsw %[reg1],%[reg2]\n"
+ " st %[reg1],0(%[psw_pgm])\n"
+ " st %[reg2],4(%[psw_pgm])\n"
+ " larl %[reg1],1f\n"
+ " stg %[reg1],8(%[psw_pgm])\n"
+ " diag %[rx],%[ry],0x260\n"
+ " ipm %[rc]\n"
+ " srl %[rc],28\n"
+ "1: mvc 0(16,%[psw_pgm]),0(%[psw_old])\n"
+ : [reg1] "=&d" (reg1),
+ [reg2] "=&a" (reg2),
+ [rc] "+&d" (rc),
+ [ry] "+&d" (ry),
+ "+Q" (S390_lowcore.program_new_psw),
+ "=Q" (old)
+ : [rx] "d" (rx.pair),
+ [psw_old] "a" (&old),
+ [psw_pgm] "a" (&S390_lowcore.program_new_psw)
+ : "cc", "memory");
+ return rc == 0 ? ry : -1;
+}
+
+static int diag260(void)
+{
+ int rc, i;
+
+ struct {
+ unsigned long start;
+ unsigned long end;
+ } storage_extents[8] __aligned(16); /* VM supports up to 8 extends */
+
+ memset(storage_extents, 0, sizeof(storage_extents));
+ rc = __diag260((unsigned long)storage_extents, sizeof(storage_extents));
+ if (rc == -1)
+ return -1;
+
+ for (i = 0; i < min_t(int, rc, ARRAY_SIZE(storage_extents)); i++)
+ add_physmem_online_range(storage_extents[i].start, storage_extents[i].end + 1);
+ return 0;
+}
+
+static int tprot(unsigned long addr)
+{
+ unsigned long reg1, reg2;
+ int rc = -EFAULT;
+ psw_t old;
+
+ asm volatile(
+ " mvc 0(16,%[psw_old]),0(%[psw_pgm])\n"
+ " epsw %[reg1],%[reg2]\n"
+ " st %[reg1],0(%[psw_pgm])\n"
+ " st %[reg2],4(%[psw_pgm])\n"
+ " larl %[reg1],1f\n"
+ " stg %[reg1],8(%[psw_pgm])\n"
+ " tprot 0(%[addr]),0\n"
+ " ipm %[rc]\n"
+ " srl %[rc],28\n"
+ "1: mvc 0(16,%[psw_pgm]),0(%[psw_old])\n"
+ : [reg1] "=&d" (reg1),
+ [reg2] "=&a" (reg2),
+ [rc] "+&d" (rc),
+ "=Q" (S390_lowcore.program_new_psw.addr),
+ "=Q" (old)
+ : [psw_old] "a" (&old),
+ [psw_pgm] "a" (&S390_lowcore.program_new_psw),
+ [addr] "a" (addr)
+ : "cc", "memory");
+ return rc;
+}
+
+static unsigned long search_mem_end(void)
+{
+ unsigned long range = 1 << (MAX_PHYSMEM_BITS - 20); /* in 1MB blocks */
+ unsigned long offset = 0;
+ unsigned long pivot;
+
+ while (range > 1) {
+ range >>= 1;
+ pivot = offset + range;
+ if (!tprot(pivot << 20))
+ offset = pivot;
+ }
+ return (offset + 1) << 20;
+}
+
+unsigned long detect_max_physmem_end(void)
+{
+ unsigned long max_physmem_end = 0;
+
+ if (!sclp_early_get_memsize(&max_physmem_end)) {
+ physmem_info.info_source = MEM_DETECT_SCLP_READ_INFO;
+ } else {
+ max_physmem_end = search_mem_end();
+ physmem_info.info_source = MEM_DETECT_BIN_SEARCH;
+ }
+ return max_physmem_end;
+}
+
+void detect_physmem_online_ranges(unsigned long max_physmem_end)
+{
+ if (!sclp_early_read_storage_info()) {
+ physmem_info.info_source = MEM_DETECT_SCLP_STOR_INFO;
+ } else if (!diag260()) {
+ physmem_info.info_source = MEM_DETECT_DIAG260;
+ } else if (max_physmem_end) {
+ add_physmem_online_range(0, max_physmem_end);
+ }
+}
+
+void physmem_set_usable_limit(unsigned long limit)
+{
+ physmem_info.usable = limit;
+ physmem_alloc_pos = limit;
+}
+
+static void die_oom(unsigned long size, unsigned long align, unsigned long min, unsigned long max)
+{
+ unsigned long start, end, total_mem = 0, total_reserved_mem = 0;
+ struct reserved_range *range;
+ enum reserved_range_type t;
+ int i;
+
+ decompressor_printk("Linux version %s\n", kernel_version);
+ if (!is_prot_virt_guest() && early_command_line[0])
+ decompressor_printk("Kernel command line: %s\n", early_command_line);
+ decompressor_printk("Out of memory allocating %lx bytes %lx aligned in range %lx:%lx\n",
+ size, align, min, max);
+ decompressor_printk("Reserved memory ranges:\n");
+ for_each_physmem_reserved_range(t, range, &start, &end) {
+ decompressor_printk("%016lx %016lx %s\n", start, end, get_rr_type_name(t));
+ total_reserved_mem += end - start;
+ }
+ decompressor_printk("Usable online memory ranges (info source: %s [%x]):\n",
+ get_physmem_info_source(), physmem_info.info_source);
+ for_each_physmem_usable_range(i, &start, &end) {
+ decompressor_printk("%016lx %016lx\n", start, end);
+ total_mem += end - start;
+ }
+ decompressor_printk("Usable online memory total: %lx Reserved: %lx Free: %lx\n",
+ total_mem, total_reserved_mem,
+ total_mem > total_reserved_mem ? total_mem - total_reserved_mem : 0);
+ print_stacktrace(current_frame_address());
+ sclp_early_printk("\n\n -- System halted\n");
+ disabled_wait();
+}
+
+void physmem_reserve(enum reserved_range_type type, unsigned long addr, unsigned long size)
+{
+ physmem_info.reserved[type].start = addr;
+ physmem_info.reserved[type].end = addr + size;
+}
+
+void physmem_free(enum reserved_range_type type)
+{
+ physmem_info.reserved[type].start = 0;
+ physmem_info.reserved[type].end = 0;
+}
+
+static bool __physmem_alloc_intersects(unsigned long addr, unsigned long size,
+ unsigned long *intersection_start)
+{
+ unsigned long res_addr, res_size;
+ int t;
+
+ for (t = 0; t < RR_MAX; t++) {
+ if (!get_physmem_reserved(t, &res_addr, &res_size))
+ continue;
+ if (intersects(addr, size, res_addr, res_size)) {
+ *intersection_start = res_addr;
+ return true;
+ }
+ }
+ return ipl_report_certs_intersects(addr, size, intersection_start);
+}
+
+static unsigned long __physmem_alloc_range(unsigned long size, unsigned long align,
+ unsigned long min, unsigned long max,
+ unsigned int from_ranges, unsigned int *ranges_left,
+ bool die_on_oom)
+{
+ unsigned int nranges = from_ranges ?: physmem_info.range_count;
+ unsigned long range_start, range_end;
+ unsigned long intersection_start;
+ unsigned long addr, pos = max;
+
+ align = max(align, 8UL);
+ while (nranges) {
+ __get_physmem_range(nranges - 1, &range_start, &range_end, false);
+ pos = min(range_end, pos);
+
+ if (round_up(min, align) + size > pos)
+ break;
+ addr = round_down(pos - size, align);
+ if (range_start > addr) {
+ nranges--;
+ continue;
+ }
+ if (__physmem_alloc_intersects(addr, size, &intersection_start)) {
+ pos = intersection_start;
+ continue;
+ }
+
+ if (ranges_left)
+ *ranges_left = nranges;
+ return addr;
+ }
+ if (die_on_oom)
+ die_oom(size, align, min, max);
+ return 0;
+}
+
+unsigned long physmem_alloc_range(enum reserved_range_type type, unsigned long size,
+ unsigned long align, unsigned long min, unsigned long max,
+ bool die_on_oom)
+{
+ unsigned long addr;
+
+ max = min(max, physmem_alloc_pos);
+ addr = __physmem_alloc_range(size, align, min, max, 0, NULL, die_on_oom);
+ if (addr)
+ physmem_reserve(type, addr, size);
+ return addr;
+}
+
+unsigned long physmem_alloc_top_down(enum reserved_range_type type, unsigned long size,
+ unsigned long align)
+{
+ struct reserved_range *range = &physmem_info.reserved[type];
+ struct reserved_range *new_range;
+ unsigned int ranges_left;
+ unsigned long addr;
+
+ addr = __physmem_alloc_range(size, align, 0, physmem_alloc_pos, physmem_alloc_ranges,
+ &ranges_left, true);
+ /* if not a consecutive allocation of the same type or first allocation */
+ if (range->start != addr + size) {
+ if (range->end) {
+ physmem_alloc_pos = __physmem_alloc_range(
+ sizeof(struct reserved_range), 0, 0, physmem_alloc_pos,
+ physmem_alloc_ranges, &ranges_left, true);
+ new_range = (struct reserved_range *)physmem_alloc_pos;
+ *new_range = *range;
+ range->chain = new_range;
+ addr = __physmem_alloc_range(size, align, 0, physmem_alloc_pos,
+ ranges_left, &ranges_left, true);
+ }
+ range->end = addr + size;
+ }
+ range->start = addr;
+ physmem_alloc_pos = addr;
+ physmem_alloc_ranges = ranges_left;
+ return addr;
+}
+
+unsigned long get_physmem_alloc_pos(void)
+{
+ return physmem_alloc_pos;
+}
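search_mem_end() above discovers the memory size by bisection when no firmware info is available: tprot faults on inaccessible storage, so the highest 1 MB block that probes cleanly marks the boundary. A standalone model of the same bisection with a simulated probe and a hypothetical memory size:

	#include <stdio.h>

	#define MAX_BLOCKS (1UL << 12)		/* probe space: 4096 x 1 MB blocks */

	static unsigned long mem_blocks = 2345;	/* simulated accessible block count */

	/* Stand-in for tprot: returns 0 if the block is accessible. */
	static int probe(unsigned long block)
	{
		return block < mem_blocks ? 0 : -1;
	}

	int main(void)
	{
		unsigned long range = MAX_BLOCKS, offset = 0, pivot;

		/* Same shape as search_mem_end(): halve the range each step
		 * and keep the highest pivot that probes successfully. */
		while (range > 1) {
			range >>= 1;
			pivot = offset + range;
			if (!probe(pivot))
				offset = pivot;
		}
		printf("detected end: %lu MB\n", offset + 1);	/* prints 2345 */
		return 0;
	}

The loop maintains the invariant that `offset` is always accessible, so it converges on the last accessible block in log2(MAX_BLOCKS) probes rather than scanning every megabyte.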
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index 11413f0baabc..64bd7ac3e35d 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -12,7 +12,7 @@
#include <asm/diag.h>
#include <asm/uv.h>
#include <asm/abs_lowcore.h>
-#include <asm/mem_detect.h>
+#include <asm/physmem_info.h>
#include "decompressor.h"
#include "boot.h"
#include "uv.h"
@@ -21,7 +21,6 @@ unsigned long __bootdata_preserved(__kaslr_offset);
unsigned long __bootdata_preserved(__abs_lowcore);
unsigned long __bootdata_preserved(__memcpy_real_area);
pte_t *__bootdata_preserved(memcpy_real_ptep);
-unsigned long __bootdata(__amode31_base);
unsigned long __bootdata_preserved(VMALLOC_START);
unsigned long __bootdata_preserved(VMALLOC_END);
struct page *__bootdata_preserved(vmemmap);
@@ -29,8 +28,6 @@ unsigned long __bootdata_preserved(vmemmap_size);
unsigned long __bootdata_preserved(MODULES_VADDR);
unsigned long __bootdata_preserved(MODULES_END);
unsigned long __bootdata(ident_map_size);
-int __bootdata(is_full_image) = 1;
-struct initrd_data __bootdata(initrd_data);
u64 __bootdata_preserved(stfle_fac_list[16]);
u64 __bootdata_preserved(alt_stfle_fac_list[16]);
@@ -76,17 +73,20 @@ unsigned long mem_safe_offset(void)
}
#endif
-static unsigned long rescue_initrd(unsigned long safe_addr)
+static void rescue_initrd(unsigned long min, unsigned long max)
{
+ unsigned long old_addr, addr, size;
+
if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
- return safe_addr;
- if (!initrd_data.start || !initrd_data.size)
- return safe_addr;
- if (initrd_data.start < safe_addr) {
- memmove((void *)safe_addr, (void *)initrd_data.start, initrd_data.size);
- initrd_data.start = safe_addr;
- }
- return initrd_data.start + initrd_data.size;
+ return;
+ if (!get_physmem_reserved(RR_INITRD, &addr, &size))
+ return;
+ if (addr >= min && addr + size <= max)
+ return;
+ old_addr = addr;
+ physmem_free(RR_INITRD);
+ addr = physmem_alloc_top_down(RR_INITRD, size, 0);
+ memmove((void *)addr, (void *)old_addr, size);
}
static void copy_bootdata(void)
@@ -140,7 +140,7 @@ static void handle_relocs(unsigned long offset)
*
* Consider the following factors:
* 1. max_physmem_end - end of physical memory online or standby.
- * Always <= end of the last online memory block (get_mem_detect_end()).
+ * Always >= end of the last online memory range (get_physmem_online_end()).
* 2. CONFIG_MAX_PHYSMEM_BITS - the maximum size of physical memory the
* kernel is able to support.
* 3. "mem=" kernel command line option which limits physical memory usage.
@@ -160,10 +160,10 @@ static void setup_ident_map_size(unsigned long max_physmem_end)
#ifdef CONFIG_CRASH_DUMP
if (oldmem_data.start) {
- kaslr_enabled = 0;
+ __kaslr_enabled = 0;
ident_map_size = min(ident_map_size, oldmem_data.size);
} else if (ipl_block_valid && is_ipl_block_dump()) {
- kaslr_enabled = 0;
+ __kaslr_enabled = 0;
if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size)
ident_map_size = min(ident_map_size, hsa_size);
}
@@ -235,9 +235,9 @@ static unsigned long setup_kernel_memory_layout(void)
/*
* This function clears the BSS section of the decompressed Linux kernel and NOT the decompressor's.
*/
-static void clear_bss_section(void)
+static void clear_bss_section(unsigned long vmlinux_lma)
{
- memset((void *)vmlinux.default_lma + vmlinux.image_size, 0, vmlinux.bss_size);
+ memset((void *)vmlinux_lma + vmlinux.image_size, 0, vmlinux.bss_size);
}
/*
@@ -256,7 +256,6 @@ static void setup_vmalloc_size(void)
static void offset_vmlinux_info(unsigned long offset)
{
- vmlinux.default_lma += offset;
*(unsigned long *)(&vmlinux.entry) += offset;
vmlinux.bootdata_off += offset;
vmlinux.bootdata_preserved_off += offset;
@@ -266,60 +265,83 @@ static void offset_vmlinux_info(unsigned long offset)
vmlinux.init_mm_off += offset;
vmlinux.swapper_pg_dir_off += offset;
vmlinux.invalid_pg_dir_off += offset;
-}
-
-static unsigned long reserve_amode31(unsigned long safe_addr)
-{
- __amode31_base = PAGE_ALIGN(safe_addr);
- return __amode31_base + vmlinux.amode31_size;
+#ifdef CONFIG_KASAN
+ vmlinux.kasan_early_shadow_page_off += offset;
+ vmlinux.kasan_early_shadow_pte_off += offset;
+ vmlinux.kasan_early_shadow_pmd_off += offset;
+ vmlinux.kasan_early_shadow_pud_off += offset;
+ vmlinux.kasan_early_shadow_p4d_off += offset;
+#endif
}
void startup_kernel(void)
{
unsigned long max_physmem_end;
- unsigned long random_lma;
- unsigned long safe_addr;
+ unsigned long vmlinux_lma = 0;
+ unsigned long amode31_lma = 0;
unsigned long asce_limit;
+ unsigned long safe_addr;
void *img;
psw_t psw;
- initrd_data.start = parmarea.initrd_start;
- initrd_data.size = parmarea.initrd_size;
+ setup_lpp();
+ safe_addr = mem_safe_offset();
+ /*
+ * reserve decompressor memory together with decompression heap, buffer and
+ * memory which might be occupied by uncompressed kernel at default 1Mb
+ * position (if KASLR is off or failed).
+ */
+ physmem_reserve(RR_DECOMPRESSOR, 0, safe_addr);
+ if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && parmarea.initrd_size)
+ physmem_reserve(RR_INITRD, parmarea.initrd_start, parmarea.initrd_size);
oldmem_data.start = parmarea.oldmem_base;
oldmem_data.size = parmarea.oldmem_size;
- setup_lpp();
store_ipl_parmblock();
- safe_addr = mem_safe_offset();
- safe_addr = reserve_amode31(safe_addr);
- safe_addr = read_ipl_report(safe_addr);
+ read_ipl_report();
uv_query_info();
- safe_addr = rescue_initrd(safe_addr);
sclp_early_read_info();
setup_boot_command_line();
parse_boot_command_line();
detect_facilities();
sanitize_prot_virt_host();
- max_physmem_end = detect_memory(&safe_addr);
+ max_physmem_end = detect_max_physmem_end();
setup_ident_map_size(max_physmem_end);
setup_vmalloc_size();
asce_limit = setup_kernel_memory_layout();
- mem_detect_set_usable_limit(ident_map_size);
-
- if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled) {
- random_lma = get_random_base(safe_addr);
- if (random_lma) {
- __kaslr_offset = random_lma - vmlinux.default_lma;
- img = (void *)vmlinux.default_lma;
+ /* got final ident_map_size, physmem allocations could be performed now */
+ physmem_set_usable_limit(ident_map_size);
+ detect_physmem_online_ranges(max_physmem_end);
+ save_ipl_cert_comp_list();
+ rescue_initrd(safe_addr, ident_map_size);
+
+ if (kaslr_enabled()) {
+ vmlinux_lma = randomize_within_range(vmlinux.image_size + vmlinux.bss_size,
+ THREAD_SIZE, vmlinux.default_lma,
+ ident_map_size);
+ if (vmlinux_lma) {
+ __kaslr_offset = vmlinux_lma - vmlinux.default_lma;
offset_vmlinux_info(__kaslr_offset);
}
}
+ vmlinux_lma = vmlinux_lma ?: vmlinux.default_lma;
+ physmem_reserve(RR_VMLINUX, vmlinux_lma, vmlinux.image_size + vmlinux.bss_size);
if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) {
img = decompress_kernel();
- memmove((void *)vmlinux.default_lma, img, vmlinux.image_size);
- } else if (__kaslr_offset)
- memcpy((void *)vmlinux.default_lma, img, vmlinux.image_size);
+ memmove((void *)vmlinux_lma, img, vmlinux.image_size);
+ } else if (__kaslr_offset) {
+ img = (void *)vmlinux.default_lma;
+ memmove((void *)vmlinux_lma, img, vmlinux.image_size);
+ memset(img, 0, vmlinux.image_size);
+ }
+
+ /* vmlinux decompression is done, shrink reserved low memory */
+ physmem_reserve(RR_DECOMPRESSOR, 0, (unsigned long)_decompressor_end);
+ if (kaslr_enabled())
+ amode31_lma = randomize_within_range(vmlinux.amode31_size, PAGE_SIZE, 0, SZ_2G);
+ amode31_lma = amode31_lma ?: vmlinux.default_lma - vmlinux.amode31_size;
+ physmem_reserve(RR_AMODE31, amode31_lma, vmlinux.amode31_size);
/*
* The order of the following operations is important:
@@ -334,21 +356,16 @@ void startup_kernel(void)
* - copy_bootdata() must follow setup_vmem() to propagate changes to
* bootdata made by setup_vmem()
*/
- clear_bss_section();
+ clear_bss_section(vmlinux_lma);
handle_relocs(__kaslr_offset);
setup_vmem(asce_limit);
copy_bootdata();
- if (__kaslr_offset) {
- /*
- * Save KASLR offset for early dumps, before vmcore_info is set.
- * Mark as uneven to distinguish from real vmcore_info pointer.
- */
- S390_lowcore.vmcore_info = __kaslr_offset | 0x1UL;
- /* Clear non-relocated kernel */
- if (IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED))
- memset(img, 0, vmlinux.image_size);
- }
+ /*
+ * Save KASLR offset for early dumps, before vmcore_info is set.
+ * Mark as uneven to distinguish from real vmcore_info pointer.
+ */
+ S390_lowcore.vmcore_info = __kaslr_offset ? __kaslr_offset | 0x1UL : 0;
/*
* Jump to the decompressed kernel entry point and switch DAT mode on.
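The vmcore_info handling in startup.c above stores the KASLR offset with bit 0 set, so early dump tools can tell a raw offset apart from a real vmcore_info pointer, which is always even because the structure is aligned. A minimal sketch of that tagging convention from a hypothetical consumer's side, not code from the patch:

	#include <stdio.h>

	/* Aligned kernel pointers are even, so an odd value can only be a
	 * tagged KASLR offset saved before vmcore_info was set up. */
	static void decode_vmcore_info(unsigned long val)
	{
		if (!val)
			printf("no KASLR offset, no vmcore_info yet\n");
		else if (val & 0x1UL)
			printf("KASLR offset: 0x%lx\n", val & ~0x1UL);
		else
			printf("vmcore_info at: 0x%lx\n", val);
	}

	int main(void)
	{
		decode_vmcore_info(0);
		decode_vmcore_info(0x3ff8000000UL | 0x1UL);	/* tagged offset */
		decode_vmcore_info(0x1f4d2000UL);		/* real pointer */
		return 0;
	}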
diff --git a/arch/s390/boot/vmem.c b/arch/s390/boot/vmem.c
index 4d1d0d8e99cb..acb1f8b53105 100644
--- a/arch/s390/boot/vmem.c
+++ b/arch/s390/boot/vmem.c
@@ -1,81 +1,217 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/sched/task.h>
#include <linux/pgtable.h>
+#include <linux/kasan.h>
#include <asm/pgalloc.h>
#include <asm/facility.h>
#include <asm/sections.h>
-#include <asm/mem_detect.h>
+#include <asm/physmem_info.h>
#include <asm/maccess.h>
#include <asm/abs_lowcore.h>
#include "decompressor.h"
#include "boot.h"
+unsigned long __bootdata_preserved(s390_invalid_asce);
+
+#ifdef CONFIG_PROC_FS
+atomic_long_t __bootdata_preserved(direct_pages_count[PG_DIRECT_MAP_MAX]);
+#endif
+
#define init_mm (*(struct mm_struct *)vmlinux.init_mm_off)
#define swapper_pg_dir vmlinux.swapper_pg_dir_off
#define invalid_pg_dir vmlinux.invalid_pg_dir_off
-/*
- * Mimic virt_to_kpte() in lack of init_mm symbol. Skip pmd NULL check though.
- */
-static inline pte_t *__virt_to_kpte(unsigned long va)
-{
- return pte_offset_kernel(pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va), va);
-}
-
-unsigned long __bootdata_preserved(s390_invalid_asce);
-unsigned long __bootdata(pgalloc_pos);
-unsigned long __bootdata(pgalloc_end);
-unsigned long __bootdata(pgalloc_low);
-
enum populate_mode {
POPULATE_NONE,
- POPULATE_ONE2ONE,
+ POPULATE_DIRECT,
POPULATE_ABS_LOWCORE,
+#ifdef CONFIG_KASAN
+ POPULATE_KASAN_MAP_SHADOW,
+ POPULATE_KASAN_ZERO_SHADOW,
+ POPULATE_KASAN_SHALLOW
+#endif
};
-static void boot_check_oom(void)
+static void pgtable_populate(unsigned long addr, unsigned long end, enum populate_mode mode);
+
+#ifdef CONFIG_KASAN
+
+#define kasan_early_shadow_page vmlinux.kasan_early_shadow_page_off
+#define kasan_early_shadow_pte ((pte_t *)vmlinux.kasan_early_shadow_pte_off)
+#define kasan_early_shadow_pmd ((pmd_t *)vmlinux.kasan_early_shadow_pmd_off)
+#define kasan_early_shadow_pud ((pud_t *)vmlinux.kasan_early_shadow_pud_off)
+#define kasan_early_shadow_p4d ((p4d_t *)vmlinux.kasan_early_shadow_p4d_off)
+#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))
+
+static pte_t pte_z;
+
+static void kasan_populate_shadow(void)
+{
+ pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
+ pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
+ p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);
+ unsigned long untracked_end;
+ unsigned long start, end;
+ int i;
+
+ pte_z = __pte(__pa(kasan_early_shadow_page) | pgprot_val(PAGE_KERNEL_RO));
+ if (!machine.has_nx)
+ pte_z = clear_pte_bit(pte_z, __pgprot(_PAGE_NOEXEC));
+ crst_table_init((unsigned long *)kasan_early_shadow_p4d, p4d_val(p4d_z));
+ crst_table_init((unsigned long *)kasan_early_shadow_pud, pud_val(pud_z));
+ crst_table_init((unsigned long *)kasan_early_shadow_pmd, pmd_val(pmd_z));
+ memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);
+
+ /*
+ * Current memory layout:
+ * +- 0 -------------+ +- shadow start -+
+ * |1:1 ident mapping| /|1/8 of ident map|
+ * | | / | |
+ * +-end of ident map+ / +----------------+
+ * | ... gap ... | / | kasan |
+ * | | / | zero page |
+ * +- vmalloc area -+ / | mapping |
+ * | vmalloc_size | / | (untracked) |
+ * +- modules vaddr -+ / +----------------+
+ * | 2Gb |/ | unmapped | allocated per module
+ * +- shadow start -+ +----------------+
+ * | 1/8 addr space | | zero pg mapping| (untracked)
+ * +- shadow end ----+---------+- shadow end ---+
+ *
+ * Current memory layout (KASAN_VMALLOC):
+ * +- 0 -------------+ +- shadow start -+
+ * |1:1 ident mapping| /|1/8 of ident map|
+ * | | / | |
+ * +-end of ident map+ / +----------------+
+ * | ... gap ... | / | kasan zero page| (untracked)
+ * | | / | mapping |
+ * +- vmalloc area -+ / +----------------+
+ * | vmalloc_size | / |shallow populate|
+ * +- modules vaddr -+ / +----------------+
+ * | 2Gb |/ |shallow populate|
+ * +- shadow start -+ +----------------+
+ * | 1/8 addr space | | zero pg mapping| (untracked)
+ * +- shadow end ----+---------+- shadow end ---+
+ */
+
+ for_each_physmem_usable_range(i, &start, &end)
+ pgtable_populate(__sha(start), __sha(end), POPULATE_KASAN_MAP_SHADOW);
+ if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
+ untracked_end = VMALLOC_START;
+ /* shallowly populate kasan shadow for vmalloc and modules */
+ pgtable_populate(__sha(VMALLOC_START), __sha(MODULES_END), POPULATE_KASAN_SHALLOW);
+ } else {
+ untracked_end = MODULES_VADDR;
+ }
+ /* populate kasan shadow for untracked memory */
+ pgtable_populate(__sha(ident_map_size), __sha(untracked_end), POPULATE_KASAN_ZERO_SHADOW);
+ pgtable_populate(__sha(MODULES_END), __sha(_REGION1_SIZE), POPULATE_KASAN_ZERO_SHADOW);
+}
+
+static bool kasan_pgd_populate_zero_shadow(pgd_t *pgd, unsigned long addr,
+ unsigned long end, enum populate_mode mode)
{
- if (pgalloc_pos < pgalloc_low)
- error("out of memory on boot\n");
+ if (mode == POPULATE_KASAN_ZERO_SHADOW &&
+ IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) {
+ pgd_populate(&init_mm, pgd, kasan_early_shadow_p4d);
+ return true;
+ }
+ return false;
}
-static void pgtable_populate_init(void)
+static bool kasan_p4d_populate_zero_shadow(p4d_t *p4d, unsigned long addr,
+ unsigned long end, enum populate_mode mode)
{
- unsigned long initrd_end;
- unsigned long kernel_end;
-
- kernel_end = vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size;
- pgalloc_low = round_up(kernel_end, PAGE_SIZE);
- if (IS_ENABLED(CONFIG_BLK_DEV_INITRD)) {
- initrd_end = round_up(initrd_data.start + initrd_data.size, _SEGMENT_SIZE);
- pgalloc_low = max(pgalloc_low, initrd_end);
+ if (mode == POPULATE_KASAN_ZERO_SHADOW &&
+ IS_ALIGNED(addr, P4D_SIZE) && end - addr >= P4D_SIZE) {
+ p4d_populate(&init_mm, p4d, kasan_early_shadow_pud);
+ return true;
}
+ return false;
+}
- pgalloc_end = round_down(get_mem_detect_end(), PAGE_SIZE);
- pgalloc_pos = pgalloc_end;
+static bool kasan_pud_populate_zero_shadow(pud_t *pud, unsigned long addr,
+ unsigned long end, enum populate_mode mode)
+{
+ if (mode == POPULATE_KASAN_ZERO_SHADOW &&
+ IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
+ pud_populate(&init_mm, pud, kasan_early_shadow_pmd);
+ return true;
+ }
+ return false;
+}
- boot_check_oom();
+static bool kasan_pmd_populate_zero_shadow(pmd_t *pmd, unsigned long addr,
+ unsigned long end, enum populate_mode mode)
+{
+ if (mode == POPULATE_KASAN_ZERO_SHADOW &&
+ IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
+ pmd_populate(&init_mm, pmd, kasan_early_shadow_pte);
+ return true;
+ }
+ return false;
}
-static void *boot_alloc_pages(unsigned int order)
+static bool kasan_pte_populate_zero_shadow(pte_t *pte, enum populate_mode mode)
{
- unsigned long size = PAGE_SIZE << order;
+ pte_t entry;
- pgalloc_pos -= size;
- pgalloc_pos = round_down(pgalloc_pos, size);
+ if (mode == POPULATE_KASAN_ZERO_SHADOW) {
+ set_pte(pte, pte_z);
+ return true;
+ }
+ return false;
+}
+#else
- boot_check_oom();
+static inline void kasan_populate_shadow(void) {}
- return (void *)pgalloc_pos;
+static inline bool kasan_pgd_populate_zero_shadow(pgd_t *pgd, unsigned long addr,
+ unsigned long end, enum populate_mode mode)
+{
+ return false;
+}
+
+static inline bool kasan_p4d_populate_zero_shadow(p4d_t *p4d, unsigned long addr,
+ unsigned long end, enum populate_mode mode)
+{
+ return false;
+}
+
+static inline bool kasan_pud_populate_zero_shadow(pud_t *pud, unsigned long addr,
+ unsigned long end, enum populate_mode mode)
+{
+ return false;
+}
+
+static inline bool kasan_pmd_populate_zero_shadow(pmd_t *pmd, unsigned long addr,
+ unsigned long end, enum populate_mode mode)
+{
+ return false;
+}
+
+static bool kasan_pte_populate_zero_shadow(pte_t *pte, enum populate_mode mode)
+{
+ return false;
+}
+
+#endif
+
+/*
+ * Mimic virt_to_kpte() in lack of init_mm symbol. Skip pmd NULL check though.
+ */
+static inline pte_t *__virt_to_kpte(unsigned long va)
+{
+ return pte_offset_kernel(pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va), va);
}
static void *boot_crst_alloc(unsigned long val)
{
+ unsigned long size = PAGE_SIZE << CRST_ALLOC_ORDER;
unsigned long *table;
- table = boot_alloc_pages(CRST_ALLOC_ORDER);
- if (table)
- crst_table_init(table, val);
+ table = (unsigned long *)physmem_alloc_top_down(RR_VMEM, size, size);
+ crst_table_init(table, val);
return table;
}
@@ -84,28 +220,37 @@ static pte_t *boot_pte_alloc(void)
static void *pte_leftover;
pte_t *pte;
- BUILD_BUG_ON(_PAGE_TABLE_SIZE * 2 != PAGE_SIZE);
-
+ /*
+ * handling pte_leftovers this way helps to avoid memory fragmentation
+ * during POPULATE_KASAN_MAP_SHADOW when EDAT is off
+ */
if (!pte_leftover) {
- pte_leftover = boot_alloc_pages(0);
+ pte_leftover = (void *)physmem_alloc_top_down(RR_VMEM, PAGE_SIZE, PAGE_SIZE);
pte = pte_leftover + _PAGE_TABLE_SIZE;
} else {
pte = pte_leftover;
pte_leftover = NULL;
}
+
memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
return pte;
}
-static unsigned long _pa(unsigned long addr, enum populate_mode mode)
+static unsigned long _pa(unsigned long addr, unsigned long size, enum populate_mode mode)
{
switch (mode) {
case POPULATE_NONE:
return -1;
- case POPULATE_ONE2ONE:
+ case POPULATE_DIRECT:
return addr;
case POPULATE_ABS_LOWCORE:
return __abs_lowcore_pa(addr);
+#ifdef CONFIG_KASAN
+ case POPULATE_KASAN_MAP_SHADOW:
+ addr = physmem_alloc_top_down(RR_VMEM, size, size);
+ memset((void *)addr, 0, size);
+ return addr;
+#endif
default:
return -1;
}
@@ -126,23 +271,28 @@ static bool can_large_pmd(pmd_t *pm_dir, unsigned long addr, unsigned long end)
static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long end,
enum populate_mode mode)
{
- unsigned long next;
+ unsigned long pages = 0;
pte_t *pte, entry;
pte = pte_offset_kernel(pmd, addr);
for (; addr < end; addr += PAGE_SIZE, pte++) {
if (pte_none(*pte)) {
- entry = __pte(_pa(addr, mode));
+ if (kasan_pte_populate_zero_shadow(pte, mode))
+ continue;
+ entry = __pte(_pa(addr, PAGE_SIZE, mode));
entry = set_pte_bit(entry, PAGE_KERNEL_EXEC);
set_pte(pte, entry);
+ pages++;
}
}
+ if (mode == POPULATE_DIRECT)
+ update_page_count(PG_DIRECT_MAP_4K, pages);
}
static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long end,
enum populate_mode mode)
{
- unsigned long next;
+ unsigned long next, pages = 0;
pmd_t *pmd, entry;
pte_t *pte;
@@ -150,10 +300,13 @@ static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long e
for (; addr < end; addr = next, pmd++) {
next = pmd_addr_end(addr, end);
if (pmd_none(*pmd)) {
+ if (kasan_pmd_populate_zero_shadow(pmd, addr, next, mode))
+ continue;
if (can_large_pmd(pmd, addr, next)) {
- entry = __pmd(_pa(addr, mode));
+ entry = __pmd(_pa(addr, _SEGMENT_SIZE, mode));
entry = set_pmd_bit(entry, SEGMENT_KERNEL_EXEC);
set_pmd(pmd, entry);
+ pages++;
continue;
}
pte = boot_pte_alloc();
@@ -163,12 +316,14 @@ static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long e
}
pgtable_pte_populate(pmd, addr, next, mode);
}
+ if (mode == POPULATE_DIRECT)
+ update_page_count(PG_DIRECT_MAP_1M, pages);
}
static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long end,
enum populate_mode mode)
{
- unsigned long next;
+ unsigned long next, pages = 0;
pud_t *pud, entry;
pmd_t *pmd;
@@ -176,10 +331,13 @@ static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long e
for (; addr < end; addr = next, pud++) {
next = pud_addr_end(addr, end);
if (pud_none(*pud)) {
+ if (kasan_pud_populate_zero_shadow(pud, addr, next, mode))
+ continue;
if (can_large_pud(pud, addr, next)) {
- entry = __pud(_pa(addr, mode));
+ entry = __pud(_pa(addr, _REGION3_SIZE, mode));
entry = set_pud_bit(entry, REGION3_KERNEL_EXEC);
set_pud(pud, entry);
+ pages++;
continue;
}
pmd = boot_crst_alloc(_SEGMENT_ENTRY_EMPTY);
@@ -189,6 +347,8 @@ static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long e
}
pgtable_pmd_populate(pud, addr, next, mode);
}
+ if (mode == POPULATE_DIRECT)
+ update_page_count(PG_DIRECT_MAP_2G, pages);
}
static void pgtable_p4d_populate(pgd_t *pgd, unsigned long addr, unsigned long end,
@@ -202,6 +362,8 @@ static void pgtable_p4d_populate(pgd_t *pgd, unsigned long addr, unsigned long e
for (; addr < end; addr = next, p4d++) {
next = p4d_addr_end(addr, end);
if (p4d_none(*p4d)) {
+ if (kasan_p4d_populate_zero_shadow(p4d, addr, next, mode))
+ continue;
pud = boot_crst_alloc(_REGION3_ENTRY_EMPTY);
p4d_populate(&init_mm, p4d, pud);
}
@@ -219,9 +381,15 @@ static void pgtable_populate(unsigned long addr, unsigned long end, enum populat
for (; addr < end; addr = next, pgd++) {
next = pgd_addr_end(addr, end);
if (pgd_none(*pgd)) {
+ if (kasan_pgd_populate_zero_shadow(pgd, addr, next, mode))
+ continue;
p4d = boot_crst_alloc(_REGION2_ENTRY_EMPTY);
pgd_populate(&init_mm, pgd, p4d);
}
+#ifdef CONFIG_KASAN
+ if (mode == POPULATE_KASAN_SHALLOW)
+ continue;
+#endif
pgtable_p4d_populate(pgd, addr, next, mode);
}
}
@@ -250,16 +418,17 @@ void setup_vmem(unsigned long asce_limit)
* To prevent creation of a large page at address 0 first map
* the lowcore and create the identity mapping only afterwards.
*/
- pgtable_populate_init();
- pgtable_populate(0, sizeof(struct lowcore), POPULATE_ONE2ONE);
- for_each_mem_detect_usable_block(i, &start, &end)
- pgtable_populate(start, end, POPULATE_ONE2ONE);
+ pgtable_populate(0, sizeof(struct lowcore), POPULATE_DIRECT);
+ for_each_physmem_usable_range(i, &start, &end)
+ pgtable_populate(start, end, POPULATE_DIRECT);
pgtable_populate(__abs_lowcore, __abs_lowcore + sizeof(struct lowcore),
POPULATE_ABS_LOWCORE);
pgtable_populate(__memcpy_real_area, __memcpy_real_area + PAGE_SIZE,
POPULATE_NONE);
memcpy_real_ptep = __virt_to_kpte(__memcpy_real_area);
+ kasan_populate_shadow();
+
S390_lowcore.kernel_asce = swapper_pg_dir | asce_bits;
S390_lowcore.user_asce = s390_invalid_asce;
@@ -269,10 +438,3 @@ void setup_vmem(unsigned long asce_limit)
init_mm.context.asce = S390_lowcore.kernel_asce;
}
-
-unsigned long vmem_estimate_memory_needs(unsigned long online_mem_total)
-{
- unsigned long pages = DIV_ROUND_UP(online_mem_total, PAGE_SIZE);
-
- return DIV_ROUND_UP(pages, _PAGE_ENTRIES) * _PAGE_TABLE_SIZE * 2;
-}
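
The pte_leftover handling above exploits that an s390 page table (_PAGE_TABLE_SIZE) is exactly half a page, so every 4K allocation yields two page tables. A minimal stand-alone sketch of the scheme (not kernel code; alloc_page_4k() stands in for physmem_alloc_top_down(RR_VMEM, ...)):

#include <stdlib.h>

#define PAGE_SIZE	4096UL
#define TABLE_SIZE	(PAGE_SIZE / 2)	/* stands in for _PAGE_TABLE_SIZE */

static void *alloc_page_4k(void)
{
	return aligned_alloc(PAGE_SIZE, PAGE_SIZE);
}

static void *pte_leftover;

static void *table_alloc(void)
{
	void *table;

	if (!pte_leftover) {
		/* fresh page: hand out the upper half, cache the lower */
		pte_leftover = alloc_page_4k();
		table = (char *)pte_leftover + TABLE_SIZE;
	} else {
		table = pte_leftover;
		pte_leftover = NULL;
	}
	return table;
}

Caching the second half for the next call is what keeps KASAN shadow population from fragmenting memory when EDAT is unavailable.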
diff --git a/arch/s390/boot/vmlinux.lds.S b/arch/s390/boot/vmlinux.lds.S
index fa9d33b01b85..389df0e0d9e5 100644
--- a/arch/s390/boot/vmlinux.lds.S
+++ b/arch/s390/boot/vmlinux.lds.S
@@ -93,6 +93,8 @@ SECTIONS
_decompressor_syms_end = .;
}
+ _decompressor_end = .;
+
#ifdef CONFIG_KERNEL_UNCOMPRESSED
. = 0x100000;
#else
diff --git a/arch/s390/crypto/chacha-s390.S b/arch/s390/crypto/chacha-s390.S
index 9b033622191c..37cb63f25b17 100644
--- a/arch/s390/crypto/chacha-s390.S
+++ b/arch/s390/crypto/chacha-s390.S
@@ -13,27 +13,28 @@
#define SP %r15
#define FRAME (16 * 8 + 4 * 8)
-.data
-.align 32
-
-.Lsigma:
-.long 0x61707865,0x3320646e,0x79622d32,0x6b206574 # endian-neutral
-.long 1,0,0,0
-.long 2,0,0,0
-.long 3,0,0,0
-.long 0x03020100,0x07060504,0x0b0a0908,0x0f0e0d0c # byte swap
-
-.long 0,1,2,3
-.long 0x61707865,0x61707865,0x61707865,0x61707865 # smashed sigma
-.long 0x3320646e,0x3320646e,0x3320646e,0x3320646e
-.long 0x79622d32,0x79622d32,0x79622d32,0x79622d32
-.long 0x6b206574,0x6b206574,0x6b206574,0x6b206574
+ .data
+ .balign 32
-.previous
+SYM_DATA_START_LOCAL(sigma)
+ .long 0x61707865,0x3320646e,0x79622d32,0x6b206574 # endian-neutral
+ .long 1,0,0,0
+ .long 2,0,0,0
+ .long 3,0,0,0
+ .long 0x03020100,0x07060504,0x0b0a0908,0x0f0e0d0c # byte swap
+
+ .long 0,1,2,3
+ .long 0x61707865,0x61707865,0x61707865,0x61707865 # smashed sigma
+ .long 0x3320646e,0x3320646e,0x3320646e,0x3320646e
+ .long 0x79622d32,0x79622d32,0x79622d32,0x79622d32
+ .long 0x6b206574,0x6b206574,0x6b206574,0x6b206574
+SYM_DATA_END(sigma)
+
+ .previous
GEN_BR_THUNK %r14
-.text
+ .text
#############################################################################
# void chacha20_vx_4x(u8 *out, const u8 *inp, size_t len,
@@ -78,10 +79,10 @@
#define XT2 %v29
#define XT3 %v30
-ENTRY(chacha20_vx_4x)
+SYM_FUNC_START(chacha20_vx_4x)
stmg %r6,%r7,6*8(SP)
- larl %r7,.Lsigma
+ larl %r7,sigma
lhi %r0,10
lhi %r1,0
@@ -403,7 +404,7 @@ ENTRY(chacha20_vx_4x)
lmg %r6,%r7,6*8(SP)
BR_EX %r14
-ENDPROC(chacha20_vx_4x)
+SYM_FUNC_END(chacha20_vx_4x)
#undef OUT
#undef INP
@@ -471,7 +472,7 @@ ENDPROC(chacha20_vx_4x)
#define T2 %v29
#define T3 %v30
-ENTRY(chacha20_vx)
+SYM_FUNC_START(chacha20_vx)
clgfi LEN,256
jle chacha20_vx_4x
stmg %r6,%r7,6*8(SP)
@@ -481,7 +482,7 @@ ENTRY(chacha20_vx)
la SP,0(%r1,SP)
stg %r0,0(SP) # back-chain
- larl %r7,.Lsigma
+ larl %r7,sigma
lhi %r0,10
VLM K1,K2,0,KEY,0 # load key
@@ -902,6 +903,6 @@ ENTRY(chacha20_vx)
lmg %r6,%r7,FRAME+6*8(SP)
la SP,FRAME(SP)
BR_EX %r14
-ENDPROC(chacha20_vx)
+SYM_FUNC_END(chacha20_vx)
.previous
diff --git a/arch/s390/crypto/crc32be-vx.S b/arch/s390/crypto/crc32be-vx.S
index 6b3d1009c392..6ea17628ea10 100644
--- a/arch/s390/crypto/crc32be-vx.S
+++ b/arch/s390/crypto/crc32be-vx.S
@@ -24,8 +24,8 @@
#define CONST_RU_POLY %v13
#define CONST_CRC_POLY %v14
-.data
-.align 8
+ .data
+ .balign 8
/*
* The CRC-32 constant block contains reduction constants to fold and
@@ -58,19 +58,20 @@
* P'(x) = 0xEDB88320
*/
-.Lconstants_CRC_32_BE:
+SYM_DATA_START_LOCAL(constants_CRC_32_BE)
.quad 0x08833794c, 0x0e6228b11 # R1, R2
.quad 0x0c5b9cd4c, 0x0e8a45605 # R3, R4
.quad 0x0f200aa66, 1 << 32 # R5, x32
.quad 0x0490d678d, 1 # R6, 1
.quad 0x104d101df, 0 # u
.quad 0x104C11DB7, 0 # P(x)
+SYM_DATA_END(constants_CRC_32_BE)
-.previous
+ .previous
GEN_BR_THUNK %r14
-.text
+ .text
/*
* The CRC-32 function(s) use these calling conventions:
*
@@ -90,9 +91,9 @@
*
* V9..V14: CRC-32 constants.
*/
-ENTRY(crc32_be_vgfm_16)
+SYM_FUNC_START(crc32_be_vgfm_16)
/* Load CRC-32 constants */
- larl %r5,.Lconstants_CRC_32_BE
+ larl %r5,constants_CRC_32_BE
VLM CONST_R1R2,CONST_CRC_POLY,0,%r5
/* Load the initial CRC value into the leftmost word of V0. */
@@ -207,6 +208,6 @@ ENTRY(crc32_be_vgfm_16)
.Ldone:
VLGVF %r2,%v2,3
BR_EX %r14
-ENDPROC(crc32_be_vgfm_16)
+SYM_FUNC_END(crc32_be_vgfm_16)
.previous
diff --git a/arch/s390/crypto/crc32le-vx.S b/arch/s390/crypto/crc32le-vx.S
index 71caf0f4ec08..5a819ae09a0b 100644
--- a/arch/s390/crypto/crc32le-vx.S
+++ b/arch/s390/crypto/crc32le-vx.S
@@ -25,8 +25,8 @@
#define CONST_RU_POLY %v13
#define CONST_CRC_POLY %v14
-.data
-.align 8
+ .data
+ .balign 8
/*
* The CRC-32 constant block contains reduction constants to fold and
@@ -59,27 +59,29 @@
* P'(x) = 0x82F63B78
*/
-.Lconstants_CRC_32_LE:
+SYM_DATA_START_LOCAL(constants_CRC_32_LE)
.octa 0x0F0E0D0C0B0A09080706050403020100 # BE->LE mask
.quad 0x1c6e41596, 0x154442bd4 # R2, R1
.quad 0x0ccaa009e, 0x1751997d0 # R4, R3
.octa 0x163cd6124 # R5
.octa 0x1F7011641 # u'
.octa 0x1DB710641 # P'(x) << 1
+SYM_DATA_END(constants_CRC_32_LE)
-.Lconstants_CRC_32C_LE:
+SYM_DATA_START_LOCAL(constants_CRC_32C_LE)
.octa 0x0F0E0D0C0B0A09080706050403020100 # BE->LE mask
.quad 0x09e4addf8, 0x740eef02 # R2, R1
.quad 0x14cd00bd6, 0xf20c0dfe # R4, R3
.octa 0x0dd45aab8 # R5
.octa 0x0dea713f1 # u'
.octa 0x105ec76f0 # P'(x) << 1
+SYM_DATA_END(constants_CRC_32C_LE)
-.previous
+ .previous
GEN_BR_THUNK %r14
-.text
+ .text
/*
* The CRC-32 functions use these calling conventions:
@@ -102,17 +104,17 @@
* V10..V14: CRC-32 constants.
*/
-ENTRY(crc32_le_vgfm_16)
- larl %r5,.Lconstants_CRC_32_LE
+SYM_FUNC_START(crc32_le_vgfm_16)
+ larl %r5,constants_CRC_32_LE
j crc32_le_vgfm_generic
-ENDPROC(crc32_le_vgfm_16)
+SYM_FUNC_END(crc32_le_vgfm_16)
-ENTRY(crc32c_le_vgfm_16)
- larl %r5,.Lconstants_CRC_32C_LE
+SYM_FUNC_START(crc32c_le_vgfm_16)
+ larl %r5,constants_CRC_32C_LE
j crc32_le_vgfm_generic
-ENDPROC(crc32c_le_vgfm_16)
+SYM_FUNC_END(crc32c_le_vgfm_16)
-ENTRY(crc32_le_vgfm_generic)
+SYM_FUNC_START(crc32_le_vgfm_generic)
/* Load CRC-32 constants */
VLM CONST_PERM_LE2BE,CONST_CRC_POLY,0,%r5
@@ -268,6 +270,6 @@ ENTRY(crc32_le_vgfm_generic)
.Ldone:
VLGVF %r2,%v2,2
BR_EX %r14
-ENDPROC(crc32_le_vgfm_generic)
+SYM_FUNC_END(crc32_le_vgfm_generic)
.previous
diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h
index c699f251a464..d5d967166bac 100644
--- a/arch/s390/include/asm/ap.h
+++ b/arch/s390/include/asm/ap.h
@@ -43,10 +43,11 @@ struct ap_queue_status {
unsigned int queue_empty : 1;
unsigned int replies_waiting : 1;
unsigned int queue_full : 1;
- unsigned int _pad1 : 4;
+ unsigned int : 3;
+ unsigned int async : 1;
unsigned int irq_enabled : 1;
unsigned int response_code : 8;
- unsigned int _pad2 : 16;
+ unsigned int : 16;
};
/*
@@ -86,6 +87,42 @@ static inline bool ap_instructions_available(void)
return reg1 != 0;
}
+/* TAPQ register GR2 response struct */
+struct ap_tapq_gr2 {
+ union {
+ unsigned long value;
+ struct {
+ unsigned int fac : 32; /* facility bits */
+ unsigned int apinfo : 32; /* ap type, ... */
+ };
+ struct {
+ unsigned int s : 1; /* APSC */
+ unsigned int m : 1; /* AP4KM */
+ unsigned int c : 1; /* AP4KC */
+ unsigned int mode : 3;
+ unsigned int n : 1; /* APXA */
+ unsigned int : 1;
+ unsigned int class : 8;
+ unsigned int bs : 2; /* SE bind/assoc */
+ unsigned int : 14;
+ unsigned int at : 8; /* ap type */
+ unsigned int nd : 8; /* nr of domains */
+ unsigned int : 4;
+ unsigned int ml : 4; /* apxl ml */
+ unsigned int : 4;
+ unsigned int qd : 4; /* queue depth */
+ };
+ };
+};
+
+/*
+ * Convenience defines to be used with the bs field from struct ap_tapq_gr2
+ */
+#define AP_BS_Q_USABLE 0
+#define AP_BS_Q_USABLE_NO_SECURE_KEY 1
+#define AP_BS_Q_AVAIL_FOR_BINDING 2
+#define AP_BS_Q_UNUSABLE 3
+
/**
* ap_tapq(): Test adjunct processor queue.
* @qid: The AP queue number
@@ -93,7 +130,7 @@ static inline bool ap_instructions_available(void)
*
* Returns AP queue status structure.
*/
-static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info)
+static inline struct ap_queue_status ap_tapq(ap_qid_t qid, struct ap_tapq_gr2 *info)
{
union ap_queue_status_reg reg1;
unsigned long reg2;
@@ -108,7 +145,7 @@ static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info)
: [qid] "d" (qid)
: "cc", "0", "1", "2");
if (info)
- *info = reg2;
+ info->value = reg2;
return reg1.status;
}
@@ -116,13 +153,12 @@ static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info)
* ap_test_queue(): Test adjunct processor queue.
* @qid: The AP queue number
* @tbit: Test facilities bit
- * @info: Pointer to queue descriptor
+ * @info: Ptr to tapq gr2 struct
*
* Returns AP queue status structure.
*/
-static inline struct ap_queue_status ap_test_queue(ap_qid_t qid,
- int tbit,
- unsigned long *info)
+static inline struct ap_queue_status ap_test_queue(ap_qid_t qid, int tbit,
+ struct ap_tapq_gr2 *info)
{
if (tbit)
qid |= 1UL << 23; /* set T bit*/
@@ -132,14 +168,18 @@ static inline struct ap_queue_status ap_test_queue(ap_qid_t qid,
/**
* ap_pqap_rapq(): Reset adjunct processor queue.
* @qid: The AP queue number
+ * @fbit: if != 0 set F bit
*
* Returns AP queue status structure.
*/
-static inline struct ap_queue_status ap_rapq(ap_qid_t qid)
+static inline struct ap_queue_status ap_rapq(ap_qid_t qid, int fbit)
{
unsigned long reg0 = qid | (1UL << 24); /* fc 1UL is RAPQ */
union ap_queue_status_reg reg1;
+ if (fbit)
+ reg0 |= 1UL << 22;
+
asm volatile(
" lgr 0,%[reg0]\n" /* qid arg into gr0 */
" .insn rre,0xb2af0000,0,0\n" /* PQAP(RAPQ) */
@@ -153,14 +193,18 @@ static inline struct ap_queue_status ap_rapq(ap_qid_t qid)
/**
* ap_pqap_zapq(): Reset and zeroize adjunct processor queue.
* @qid: The AP queue number
+ * @fbit: if != 0 set F bit
*
* Returns AP queue status structure.
*/
-static inline struct ap_queue_status ap_zapq(ap_qid_t qid)
+static inline struct ap_queue_status ap_zapq(ap_qid_t qid, int fbit)
{
unsigned long reg0 = qid | (2UL << 24); /* fc 2UL is ZAPQ */
union ap_queue_status_reg reg1;
+ if (fbit)
+ reg0 |= 1UL << 22;
+
asm volatile(
" lgr 0,%[reg0]\n" /* qid arg into gr0 */
" .insn rre,0xb2af0000,0,0\n" /* PQAP(ZAPQ) */
@@ -180,15 +224,16 @@ struct ap_config_info {
unsigned int apxa : 1; /* N bit */
unsigned int qact : 1; /* C bit */
unsigned int rc8a : 1; /* R bit */
- unsigned char _reserved1 : 4;
- unsigned char _reserved2[3];
- unsigned char Na; /* max # of APs - 1 */
- unsigned char Nd; /* max # of Domains - 1 */
- unsigned char _reserved3[10];
+ unsigned int : 4;
+ unsigned int apsb : 1; /* B bit */
+ unsigned int : 23;
+ unsigned char na; /* max # of APs - 1 */
+ unsigned char nd; /* max # of Domains - 1 */
+ unsigned char _reserved0[10];
unsigned int apm[8]; /* AP ID mask */
unsigned int aqm[8]; /* AP (usage) queue mask */
unsigned int adm[8]; /* AP (control) domain mask */
- unsigned char _reserved4[16];
+ unsigned char _reserved1[16];
} __aligned(8);
/**
@@ -318,6 +363,59 @@ static inline struct ap_queue_status ap_qact(ap_qid_t qid, int ifbit,
return reg1.status;
}
+/*
+ * ap_bapq(): SE bind AP queue.
+ * @qid: The AP queue number
+ *
+ * Returns AP queue status structure.
+ *
+ * Invoking this function in a non-SE environment
+ * may cause a specification exception.
+ */
+static inline struct ap_queue_status ap_bapq(ap_qid_t qid)
+{
+ unsigned long reg0 = qid | (7UL << 24); /* fc 7 is BAPQ */
+ union ap_queue_status_reg reg1;
+
+ asm volatile(
+ " lgr 0,%[reg0]\n" /* qid arg into gr0 */
+ " .insn rre,0xb2af0000,0,0\n" /* PQAP(BAPQ) */
+ " lgr %[reg1],1\n" /* gr1 (status) into reg1 */
+ : [reg1] "=&d" (reg1.value)
+ : [reg0] "d" (reg0)
+ : "cc", "0", "1");
+
+ return reg1.status;
+}
+
+/*
+ * ap_aapq(): SE associate AP queue.
+ * @qid: The AP queue number
+ * @sec_idx: The secret index
+ *
+ * Returns AP queue status structure.
+ *
+ * Invoking this function in a non-SE environment
+ * may cause a specification exception.
+ */
+static inline struct ap_queue_status ap_aapq(ap_qid_t qid, unsigned int sec_idx)
+{
+ unsigned long reg0 = qid | (8UL << 24); /* fc 8 is AAPQ */
+ unsigned long reg2 = sec_idx;
+ union ap_queue_status_reg reg1;
+
+ asm volatile(
+ " lgr 0,%[reg0]\n" /* qid arg into gr0 */
+ " lgr 2,%[reg2]\n" /* secret index into gr2 */
+ " .insn rre,0xb2af0000,0,0\n" /* PQAP(AAPQ) */
+ " lgr %[reg1],1\n" /* gr1 (status) into reg1 */
+ : [reg1] "=&d" (reg1.value)
+ : [reg0] "d" (reg0), [reg2] "d" (reg2)
+ : "cc", "0", "1", "2");
+
+ return reg1.status;
+}
+
/**
* ap_nqap(): Send message to adjunct processor queue.
* @qid: The AP queue number
@@ -359,10 +457,11 @@ static inline struct ap_queue_status ap_nqap(ap_qid_t qid,
* ap_dqap(): Receive message from adjunct processor queue.
* @qid: The AP queue number
* @psmid: Pointer to program supplied message identifier
- * @msg: The message text
- * @length: The message length
- * @reslength: Resitual length on return
- * @resgr0: input: gr0 value (only used if != 0), output: resitual gr0 content
+ * @msg: Pointer to message buffer
+ * @msglen: Message buffer size
+ * @length: Pointer to length of actually written bytes
+ * @reslength: Residual length on return
+ * @resgr0: input: gr0 value (only used if != 0), output: residual gr0 content
*
* Returns AP queue status structure.
* Condition code 1 on DQAP means the receive has taken place
@@ -386,8 +485,9 @@ static inline struct ap_queue_status ap_nqap(ap_qid_t qid,
* *resgr0 is to be used instead of qid to further process this entry.
*/
static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
- unsigned long long *psmid,
- void *msg, size_t length,
+ unsigned long *psmid,
+ void *msg, size_t msglen,
+ size_t *length,
size_t *reslength,
unsigned long *resgr0)
{
@@ -399,7 +499,7 @@ static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
rp1.even = 0UL;
rp1.odd = 0UL;
rp2.even = (unsigned long)msg;
- rp2.odd = (unsigned long)length;
+ rp2.odd = (unsigned long)msglen;
asm volatile(
" lgr 0,%[reg0]\n" /* qid param into gr0 */
@@ -429,11 +529,15 @@ static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
if (resgr0)
*resgr0 = reg0;
} else {
- *psmid = (((unsigned long long)rp1.even) << 32) + rp1.odd;
+ *psmid = (rp1.even << 32) + rp1.odd;
if (resgr0)
*resgr0 = 0;
}
+ /* update *length with the nr of bytes stored into the msg buffer */
+ if (length)
+ *length = msglen - rp2.odd;
+
return reg1.status;
}
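
A hypothetical caller sketch for the reworked ap_dqap() interface above: msglen now passes the buffer size, while the new length parameter returns how many bytes were actually stored into msg. AP_RESPONSE_NORMAL is assumed to come from the usual AP bus definitions:

static int receive_reply(ap_qid_t qid, u8 *msg, size_t msglen, size_t *len)
{
	struct ap_queue_status status;
	unsigned long psmid, resgr0 = 0;
	size_t reslen;

	status = ap_dqap(qid, &psmid, msg, msglen, len, &reslen, &resgr0);
	if (status.response_code != AP_RESPONSE_NORMAL)
		return -EIO;
	/* *len bytes of the reply are now valid in msg */
	return 0;
}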
diff --git a/arch/s390/include/asm/checksum.h b/arch/s390/include/asm/checksum.h
index d977a3a2f619..69837eec2ff5 100644
--- a/arch/s390/include/asm/checksum.h
+++ b/arch/s390/include/asm/checksum.h
@@ -12,13 +12,7 @@
#ifndef _S390_CHECKSUM_H
#define _S390_CHECKSUM_H
-#ifdef CONFIG_GENERIC_CSUM
-
-#include <asm-generic/checksum.h>
-
-#else /* CONFIG_GENERIC_CSUM */
-
-#include <linux/uaccess.h>
+#include <linux/kasan-checks.h>
#include <linux/in6.h>
/*
@@ -40,6 +34,7 @@ static inline __wsum csum_partial(const void *buff, int len, __wsum sum)
.odd = (unsigned long) len,
};
+ kasan_check_read(buff, len);
asm volatile(
"0: cksm %[sum],%[rp]\n"
" jo 0b\n"
@@ -135,5 +130,4 @@ static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
return csum_fold((__force __wsum)(sum >> 32));
}
-#endif /* CONFIG_GENERIC_CSUM */
#endif /* _S390_CHECKSUM_H */
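
A hedged sketch of the instrumentation pattern used above: when memory is read behind the compiler's back (here by the cksm instruction), an explicit kasan_check_read() keeps KASAN aware of the access. The plain C loop below merely stands in for the inline assembly:

#include <linux/kasan-checks.h>
#include <linux/types.h>

static inline u32 sum_bytes(const u8 *buf, int len)
{
	u32 sum = 0;
	int i;

	kasan_check_read(buf, len);	/* report the asm's read to KASAN */
	for (i = 0; i < len; i++)	/* the real code uses cksm here */
		sum += buf[i];
	return sum;
}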
diff --git a/arch/s390/include/asm/diag.h b/arch/s390/include/asm/diag.h
index 674a939f16ee..902e0330dd91 100644
--- a/arch/s390/include/asm/diag.h
+++ b/arch/s390/include/asm/diag.h
@@ -90,7 +90,7 @@ struct diag8c {
u8 num_partitions;
u16 width;
u16 height;
- u8 data[0];
+ u8 data[];
} __packed __aligned(4);
extern int diag8c(struct diag8c *out, struct ccw_dev_id *devno);
diff --git a/arch/s390/include/asm/entry-common.h b/arch/s390/include/asm/entry-common.h
index 000de2b1e67a..fdd319a622b0 100644
--- a/arch/s390/include/asm/entry-common.h
+++ b/arch/s390/include/asm/entry-common.h
@@ -60,9 +60,4 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
#define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare
-static inline bool on_thread_stack(void)
-{
- return !(((unsigned long)(current->stack) ^ current_stack_pointer) & ~(THREAD_SIZE - 1));
-}
-
#endif
diff --git a/arch/s390/include/asm/fcx.h b/arch/s390/include/asm/fcx.h
index b8a028a36173..29784b4b44f6 100644
--- a/arch/s390/include/asm/fcx.h
+++ b/arch/s390/include/asm/fcx.h
@@ -286,7 +286,7 @@ struct tccb_tcat {
*/
struct tccb {
struct tccb_tcah tcah;
- u8 tca[0];
+ u8 tca[];
} __attribute__ ((packed, aligned(8)));
struct tcw *tcw_get_intrg(struct tcw *tcw);
diff --git a/arch/s390/include/asm/kasan.h b/arch/s390/include/asm/kasan.h
index e5cfc81d5b61..0cffead0f2f2 100644
--- a/arch/s390/include/asm/kasan.h
+++ b/arch/s390/include/asm/kasan.h
@@ -2,7 +2,7 @@
#ifndef __ASM_KASAN_H
#define __ASM_KASAN_H
-#include <asm/pgtable.h>
+#include <linux/const.h>
#ifdef CONFIG_KASAN
@@ -13,35 +13,6 @@
#define KASAN_SHADOW_START KASAN_SHADOW_OFFSET
#define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
-extern void kasan_early_init(void);
-
-/*
- * Estimate kasan memory requirements, which it will reserve
- * at the very end of available physical memory. To estimate
- * that, we take into account that kasan would require
- * 1/8 of available physical memory (for shadow memory) +
- * creating page tables for the shadow memory region.
- * To keep page tables estimates simple take the double of
- * combined ptes size.
- *
- * physmem parameter has to be already adjusted if not entire physical memory
- * would be used (e.g. due to effect of "mem=" option).
- */
-static inline unsigned long kasan_estimate_memory_needs(unsigned long physmem)
-{
- unsigned long kasan_needs;
- unsigned long pages;
- /* for shadow memory */
- kasan_needs = round_up(physmem / 8, PAGE_SIZE);
- /* for paging structures */
- pages = DIV_ROUND_UP(kasan_needs, PAGE_SIZE);
- kasan_needs += DIV_ROUND_UP(pages, _PAGE_ENTRIES) * _PAGE_TABLE_SIZE * 2;
-
- return kasan_needs;
-}
-#else
-static inline void kasan_early_init(void) { }
-static inline unsigned long kasan_estimate_memory_needs(unsigned long physmem) { return 0; }
#endif
#endif
diff --git a/arch/s390/include/asm/linkage.h b/arch/s390/include/asm/linkage.h
index c76777b15fec..df3fb7d8227b 100644
--- a/arch/s390/include/asm/linkage.h
+++ b/arch/s390/include/asm/linkage.h
@@ -4,7 +4,7 @@
#include <linux/stringify.h>
-#define __ALIGN .align 16, 0x07
+#define __ALIGN .balign CONFIG_FUNCTION_ALIGNMENT, 0x07
#define __ALIGN_STR __stringify(__ALIGN)
#endif
diff --git a/arch/s390/include/asm/mem_detect.h b/arch/s390/include/asm/mem_detect.h
deleted file mode 100644
index f9e7354036d2..000000000000
--- a/arch/s390/include/asm/mem_detect.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_S390_MEM_DETECT_H
-#define _ASM_S390_MEM_DETECT_H
-
-#include <linux/types.h>
-
-enum mem_info_source {
- MEM_DETECT_NONE = 0,
- MEM_DETECT_SCLP_STOR_INFO,
- MEM_DETECT_DIAG260,
- MEM_DETECT_SCLP_READ_INFO,
- MEM_DETECT_BIN_SEARCH
-};
-
-struct mem_detect_block {
- u64 start;
- u64 end;
-};
-
-/*
- * Storage element id is defined as 1 byte (up to 256 storage elements).
- * In practise only storage element id 0 and 1 are used).
- * According to architecture one storage element could have as much as
- * 1020 subincrements. 255 mem_detect_blocks are embedded in mem_detect_info.
- * If more mem_detect_blocks are required, a block of memory from already
- * known mem_detect_block is taken (entries_extended points to it).
- */
-#define MEM_INLINED_ENTRIES 255 /* (PAGE_SIZE - 16) / 16 */
-
-struct mem_detect_info {
- u32 count;
- u8 info_source;
- unsigned long usable;
- struct mem_detect_block entries[MEM_INLINED_ENTRIES];
- struct mem_detect_block *entries_extended;
-};
-extern struct mem_detect_info mem_detect;
-
-void add_mem_detect_block(u64 start, u64 end);
-
-static inline int __get_mem_detect_block(u32 n, unsigned long *start,
- unsigned long *end, bool respect_usable_limit)
-{
- if (n >= mem_detect.count) {
- *start = 0;
- *end = 0;
- return -1;
- }
-
- if (n < MEM_INLINED_ENTRIES) {
- *start = (unsigned long)mem_detect.entries[n].start;
- *end = (unsigned long)mem_detect.entries[n].end;
- } else {
- *start = (unsigned long)mem_detect.entries_extended[n - MEM_INLINED_ENTRIES].start;
- *end = (unsigned long)mem_detect.entries_extended[n - MEM_INLINED_ENTRIES].end;
- }
-
- if (respect_usable_limit && mem_detect.usable) {
- if (*start >= mem_detect.usable)
- return -1;
- if (*end > mem_detect.usable)
- *end = mem_detect.usable;
- }
- return 0;
-}
-
-/**
- * for_each_mem_detect_usable_block - early online memory range iterator
- * @i: an integer used as loop variable
- * @p_start: ptr to unsigned long for start address of the range
- * @p_end: ptr to unsigned long for end address of the range
- *
- * Walks over detected online memory ranges below usable limit.
- */
-#define for_each_mem_detect_usable_block(i, p_start, p_end) \
- for (i = 0; !__get_mem_detect_block(i, p_start, p_end, true); i++)
-
-/* Walks over all detected online memory ranges disregarding usable limit. */
-#define for_each_mem_detect_block(i, p_start, p_end) \
- for (i = 0; !__get_mem_detect_block(i, p_start, p_end, false); i++)
-
-static inline unsigned long get_mem_detect_usable_total(void)
-{
- unsigned long start, end, total = 0;
- int i;
-
- for_each_mem_detect_usable_block(i, &start, &end)
- total += end - start;
-
- return total;
-}
-
-static inline void get_mem_detect_reserved(unsigned long *start,
- unsigned long *size)
-{
- *start = (unsigned long)mem_detect.entries_extended;
- if (mem_detect.count > MEM_INLINED_ENTRIES)
- *size = (mem_detect.count - MEM_INLINED_ENTRIES) * sizeof(struct mem_detect_block);
- else
- *size = 0;
-}
-
-static inline unsigned long get_mem_detect_end(void)
-{
- unsigned long start;
- unsigned long end;
-
- if (mem_detect.usable)
- return mem_detect.usable;
- if (mem_detect.count) {
- __get_mem_detect_block(mem_detect.count - 1, &start, &end, false);
- return end;
- }
- return 0;
-}
-
-#endif
diff --git a/arch/s390/include/asm/nospec-insn.h b/arch/s390/include/asm/nospec-insn.h
index 7e9e99523e95..7a946c42ad13 100644
--- a/arch/s390/include/asm/nospec-insn.h
+++ b/arch/s390/include/asm/nospec-insn.h
@@ -2,6 +2,7 @@
#ifndef _ASM_S390_NOSPEC_ASM_H
#define _ASM_S390_NOSPEC_ASM_H
+#include <linux/linkage.h>
#include <asm/dwarf.h>
#ifdef __ASSEMBLY__
@@ -16,7 +17,7 @@
.macro __THUNK_PROLOG_NAME name
#ifdef CONFIG_EXPOLINE_EXTERN
.pushsection .text,"ax",@progbits
- .align 16,0x07
+ __ALIGN
#else
.pushsection .text.\name,"axG",@progbits,\name,comdat
#endif
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
index b9da71632827..9917e2717b2b 100644
--- a/arch/s390/include/asm/perf_event.h
+++ b/arch/s390/include/asm/perf_event.h
@@ -60,7 +60,6 @@ struct perf_sf_sde_regs {
#define PERF_CPUM_SF_DIAG_MODE 0x0002 /* Diagnostic-sampling flag */
#define PERF_CPUM_SF_MODE_MASK (PERF_CPUM_SF_BASIC_MODE| \
PERF_CPUM_SF_DIAG_MODE)
-#define PERF_CPUM_SF_FULL_BLOCKS 0x0004 /* Process full SDBs only */
#define PERF_CPUM_SF_FREQ_MODE 0x0008 /* Sampling with frequency */
#define REG_NONE 0
@@ -71,7 +70,6 @@ struct perf_sf_sde_regs {
#define SAMPL_RATE(hwc) ((hwc)->event_base)
#define SAMPL_FLAGS(hwc) ((hwc)->config_base)
#define SAMPL_DIAG_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_DIAG_MODE)
-#define SDB_FULL_BLOCKS(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FULL_BLOCKS)
#define SAMPLE_FREQ_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FREQ_MODE)
#define perf_arch_fetch_caller_regs(regs, __ip) do { \
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index c1f6b46ec555..6822a11c2c8a 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -34,7 +34,7 @@ enum {
PG_DIRECT_MAP_MAX
};
-extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];
+extern atomic_long_t __bootdata_preserved(direct_pages_count[PG_DIRECT_MAP_MAX]);
static inline void update_page_count(int level, long count)
{
diff --git a/arch/s390/include/asm/physmem_info.h b/arch/s390/include/asm/physmem_info.h
new file mode 100644
index 000000000000..8e9c582592b3
--- /dev/null
+++ b/arch/s390/include/asm/physmem_info.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_MEM_DETECT_H
+#define _ASM_S390_MEM_DETECT_H
+
+#include <linux/types.h>
+
+enum physmem_info_source {
+ MEM_DETECT_NONE = 0,
+ MEM_DETECT_SCLP_STOR_INFO,
+ MEM_DETECT_DIAG260,
+ MEM_DETECT_SCLP_READ_INFO,
+ MEM_DETECT_BIN_SEARCH
+};
+
+struct physmem_range {
+ u64 start;
+ u64 end;
+};
+
+enum reserved_range_type {
+ RR_DECOMPRESSOR,
+ RR_INITRD,
+ RR_VMLINUX,
+ RR_AMODE31,
+ RR_IPLREPORT,
+ RR_CERT_COMP_LIST,
+ RR_MEM_DETECT_EXTENDED,
+ RR_VMEM,
+ RR_MAX
+};
+
+struct reserved_range {
+ unsigned long start;
+ unsigned long end;
+ struct reserved_range *chain;
+};
+
+/*
+ * Storage element id is defined as 1 byte (up to 256 storage elements).
+ * In practice only storage element ids 0 and 1 are used.
+ * According to the architecture one storage element could have as many as
+ * 1020 subincrements. 255 physmem_ranges are embedded in physmem_info.
+ * If more physmem_ranges are required, a block of memory from already
+ * known physmem_range is taken (online_extended points to it).
+ */
+#define MEM_INLINED_ENTRIES 255 /* (PAGE_SIZE - 16) / 16 */
+
+struct physmem_info {
+ u32 range_count;
+ u8 info_source;
+ unsigned long usable;
+ struct reserved_range reserved[RR_MAX];
+ struct physmem_range online[MEM_INLINED_ENTRIES];
+ struct physmem_range *online_extended;
+};
+
+extern struct physmem_info physmem_info;
+
+void add_physmem_online_range(u64 start, u64 end);
+
+static inline int __get_physmem_range(u32 n, unsigned long *start,
+ unsigned long *end, bool respect_usable_limit)
+{
+ if (n >= physmem_info.range_count) {
+ *start = 0;
+ *end = 0;
+ return -1;
+ }
+
+ if (n < MEM_INLINED_ENTRIES) {
+ *start = (unsigned long)physmem_info.online[n].start;
+ *end = (unsigned long)physmem_info.online[n].end;
+ } else {
+ *start = (unsigned long)physmem_info.online_extended[n - MEM_INLINED_ENTRIES].start;
+ *end = (unsigned long)physmem_info.online_extended[n - MEM_INLINED_ENTRIES].end;
+ }
+
+ if (respect_usable_limit && physmem_info.usable) {
+ if (*start >= physmem_info.usable)
+ return -1;
+ if (*end > physmem_info.usable)
+ *end = physmem_info.usable;
+ }
+ return 0;
+}
+
+/**
+ * for_each_physmem_usable_range - early online memory range iterator
+ * @i: an integer used as loop variable
+ * @p_start: ptr to unsigned long for start address of the range
+ * @p_end: ptr to unsigned long for end address of the range
+ *
+ * Walks over detected online memory ranges below usable limit.
+ */
+#define for_each_physmem_usable_range(i, p_start, p_end) \
+ for (i = 0; !__get_physmem_range(i, p_start, p_end, true); i++)
+
+/* Walks over all detected online memory ranges disregarding usable limit. */
+#define for_each_physmem_online_range(i, p_start, p_end) \
+ for (i = 0; !__get_physmem_range(i, p_start, p_end, false); i++)
+
+static inline const char *get_physmem_info_source(void)
+{
+ switch (physmem_info.info_source) {
+ case MEM_DETECT_SCLP_STOR_INFO:
+ return "sclp storage info";
+ case MEM_DETECT_DIAG260:
+ return "diag260";
+ case MEM_DETECT_SCLP_READ_INFO:
+ return "sclp read info";
+ case MEM_DETECT_BIN_SEARCH:
+ return "binary search";
+ }
+ return "none";
+}
+
+#define RR_TYPE_NAME(t) case RR_ ## t: return #t
+static inline const char *get_rr_type_name(enum reserved_range_type t)
+{
+ switch (t) {
+ RR_TYPE_NAME(DECOMPRESSOR);
+ RR_TYPE_NAME(INITRD);
+ RR_TYPE_NAME(VMLINUX);
+ RR_TYPE_NAME(AMODE31);
+ RR_TYPE_NAME(IPLREPORT);
+ RR_TYPE_NAME(CERT_COMP_LIST);
+ RR_TYPE_NAME(MEM_DETECT_EXTENDED);
+ RR_TYPE_NAME(VMEM);
+ default:
+ return "UNKNOWN";
+ }
+}
+
+#define for_each_physmem_reserved_type_range(t, range, p_start, p_end) \
+ for (range = &physmem_info.reserved[t], *p_start = range->start, *p_end = range->end; \
+ range && range->end; range = range->chain, \
+ *p_start = range ? range->start : 0, *p_end = range ? range->end : 0)
+
+static inline struct reserved_range *__physmem_reserved_next(enum reserved_range_type *t,
+ struct reserved_range *range)
+{
+ if (!range) {
+ range = &physmem_info.reserved[*t];
+ if (range->end)
+ return range;
+ }
+ if (range->chain)
+ return range->chain;
+ while (++*t < RR_MAX) {
+ range = &physmem_info.reserved[*t];
+ if (range->end)
+ return range;
+ }
+ return NULL;
+}
+
+#define for_each_physmem_reserved_range(t, range, p_start, p_end) \
+ for (t = 0, range = __physmem_reserved_next(&t, NULL), \
+ *p_start = range ? range->start : 0, *p_end = range ? range->end : 0; \
+ range; range = __physmem_reserved_next(&t, range), \
+ *p_start = range ? range->start : 0, *p_end = range ? range->end : 0)
+
+static inline unsigned long get_physmem_reserved(enum reserved_range_type type,
+ unsigned long *addr, unsigned long *size)
+{
+ *addr = physmem_info.reserved[type].start;
+ *size = physmem_info.reserved[type].end - physmem_info.reserved[type].start;
+ return *size;
+}
+
+#endif
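
An illustrative sketch (boot-code style, not part of this patch) of the iterators defined above, walking online memory below the usable limit and then every populated reserved range; print_range() is a hypothetical output helper:

static void print_range(const char *tag, unsigned long start,
			unsigned long end); /* hypothetical helper */

static void dump_physmem_layout(void)
{
	struct reserved_range *range;
	enum reserved_range_type t;
	unsigned long start, end;
	int i;

	for_each_physmem_usable_range(i, &start, &end)
		print_range("online", start, end);
	for_each_physmem_reserved_range(t, range, &start, &end)
		print_range(get_rr_type_name(t), start, end);
}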
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index e98d9650764b..dc17896a001a 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -99,7 +99,6 @@ void cpu_detect_mhz_feature(void);
extern const struct seq_operations cpuinfo_op;
extern void execve_tail(void);
-extern void __bpon(void);
unsigned long vdso_size(void);
/*
@@ -119,6 +118,41 @@ unsigned long vdso_size(void);
#define HAVE_ARCH_PICK_MMAP_LAYOUT
+#define __stackleak_poison __stackleak_poison
+static __always_inline void __stackleak_poison(unsigned long erase_low,
+ unsigned long erase_high,
+ unsigned long poison)
+{
+ unsigned long tmp, count;
+
+ count = erase_high - erase_low;
+ if (!count)
+ return;
+ asm volatile(
+ " cghi %[count],8\n"
+ " je 2f\n"
+ " aghi %[count],-(8+1)\n"
+ " srlg %[tmp],%[count],8\n"
+ " ltgr %[tmp],%[tmp]\n"
+ " jz 1f\n"
+ "0: stg %[poison],0(%[addr])\n"
+ " mvc 8(256-8,%[addr]),0(%[addr])\n"
+ " la %[addr],256(%[addr])\n"
+ " brctg %[tmp],0b\n"
+ "1: stg %[poison],0(%[addr])\n"
+ " larl %[tmp],3f\n"
+ " ex %[count],0(%[tmp])\n"
+ " j 4f\n"
+ "2: stg %[poison],0(%[addr])\n"
+ " j 4f\n"
+ "3: mvc 8(1,%[addr]),0(%[addr])\n"
+ "4:\n"
+ : [addr] "+&a" (erase_low), [count] "+&d" (count), [tmp] "=&a" (tmp)
+ : [poison] "d" (poison)
+ : "memory", "cc"
+ );
+}
+
/*
* Thread structure
*/
@@ -227,6 +261,13 @@ static __always_inline unsigned long __current_stack_pointer(void)
return sp;
}
+static __always_inline bool on_thread_stack(void)
+{
+ unsigned long ksp = S390_lowcore.kernel_stack;
+
+ return !((ksp ^ current_stack_pointer) & ~(THREAD_SIZE - 1));
+}
+
static __always_inline unsigned short stap(void)
{
unsigned short cpu_address;
@@ -329,9 +370,6 @@ static __always_inline void __noreturn disabled_wait(void)
#define ARCH_LOW_ADDRESS_LIMIT 0x7fffffffUL
-extern int s390_isolate_bp(void);
-extern int s390_isolate_bp_guest(void);
-
static __always_inline bool regs_irqs_disabled(struct pt_regs *regs)
{
return arch_irqs_disabled_flags(regs->psw.mask);
diff --git a/arch/s390/include/asm/set_memory.h b/arch/s390/include/asm/set_memory.h
index 950d87bd997a..7a3eefd7a242 100644
--- a/arch/s390/include/asm/set_memory.h
+++ b/arch/s390/include/asm/set_memory.h
@@ -6,11 +6,23 @@
extern struct mutex cpa_mutex;
-#define SET_MEMORY_RO 1UL
-#define SET_MEMORY_RW 2UL
-#define SET_MEMORY_NX 4UL
-#define SET_MEMORY_X 8UL
-#define SET_MEMORY_4K 16UL
+enum {
+ _SET_MEMORY_RO_BIT,
+ _SET_MEMORY_RW_BIT,
+ _SET_MEMORY_NX_BIT,
+ _SET_MEMORY_X_BIT,
+ _SET_MEMORY_4K_BIT,
+ _SET_MEMORY_INV_BIT,
+ _SET_MEMORY_DEF_BIT,
+};
+
+#define SET_MEMORY_RO BIT(_SET_MEMORY_RO_BIT)
+#define SET_MEMORY_RW BIT(_SET_MEMORY_RW_BIT)
+#define SET_MEMORY_NX BIT(_SET_MEMORY_NX_BIT)
+#define SET_MEMORY_X BIT(_SET_MEMORY_X_BIT)
+#define SET_MEMORY_4K BIT(_SET_MEMORY_4K_BIT)
+#define SET_MEMORY_INV BIT(_SET_MEMORY_INV_BIT)
+#define SET_MEMORY_DEF BIT(_SET_MEMORY_DEF_BIT)
int __set_memory(unsigned long addr, int numpages, unsigned long flags);
@@ -34,9 +46,23 @@ static inline int set_memory_x(unsigned long addr, int numpages)
return __set_memory(addr, numpages, SET_MEMORY_X);
}
+#define set_memory_rox set_memory_rox
+static inline int set_memory_rox(unsigned long addr, int numpages)
+{
+ return __set_memory(addr, numpages, SET_MEMORY_RO | SET_MEMORY_X);
+}
+
+static inline int set_memory_rwnx(unsigned long addr, int numpages)
+{
+ return __set_memory(addr, numpages, SET_MEMORY_RW | SET_MEMORY_NX);
+}
+
static inline int set_memory_4k(unsigned long addr, int numpages)
{
return __set_memory(addr, numpages, SET_MEMORY_4K);
}
+int set_direct_map_invalid_noflush(struct page *page);
+int set_direct_map_default_noflush(struct page *page);
+
#endif
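
For instance (illustrative caller, not from this patch), code that previously chained two calls can now flip a mapping to read-only and executable in a single pagetable walk:

static int protect_module_text(unsigned long addr, int numpages)
{
	/* one walk instead of set_memory_ro() + set_memory_x() */
	return set_memory_rox(addr, numpages);
}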
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 3a1f8825bc7d..f191255c60db 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -74,10 +74,6 @@ extern unsigned int zlib_dfltcc_support;
extern int noexec_disabled;
extern unsigned long ident_map_size;
-extern unsigned long pgalloc_pos;
-extern unsigned long pgalloc_end;
-extern unsigned long pgalloc_low;
-extern unsigned long __amode31_base;
/* The Write Back bit position in the physaddr is given by the SLPC PCI */
extern unsigned long mio_wb_bit_mask;
@@ -150,13 +146,13 @@ static inline unsigned long kaslr_offset(void)
return __kaslr_offset;
}
-extern int is_full_image;
-
-struct initrd_data {
- unsigned long start;
- unsigned long size;
-};
-extern struct initrd_data initrd_data;
+extern int __kaslr_enabled;
+static inline int kaslr_enabled(void)
+{
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+ return __kaslr_enabled;
+ return 0;
+}
struct oldmem_data {
unsigned long start;
@@ -164,7 +160,7 @@ struct oldmem_data {
};
extern struct oldmem_data oldmem_data;
-static inline u32 gen_lpswe(unsigned long addr)
+static __always_inline u32 gen_lpswe(unsigned long addr)
{
BUILD_BUG_ON(addr > 0xfff);
return 0xb2b20000 | addr;
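
A hypothetical sketch for the kaslr_enabled() accessor added above: with CONFIG_RANDOMIZE_BASE disabled it is constant zero, so the branch below is compiled away:

static void report_kaslr(void)
{
	if (kaslr_enabled())
		pr_info("kernel randomized, offset 0x%lx\n", kaslr_offset());
}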
diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h
index 1802be5abb5d..78f7b729b65f 100644
--- a/arch/s390/include/asm/stacktrace.h
+++ b/arch/s390/include/asm/stacktrace.h
@@ -189,17 +189,53 @@ static __always_inline unsigned long get_stack_pointer(struct task_struct *task,
(rettype)r2; \
})
-#define call_on_stack_noreturn(fn, stack) \
+/*
+ * Use call_nodat() to call a function with DAT disabled.
+ * Proper sign and zero extension of function arguments is done.
+ * Usage:
+ *
+ * rc = call_nodat(nr, rettype, fn, t1, a1, t2, a2, ...)
+ *
+ * - nr specifies the number of function arguments of fn.
+ * - fn is the function to be called, where fn is a physical address.
+ * - rettype is the return type of fn.
+ * - t1, a1, ... are pairs, where t1 must match the type of the first
+ * argument of fn, t2 the second, etc. a1 is the corresponding
+ * first function argument (not name), etc.
+ *
+ * fn() is called with standard C function call ABI, with the exception
+ * that no useful stackframe or stackpointer is passed via register 15.
+ * Therefore the called function must not use r15 to access the stack.
+ */
+#define call_nodat(nr, rettype, fn, ...) \
({ \
- void (*__fn)(void) = fn; \
+ rettype (*__fn)(CALL_PARM_##nr(__VA_ARGS__)) = (fn); \
+ /* aligned since psw_leave must not cross page boundary */ \
+ psw_t __aligned(16) psw_leave; \
+ psw_t psw_enter; \
+ CALL_LARGS_##nr(__VA_ARGS__); \
+ CALL_REGS_##nr; \
\
+ CALL_TYPECHECK_##nr(__VA_ARGS__); \
+ psw_enter.mask = PSW_KERNEL_BITS & ~PSW_MASK_DAT; \
+ psw_enter.addr = (unsigned long)__fn; \
asm volatile( \
- " la 15,0(%[_stack])\n" \
- " xc %[_bc](8,15),%[_bc](15)\n" \
- " brasl 14,%[_fn]\n" \
- ::[_bc] "i" (offsetof(struct stack_frame, back_chain)), \
- [_stack] "a" (stack), [_fn] "X" (__fn)); \
- BUG(); \
+ " epsw 0,1\n" \
+ " risbg 1,0,0,31,32\n" \
+ " larl 7,1f\n" \
+ " stg 1,%[psw_leave]\n" \
+ " stg 7,8+%[psw_leave]\n" \
+ " la 7,%[psw_leave]\n" \
+ " lra 7,0(7)\n" \
+ " larl 1,0f\n" \
+ " lra 14,0(1)\n" \
+ " lpswe %[psw_enter]\n" \
+ "0: lpswe 0(7)\n" \
+ "1:\n" \
+ : CALL_FMT_##nr, [psw_leave] "=Q" (psw_leave) \
+ : [psw_enter] "Q" (psw_enter) \
+ : "7", CALL_CLOBBER_##nr); \
+ (rettype)r2; \
})
#endif /* _ASM_S390_STACKTRACE_H */
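
As a worked example of the rules documented above, a hypothetical caller spells out the argument count, the return type and each (type, value) pair; do_copy() is an assumed routine that can run with DAT off and does not use r15:

unsigned long do_copy(void *dst, unsigned long count); /* assumed callee */

static unsigned long copy_with_dat_off(void *dst, unsigned long count)
{
	/* fn must be a physical address; __pa() conversion omitted here */
	return call_nodat(2, unsigned long, do_copy,
			  void *, dst, unsigned long, count);
}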
diff --git a/arch/s390/include/asm/string.h b/arch/s390/include/asm/string.h
index 3fae93ddb322..351685de53d2 100644
--- a/arch/s390/include/asm/string.h
+++ b/arch/s390/include/asm/string.h
@@ -55,18 +55,6 @@ char *strstr(const char *s1, const char *s2);
#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
-extern void *__memcpy(void *dest, const void *src, size_t n);
-extern void *__memset(void *s, int c, size_t n);
-extern void *__memmove(void *dest, const void *src, size_t n);
-
-/*
- * For files that are not instrumented (e.g. mm/slub.c) we
- * should use not instrumented version of mem* functions.
- */
-
-#define memcpy(dst, src, len) __memcpy(dst, src, len)
-#define memmove(dst, src, len) __memmove(dst, src, len)
-#define memset(s, c, n) __memset(s, c, n)
#define strlen(s) __strlen(s)
#define __no_sanitize_prefix_strfunc(x) __##x
@@ -79,6 +67,9 @@ extern void *__memmove(void *dest, const void *src, size_t n);
#define __no_sanitize_prefix_strfunc(x) x
#endif /* defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) */
+void *__memcpy(void *dest, const void *src, size_t n);
+void *__memset(void *s, int c, size_t n);
+void *__memmove(void *dest, const void *src, size_t n);
void *__memset16(uint16_t *s, uint16_t v, size_t count);
void *__memset32(uint32_t *s, uint32_t v, size_t count);
void *__memset64(uint64_t *s, uint64_t v, size_t count);
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index b2ffcb4fe000..c7c97921ed8d 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -9,6 +9,9 @@
#define _ASM_THREAD_INFO_H
#include <linux/bits.h>
+#ifndef ASM_OFFSETS_C
+#include <asm/asm-offsets.h>
+#endif
/*
* General size of kernel stacks
@@ -21,13 +24,12 @@
#define BOOT_STACK_SIZE (PAGE_SIZE << 2)
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
+#define STACK_INIT_OFFSET (THREAD_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
+
#ifndef __ASSEMBLY__
#include <asm/lowcore.h>
#include <asm/page.h>
-#define STACK_INIT_OFFSET \
- (THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs))
-
/*
* low level task data that entry.S needs immediate access to
* - this struct should fit entirely inside of one cache line
@@ -70,7 +72,6 @@ void arch_setup_new_exec(void);
#define TIF_PATCH_PENDING 5 /* pending live patching update */
#define TIF_PGSTE 6 /* New mm's will use 4K page tables */
#define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */
-#define TIF_ISOLATE_BP 8 /* Run process with isolated BP */
#define TIF_ISOLATE_BP_GUEST 9 /* Run KVM guests with isolated BP */
#define TIF_PER_TRAP 10 /* Need to handle PER trap on exit to usermode */
@@ -94,7 +95,6 @@ void arch_setup_new_exec(void);
#define _TIF_UPROBE BIT(TIF_UPROBE)
#define _TIF_GUARDED_STORAGE BIT(TIF_GUARDED_STORAGE)
#define _TIF_PATCH_PENDING BIT(TIF_PATCH_PENDING)
-#define _TIF_ISOLATE_BP BIT(TIF_ISOLATE_BP)
#define _TIF_ISOLATE_BP_GUEST BIT(TIF_ISOLATE_BP_GUEST)
#define _TIF_PER_TRAP BIT(TIF_PER_TRAP)
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index b376f0377a2c..a85e0c3e7027 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -60,7 +60,7 @@ typedef struct {
* except of floats, and long long (32 bit)
*
*/
- long args[0];
+ long args[];
} debug_sprintf_entry_t;
/* internal function prototyes */
@@ -981,16 +981,6 @@ static struct ctl_table s390dbf_table[] = {
{ }
};
-static struct ctl_table s390dbf_dir_table[] = {
- {
- .procname = "s390dbf",
- .maxlen = 0,
- .mode = S_IRUGO | S_IXUGO,
- .child = s390dbf_table,
- },
- { }
-};
-
static struct ctl_table_header *s390dbf_sysctl_header;
/**
@@ -1574,7 +1564,7 @@ out:
*/
static int __init debug_init(void)
{
- s390dbf_sysctl_header = register_sysctl_table(s390dbf_dir_table);
+ s390dbf_sysctl_header = register_sysctl("s390dbf", s390dbf_table);
mutex_lock(&debug_mutex);
debug_debugfs_root_entry = debugfs_create_dir(DEBUG_DIR_ROOT, NULL);
initialized = 1;
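
A hedged sketch of the simplified registration pattern adopted above: register_sysctl() creates the "s390dbf" directory itself, so the one-entry directory table becomes unnecessary. The table entry is illustrative, not taken from this patch:

static int example_value;

static struct ctl_table example_table[] = {
	{
		.procname	= "example",
		.data		= &example_value,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static struct ctl_table_header *example_header;

static int __init example_init(void)
{
	example_header = register_sysctl("s390dbf", example_table);
	return example_header ? 0 : -ENOMEM;
}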
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index 1e3233eb510a..d2012635b093 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -41,60 +41,50 @@ const char *stack_type_name(enum stack_type type)
EXPORT_SYMBOL_GPL(stack_type_name);
static inline bool in_stack(unsigned long sp, struct stack_info *info,
- enum stack_type type, unsigned long low,
- unsigned long high)
+ enum stack_type type, unsigned long stack)
{
- if (sp < low || sp >= high)
+ if (sp < stack || sp >= stack + THREAD_SIZE)
return false;
info->type = type;
- info->begin = low;
- info->end = high;
+ info->begin = stack;
+ info->end = stack + THREAD_SIZE;
return true;
}
static bool in_task_stack(unsigned long sp, struct task_struct *task,
struct stack_info *info)
{
- unsigned long stack;
+ unsigned long stack = (unsigned long)task_stack_page(task);
- stack = (unsigned long) task_stack_page(task);
- return in_stack(sp, info, STACK_TYPE_TASK, stack, stack + THREAD_SIZE);
+ return in_stack(sp, info, STACK_TYPE_TASK, stack);
}
static bool in_irq_stack(unsigned long sp, struct stack_info *info)
{
- unsigned long frame_size, top;
+ unsigned long stack = S390_lowcore.async_stack - STACK_INIT_OFFSET;
- frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
- top = S390_lowcore.async_stack + frame_size;
- return in_stack(sp, info, STACK_TYPE_IRQ, top - THREAD_SIZE, top);
+ return in_stack(sp, info, STACK_TYPE_IRQ, stack);
}
static bool in_nodat_stack(unsigned long sp, struct stack_info *info)
{
- unsigned long frame_size, top;
+ unsigned long stack = S390_lowcore.nodat_stack - STACK_INIT_OFFSET;
- frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
- top = S390_lowcore.nodat_stack + frame_size;
- return in_stack(sp, info, STACK_TYPE_NODAT, top - THREAD_SIZE, top);
+ return in_stack(sp, info, STACK_TYPE_NODAT, stack);
}
static bool in_mcck_stack(unsigned long sp, struct stack_info *info)
{
- unsigned long frame_size, top;
+ unsigned long stack = S390_lowcore.mcck_stack - STACK_INIT_OFFSET;
- frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
- top = S390_lowcore.mcck_stack + frame_size;
- return in_stack(sp, info, STACK_TYPE_MCCK, top - THREAD_SIZE, top);
+ return in_stack(sp, info, STACK_TYPE_MCCK, stack);
}
static bool in_restart_stack(unsigned long sp, struct stack_info *info)
{
- unsigned long frame_size, top;
+ unsigned long stack = S390_lowcore.restart_stack - STACK_INIT_OFFSET;
- frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
- top = S390_lowcore.restart_stack + frame_size;
- return in_stack(sp, info, STACK_TYPE_RESTART, top - THREAD_SIZE, top);
+ return in_stack(sp, info, STACK_TYPE_RESTART, stack);
}
int get_stack_info(unsigned long sp, struct task_struct *task,
@@ -152,7 +142,13 @@ void show_stack(struct task_struct *task, unsigned long *stack,
static void show_last_breaking_event(struct pt_regs *regs)
{
printk("Last Breaking-Event-Address:\n");
- printk(" [<%016lx>] %pSR\n", regs->last_break, (void *)regs->last_break);
+ printk(" [<%016lx>] ", regs->last_break);
+ if (user_mode(regs)) {
+ print_vma_addr(KERN_CONT, regs->last_break);
+ pr_cont("\n");
+ } else {
+ pr_cont("%pSR\n", (void *)regs->last_break);
+ }
}
void show_registers(struct pt_regs *regs)
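
A sketch (stating the assumption, not part of this patch) of the invariant the rework above relies on: every lowcore stack pointer is initialized to stack base + STACK_INIT_OFFSET, so subtracting STACK_INIT_OFFSET recovers the THREAD_SIZE-sized stack base that in_stack() checks against:

static bool sp_on_async_stack(unsigned long sp)
{
	unsigned long base = S390_lowcore.async_stack - STACK_INIT_OFFSET;

	return sp >= base && sp < base + THREAD_SIZE;
}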
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index d26f02495636..2dd5976a55ac 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -34,8 +34,6 @@
#include <asm/switch_to.h>
#include "entry.h"
-int __bootdata(is_full_image);
-
#define decompressor_handled_param(param) \
static int __init ignore_decompressor_param_##param(char *s) \
{ \
@@ -53,6 +51,14 @@ decompressor_handled_param(nokaslr);
decompressor_handled_param(prot_virt);
#endif
+static void __init kasan_early_init(void)
+{
+#ifdef CONFIG_KASAN
+ init_task.kasan_depth = 0;
+ sclp_early_printk("KernelAddressSanitizer initialized\n");
+#endif
+}
+
static void __init reset_tod_clock(void)
{
union tod_clock clk;
@@ -288,17 +294,6 @@ static void __init setup_boot_command_line(void)
strscpy(boot_command_line, early_command_line, COMMAND_LINE_SIZE);
}
-static void __init check_image_bootable(void)
-{
- if (is_full_image)
- return;
-
- sclp_early_printk("Linux kernel boot failure: An attempt to boot a vmlinux ELF image failed.\n");
- sclp_early_printk("This image does not contain all parts necessary for starting up. Use\n");
- sclp_early_printk("bzImage or arch/s390/boot/compressed/vmlinux instead.\n");
- disabled_wait();
-}
-
static void __init sort_amode31_extable(void)
{
sort_extable(__start_amode31_ex_table, __stop_amode31_ex_table);
@@ -306,8 +301,8 @@ static void __init sort_amode31_extable(void)
void __init startup_init(void)
{
+ kasan_early_init();
reset_tod_clock();
- check_image_bootable();
time_early_init();
init_kernel_storage_key();
lockdep_off();
diff --git a/arch/s390/kernel/earlypgm.S b/arch/s390/kernel/earlypgm.S
index f521c6da37b8..c634871f0d90 100644
--- a/arch/s390/kernel/earlypgm.S
+++ b/arch/s390/kernel/earlypgm.S
@@ -7,7 +7,7 @@
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
-ENTRY(early_pgm_check_handler)
+SYM_CODE_START(early_pgm_check_handler)
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
aghi %r15,-(STACK_FRAME_OVERHEAD+__PT_SIZE)
la %r11,STACK_FRAME_OVERHEAD(%r15)
@@ -20,4 +20,4 @@ ENTRY(early_pgm_check_handler)
mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
lpswe __LC_RETURN_PSW
-ENDPROC(early_pgm_check_handler)
+SYM_CODE_END(early_pgm_check_handler)
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 76a06f3d3671..e5b6c1369e8e 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -29,10 +29,6 @@
#include <asm/export.h>
#include <asm/nospec-insn.h>
-STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
-STACK_SIZE = 1 << STACK_SHIFT
-STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
-
_LPP_OFFSET = __LC_LPP
.macro STBEAR address
@@ -53,7 +49,7 @@ _LPP_OFFSET = __LC_LPP
.macro CHECK_STACK savearea
#ifdef CONFIG_CHECK_STACK
- tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
+ tml %r15,THREAD_SIZE - CONFIG_STACK_GUARD
lghi %r14,\savearea
jz stack_overflow
#endif
@@ -62,8 +58,8 @@ _LPP_OFFSET = __LC_LPP
.macro CHECK_VMAP_STACK savearea,oklabel
#ifdef CONFIG_VMAP_STACK
lgr %r14,%r15
- nill %r14,0x10000 - STACK_SIZE
- oill %r14,STACK_INIT
+ nill %r14,0x10000 - THREAD_SIZE
+ oill %r14,STACK_INIT_OFFSET
clg %r14,__LC_KERNEL_STACK
je \oklabel
clg %r14,__LC_ASYNC_STACK
@@ -154,26 +150,26 @@ _LPP_OFFSET = __LC_LPP
.endm
#endif
+ .macro STACKLEAK_ERASE
+#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+ brasl %r14,stackleak_erase_on_task_stack
+#endif
+ .endm
+
GEN_BR_THUNK %r14
.section .kprobes.text, "ax"
.Ldummy:
/*
- * This nop exists only in order to avoid that __bpon starts at
- * the beginning of the kprobes text section. In that case we would
- * have several symbols at the same address. E.g. objdump would take
- * an arbitrary symbol name when disassembling this code.
- * With the added nop in between the __bpon symbol is unique
- * again.
+ * The following nop exists only in order to avoid that the next
+ * symbol starts at the beginning of the kprobes text section.
+ * In that case there would be several symbols at the same address.
+ * E.g. objdump would take an arbitrary symbol when disassembling
+ * the code.
+ * With the added nop in between this cannot happen.
*/
nop 0
-ENTRY(__bpon)
- .globl __bpon
- BPON
- BR_EX %r14
-ENDPROC(__bpon)
-
/*
* Scheduler resume function, called by switch_to
* gpr2 = (task_struct *) prev
@@ -181,11 +177,11 @@ ENDPROC(__bpon)
* Returns:
* gpr2 = prev
*/
-ENTRY(__switch_to)
+SYM_FUNC_START(__switch_to)
stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
lghi %r4,__TASK_stack
lghi %r1,__TASK_thread
- llill %r5,STACK_INIT
+ llill %r5,STACK_INIT_OFFSET
stg %r15,__THREAD_ksp(%r1,%r2) # store kernel stack of prev
lg %r15,0(%r4,%r3) # start of kernel stack of next
agr %r15,%r5 # end of kernel stack of next
@@ -197,7 +193,7 @@ ENTRY(__switch_to)
lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40
BR_EX %r14
-ENDPROC(__switch_to)
+SYM_FUNC_END(__switch_to)
#if IS_ENABLED(CONFIG_KVM)
/*
@@ -206,7 +202,7 @@ ENDPROC(__switch_to)
* %r3 pointer to sie control block virt
* %r4 guest register save area
*/
-ENTRY(__sie64a)
+SYM_FUNC_START(__sie64a)
stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers
lg %r12,__LC_CURRENT
stg %r2,__SF_SIE_CONTROL_PHYS(%r15) # save sie block physical..
@@ -227,7 +223,7 @@ ENTRY(__sie64a)
TSTMSK __LC_CPU_FLAGS,_CIF_FPU
jo .Lsie_skip # exit if fp/vx regs changed
lg %r14,__SF_SIE_CONTROL_PHYS(%r15) # get sie block phys addr
- BPEXIT __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
+ BPEXIT __SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
.Lsie_entry:
sie 0(%r14)
# Let the next instruction be NOP to avoid triggering a machine check
@@ -235,7 +231,7 @@ ENTRY(__sie64a)
nopr 7
.Lsie_leave:
BPOFF
- BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
+ BPENTER __SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
.Lsie_skip:
lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer
ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
@@ -252,8 +248,7 @@ ENTRY(__sie64a)
nopr 7
.Lrewind_pad2:
nopr 7
- .globl sie_exit
-sie_exit:
+SYM_INNER_LABEL(sie_exit, SYM_L_GLOBAL)
lg %r14,__SF_SIE_SAVEAREA(%r15) # load guest register save area
stmg %r0,%r13,0(%r14) # save guest gprs 0-13
xgr %r0,%r0 # clear guest registers to
@@ -273,7 +268,7 @@ sie_exit:
EX_TABLE(.Lrewind_pad4,.Lsie_fault)
EX_TABLE(.Lrewind_pad2,.Lsie_fault)
EX_TABLE(sie_exit,.Lsie_fault)
-ENDPROC(__sie64a)
+SYM_FUNC_END(__sie64a)
EXPORT_SYMBOL(__sie64a)
EXPORT_SYMBOL(sie_exit)
#endif
@@ -283,7 +278,7 @@ EXPORT_SYMBOL(sie_exit)
* are entered with interrupts disabled.
*/
-ENTRY(system_call)
+SYM_CODE_START(system_call)
stpt __LC_SYS_ENTER_TIMER
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
BPOFF
@@ -291,11 +286,9 @@ ENTRY(system_call)
.Lsysc_per:
STBEAR __LC_LAST_BREAK
lctlg %c1,%c1,__LC_KERNEL_ASCE
- lg %r12,__LC_CURRENT
lg %r15,__LC_KERNEL_STACK
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
stmg %r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
- BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
# clear user controlled register to prevent speculative use
xgr %r0,%r0
xgr %r1,%r1
@@ -312,39 +305,40 @@ ENTRY(system_call)
MBEAR %r2
lgr %r3,%r14
brasl %r14,__do_syscall
+ STACKLEAK_ERASE
lctlg %c1,%c1,__LC_USER_ASCE
mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
- BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
+ BPON
LBEAR STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
stpt __LC_EXIT_TIMER
LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE
-ENDPROC(system_call)
+SYM_CODE_END(system_call)
#
# a new process exits the kernel with ret_from_fork
#
-ENTRY(ret_from_fork)
+SYM_CODE_START(ret_from_fork)
lgr %r3,%r11
brasl %r14,__ret_from_fork
+ STACKLEAK_ERASE
lctlg %c1,%c1,__LC_USER_ASCE
mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
- BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
+ BPON
LBEAR STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
stpt __LC_EXIT_TIMER
LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE
-ENDPROC(ret_from_fork)
+SYM_CODE_END(ret_from_fork)
/*
* Program check handler routine
*/
-ENTRY(pgm_check_handler)
+SYM_CODE_START(pgm_check_handler)
stpt __LC_SYS_ENTER_TIMER
BPOFF
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
- lg %r12,__LC_CURRENT
lghi %r10,0
lmg %r8,%r9,__LC_PGM_OLD_PSW
tmhh %r8,0x0001 # coming from user space?
@@ -355,6 +349,7 @@ ENTRY(pgm_check_handler)
#if IS_ENABLED(CONFIG_KVM)
# cleanup critical section for program checks in __sie64a
OUTSIDE %r9,.Lsie_gmap,.Lsie_done,1f
+ BPENTER __SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
SIEEXIT
lghi %r10,_PIF_GUEST_FAULT
#endif
@@ -366,8 +361,7 @@ ENTRY(pgm_check_handler)
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
# CHECK_VMAP_STACK branches to stack_overflow or 4f
CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f
-3: BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
- lg %r15,__LC_KERNEL_STACK
+3: lg %r15,__LC_KERNEL_STACK
4: la %r11,STACK_FRAME_OVERHEAD(%r15)
stg %r10,__PT_FLAGS(%r11)
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
@@ -388,8 +382,9 @@ ENTRY(pgm_check_handler)
brasl %r14,__do_pgm_check
tmhh %r8,0x0001 # returning to user space?
jno .Lpgm_exit_kernel
+ STACKLEAK_ERASE
lctlg %c1,%c1,__LC_USER_ASCE
- BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
+ BPON
stpt __LC_EXIT_TIMER
.Lpgm_exit_kernel:
mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
@@ -407,32 +402,30 @@ ENTRY(pgm_check_handler)
lghi %r14,1
LBEAR __LC_PGM_LAST_BREAK
LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE # branch to .Lsysc_per
-ENDPROC(pgm_check_handler)
+SYM_CODE_END(pgm_check_handler)
/*
* Interrupt handler macro used for external and IO interrupts.
*/
.macro INT_HANDLER name,lc_old_psw,handler
-ENTRY(\name)
+SYM_CODE_START(\name)
stckf __LC_INT_CLOCK
stpt __LC_SYS_ENTER_TIMER
STBEAR __LC_LAST_BREAK
BPOFF
stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
- lg %r12,__LC_CURRENT
lmg %r8,%r9,\lc_old_psw
tmhh %r8,0x0001 # interrupting from user ?
jnz 1f
#if IS_ENABLED(CONFIG_KVM)
OUTSIDE %r9,.Lsie_gmap,.Lsie_done,0f
- BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
+ BPENTER __SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
SIEEXIT
#endif
0: CHECK_STACK __LC_SAVE_AREA_ASYNC
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 2f
-1: BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
- lctlg %c1,%c1,__LC_KERNEL_ASCE
+1: lctlg %c1,%c1,__LC_KERNEL_ASCE
lg %r15,__LC_KERNEL_STACK
2: xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
la %r11,STACK_FRAME_OVERHEAD(%r15)
@@ -455,13 +448,14 @@ ENTRY(\name)
mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
tmhh %r8,0x0001 # returning to user ?
jno 2f
+ STACKLEAK_ERASE
lctlg %c1,%c1,__LC_USER_ASCE
- BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
+ BPON
stpt __LC_EXIT_TIMER
2: LBEAR __PT_LAST_BREAK(%r11)
lmg %r0,%r15,__PT_R0(%r11)
LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE
-ENDPROC(\name)
+SYM_CODE_END(\name)
.endm
INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq
@@ -470,7 +464,7 @@ INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq
/*
* Load idle PSW.
*/
-ENTRY(psw_idle)
+SYM_FUNC_START(psw_idle)
stg %r14,(__SF_GPRS+8*8)(%r15)
stg %r3,__SF_EMPTY(%r15)
larl %r1,psw_idle_exit
@@ -486,29 +480,26 @@ ENTRY(psw_idle)
stckf __CLOCK_IDLE_ENTER(%r2)
stpt __TIMER_IDLE_ENTER(%r2)
lpswe __SF_EMPTY(%r15)
-.globl psw_idle_exit
-psw_idle_exit:
+SYM_INNER_LABEL(psw_idle_exit, SYM_L_GLOBAL)
BR_EX %r14
-ENDPROC(psw_idle)
+SYM_FUNC_END(psw_idle)
/*
* Machine check handler routines
*/
-ENTRY(mcck_int_handler)
+SYM_CODE_START(mcck_int_handler)
stckf __LC_MCCK_CLOCK
BPOFF
la %r1,4095 # validate r1
spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # validate cpu timer
LBEAR __LC_LAST_BREAK_SAVE_AREA-4095(%r1) # validate bear
- lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs
- lg %r12,__LC_CURRENT
+ lmg %r0,%r15,__LC_GPREGS_SAVE_AREA # validate gprs
lmg %r8,%r9,__LC_MCK_OLD_PSW
TSTMSK __LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
jo .Lmcck_panic # yes -> rest of mcck code invalid
TSTMSK __LC_MCCK_CODE,MCCK_CODE_CR_VALID
jno .Lmcck_panic # control registers invalid -> panic
- la %r14,4095
- lctlg %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r14) # validate ctl regs
+ lctlg %c0,%c15,__LC_CREGS_SAVE_AREA # validate ctl regs
ptlb
lghi %r14,__LC_CPU_TIMER_SAVE_AREA
mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
@@ -530,16 +521,13 @@ ENTRY(mcck_int_handler)
TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
jno .Lmcck_panic
#if IS_ENABLED(CONFIG_KVM)
- OUTSIDE %r9,.Lsie_gmap,.Lsie_done,.Lmcck_stack
+ OUTSIDE %r9,.Lsie_gmap,.Lsie_done,.Lmcck_user
OUTSIDE %r9,.Lsie_entry,.Lsie_leave,4f
oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
-4: BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
+4: BPENTER __SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
SIEEXIT
- j .Lmcck_stack
#endif
.Lmcck_user:
- BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
-.Lmcck_stack:
lg %r15,__LC_MCCK_STACK
la %r11,STACK_FRAME_OVERHEAD(%r15)
stctg %c1,%c1,__PT_CR1(%r11)
@@ -567,7 +555,7 @@ ENTRY(mcck_int_handler)
mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
jno 0f
- BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
+ BPON
stpt __LC_EXIT_TIMER
0: ALTERNATIVE "nop", __stringify(lghi %r12,__LC_LAST_BREAK_SAVE_AREA),193
LBEAR 0(%r12)
@@ -583,10 +571,10 @@ ENTRY(mcck_int_handler)
*/
lhi %r5,0
lhi %r6,1
- larl %r7,.Lstop_lock
+ larl %r7,stop_lock
cs %r5,%r6,0(%r7) # single CPU-stopper only
jnz 4f
- larl %r7,.Lthis_cpu
+ larl %r7,this_cpu
stap 0(%r7) # this CPU address
lh %r4,0(%r7)
nilh %r4,0
@@ -602,16 +590,15 @@ ENTRY(mcck_int_handler)
3: sigp %r1,%r4,SIGP_STOP # stop this CPU
brc SIGP_CC_BUSY,3b
4: j 4b
-ENDPROC(mcck_int_handler)
+SYM_CODE_END(mcck_int_handler)
-ENTRY(restart_int_handler)
+SYM_CODE_START(restart_int_handler)
ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40
stg %r15,__LC_SAVE_AREA_RESTART
TSTMSK __LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
jz 0f
- la %r15,4095
- lctlg %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r15)
-0: larl %r15,.Lstosm_tmp
+ lctlg %c0,%c15,__LC_CREGS_SAVE_AREA
+0: larl %r15,stosm_tmp
stosm 0(%r15),0x04 # turn dat on, keep irqs off
lg %r15,__LC_RESTART_STACK
xc STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
@@ -632,7 +619,7 @@ ENTRY(restart_int_handler)
2: sigp %r4,%r3,SIGP_STOP # sigp stop to current cpu
brc 2,2b
3: j 3b
-ENDPROC(restart_int_handler)
+SYM_CODE_END(restart_int_handler)
.section .kprobes.text, "ax"
@@ -642,7 +629,7 @@ ENDPROC(restart_int_handler)
* No need to properly save the registers, we are going to panic anyway.
* Setup a pt_regs so that show_trace can provide a good call trace.
*/
-ENTRY(stack_overflow)
+SYM_CODE_START(stack_overflow)
lg %r15,__LC_NODAT_STACK # change to panic stack
la %r11,STACK_FRAME_OVERHEAD(%r15)
stmg %r0,%r7,__PT_R0(%r11)
@@ -652,26 +639,27 @@ ENTRY(stack_overflow)
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
lgr %r2,%r11 # pass pointer to pt_regs
jg kernel_stack_overflow
-ENDPROC(stack_overflow)
+SYM_CODE_END(stack_overflow)
#endif
.section .data, "aw"
- .align 4
-.Lstop_lock: .long 0
-.Lthis_cpu: .short 0
-.Lstosm_tmp: .byte 0
+ .balign 4
+SYM_DATA_LOCAL(stop_lock, .long 0)
+SYM_DATA_LOCAL(this_cpu, .short 0)
+SYM_DATA_LOCAL(stosm_tmp, .byte 0)
+
.section .rodata, "a"
#define SYSCALL(esame,emu) .quad __s390x_ ## esame
- .globl sys_call_table
-sys_call_table:
+SYM_DATA_START(sys_call_table)
#include "asm/syscall_table.h"
+SYM_DATA_END(sys_call_table)
#undef SYSCALL
#ifdef CONFIG_COMPAT
#define SYSCALL(esame,emu) .quad __s390_ ## emu
- .globl sys_call_table_emu
-sys_call_table_emu:
+SYM_DATA_START(sys_call_table_emu)
#include "asm/syscall_table.h"
+SYM_DATA_END(sys_call_table_emu)
#undef SYSCALL
#endif
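
Note: STACKLEAK_ERASE above is the only s390-specific piece of the stackleak wiring; the actual erasing happens in the generic stackleak_erase_on_task_stack(). A much-simplified sketch of that step follows; STACKLEAK_POISON and current->lowest_stack are real kernel names, while the stack-pointer helper is an illustrative placeholder:

/* Simplified sketch of the erase done on the return-to-user path:
 * everything below the current stack pointer is dead, so poison it
 * from the lowest point this syscall reached up to the live frame.
 * read_stack_pointer() is a placeholder, not a real kernel helper.
 */
static __always_inline void stackleak_erase_sketch(void)
{
	unsigned long ptr = current->lowest_stack;	/* low-water mark */
	unsigned long sp = read_stack_pointer();	/* placeholder */

	while (ptr < sp) {
		*(unsigned long *)ptr = STACKLEAK_POISON;
		ptr += sizeof(unsigned long);
	}
}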
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 416b5a94353d..c46381ea04ec 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -49,26 +49,6 @@ struct ftrace_insn {
s32 disp;
} __packed;
-asm(
- " .align 16\n"
- "ftrace_shared_hotpatch_trampoline_br:\n"
- " lmg %r0,%r1,2(%r1)\n"
- " br %r1\n"
- "ftrace_shared_hotpatch_trampoline_br_end:\n"
-);
-
-#ifdef CONFIG_EXPOLINE
-asm(
- " .align 16\n"
- "ftrace_shared_hotpatch_trampoline_exrl:\n"
- " lmg %r0,%r1,2(%r1)\n"
- " exrl %r0,0f\n"
- " j .\n"
- "0: br %r1\n"
- "ftrace_shared_hotpatch_trampoline_exrl_end:\n"
-);
-#endif /* CONFIG_EXPOLINE */
-
#ifdef CONFIG_MODULES
static char *ftrace_plt;
#endif /* CONFIG_MODULES */
@@ -246,7 +226,7 @@ static int __init ftrace_plt_init(void)
start = ftrace_shared_hotpatch_trampoline(&end);
memcpy(ftrace_plt, start, end - start);
- set_memory_ro((unsigned long)ftrace_plt, 1);
+ set_memory_rox((unsigned long)ftrace_plt, 1);
return 0;
}
device_initcall(ftrace_plt_init);
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 3b3bf8329e6c..df77ba102096 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -16,7 +16,7 @@
#include <asm/ptrace.h>
__HEAD
-ENTRY(startup_continue)
+SYM_CODE_START(startup_continue)
larl %r1,tod_clock_base
mvc 0(16,%r1),__LC_BOOT_CLOCK
#
@@ -24,19 +24,17 @@ ENTRY(startup_continue)
#
larl %r14,init_task
stg %r14,__LC_CURRENT
- larl %r15,init_thread_union+THREAD_SIZE-STACK_FRAME_OVERHEAD-__PT_SIZE
+ larl %r15,init_thread_union+STACK_INIT_OFFSET
+ stg %r15,__LC_KERNEL_STACK
brasl %r14,sclp_early_adjust_va # allow sclp_early_printk
-#ifdef CONFIG_KASAN
- brasl %r14,kasan_early_init
-#endif
brasl %r14,startup_init # s390 specific early init
brasl %r14,start_kernel # common init code
#
# We returned from start_kernel ?!? PANIK
#
basr %r13,0
- lpswe .Ldw-.(%r13) # load disabled wait psw
+ lpswe dw_psw-.(%r13) # load disabled wait psw
+SYM_CODE_END(startup_continue)
.align 16
-.LPG1:
-.Ldw: .quad 0x0002000180000000,0x0000000000000000
+SYM_DATA_LOCAL(dw_psw, .quad 0x0002000180000000,0x0000000000000000)
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 5f0f5c86963a..43de939b7af1 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -176,11 +176,11 @@ static bool reipl_fcp_clear;
static bool reipl_ccw_clear;
static bool reipl_eckd_clear;
-static inline int __diag308(unsigned long subcode, void *addr)
+static inline int __diag308(unsigned long subcode, unsigned long addr)
{
union register_pair r1;
- r1.even = (unsigned long) addr;
+ r1.even = addr;
r1.odd = 0;
asm volatile(
" diag %[r1],%[subcode],0x308\n"
@@ -195,7 +195,7 @@ static inline int __diag308(unsigned long subcode, void *addr)
int diag308(unsigned long subcode, void *addr)
{
diag_stat_inc(DIAG_STAT_X308);
- return __diag308(subcode, addr);
+ return __diag308(subcode, addr ? virt_to_phys(addr) : 0);
}
EXPORT_SYMBOL_GPL(diag308);
@@ -649,7 +649,6 @@ static struct kset *ipl_kset;
static void __ipl_run(void *unused)
{
- __bpon();
diag308(DIAG308_LOAD_CLEAR, NULL);
}
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 7b41ceecbb25..d4b863ed0aa7 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -41,7 +41,7 @@ void *alloc_insn_page(void)
page = module_alloc(PAGE_SIZE);
if (!page)
return NULL;
- __set_memory((unsigned long) page, 1, SET_MEMORY_RO | SET_MEMORY_X);
+ set_memory_rox((unsigned long)page, 1);
return page;
}
diff --git a/arch/s390/kernel/kprobes_insn_page.S b/arch/s390/kernel/kprobes_insn_page.S
index f6cb022ef8c8..b6335296dcd8 100644
--- a/arch/s390/kernel/kprobes_insn_page.S
+++ b/arch/s390/kernel/kprobes_insn_page.S
@@ -14,9 +14,9 @@
*/
.section .kprobes.text, "ax"
.align 4096
-ENTRY(kprobes_insn_page)
+SYM_CODE_START(kprobes_insn_page)
.rept 2048
.word 0x07fe
.endr
-ENDPROC(kprobes_insn_page)
+SYM_CODE_END(kprobes_insn_page)
.previous
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 2a8e73266428..6d9276c096a6 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -29,8 +29,8 @@
#include <asm/nmi.h>
#include <asm/sclp.h>
-typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long,
- unsigned long);
+typedef void (*relocate_kernel_t)(unsigned long, unsigned long, unsigned long);
+typedef int (*purgatory_t)(int);
extern const unsigned char relocate_kernel[];
extern const unsigned long long relocate_kernel_len;
@@ -41,11 +41,14 @@ extern const unsigned long long relocate_kernel_len;
* Reset the system, copy boot CPU registers to absolute zero,
* and jump to the kdump image
*/
-static void __do_machine_kdump(void *image)
+static void __do_machine_kdump(void *data)
{
- int (*start_kdump)(int);
+ struct kimage *image = data;
+ purgatory_t purgatory;
unsigned long prefix;
+ purgatory = (purgatory_t)image->start;
+
/* store_status() saved the prefix register to lowcore */
prefix = (unsigned long) S390_lowcore.prefixreg_save_area;
@@ -58,13 +61,11 @@ static void __do_machine_kdump(void *image)
* prefix register of this CPU to zero
*/
memcpy(absolute_pointer(__LC_FPREGS_SAVE_AREA),
- (void *)(prefix + __LC_FPREGS_SAVE_AREA), 512);
+ phys_to_virt(prefix + __LC_FPREGS_SAVE_AREA), 512);
- __load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA);
- start_kdump = (void *)((struct kimage *) image)->start;
- start_kdump(1);
+ call_nodat(1, int, purgatory, int, 1);
- /* Die if start_kdump returns */
+ /* Die if kdump returns */
disabled_wait();
}
@@ -111,18 +112,6 @@ static noinline void __machine_kdump(void *image)
store_status(__do_machine_kdump, image);
}
-static unsigned long do_start_kdump(unsigned long addr)
-{
- struct kimage *image = (struct kimage *) addr;
- int (*start_kdump)(int) = (void *)image->start;
- int rc;
-
- __arch_local_irq_stnsm(0xfb); /* disable DAT */
- rc = start_kdump(0);
- __arch_local_irq_stosm(0x04); /* enable DAT */
- return rc;
-}
-
#endif /* CONFIG_CRASH_DUMP */
/*
@@ -131,12 +120,10 @@ static unsigned long do_start_kdump(unsigned long addr)
static bool kdump_csum_valid(struct kimage *image)
{
#ifdef CONFIG_CRASH_DUMP
+ purgatory_t purgatory = (purgatory_t)image->start;
int rc;
- preempt_disable();
- rc = call_on_stack(1, S390_lowcore.nodat_stack, unsigned long, do_start_kdump,
- unsigned long, (unsigned long)image);
- preempt_enable();
+ rc = call_nodat(1, int, purgatory, int, 0);
return rc == 0;
#else
return false;
@@ -210,7 +197,7 @@ int machine_kexec_prepare(struct kimage *image)
return -EINVAL;
/* Get the destination where the assembler code should be copied to.*/
- reboot_code_buffer = (void *) page_to_phys(image->control_code_page);
+ reboot_code_buffer = page_to_virt(image->control_code_page);
/* Then copy it */
memcpy(reboot_code_buffer, relocate_kernel, relocate_kernel_len);
@@ -250,19 +237,20 @@ void machine_crash_shutdown(struct pt_regs *regs)
*/
static void __do_machine_kexec(void *data)
{
- unsigned long diag308_subcode;
- relocate_kernel_t data_mover;
+ unsigned long data_mover, entry, diag308_subcode;
struct kimage *image = data;
- s390_reset_system();
- data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page);
-
- __arch_local_irq_stnsm(0xfb); /* disable DAT - avoid no-execute */
- /* Call the moving routine */
+ data_mover = page_to_phys(image->control_code_page);
+ entry = virt_to_phys(&image->head);
diag308_subcode = DIAG308_CLEAR_RESET;
if (sclp.has_iplcc)
diag308_subcode |= DIAG308_FLAG_EI;
- (*data_mover)(&image->head, image->start, diag308_subcode);
+ s390_reset_system();
+
+ call_nodat(3, void, (relocate_kernel_t)data_mover,
+ unsigned long, entry,
+ unsigned long, image->start,
+ unsigned long, diag308_subcode);
/* Die if kexec returns */
disabled_wait();
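
Note: call_nodat(), used by both kdump paths above, is introduced elsewhere in this series. From these call sites its convention reads as call_nodat(nr_args, return_type, function, arg1_type, arg1, ...), running the callee on the nodat stack with DAT disabled. A hypothetical wrapper showing the shape of a call:

/* Sketch: both the checksum validation (argument 0) and the actual
 * kdump jump (argument 1) funnel through the purgatory entry point
 * via call_nodat(); the macro itself is defined elsewhere.
 */
static int run_purgatory(struct kimage *image, int subcode)
{
	purgatory_t purgatory = (purgatory_t)image->start;

	return call_nodat(1, int, purgatory, int, subcode);
}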
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index 6c10da43b538..dbece2803c50 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -28,9 +28,9 @@
.section .kprobes.text, "ax"
-ENTRY(ftrace_stub)
+SYM_FUNC_START(ftrace_stub)
BR_EX %r14
-ENDPROC(ftrace_stub)
+SYM_FUNC_END(ftrace_stub)
SYM_CODE_START(ftrace_stub_direct_tramp)
lgr %r1, %r0
@@ -140,10 +140,25 @@ SYM_FUNC_END(return_to_handler)
#endif
#endif /* CONFIG_FUNCTION_TRACER */
-#ifdef CONFIG_RETHOOK
+SYM_CODE_START(ftrace_shared_hotpatch_trampoline_br)
+ lmg %r0,%r1,2(%r1)
+ br %r1
+SYM_INNER_LABEL(ftrace_shared_hotpatch_trampoline_br_end, SYM_L_GLOBAL)
+SYM_CODE_END(ftrace_shared_hotpatch_trampoline_br)
+
+#ifdef CONFIG_EXPOLINE
+SYM_CODE_START(ftrace_shared_hotpatch_trampoline_exrl)
+ lmg %r0,%r1,2(%r1)
+ exrl %r0,0f
+ j .
+0: br %r1
+SYM_INNER_LABEL(ftrace_shared_hotpatch_trampoline_exrl_end, SYM_L_GLOBAL)
+SYM_CODE_END(ftrace_shared_hotpatch_trampoline_exrl)
+#endif /* CONFIG_EXPOLINE */
-SYM_FUNC_START(arch_rethook_trampoline)
+#ifdef CONFIG_RETHOOK
+SYM_CODE_START(arch_rethook_trampoline)
stg %r14,(__SF_GPRS+8*8)(%r15)
lay %r15,-STACK_FRAME_SIZE(%r15)
stmg %r0,%r14,STACK_PTREGS_GPRS(%r15)
@@ -166,7 +181,6 @@ SYM_FUNC_START(arch_rethook_trampoline)
mvc __SF_EMPTY(16,%r7),STACK_PTREGS_PSW(%r15)
lmg %r0,%r15,STACK_PTREGS_GPRS(%r15)
lpswe __SF_EMPTY(%r15)
-
-SYM_FUNC_END(arch_rethook_trampoline)
+SYM_CODE_END(arch_rethook_trampoline)
#endif /* CONFIG_RETHOOK */
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index b9dbf0c483ed..f1b35dcdf3eb 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -26,6 +26,7 @@
#include <asm/facility.h>
#include <asm/ftrace.lds.h>
#include <asm/set_memory.h>
+#include <asm/setup.h>
#if 0
#define DEBUGP printk
@@ -35,6 +36,24 @@
#define PLT_ENTRY_SIZE 22
+static unsigned long get_module_load_offset(void)
+{
+ static DEFINE_MUTEX(module_kaslr_mutex);
+ static unsigned long module_load_offset;
+
+ if (!kaslr_enabled())
+ return 0;
+ /*
+ * Calculate the module_load_offset the first time this code
+ * is called. Once calculated it stays the same until reboot.
+ */
+ mutex_lock(&module_kaslr_mutex);
+ if (!module_load_offset)
+ module_load_offset = get_random_u32_inclusive(1, 1024) * PAGE_SIZE;
+ mutex_unlock(&module_kaslr_mutex);
+ return module_load_offset;
+}
+
void *module_alloc(unsigned long size)
{
gfp_t gfp_mask = GFP_KERNEL;
@@ -42,9 +61,11 @@ void *module_alloc(unsigned long size)
if (PAGE_ALIGN(size) > MODULES_LEN)
return NULL;
- p = __vmalloc_node_range(size, MODULE_ALIGN, MODULES_VADDR, MODULES_END,
- gfp_mask, PAGE_KERNEL_EXEC, VM_DEFER_KMEMLEAK, NUMA_NO_NODE,
- __builtin_return_address(0));
+ p = __vmalloc_node_range(size, MODULE_ALIGN,
+ MODULES_VADDR + get_module_load_offset(),
+ MODULES_END, gfp_mask, PAGE_KERNEL,
+ VM_FLUSH_RESET_PERMS | VM_DEFER_KMEMLEAK,
+ NUMA_NO_NODE, __builtin_return_address(0));
if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) {
vfree(p);
return NULL;
@@ -491,7 +512,7 @@ static int module_alloc_ftrace_hotpatch_trampolines(struct module *me,
start = module_alloc(numpages * PAGE_SIZE);
if (!start)
return -ENOMEM;
- set_memory_ro((unsigned long)start, numpages);
+ set_memory_rox((unsigned long)start, numpages);
end = start + size;
me->arch.trampolines_start = (struct ftrace_hotpatch_trampoline *)start;
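
Note: the practical effect of get_module_load_offset() above is a boot-constant shift of the module area base by 1 to 1024 pages, that is 4 KiB to 4 MiB with 4 KiB pages. A minimal illustration of the resulting allocation window (arithmetic only; names are from the diff):

/* Sketch: the randomized base is fixed at first use, so all modules
 * share one offset; MODULES_END is unchanged, so the usable window
 * shrinks by at most 4 MiB in exchange for an unpredictable base.
 */
static void show_module_range_sketch(void)
{
	unsigned long mod_start = MODULES_VADDR + get_module_load_offset();
	unsigned long mod_end = MODULES_END;

	pr_info("module area: %lx-%lx\n", mod_start, mod_end);
}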
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index c9ab971498d6..cf1b6e8a708d 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -76,7 +76,6 @@ static inline int ctr_stcctm(enum cpumf_ctr_set set, u64 range, u64 *dest)
}
struct cpu_cf_events {
- struct cpumf_ctr_info info;
atomic_t ctr_set[CPUMF_CTR_SET_MAX];
u64 state; /* For perf_event_open SVC */
u64 dev_state; /* For /dev/hwctr */
@@ -95,6 +94,15 @@ static DEFINE_PER_CPU(struct cpu_cf_events, cpu_cf_events);
static unsigned int cfdiag_cpu_speed; /* CPU speed for CF_DIAG trailer */
static debug_info_t *cf_dbg;
+/*
+ * The CPU Measurement query counter information instruction contains
+ * information which varies per machine generation, but is constant and
+ * does not change when running on a particular machine, such as counter
+ * first and second version number. This is needed to determine the size
+ * of counter sets. Extract this information at device driver initialization.
+ */
+static struct cpumf_ctr_info cpumf_ctr_info;
+
#define CF_DIAG_CTRSET_DEF 0xfeef /* Counter set header mark */
/* interval in seconds */
@@ -167,11 +175,10 @@ struct cf_trailer_entry { /* CPU-M CF_DIAG trailer (64 byte) */
/* Create the trailer data at the end of a page. */
static void cfdiag_trailer(struct cf_trailer_entry *te)
{
- struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
struct cpuid cpuid;
- te->cfvn = cpuhw->info.cfvn; /* Counter version numbers */
- te->csvn = cpuhw->info.csvn;
+ te->cfvn = cpumf_ctr_info.cfvn; /* Counter version numbers */
+ te->csvn = cpumf_ctr_info.csvn;
get_cpu_id(&cpuid); /* Machine type */
te->mach_type = cpuid.machine;
@@ -184,50 +191,60 @@ static void cfdiag_trailer(struct cf_trailer_entry *te)
}
/*
- * Return the maximum possible counter set size (in number of 8 byte counters)
- * depending on type and model number.
+ * The number of counters per counter set varies between machine generations,
+ * but is constant when running on a particular machine generation.
+ * Determine each counter set size at device driver initialization and
+ * retrieve it later.
*/
-static size_t cpum_cf_ctrset_size(enum cpumf_ctr_set ctrset,
- struct cpumf_ctr_info *info)
+static size_t cpumf_ctr_setsizes[CPUMF_CTR_SET_MAX];
+static void cpum_cf_make_setsize(enum cpumf_ctr_set ctrset)
{
size_t ctrset_size = 0;
switch (ctrset) {
case CPUMF_CTR_SET_BASIC:
- if (info->cfvn >= 1)
+ if (cpumf_ctr_info.cfvn >= 1)
ctrset_size = 6;
break;
case CPUMF_CTR_SET_USER:
- if (info->cfvn == 1)
+ if (cpumf_ctr_info.cfvn == 1)
ctrset_size = 6;
- else if (info->cfvn >= 3)
+ else if (cpumf_ctr_info.cfvn >= 3)
ctrset_size = 2;
break;
case CPUMF_CTR_SET_CRYPTO:
- if (info->csvn >= 1 && info->csvn <= 5)
+ if (cpumf_ctr_info.csvn >= 1 && cpumf_ctr_info.csvn <= 5)
ctrset_size = 16;
- else if (info->csvn == 6 || info->csvn == 7)
+ else if (cpumf_ctr_info.csvn == 6 || cpumf_ctr_info.csvn == 7)
ctrset_size = 20;
break;
case CPUMF_CTR_SET_EXT:
- if (info->csvn == 1)
+ if (cpumf_ctr_info.csvn == 1)
ctrset_size = 32;
- else if (info->csvn == 2)
+ else if (cpumf_ctr_info.csvn == 2)
ctrset_size = 48;
- else if (info->csvn >= 3 && info->csvn <= 5)
+ else if (cpumf_ctr_info.csvn >= 3 && cpumf_ctr_info.csvn <= 5)
ctrset_size = 128;
- else if (info->csvn == 6 || info->csvn == 7)
+ else if (cpumf_ctr_info.csvn == 6 || cpumf_ctr_info.csvn == 7)
ctrset_size = 160;
break;
case CPUMF_CTR_SET_MT_DIAG:
- if (info->csvn > 3)
+ if (cpumf_ctr_info.csvn > 3)
ctrset_size = 48;
break;
case CPUMF_CTR_SET_MAX:
break;
}
+ cpumf_ctr_setsizes[ctrset] = ctrset_size;
+}
- return ctrset_size;
+/*
+ * Return the maximum possible counter set size (in number of 8 byte counters)
+ * depending on type and model number.
+ */
+static size_t cpum_cf_read_setsize(enum cpumf_ctr_set ctrset)
+{
+ return cpumf_ctr_setsizes[ctrset];
}
/* Read a counter set. The counter set number determines the counter set and
@@ -248,14 +265,13 @@ static size_t cpum_cf_ctrset_size(enum cpumf_ctr_set ctrset,
static size_t cfdiag_getctrset(struct cf_ctrset_entry *ctrdata, int ctrset,
size_t room, bool error_ok)
{
- struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
size_t ctrset_size, need = 0;
int rc = 3; /* Assume write failure */
ctrdata->def = CF_DIAG_CTRSET_DEF;
ctrdata->set = ctrset;
ctrdata->res1 = 0;
- ctrset_size = cpum_cf_ctrset_size(ctrset, &cpuhw->info);
+ ctrset_size = cpum_cf_read_setsize(ctrset);
if (ctrset_size) { /* Save data */
need = ctrset_size * sizeof(u64) + sizeof(*ctrdata);
@@ -269,10 +285,6 @@ static size_t cfdiag_getctrset(struct cf_ctrset_entry *ctrdata, int ctrset,
need = 0;
}
- debug_sprintf_event(cf_dbg, 3,
- "%s ctrset %d ctrset_size %zu cfvn %d csvn %d"
- " need %zd rc %d\n", __func__, ctrset, ctrset_size,
- cpuhw->info.cfvn, cpuhw->info.csvn, need, rc);
return need;
}
@@ -377,40 +389,35 @@ static enum cpumf_ctr_set get_counter_set(u64 event)
return set;
}
-static int validate_ctr_version(const struct hw_perf_event *hwc,
- enum cpumf_ctr_set set)
+static int validate_ctr_version(const u64 config, enum cpumf_ctr_set set)
{
- struct cpu_cf_events *cpuhw;
- int err = 0;
u16 mtdiag_ctl;
-
- cpuhw = &get_cpu_var(cpu_cf_events);
+ int err = 0;
/* check required version for counter sets */
switch (set) {
case CPUMF_CTR_SET_BASIC:
case CPUMF_CTR_SET_USER:
- if (cpuhw->info.cfvn < 1)
+ if (cpumf_ctr_info.cfvn < 1)
err = -EOPNOTSUPP;
break;
case CPUMF_CTR_SET_CRYPTO:
- if ((cpuhw->info.csvn >= 1 && cpuhw->info.csvn <= 5 &&
- hwc->config > 79) ||
- (cpuhw->info.csvn >= 6 && hwc->config > 83))
+ if ((cpumf_ctr_info.csvn >= 1 && cpumf_ctr_info.csvn <= 5 &&
+ config > 79) || (cpumf_ctr_info.csvn >= 6 && config > 83))
err = -EOPNOTSUPP;
break;
case CPUMF_CTR_SET_EXT:
- if (cpuhw->info.csvn < 1)
+ if (cpumf_ctr_info.csvn < 1)
err = -EOPNOTSUPP;
- if ((cpuhw->info.csvn == 1 && hwc->config > 159) ||
- (cpuhw->info.csvn == 2 && hwc->config > 175) ||
- (cpuhw->info.csvn >= 3 && cpuhw->info.csvn <= 5
- && hwc->config > 255) ||
- (cpuhw->info.csvn >= 6 && hwc->config > 287))
+ if ((cpumf_ctr_info.csvn == 1 && config > 159) ||
+ (cpumf_ctr_info.csvn == 2 && config > 175) ||
+ (cpumf_ctr_info.csvn >= 3 && cpumf_ctr_info.csvn <= 5 &&
+ config > 255) ||
+ (cpumf_ctr_info.csvn >= 6 && config > 287))
err = -EOPNOTSUPP;
break;
case CPUMF_CTR_SET_MT_DIAG:
- if (cpuhw->info.csvn <= 3)
+ if (cpumf_ctr_info.csvn <= 3)
err = -EOPNOTSUPP;
/*
* MT-diagnostic counters are read-only. The counter set
@@ -425,35 +432,15 @@ static int validate_ctr_version(const struct hw_perf_event *hwc,
* counter set is enabled and active.
*/
mtdiag_ctl = cpumf_ctr_ctl[CPUMF_CTR_SET_MT_DIAG];
- if (!((cpuhw->info.auth_ctl & mtdiag_ctl) &&
- (cpuhw->info.enable_ctl & mtdiag_ctl) &&
- (cpuhw->info.act_ctl & mtdiag_ctl)))
+ if (!((cpumf_ctr_info.auth_ctl & mtdiag_ctl) &&
+ (cpumf_ctr_info.enable_ctl & mtdiag_ctl) &&
+ (cpumf_ctr_info.act_ctl & mtdiag_ctl)))
err = -EOPNOTSUPP;
break;
case CPUMF_CTR_SET_MAX:
err = -EOPNOTSUPP;
}
- put_cpu_var(cpu_cf_events);
- return err;
-}
-
-static int validate_ctr_auth(const struct hw_perf_event *hwc)
-{
- struct cpu_cf_events *cpuhw;
- int err = 0;
-
- cpuhw = &get_cpu_var(cpu_cf_events);
-
- /* Check authorization for cpu counter sets.
- * If the particular CPU counter set is not authorized,
- * return with -ENOENT in order to fall back to other
- * PMUs that might suffice the event request.
- */
- if (!(hwc->config_base & cpuhw->info.auth_ctl))
- err = -ENOENT;
-
- put_cpu_var(cpu_cf_events);
return err;
}
@@ -471,13 +458,10 @@ static void cpumf_pmu_enable(struct pmu *pmu)
return;
err = lcctl(cpuhw->state | cpuhw->dev_state);
- if (err) {
- pr_err("Enabling the performance measuring unit "
- "failed with rc=%x\n", err);
- return;
- }
-
- cpuhw->flags |= PMU_F_ENABLED;
+ if (err)
+ pr_err("Enabling the performance measuring unit failed with rc=%x\n", err);
+ else
+ cpuhw->flags |= PMU_F_ENABLED;
}
/*
@@ -497,13 +481,10 @@ static void cpumf_pmu_disable(struct pmu *pmu)
inactive = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1);
inactive |= cpuhw->dev_state;
err = lcctl(inactive);
- if (err) {
- pr_err("Disabling the performance measuring unit "
- "failed with rc=%x\n", err);
- return;
- }
-
- cpuhw->flags &= ~PMU_F_ENABLED;
+ if (err)
+ pr_err("Disabling the performance measuring unit failed with rc=%x\n", err);
+ else
+ cpuhw->flags &= ~PMU_F_ENABLED;
}
#define PMC_INIT 0UL
@@ -515,8 +496,6 @@ static void cpum_cf_setup_cpu(void *flags)
switch ((unsigned long)flags) {
case PMC_INIT:
- memset(&cpuhw->info, 0, sizeof(cpuhw->info));
- qctri(&cpuhw->info);
cpuhw->flags |= PMU_F_RESERVED;
break;
@@ -602,7 +581,6 @@ static int __hw_perf_event_init(struct perf_event *event, unsigned int type)
struct perf_event_attr *attr = &event->attr;
struct hw_perf_event *hwc = &event->hw;
enum cpumf_ctr_set set;
- int err = 0;
u64 ev;
switch (type) {
@@ -678,12 +656,15 @@ static int __hw_perf_event_init(struct perf_event *event, unsigned int type)
cpumf_hw_inuse();
event->destroy = hw_perf_event_destroy;
- /* Finally, validate version and authorization of the counter set */
- err = validate_ctr_auth(hwc);
- if (!err)
- err = validate_ctr_version(hwc, set);
-
- return err;
+ /*
+ * Finally, validate version and authorization of the counter set.
+ * If the particular CPU counter set is not authorized,
+ * return with -ENOENT in order to fall back to other
+ * PMUs that might satisfy the event request.
+ */
+ if (!(hwc->config_base & cpumf_ctr_info.auth_ctl))
+ return -ENOENT;
+ return validate_ctr_version(hwc->config, set);
}
/* Events CPU_CYCLES and INSTRUCTIONS can be submitted with two different
@@ -983,7 +964,7 @@ static void cpumf_measurement_alert(struct ext_code ext_code,
/* counter authorization change alert */
if (alert & CPU_MF_INT_CF_CACA)
- qctri(&cpuhw->info);
+ qctri(&cpumf_ctr_info);
/* loss of counter data alert */
if (alert & CPU_MF_INT_CF_LCDA)
@@ -1000,9 +981,14 @@ static int __init cpumf_pmu_init(void)
{
int rc;
- if (!cpum_cf_avail())
+ /* Extract counter measurement facility information */
+ if (!cpum_cf_avail() || qctri(&cpumf_ctr_info))
return -ENODEV;
+ /* Determine and store counter set sizes for later reference */
+ for (rc = CPUMF_CTR_SET_BASIC; rc < CPUMF_CTR_SET_MAX; ++rc)
+ cpum_cf_make_setsize(rc);
+
/*
* Clear bit 15 of cr0 to unauthorize problem-state to
* extract measurement counters
@@ -1269,28 +1255,26 @@ static int cfset_all_start(struct cfset_request *req)
*/
static size_t cfset_needspace(unsigned int sets)
{
- struct cpu_cf_events *cpuhw = get_cpu_ptr(&cpu_cf_events);
size_t bytes = 0;
int i;
for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
if (!(sets & cpumf_ctr_ctl[i]))
continue;
- bytes += cpum_cf_ctrset_size(i, &cpuhw->info) * sizeof(u64) +
+ bytes += cpum_cf_read_setsize(i) * sizeof(u64) +
sizeof(((struct s390_ctrset_setdata *)0)->set) +
sizeof(((struct s390_ctrset_setdata *)0)->no_cnts);
}
bytes = sizeof(((struct s390_ctrset_read *)0)->no_cpus) + nr_cpu_ids *
(bytes + sizeof(((struct s390_ctrset_cpudata *)0)->cpu_nr) +
sizeof(((struct s390_ctrset_cpudata *)0)->no_sets));
- put_cpu_ptr(&cpu_cf_events);
return bytes;
}
static int cfset_all_copy(unsigned long arg, cpumask_t *mask)
{
struct s390_ctrset_read __user *ctrset_read;
- unsigned int cpu, cpus, rc;
+ unsigned int cpu, cpus, rc = 0;
void __user *uptr;
ctrset_read = (struct s390_ctrset_read __user *)arg;
@@ -1304,17 +1288,20 @@ static int cfset_all_copy(unsigned long arg, cpumask_t *mask)
rc |= put_user(cpuhw->sets, &ctrset_cpudata->no_sets);
rc |= copy_to_user(ctrset_cpudata->data, cpuhw->data,
cpuhw->used);
- if (rc)
- return -EFAULT;
+ if (rc) {
+ rc = -EFAULT;
+ goto out;
+ }
uptr += sizeof(struct s390_ctrset_cpudata) + cpuhw->used;
cond_resched();
}
cpus = cpumask_weight(mask);
if (put_user(cpus, &ctrset_read->no_cpus))
- return -EFAULT;
- debug_sprintf_event(cf_dbg, 4, "%s copied %ld\n", __func__,
+ rc = -EFAULT;
+out:
+ debug_sprintf_event(cf_dbg, 4, "%s rc %d copied %ld\n", __func__, rc,
uptr - (void __user *)ctrset_read->data);
- return 0;
+ return rc;
}
static size_t cfset_cpuset_read(struct s390_ctrset_setdata *p, int ctrset,
@@ -1354,7 +1341,7 @@ static void cfset_cpu_read(void *parm)
if (!(p->sets & cpumf_ctr_ctl[set]))
continue; /* Counter set not in list */
- set_size = cpum_cf_ctrset_size(set, &cpuhw->info);
+ set_size = cpum_cf_read_setsize(set);
space = sizeof(cpuhw->data) - cpuhw->used;
space = cfset_cpuset_read(sp, set, set_size, space);
if (space) {
@@ -1385,14 +1372,10 @@ static int cfset_all_read(unsigned long arg, struct cfset_request *req)
static long cfset_ioctl_read(unsigned long arg, struct cfset_request *req)
{
- struct s390_ctrset_read read;
int ret = -ENODATA;
- if (req && req->ctrset) {
- if (copy_from_user(&read, (char __user *)arg, sizeof(read)))
- return -EFAULT;
+ if (req && req->ctrset)
ret = cfset_all_read(arg, req);
- }
return ret;
}
@@ -1569,16 +1552,13 @@ static void cfdiag_read(struct perf_event *event)
static int get_authctrsets(void)
{
- struct cpu_cf_events *cpuhw;
unsigned long auth = 0;
enum cpumf_ctr_set i;
- cpuhw = &get_cpu_var(cpu_cf_events);
for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
- if (cpuhw->info.auth_ctl & cpumf_ctr_ctl[i])
+ if (cpumf_ctr_info.auth_ctl & cpumf_ctr_ctl[i])
auth |= cpumf_ctr_ctl[i];
}
- put_cpu_var(cpu_cf_events);
return auth;
}
@@ -1716,7 +1696,7 @@ static size_t cfdiag_maxsize(struct cpumf_ctr_info *info)
enum cpumf_ctr_set i;
for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
- size_t size = cpum_cf_ctrset_size(i, info);
+ size_t size = cpum_cf_read_setsize(i);
if (size)
max_size += size * sizeof(u64) +
@@ -1750,16 +1730,12 @@ static void cfdiag_get_cpu_speed(void)
static int cfset_init(void)
{
- struct cpumf_ctr_info info;
size_t need;
int rc;
- if (qctri(&info))
- return -ENODEV;
-
cfdiag_get_cpu_speed();
/* Make sure the counter set data fits into predefined buffer. */
- need = cfdiag_maxsize(&info);
+ need = cfdiag_maxsize(&cpumf_ctr_info);
if (need > sizeof(((struct cpu_cf_events *)0)->start)) {
pr_err("Insufficient memory for PMU(cpum_cf_diag) need=%zu\n",
need);
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index e7b867e2f73f..7ef72f5ff52e 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -882,10 +882,6 @@ static int __hw_perf_event_init(struct perf_event *event)
SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_DIAG_MODE;
}
- /* Check and set other sampling flags */
- if (attr->config1 & PERF_CPUM_SF_FULL_BLOCKS)
- SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_FULL_BLOCKS;
-
err = __hw_perf_event_init_rate(event, &si);
if (err)
goto out;
@@ -1293,11 +1289,8 @@ static inline __uint128_t __cdsg(__uint128_t *ptr, __uint128_t old, __uint128_t
* The sampling buffer position are retrieved and saved in the TEAR_REG
* register of the specified perf event.
*
- * Only full sample-data-blocks are processed. Specify the flash_all flag
- * to also walk through partially filled sample-data-blocks. It is ignored
- * if PERF_CPUM_SF_FULL_BLOCKS is set. The PERF_CPUM_SF_FULL_BLOCKS flag
- * enforces the processing of full sample-data-blocks only (trailer entries
- * with the block-full-indicator bit set).
+ * Only full sample-data-blocks are processed. Specify the flush_all flag
+ * to also walk through partially filled sample-data-blocks.
*/
static void hw_perf_event_update(struct perf_event *event, int flush_all)
{
@@ -1315,9 +1308,6 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
if (SAMPL_DIAG_MODE(&event->hw))
return;
- if (flush_all && SDB_FULL_BLOCKS(hwc))
- flush_all = 0;
-
sdbt = (unsigned long *) TEAR_REG(hwc);
done = event_overflow = sampl_overflow = num_sdb = 0;
while (!done) {
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 67df64ef4839..87ca3a727604 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -136,12 +136,12 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
p->thread.last_break = 1;
frame->sf.back_chain = 0;
- frame->sf.gprs[5] = (unsigned long)frame + sizeof(struct stack_frame);
- frame->sf.gprs[6] = (unsigned long)p;
+ frame->sf.gprs[11 - 6] = (unsigned long)&frame->childregs;
+ frame->sf.gprs[12 - 6] = (unsigned long)p;
/* new return point is ret_from_fork */
- frame->sf.gprs[8] = (unsigned long)ret_from_fork;
+ frame->sf.gprs[14 - 6] = (unsigned long)ret_from_fork;
/* fake return stack for resume(), don't go back to schedule */
- frame->sf.gprs[9] = (unsigned long)frame;
+ frame->sf.gprs[15 - 6] = (unsigned long)frame;
/* Store access registers to kernel stack of new process. */
if (unlikely(args->fn)) {
@@ -149,8 +149,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
memset(&frame->childregs, 0, sizeof(struct pt_regs));
frame->childregs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_IO |
PSW_MASK_EXT | PSW_MASK_MCHECK;
- frame->childregs.psw.addr =
- (unsigned long)__ret_from_fork;
frame->childregs.gprs[9] = (unsigned long)args->fn;
frame->childregs.gprs[10] = (unsigned long)args->fn_arg;
frame->childregs.orig_gpr2 = -1;
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index a194611ba88c..0a999c8226d7 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -364,21 +364,3 @@ const struct seq_operations cpuinfo_op = {
.stop = c_stop,
.show = show_cpuinfo,
};
-
-int s390_isolate_bp(void)
-{
- if (!test_facility(82))
- return -EOPNOTSUPP;
- set_thread_flag(TIF_ISOLATE_BP);
- return 0;
-}
-EXPORT_SYMBOL(s390_isolate_bp);
-
-int s390_isolate_bp_guest(void)
-{
- if (!test_facility(82))
- return -EOPNOTSUPP;
- set_thread_flag(TIF_ISOLATE_BP_GUEST);
- return 0;
-}
-EXPORT_SYMBOL(s390_isolate_bp_guest);
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
index 4a22163962eb..88087a32ebc6 100644
--- a/arch/s390/kernel/reipl.S
+++ b/arch/s390/kernel/reipl.S
@@ -19,7 +19,7 @@
# r2 = Function to be called after store status
# r3 = Parameter for function
#
-ENTRY(store_status)
+SYM_CODE_START(store_status)
/* Save register one and load save area base */
stg %r1,__LC_SAVE_AREA_RESTART
/* General purpose registers */
@@ -61,7 +61,7 @@ ENTRY(store_status)
stpx 0(%r1)
/* Clock comparator - seven bytes */
lghi %r1,__LC_CLOCK_COMP_SAVE_AREA
- larl %r4,.Lclkcmp
+ larl %r4,clkcmp
stckc 0(%r4)
mvc 1(7,%r1),1(%r4)
/* Program status word */
@@ -73,9 +73,9 @@ ENTRY(store_status)
lgr %r9,%r2
lgr %r2,%r3
BR_EX %r9
-ENDPROC(store_status)
+SYM_CODE_END(store_status)
.section .bss
- .align 8
-.Lclkcmp: .quad 0x0000000000000000
+ .balign 8
+SYM_DATA_LOCAL(clkcmp, .quad 0x0000000000000000)
.previous
diff --git a/arch/s390/kernel/relocate_kernel.S b/arch/s390/kernel/relocate_kernel.S
index a9a1a6f45375..0ae297c82afd 100644
--- a/arch/s390/kernel/relocate_kernel.S
+++ b/arch/s390/kernel/relocate_kernel.S
@@ -26,53 +26,51 @@
*/
.text
-ENTRY(relocate_kernel)
- basr %r13,0 # base address
- .base:
- lghi %r7,PAGE_SIZE # load PAGE_SIZE in r7
- lghi %r9,PAGE_SIZE # load PAGE_SIZE in r9
- lg %r5,0(%r2) # read another word for indirection page
- aghi %r2,8 # increment pointer
- tml %r5,0x1 # is it a destination page?
- je .indir_check # NO, goto "indir_check"
- lgr %r6,%r5 # r6 = r5
- nill %r6,0xf000 # mask it out and...
- j .base # ...next iteration
- .indir_check:
- tml %r5,0x2 # is it a indirection page?
- je .done_test # NO, goto "done_test"
- nill %r5,0xf000 # YES, mask out,
- lgr %r2,%r5 # move it into the right register,
- j .base # and read next...
- .done_test:
- tml %r5,0x4 # is it the done indicator?
- je .source_test # NO! Well, then it should be the source indicator...
- j .done # ok, lets finish it here...
- .source_test:
- tml %r5,0x8 # it should be a source indicator...
- je .base # NO, ignore it...
- lgr %r8,%r5 # r8 = r5
- nill %r8,0xf000 # masking
- 0: mvcle %r6,%r8,0x0 # copy PAGE_SIZE bytes from r8 to r6 - pad with 0
- jo 0b
- j .base
- .done:
- lgr %r0,%r4 # subcode
- cghi %r3,0
- je .diag
- la %r4,load_psw-.base(%r13) # load psw-address into the register
- o %r3,4(%r4) # or load address into psw
- st %r3,4(%r4)
- mvc 0(8,%r0),0(%r4) # copy psw to absolute address 0
- .diag:
- diag %r0,%r0,0x308
-ENDPROC(relocate_kernel)
+SYM_CODE_START(relocate_kernel)
+ basr %r13,0 # base address
+.base:
+ lghi %r7,PAGE_SIZE # load PAGE_SIZE in r7
+ lghi %r9,PAGE_SIZE # load PAGE_SIZE in r9
+ lg %r5,0(%r2) # read another word for indirection page
+ aghi %r2,8 # increment pointer
+ tml %r5,0x1 # is it a destination page?
+ je .indir_check # NO, goto "indir_check"
+ lgr %r6,%r5 # r6 = r5
+ nill %r6,0xf000 # mask it out and...
+ j .base # ...next iteration
+.indir_check:
+ tml %r5,0x2 # is it an indirection page?
+ je .done_test # NO, goto "done_test"
+ nill %r5,0xf000 # YES, mask out,
+ lgr %r2,%r5 # move it into the right register,
+ j .base # and read next...
+.done_test:
+ tml %r5,0x4 # is it the done indicator?
+ je .source_test # NO! Well, then it should be the source indicator...
+ j .done # ok, let's finish it here...
+.source_test:
+ tml %r5,0x8 # it should be a source indicator...
+ je .base # NO, ignore it...
+ lgr %r8,%r5 # r8 = r5
+ nill %r8,0xf000 # masking
+0: mvcle %r6,%r8,0x0 # copy PAGE_SIZE bytes from r8 to r6 - pad with 0
+ jo 0b
+ j .base
+.done:
+ lgr %r0,%r4 # subcode
+ cghi %r3,0
+ je .diag
+ la %r4,load_psw-.base(%r13) # load psw-address into the register
+ o %r3,4(%r4) # or load address into psw
+ st %r3,4(%r4)
+ mvc 0(8,%r0),0(%r4) # copy psw to absolute address 0
+.diag:
+ diag %r0,%r0,0x308
+SYM_CODE_END(relocate_kernel)
- .align 8
- load_psw:
- .long 0x00080000,0x80000000
- relocate_kernel_end:
- .align 8
- .globl relocate_kernel_len
- relocate_kernel_len:
- .quad relocate_kernel_end - relocate_kernel
+ .balign 8
+SYM_DATA_START_LOCAL(load_psw)
+ .long 0x00080000,0x80000000
+SYM_DATA_END_LABEL(load_psw, SYM_L_LOCAL, relocate_kernel_end)
+ .balign 8
+SYM_DATA(relocate_kernel_len, .quad relocate_kernel_end - relocate_kernel)
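
Note: the flag bits tested in relocate_kernel above (0x1, 0x2, 0x4, 0x8) follow the generic kexec indirection-page encoding. A C rendering of the same walk may help when reading the assembly (a sketch; the IND_* names come from include/linux/kexec.h):

#include <linux/kexec.h>
#include <linux/string.h>

/* Sketch of the walk relocate_kernel performs: each entry carries a
 * flag in its low bits and a page-aligned address in the rest.
 */
static void walk_kimage_sketch(unsigned long *entry)
{
	unsigned long dest = 0;

	for (;;) {
		unsigned long e = *entry++;

		if (e & IND_DESTINATION)	/* 0x1: set copy target */
			dest = e & PAGE_MASK;
		else if (e & IND_INDIRECTION)	/* 0x2: chain to next table */
			entry = (unsigned long *)(e & PAGE_MASK);
		else if (e & IND_DONE)		/* 0x4: finished */
			break;
		else if (e & IND_SOURCE) {	/* 0x8: copy one page */
			memcpy((void *)dest, (void *)(e & PAGE_MASK), PAGE_SIZE);
			dest += PAGE_SIZE;
		}
	}
}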
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 4259b6c50516..fe10da1a271e 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -74,7 +74,7 @@
#include <asm/numa.h>
#include <asm/alternative.h>
#include <asm/nospec-branch.h>
-#include <asm/mem_detect.h>
+#include <asm/physmem_info.h>
#include <asm/maccess.h>
#include <asm/uv.h>
#include <asm/asm-offsets.h>
@@ -147,14 +147,10 @@ static u32 __amode31_ref *__ctl_duct = __ctl_duct_amode31;
int __bootdata(noexec_disabled);
unsigned long __bootdata(ident_map_size);
-struct mem_detect_info __bootdata(mem_detect);
-struct initrd_data __bootdata(initrd_data);
-unsigned long __bootdata(pgalloc_pos);
-unsigned long __bootdata(pgalloc_end);
-unsigned long __bootdata(pgalloc_low);
+struct physmem_info __bootdata(physmem_info);
unsigned long __bootdata_preserved(__kaslr_offset);
-unsigned long __bootdata(__amode31_base);
+int __bootdata_preserved(__kaslr_enabled);
unsigned int __bootdata_preserved(zlib_dfltcc_support);
EXPORT_SYMBOL(zlib_dfltcc_support);
u64 __bootdata_preserved(stfle_fac_list[16]);
@@ -385,39 +381,27 @@ void stack_free(unsigned long stack)
#endif
}
-int __init arch_early_irq_init(void)
+void __init __noreturn arch_call_rest_init(void)
{
- unsigned long stack;
-
- stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
- if (!stack)
- panic("Couldn't allocate async stack");
- S390_lowcore.async_stack = stack + STACK_INIT_OFFSET;
- return 0;
+ smp_reinit_ipl_cpu();
+ rest_init();
}
-void __init __noreturn arch_call_rest_init(void)
+static unsigned long __init stack_alloc_early(void)
{
unsigned long stack;
- smp_reinit_ipl_cpu();
- stack = stack_alloc();
- if (!stack)
- panic("Couldn't allocate kernel stack");
- current->stack = (void *) stack;
-#ifdef CONFIG_VMAP_STACK
- current->stack_vm_area = (void *) stack;
-#endif
- set_task_stack_end_magic(current);
- stack += STACK_INIT_OFFSET;
- S390_lowcore.kernel_stack = stack;
- call_on_stack_noreturn(rest_init, stack);
+ stack = (unsigned long)memblock_alloc(THREAD_SIZE, THREAD_SIZE);
+ if (!stack) {
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, THREAD_SIZE, THREAD_SIZE);
+ }
+ return stack;
}
static void __init setup_lowcore(void)
{
struct lowcore *lc, *abs_lc;
- unsigned long mcck_stack;
/*
* Setup lowcore for boot cpu
@@ -441,8 +425,6 @@ static void __init setup_lowcore(void)
lc->io_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
lc->io_new_psw.addr = (unsigned long) io_int_handler;
lc->clock_comparator = clock_comparator_max;
- lc->nodat_stack = ((unsigned long) &init_thread_union)
- + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->current_task = (unsigned long)&init_task;
lc->lpp = LPP_MAGIC;
lc->machine_flags = S390_lowcore.machine_flags;
@@ -455,17 +437,15 @@ static void __init setup_lowcore(void)
lc->steal_timer = S390_lowcore.steal_timer;
lc->last_update_timer = S390_lowcore.last_update_timer;
lc->last_update_clock = S390_lowcore.last_update_clock;
-
/*
* Allocate the global restart stack which is the same for
- * all CPUs in cast *one* of them does a PSW restart.
+ * all CPUs in case *one* of them does a PSW restart.
*/
- restart_stack = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
- if (!restart_stack)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, THREAD_SIZE, THREAD_SIZE);
- restart_stack += STACK_INIT_OFFSET;
-
+ restart_stack = (void *)(stack_alloc_early() + STACK_INIT_OFFSET);
+ lc->mcck_stack = stack_alloc_early() + STACK_INIT_OFFSET;
+ lc->async_stack = stack_alloc_early() + STACK_INIT_OFFSET;
+ lc->nodat_stack = stack_alloc_early() + STACK_INIT_OFFSET;
+ lc->kernel_stack = S390_lowcore.kernel_stack;
/*
* Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
* restart data to the absolute zero lowcore. This is necessary if
@@ -476,13 +456,6 @@ static void __init setup_lowcore(void)
lc->restart_data = 0;
lc->restart_source = -1U;
__ctl_store(lc->cregs_save_area, 0, 15);
-
- mcck_stack = (unsigned long)memblock_alloc(THREAD_SIZE, THREAD_SIZE);
- if (!mcck_stack)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, THREAD_SIZE, THREAD_SIZE);
- lc->mcck_stack = mcck_stack + STACK_INIT_OFFSET;
-
lc->spinlock_lockval = arch_spin_lockval(0);
lc->spinlock_index = 0;
arch_spin_lock_setup(0);
@@ -635,7 +608,11 @@ static struct notifier_block kdump_mem_nb = {
*/
static void __init reserve_pgtables(void)
{
- memblock_reserve(pgalloc_pos, pgalloc_end - pgalloc_pos);
+ unsigned long start, end;
+ struct reserved_range *range;
+
+ for_each_physmem_reserved_type_range(RR_VMEM, range, &start, &end)
+ memblock_reserve(start, end - start);
}
/*
@@ -712,13 +689,13 @@ static void __init reserve_crashkernel(void)
*/
static void __init reserve_initrd(void)
{
-#ifdef CONFIG_BLK_DEV_INITRD
- if (!initrd_data.start || !initrd_data.size)
+ unsigned long addr, size;
+
+ if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD) || !get_physmem_reserved(RR_INITRD, &addr, &size))
return;
- initrd_start = (unsigned long)__va(initrd_data.start);
- initrd_end = initrd_start + initrd_data.size;
- memblock_reserve(initrd_data.start, initrd_data.size);
-#endif
+ initrd_start = (unsigned long)__va(addr);
+ initrd_end = initrd_start + size;
+ memblock_reserve(addr, size);
}
/*
@@ -730,72 +707,40 @@ static void __init reserve_certificate_list(void)
memblock_reserve(ipl_cert_list_addr, ipl_cert_list_size);
}
-static void __init reserve_mem_detect_info(void)
+static void __init reserve_physmem_info(void)
{
- unsigned long start, size;
+ unsigned long addr, size;
- get_mem_detect_reserved(&start, &size);
- if (size)
- memblock_reserve(start, size);
+ if (get_physmem_reserved(RR_MEM_DETECT_EXTENDED, &addr, &size))
+ memblock_reserve(addr, size);
}
-static void __init free_mem_detect_info(void)
+static void __init free_physmem_info(void)
{
- unsigned long start, size;
+ unsigned long addr, size;
- get_mem_detect_reserved(&start, &size);
- if (size)
- memblock_phys_free(start, size);
+ if (get_physmem_reserved(RR_MEM_DETECT_EXTENDED, &addr, &size))
+ memblock_phys_free(addr, size);
}
-static const char * __init get_mem_info_source(void)
-{
- switch (mem_detect.info_source) {
- case MEM_DETECT_SCLP_STOR_INFO:
- return "sclp storage info";
- case MEM_DETECT_DIAG260:
- return "diag260";
- case MEM_DETECT_SCLP_READ_INFO:
- return "sclp read info";
- case MEM_DETECT_BIN_SEARCH:
- return "binary search";
- }
- return "none";
-}
-
-static void __init memblock_add_mem_detect_info(void)
+static void __init memblock_add_physmem_info(void)
{
unsigned long start, end;
int i;
pr_debug("physmem info source: %s (%hhd)\n",
- get_mem_info_source(), mem_detect.info_source);
+ get_physmem_info_source(), physmem_info.info_source);
/* keep memblock lists close to the kernel */
memblock_set_bottom_up(true);
- for_each_mem_detect_usable_block(i, &start, &end)
+ for_each_physmem_usable_range(i, &start, &end)
memblock_add(start, end - start);
- for_each_mem_detect_block(i, &start, &end)
+ for_each_physmem_online_range(i, &start, &end)
memblock_physmem_add(start, end - start);
memblock_set_bottom_up(false);
memblock_set_node(0, ULONG_MAX, &memblock.memory, 0);
}
/*
- * Check for initrd being in usable memory
- */
-static void __init check_initrd(void)
-{
-#ifdef CONFIG_BLK_DEV_INITRD
- if (initrd_data.start && initrd_data.size &&
- !memblock_is_region_memory(initrd_data.start, initrd_data.size)) {
- pr_err("The initial RAM disk does not fit into the memory\n");
- memblock_phys_free(initrd_data.start, initrd_data.size);
- initrd_start = initrd_end = 0;
- }
-#endif
-}
-
-/*
* Reserve memory used for lowcore/command line/kernel image.
*/
static void __init reserve_kernel(void)
@@ -803,7 +748,7 @@ static void __init reserve_kernel(void)
memblock_reserve(0, STARTUP_NORMAL_OFFSET);
memblock_reserve(OLDMEM_BASE, sizeof(unsigned long));
memblock_reserve(OLDMEM_SIZE, sizeof(unsigned long));
- memblock_reserve(__amode31_base, __eamode31 - __samode31);
+ memblock_reserve(physmem_info.reserved[RR_AMODE31].start, __eamode31 - __samode31);
memblock_reserve(__pa(sclp_early_sccb), EXT_SCCB_READ_SCP);
memblock_reserve(__pa(_stext), _end - _stext);
}
@@ -825,13 +770,13 @@ static void __init setup_memory(void)
static void __init relocate_amode31_section(void)
{
unsigned long amode31_size = __eamode31 - __samode31;
- long amode31_offset = __amode31_base - __samode31;
+ long amode31_offset = physmem_info.reserved[RR_AMODE31].start - __samode31;
long *ptr;
pr_info("Relocating AMODE31 section of size 0x%08lx\n", amode31_size);
/* Move original AMODE31 section to the new one */
- memmove((void *)__amode31_base, (void *)__samode31, amode31_size);
+ memmove((void *)physmem_info.reserved[RR_AMODE31].start, (void *)__samode31, amode31_size);
/* Zero out the old AMODE31 section to catch invalid accesses within it */
memset((void *)__samode31, 0, amode31_size);
@@ -997,14 +942,14 @@ void __init setup_arch(char **cmdline_p)
reserve_kernel();
reserve_initrd();
reserve_certificate_list();
- reserve_mem_detect_info();
+ reserve_physmem_info();
memblock_set_current_limit(ident_map_size);
memblock_allow_resize();
/* Get information about *all* installed memory */
- memblock_add_mem_detect_info();
+ memblock_add_physmem_info();
- free_mem_detect_info();
+ free_physmem_info();
setup_memory_end();
memblock_dump_all();
setup_memory();
@@ -1017,7 +962,6 @@ void __init setup_arch(char **cmdline_p)
if (MACHINE_HAS_EDAT2)
hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
- check_initrd();
reserve_crashkernel();
#ifdef CONFIG_CRASH_DUMP
/*
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 70a84748f806..726de4f4df01 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -280,9 +280,8 @@ static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
cpu = pcpu - pcpu_devices;
lc = lowcore_ptr[cpu];
- lc->kernel_stack = (unsigned long) task_stack_page(tsk)
- + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
- lc->current_task = (unsigned long) tsk;
+ lc->kernel_stack = (unsigned long)task_stack_page(tsk) + STACK_INIT_OFFSET;
+ lc->current_task = (unsigned long)tsk;
lc->lpp = LPP_MAGIC;
lc->current_pid = tsk->pid;
lc->user_timer = tsk->thread.user_timer;
@@ -348,7 +347,6 @@ static void pcpu_delegate(struct pcpu *pcpu,
abs_lc->restart_source = source_cpu;
put_abs_lowcore(abs_lc);
}
- __bpon();
asm volatile(
"0: sigp 0,%0,%2 # sigp restart to target cpu\n"
" brc 2,0b # busy, try again\n"
@@ -986,7 +984,6 @@ void __cpu_die(unsigned int cpu)
void __noreturn cpu_die(void)
{
idle_task_exit();
- __bpon();
pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
for (;;) ;
}
@@ -1302,9 +1299,9 @@ int __init smp_reinit_ipl_cpu(void)
local_mcck_enable();
local_irq_restore(flags);
- free_pages(lc_ipl->async_stack - STACK_INIT_OFFSET, THREAD_SIZE_ORDER);
memblock_free_late(__pa(lc_ipl->mcck_stack - STACK_INIT_OFFSET), THREAD_SIZE);
+ memblock_free_late(__pa(lc_ipl->async_stack - STACK_INIT_OFFSET), THREAD_SIZE);
+ memblock_free_late(__pa(lc_ipl->nodat_stack - STACK_INIT_OFFSET), THREAD_SIZE);
memblock_free_late(__pa(lc_ipl), sizeof(*lc_ipl));
-
return 0;
}
diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl
index 799147658dee..b68f47541169 100644
--- a/arch/s390/kernel/syscalls/syscall.tbl
+++ b/arch/s390/kernel/syscalls/syscall.tbl
@@ -449,7 +449,7 @@
444 common landlock_create_ruleset sys_landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self sys_landlock_restrict_self
-# 447 reserved for memfd_secret
+447 common memfd_secret sys_memfd_secret sys_memfd_secret
448 common process_mrelease sys_process_mrelease sys_process_mrelease
449 common futex_waitv sys_futex_waitv sys_futex_waitv
450 common set_mempolicy_home_node sys_set_mempolicy_home_node sys_set_mempolicy_home_node
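
With slot 447 wired up, userspace can reach the new call through syscall(2). A minimal usage sketch, not part of this patch (hypothetical test program; the kernel must have secretmem available, see the secretmem.enable boot parameter):

	#include <sys/syscall.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = syscall(__NR_memfd_secret, 0);	/* flags == 0 */
		void *p;

		if (fd < 0)
			return 1;
		if (ftruncate(fd, 4096))
			return 1;
		/* while mapped, the pages are dropped from the direct map */
		p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
		return p == MAP_FAILED;
	}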
diff --git a/arch/s390/kernel/text_amode31.S b/arch/s390/kernel/text_amode31.S
index e0f01ce251f5..14c6d25c035f 100644
--- a/arch/s390/kernel/text_amode31.S
+++ b/arch/s390/kernel/text_amode31.S
@@ -27,7 +27,7 @@
/*
* int _diag14_amode31(unsigned long rx, unsigned long ry1, unsigned long subcode)
*/
-ENTRY(_diag14_amode31)
+SYM_FUNC_START(_diag14_amode31)
lgr %r1,%r2
lgr %r2,%r3
lgr %r3,%r4
@@ -42,12 +42,12 @@ ENTRY(_diag14_amode31)
lgfr %r2,%r5
BR_EX_AMODE31_r14
EX_TABLE_AMODE31(.Ldiag14_ex, .Ldiag14_fault)
-ENDPROC(_diag14_amode31)
+SYM_FUNC_END(_diag14_amode31)
/*
* int _diag210_amode31(struct diag210 *addr)
*/
-ENTRY(_diag210_amode31)
+SYM_FUNC_START(_diag210_amode31)
lgr %r1,%r2
lhi %r2,-1
sam31
@@ -60,12 +60,12 @@ ENTRY(_diag210_amode31)
lgfr %r2,%r2
BR_EX_AMODE31_r14
EX_TABLE_AMODE31(.Ldiag210_ex, .Ldiag210_fault)
-ENDPROC(_diag210_amode31)
+SYM_FUNC_END(_diag210_amode31)
/*
* int _diag8c_amode31(struct diag8c *addr, struct ccw_dev_id *devno, size_t len)
*/
-ENTRY(_diag8c_amode31)
+SYM_FUNC_START(_diag8c_amode31)
llgf %r3,0(%r3)
sam31
diag %r2,%r4,0x8c
@@ -74,11 +74,11 @@ ENTRY(_diag8c_amode31)
lgfr %r2,%r3
BR_EX_AMODE31_r14
EX_TABLE_AMODE31(.Ldiag8c_ex, .Ldiag8c_ex)
-ENDPROC(_diag8c_amode31)
+SYM_FUNC_END(_diag8c_amode31)
/*
* int _diag26c_amode31(void *req, void *resp, enum diag26c_sc subcode)
*/
-ENTRY(_diag26c_amode31)
+SYM_FUNC_START(_diag26c_amode31)
lghi %r5,-EOPNOTSUPP
sam31
diag %r2,%r4,0x26c
@@ -87,42 +87,42 @@ ENTRY(_diag26c_amode31)
lgfr %r2,%r5
BR_EX_AMODE31_r14
EX_TABLE_AMODE31(.Ldiag26c_ex, .Ldiag26c_ex)
-ENDPROC(_diag26c_amode31)
+SYM_FUNC_END(_diag26c_amode31)
/*
* void _diag0c_amode31(struct hypfs_diag0c_entry *entry)
*/
-ENTRY(_diag0c_amode31)
+SYM_FUNC_START(_diag0c_amode31)
sam31
diag %r2,%r2,0x0c
sam64
BR_EX_AMODE31_r14
-ENDPROC(_diag0c_amode31)
+SYM_FUNC_END(_diag0c_amode31)
/*
* void _diag308_reset_amode31(void)
*
* Calls diag 308 subcode 1 and continues execution
*/
-ENTRY(_diag308_reset_amode31)
- larl %r4,.Lctlregs # Save control registers
+SYM_FUNC_START(_diag308_reset_amode31)
+ larl %r4,ctlregs # Save control registers
stctg %c0,%c15,0(%r4)
lg %r2,0(%r4) # Disable lowcore protection
nilh %r2,0xefff
- larl %r4,.Lctlreg0
+ larl %r4,ctlreg0
stg %r2,0(%r4)
lctlg %c0,%c0,0(%r4)
- larl %r4,.Lfpctl # Floating point control register
+ larl %r4,fpctl # Floating point control register
stfpc 0(%r4)
- larl %r4,.Lprefix # Save prefix register
+ larl %r4,prefix # Save prefix register
stpx 0(%r4)
- larl %r4,.Lprefix_zero # Set prefix register to 0
+ larl %r4,prefix_zero # Set prefix register to 0
spx 0(%r4)
- larl %r4,.Lcontinue_psw # Save PSW flags
+ larl %r4,continue_psw # Save PSW flags
epsw %r2,%r3
stm %r2,%r3,0(%r4)
larl %r4,.Lrestart_part2 # Setup restart PSW at absolute 0
- larl %r3,.Lrestart_diag308_psw
+ larl %r3,restart_diag308_psw
og %r4,0(%r3) # Save PSW
lghi %r3,0
sturg %r4,%r3 # Use sturg, because of large pages
@@ -134,39 +134,26 @@ ENTRY(_diag308_reset_amode31)
lhi %r1,2 # Use mode 2 = ESAME (dump)
sigp %r1,%r0,SIGP_SET_ARCHITECTURE # Switch to ESAME mode
sam64 # Switch to 64 bit addressing mode
- larl %r4,.Lctlregs # Restore control registers
+ larl %r4,ctlregs # Restore control registers
lctlg %c0,%c15,0(%r4)
- larl %r4,.Lfpctl # Restore floating point ctl register
+ larl %r4,fpctl # Restore floating point ctl register
lfpc 0(%r4)
- larl %r4,.Lprefix # Restore prefix register
+ larl %r4,prefix # Restore prefix register
spx 0(%r4)
- larl %r4,.Lcontinue_psw # Restore PSW flags
+ larl %r4,continue_psw # Restore PSW flags
larl %r2,.Lcontinue
stg %r2,8(%r4)
lpswe 0(%r4)
.Lcontinue:
BR_EX_AMODE31_r14
-ENDPROC(_diag308_reset_amode31)
+SYM_FUNC_END(_diag308_reset_amode31)
.section .amode31.data,"aw",@progbits
-.align 8
-.Lrestart_diag308_psw:
- .long 0x00080000,0x80000000
-
-.align 8
-.Lcontinue_psw:
- .quad 0,0
-
-.align 8
-.Lctlreg0:
- .quad 0
-.Lctlregs:
- .rept 16
- .quad 0
- .endr
-.Lfpctl:
- .long 0
-.Lprefix:
- .long 0
-.Lprefix_zero:
- .long 0
+ .balign 8
+SYM_DATA_LOCAL(restart_diag308_psw, .long 0x00080000,0x80000000)
+SYM_DATA_LOCAL(continue_psw, .quad 0,0)
+SYM_DATA_LOCAL(ctlreg0, .quad 0)
+SYM_DATA_LOCAL(ctlregs, .fill 16,8,0)
+SYM_DATA_LOCAL(fpctl, .long 0)
+SYM_DATA_LOCAL(prefix, .long 0)
+SYM_DATA_LOCAL(prefix_zero, .long 0)
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 72af753d1bba..9fd19530c9a5 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -637,16 +637,6 @@ static struct ctl_table topology_ctl_table[] = {
{ },
};
-static struct ctl_table topology_dir_table[] = {
- {
- .procname = "s390",
- .maxlen = 0,
- .mode = 0555,
- .child = topology_ctl_table,
- },
- { },
-};
-
static int __init topology_init(void)
{
struct device *dev_root;
@@ -657,7 +647,7 @@ static int __init topology_init(void)
set_topology_timer();
else
topology_update_polarization_simple();
- register_sysctl_table(topology_dir_table);
+ register_sysctl("s390", topology_ctl_table);
dev_root = bus_get_dev_root(&cpu_subsys);
if (dev_root) {
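
The same conversion recurs in the cmm.c and pgalloc.c hunks further down: the single-entry directory table with a .child pointer is dropped, and register_sysctl() receives the parent path as a string instead. The general shape of the conversion, as a sketch (names made up for illustration):

	static int example_value;

	static struct ctl_table example_table[] = {
		{
			.procname	= "example_knob",
			.data		= &example_value,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{ }
	};

	/* before: register_sysctl_table(dir_table_with_child_pointer) */
	static int __init example_init(void)
	{
		return register_sysctl("s390", example_table) ? 0 : -ENOMEM;
	}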
diff --git a/arch/s390/kernel/vdso32/vdso_user_wrapper.S b/arch/s390/kernel/vdso32/vdso_user_wrapper.S
index 3f42f27f978c..2e645003fdaf 100644
--- a/arch/s390/kernel/vdso32/vdso_user_wrapper.S
+++ b/arch/s390/kernel/vdso32/vdso_user_wrapper.S
@@ -1,12 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/linkage.h>
#include <asm/unistd.h>
#include <asm/dwarf.h>
.macro vdso_syscall func,syscall
.globl __kernel_compat_\func
.type __kernel_compat_\func,@function
- .align 8
+ __ALIGN
__kernel_compat_\func:
CFI_STARTPROC
svc \syscall
diff --git a/arch/s390/kernel/vdso64/vdso_user_wrapper.S b/arch/s390/kernel/vdso64/vdso_user_wrapper.S
index 97f0c0a669a5..57f62596e53b 100644
--- a/arch/s390/kernel/vdso64/vdso_user_wrapper.S
+++ b/arch/s390/kernel/vdso64/vdso_user_wrapper.S
@@ -1,4 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/linkage.h>
#include <asm/vdso.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>
@@ -16,7 +17,7 @@
.macro vdso_func func
.globl __kernel_\func
.type __kernel_\func,@function
- .align 8
+ __ALIGN
__kernel_\func:
CFI_STARTPROC
aghi %r15,-WRAPPER_FRAME_SIZE
@@ -41,7 +42,7 @@ vdso_func getcpu
.macro vdso_syscall func,syscall
.globl __kernel_\func
.type __kernel_\func,@function
- .align 8
+ __ALIGN
__kernel_\func:
CFI_STARTPROC
svc \syscall
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index b653ba8d51e6..2ae201ebf90b 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -14,6 +14,8 @@
#define BSS_FIRST_SECTIONS *(.bss..swapper_pg_dir) \
*(.bss..invalid_pg_dir)
+#define RO_EXCEPTION_TABLE_ALIGN 16
+
/* Handle ro_after_init data on our own. */
#define RO_AFTER_INIT_DATA
@@ -66,7 +68,6 @@ SECTIONS
*(.data..ro_after_init)
JUMP_TABLE_DATA
} :data
- EXCEPTION_TABLE(16)
. = ALIGN(PAGE_SIZE);
__end_ro_after_init = .;
@@ -219,6 +220,13 @@ SECTIONS
QUAD(init_mm)
QUAD(swapper_pg_dir)
QUAD(invalid_pg_dir)
+#ifdef CONFIG_KASAN
+ QUAD(kasan_early_shadow_page)
+ QUAD(kasan_early_shadow_pte)
+ QUAD(kasan_early_shadow_pmd)
+ QUAD(kasan_early_shadow_pud)
+ QUAD(kasan_early_shadow_p4d)
+#endif
} :NONE
/* Debugging sections. */
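
Defining RO_EXCEPTION_TABLE_ALIGN lets the generic linker script emit the exception table inside RO_DATA, which is why the explicit EXCEPTION_TABLE(16) line above can go. Roughly, the generic mechanism looks like this (paraphrased sketch of include/asm-generic/vmlinux.lds.h, not verified against this exact tree):

	#ifdef RO_EXCEPTION_TABLE_ALIGN
	#define RO_EXCEPTION_TABLE	EXCEPTION_TABLE(RO_EXCEPTION_TABLE_ALIGN)
	#else
	#define RO_EXCEPTION_TABLE
	#endif
	/* RO_DATA() expands RO_EXCEPTION_TABLE within its read-only output */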
diff --git a/arch/s390/lib/mem.S b/arch/s390/lib/mem.S
index dc0874f2e203..5a9a55de2e10 100644
--- a/arch/s390/lib/mem.S
+++ b/arch/s390/lib/mem.S
@@ -14,8 +14,7 @@
/*
* void *memmove(void *dest, const void *src, size_t n)
*/
-WEAK(memmove)
-ENTRY(__memmove)
+SYM_FUNC_START(__memmove)
ltgr %r4,%r4
lgr %r1,%r2
jz .Lmemmove_exit
@@ -48,7 +47,10 @@ ENTRY(__memmove)
BR_EX %r14
.Lmemmove_mvc:
mvc 0(1,%r1),0(%r3)
-ENDPROC(__memmove)
+SYM_FUNC_END(__memmove)
+EXPORT_SYMBOL(__memmove)
+
+SYM_FUNC_ALIAS(memmove, __memmove)
EXPORT_SYMBOL(memmove)
/*
@@ -66,8 +68,7 @@ EXPORT_SYMBOL(memmove)
* return __builtin_memset(s, c, n);
* }
*/
-WEAK(memset)
-ENTRY(__memset)
+SYM_FUNC_START(__memset)
ltgr %r4,%r4
jz .Lmemset_exit
ltgr %r3,%r3
@@ -111,7 +112,10 @@ ENTRY(__memset)
xc 0(1,%r1),0(%r1)
.Lmemset_mvc:
mvc 1(1,%r1),0(%r1)
-ENDPROC(__memset)
+SYM_FUNC_END(__memset)
+EXPORT_SYMBOL(__memset)
+
+SYM_FUNC_ALIAS(memset, __memset)
EXPORT_SYMBOL(memset)
/*
@@ -119,8 +123,7 @@ EXPORT_SYMBOL(memset)
*
* void *memcpy(void *dest, const void *src, size_t n)
*/
-WEAK(memcpy)
-ENTRY(__memcpy)
+SYM_FUNC_START(__memcpy)
ltgr %r4,%r4
jz .Lmemcpy_exit
aghi %r4,-1
@@ -141,7 +144,10 @@ ENTRY(__memcpy)
j .Lmemcpy_remainder
.Lmemcpy_mvc:
mvc 0(1,%r1),0(%r3)
-ENDPROC(__memcpy)
+SYM_FUNC_END(__memcpy)
+EXPORT_SYMBOL(__memcpy)
+
+SYM_FUNC_ALIAS(memcpy, __memcpy)
EXPORT_SYMBOL(memcpy)
/*
@@ -152,7 +158,7 @@ EXPORT_SYMBOL(memcpy)
* void *__memset64(uint64_t *s, uint64_t v, size_t count)
*/
.macro __MEMSET bits,bytes,insn
-ENTRY(__memset\bits)
+SYM_FUNC_START(__memset\bits)
ltgr %r4,%r4
jz .L__memset_exit\bits
cghi %r4,\bytes
@@ -178,7 +184,7 @@ ENTRY(__memset\bits)
BR_EX %r14
.L__memset_mvc\bits:
mvc \bytes(1,%r1),0(%r1)
-ENDPROC(__memset\bits)
+SYM_FUNC_END(__memset\bits)
.endm
__MEMSET 16,2,sth
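
Besides the SYM_FUNC_* conversion, the WEAK() definitions of memmove, memset and memcpy become true aliases via SYM_FUNC_ALIAS(), so each pair of names now resolves to a single implementation rather than a weak symbol. A C-level analogue of such an alias, as a sketch (GCC/Clang attribute syntax, illustration only):

	#include <stddef.h>

	void *__memmove(void *dest, const void *src, size_t n);

	/* memmove is the same code as __memmove, not an overridable copy */
	void *memmove(void *dest, const void *src, size_t n)
		__attribute__((alias("__memmove")));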
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index d44214072779..e4a13d7cab6e 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -27,14 +27,13 @@ void debug_user_asce(int exit)
"kernel: %016llx user: %016llx\n",
exit ? "exit" : "entry", cr1, cr7,
S390_lowcore.kernel_asce, S390_lowcore.user_asce);
-
}
#endif /*CONFIG_DEBUG_ENTRY */
static unsigned long raw_copy_from_user_key(void *to, const void __user *from,
unsigned long size, unsigned long key)
{
- unsigned long tmp1, tmp2;
+ unsigned long rem;
union oac spec = {
.oac2.key = key,
.oac2.as = PSW_BITS_AS_SECONDARY,
@@ -42,28 +41,30 @@ static unsigned long raw_copy_from_user_key(void *to, const void __user *from,
.oac2.a = 1,
};
- tmp1 = -4096UL;
asm volatile(
- " lr 0,%[spec]\n"
- "0: mvcos 0(%2),0(%1),%0\n"
- "6: jz 4f\n"
- "1: algr %0,%3\n"
- " slgr %1,%3\n"
- " slgr %2,%3\n"
- " j 0b\n"
- "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */
- " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */
- " slgr %4,%1\n"
- " clgr %0,%4\n" /* copy crosses next page boundary? */
- " jnh 5f\n"
- "3: mvcos 0(%2),0(%1),%4\n"
- "7: slgr %0,%4\n"
- " j 5f\n"
- "4: slgr %0,%0\n"
- "5:\n"
- EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
- : "+a" (size), "+a" (from), "+a" (to), "+a" (tmp1), "=a" (tmp2)
- : [spec] "d" (spec.val)
+ " lr 0,%[spec]\n"
+ "0: mvcos 0(%[to]),0(%[from]),%[size]\n"
+ "1: jz 5f\n"
+ " algr %[size],%[val]\n"
+ " slgr %[from],%[val]\n"
+ " slgr %[to],%[val]\n"
+ " j 0b\n"
+ "2: la %[rem],4095(%[from])\n" /* rem = from + 4095 */
+ " nr %[rem],%[val]\n" /* rem = (from + 4095) & -4096 */
+ " slgr %[rem],%[from]\n"
+ " clgr %[size],%[rem]\n" /* copy crosses next page boundary? */
+ " jnh 6f\n"
+ "3: mvcos 0(%[to]),0(%[from]),%[rem]\n"
+ "4: slgr %[size],%[rem]\n"
+ " j 6f\n"
+ "5: slgr %[size],%[size]\n"
+ "6:\n"
+ EX_TABLE(0b, 2b)
+ EX_TABLE(1b, 2b)
+ EX_TABLE(3b, 6b)
+ EX_TABLE(4b, 6b)
+ : [size] "+&a" (size), [from] "+&a" (from), [to] "+&a" (to), [rem] "=&a" (rem)
+ : [val] "a" (-4096UL), [spec] "d" (spec.val)
: "cc", "memory", "0");
return size;
}
@@ -94,7 +95,7 @@ EXPORT_SYMBOL(_copy_from_user_key);
static unsigned long raw_copy_to_user_key(void __user *to, const void *from,
unsigned long size, unsigned long key)
{
- unsigned long tmp1, tmp2;
+ unsigned long rem;
union oac spec = {
.oac1.key = key,
.oac1.as = PSW_BITS_AS_SECONDARY,
@@ -102,28 +103,30 @@ static unsigned long raw_copy_to_user_key(void __user *to, const void *from,
.oac1.a = 1,
};
- tmp1 = -4096UL;
asm volatile(
- " lr 0,%[spec]\n"
- "0: mvcos 0(%1),0(%2),%0\n"
- "6: jz 4f\n"
- "1: algr %0,%3\n"
- " slgr %1,%3\n"
- " slgr %2,%3\n"
- " j 0b\n"
- "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */
- " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */
- " slgr %4,%1\n"
- " clgr %0,%4\n" /* copy crosses next page boundary? */
- " jnh 5f\n"
- "3: mvcos 0(%1),0(%2),%4\n"
- "7: slgr %0,%4\n"
- " j 5f\n"
- "4: slgr %0,%0\n"
- "5:\n"
- EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
- : "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
- : [spec] "d" (spec.val)
+ " lr 0,%[spec]\n"
+ "0: mvcos 0(%[to]),0(%[from]),%[size]\n"
+ "1: jz 5f\n"
+ " algr %[size],%[val]\n"
+ " slgr %[to],%[val]\n"
+ " slgr %[from],%[val]\n"
+ " j 0b\n"
+ "2: la %[rem],4095(%[to])\n" /* rem = to + 4095 */
+ " nr %[rem],%[val]\n" /* rem = (to + 4095) & -4096 */
+ " slgr %[rem],%[to]\n"
+ " clgr %[size],%[rem]\n" /* copy crosses next page boundary? */
+ " jnh 6f\n"
+ "3: mvcos 0(%[to]),0(%[from]),%[rem]\n"
+ "4: slgr %[size],%[rem]\n"
+ " j 6f\n"
+ "5: slgr %[size],%[size]\n"
+ "6:\n"
+ EX_TABLE(0b, 2b)
+ EX_TABLE(1b, 2b)
+ EX_TABLE(3b, 6b)
+ EX_TABLE(4b, 6b)
+ : [size] "+&a" (size), [to] "+&a" (to), [from] "+&a" (from), [rem] "=&a" (rem)
+ : [val] "a" (-4096UL), [spec] "d" (spec.val)
: "cc", "memory", "0");
return size;
}
@@ -147,33 +150,35 @@ EXPORT_SYMBOL(_copy_to_user_key);
unsigned long __clear_user(void __user *to, unsigned long size)
{
- unsigned long tmp1, tmp2;
+ unsigned long rem;
union oac spec = {
.oac1.as = PSW_BITS_AS_SECONDARY,
.oac1.a = 1,
};
- tmp1 = -4096UL;
asm volatile(
- " lr 0,%[spec]\n"
- "0: mvcos 0(%1),0(%4),%0\n"
- "6: jz 4f\n"
- "1: algr %0,%2\n"
- " slgr %1,%2\n"
- " j 0b\n"
- "2: la %3,4095(%1)\n"/* %4 = to + 4095 */
- " nr %3,%2\n" /* %4 = (to + 4095) & -4096 */
- " slgr %3,%1\n"
- " clgr %0,%3\n" /* copy crosses next page boundary? */
- " jnh 5f\n"
- "3: mvcos 0(%1),0(%4),%3\n"
- "7: slgr %0,%3\n"
- " j 5f\n"
- "4: slgr %0,%0\n"
- "5:\n"
- EX_TABLE(0b,2b) EX_TABLE(6b,2b) EX_TABLE(3b,5b) EX_TABLE(7b,5b)
- : "+&a" (size), "+&a" (to), "+a" (tmp1), "=&a" (tmp2)
- : "a" (empty_zero_page), [spec] "d" (spec.val)
+ " lr 0,%[spec]\n"
+ "0: mvcos 0(%[to]),0(%[zeropg]),%[size]\n"
+ "1: jz 5f\n"
+ " algr %[size],%[val]\n"
+ " slgr %[to],%[val]\n"
+ " j 0b\n"
+ "2: la %[rem],4095(%[to])\n" /* rem = to + 4095 */
+ " nr %[rem],%[val]\n" /* rem = (to + 4095) & -4096 */
+ " slgr %[rem],%[to]\n"
+ " clgr %[size],%[rem]\n" /* copy crosses next page boundary? */
+ " jnh 6f\n"
+ "3: mvcos 0(%[to]),0(%[zeropg]),%[rem]\n"
+ "4: slgr %[size],%[rem]\n"
+ " j 6f\n"
+ "5: slgr %[size],%[size]\n"
+ "6:\n"
+ EX_TABLE(0b, 2b)
+ EX_TABLE(1b, 2b)
+ EX_TABLE(3b, 6b)
+ EX_TABLE(4b, 6b)
+ : [size] "+&a" (size), [to] "+&a" (to), [rem] "=&a" (rem)
+ : [val] "a" (-4096UL), [zeropg] "a" (empty_zero_page), [spec] "d" (spec.val)
: "cc", "memory", "0");
return size;
}
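
All three mvcos blocks above are rewritten with symbolic operand names instead of positional %0..%4 references, and the preloaded -4096UL scratch variable (tmp1) becomes a plain input operand. The named-operand syntax itself, shown as a minimal s390 sketch (illustration only, not taken from this patch):

	static inline unsigned long reg_copy(unsigned long src)
	{
		unsigned long dst;

		/* each constraint gets a [name]; the template says %[name] */
		asm("lgr %[dst],%[src]"
		    : [dst] "=d" (dst)
		    : [src] "d" (src));
		return dst;
	}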
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile
index 57e4f3a24829..d90db06a8af5 100644
--- a/arch/s390/mm/Makefile
+++ b/arch/s390/mm/Makefile
@@ -10,6 +10,3 @@ obj-$(CONFIG_CMM) += cmm.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_PTDUMP_CORE) += dump_pagetables.o
obj-$(CONFIG_PGSTE) += gmap.o
-
-KASAN_SANITIZE_kasan_init.o := n
-obj-$(CONFIG_KASAN) += kasan_init.o
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 9141ed4c52e9..5300c6867d5e 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -335,16 +335,6 @@ static struct ctl_table cmm_table[] = {
{ }
};
-static struct ctl_table cmm_dir_table[] = {
- {
- .procname = "vm",
- .maxlen = 0,
- .mode = 0555,
- .child = cmm_table,
- },
- { }
-};
-
#ifdef CONFIG_CMM_IUCV
#define SMSG_PREFIX "CMM"
static void cmm_smsg_target(const char *from, char *msg)
@@ -389,7 +379,7 @@ static int __init cmm_init(void)
{
int rc = -ENOMEM;
- cmm_sysctl_header = register_sysctl_table(cmm_dir_table);
+ cmm_sysctl_header = register_sysctl("vm", cmm_table);
if (!cmm_sysctl_header)
goto out_sysctl;
#ifdef CONFIG_CMM_IUCV
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 144447d5cb4c..8d94e29adcdb 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -176,9 +176,8 @@ void __init mem_init(void)
void free_initmem(void)
{
- __set_memory((unsigned long)_sinittext,
- (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
- SET_MEMORY_RW | SET_MEMORY_NX);
+ set_memory_rwnx((unsigned long)_sinittext,
+ (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT);
free_initmem_default(POISON_FREE_INITMEM);
}
diff --git a/arch/s390/mm/kasan_init.c b/arch/s390/mm/kasan_init.c
deleted file mode 100644
index ef89a5f26853..000000000000
--- a/arch/s390/mm/kasan_init.c
+++ /dev/null
@@ -1,301 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/kasan.h>
-#include <linux/sched/task.h>
-#include <linux/pgtable.h>
-#include <asm/pgalloc.h>
-#include <asm/kasan.h>
-#include <asm/mem_detect.h>
-#include <asm/processor.h>
-#include <asm/sclp.h>
-#include <asm/facility.h>
-#include <asm/sections.h>
-#include <asm/setup.h>
-#include <asm/uv.h>
-
-static unsigned long segment_pos __initdata;
-static unsigned long segment_low __initdata;
-static bool has_edat __initdata;
-static bool has_nx __initdata;
-
-#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))
-
-static void __init kasan_early_panic(const char *reason)
-{
- sclp_early_printk("The Linux kernel failed to boot with the KernelAddressSanitizer:\n");
- sclp_early_printk(reason);
- disabled_wait();
-}
-
-static void * __init kasan_early_alloc_segment(void)
-{
- segment_pos -= _SEGMENT_SIZE;
-
- if (segment_pos < segment_low)
- kasan_early_panic("out of memory during initialisation\n");
-
- return __va(segment_pos);
-}
-
-static void * __init kasan_early_alloc_pages(unsigned int order)
-{
- pgalloc_pos -= (PAGE_SIZE << order);
-
- if (pgalloc_pos < pgalloc_low)
- kasan_early_panic("out of memory during initialisation\n");
-
- return __va(pgalloc_pos);
-}
-
-static void * __init kasan_early_crst_alloc(unsigned long val)
-{
- unsigned long *table;
-
- table = kasan_early_alloc_pages(CRST_ALLOC_ORDER);
- if (table)
- crst_table_init(table, val);
- return table;
-}
-
-static pte_t * __init kasan_early_pte_alloc(void)
-{
- static void *pte_leftover;
- pte_t *pte;
-
- BUILD_BUG_ON(_PAGE_TABLE_SIZE * 2 != PAGE_SIZE);
-
- if (!pte_leftover) {
- pte_leftover = kasan_early_alloc_pages(0);
- pte = pte_leftover + _PAGE_TABLE_SIZE;
- } else {
- pte = pte_leftover;
- pte_leftover = NULL;
- }
- memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
- return pte;
-}
-
-enum populate_mode {
- POPULATE_MAP,
- POPULATE_ZERO_SHADOW,
- POPULATE_SHALLOW
-};
-
-static inline pgprot_t pgprot_clear_bit(pgprot_t pgprot, unsigned long bit)
-{
- return __pgprot(pgprot_val(pgprot) & ~bit);
-}
-
-static void __init kasan_early_pgtable_populate(unsigned long address,
- unsigned long end,
- enum populate_mode mode)
-{
- pgprot_t pgt_prot_zero = PAGE_KERNEL_RO;
- pgprot_t pgt_prot = PAGE_KERNEL;
- pgprot_t sgt_prot = SEGMENT_KERNEL;
- pgd_t *pg_dir;
- p4d_t *p4_dir;
- pud_t *pu_dir;
- pmd_t *pm_dir;
- pte_t *pt_dir;
- pmd_t pmd;
- pte_t pte;
-
- if (!has_nx) {
- pgt_prot_zero = pgprot_clear_bit(pgt_prot_zero, _PAGE_NOEXEC);
- pgt_prot = pgprot_clear_bit(pgt_prot, _PAGE_NOEXEC);
- sgt_prot = pgprot_clear_bit(sgt_prot, _SEGMENT_ENTRY_NOEXEC);
- }
-
- while (address < end) {
- pg_dir = pgd_offset_k(address);
- if (pgd_none(*pg_dir)) {
- if (mode == POPULATE_ZERO_SHADOW &&
- IS_ALIGNED(address, PGDIR_SIZE) &&
- end - address >= PGDIR_SIZE) {
- pgd_populate(&init_mm, pg_dir,
- kasan_early_shadow_p4d);
- address = (address + PGDIR_SIZE) & PGDIR_MASK;
- continue;
- }
- p4_dir = kasan_early_crst_alloc(_REGION2_ENTRY_EMPTY);
- pgd_populate(&init_mm, pg_dir, p4_dir);
- }
-
- if (mode == POPULATE_SHALLOW) {
- address = (address + P4D_SIZE) & P4D_MASK;
- continue;
- }
-
- p4_dir = p4d_offset(pg_dir, address);
- if (p4d_none(*p4_dir)) {
- if (mode == POPULATE_ZERO_SHADOW &&
- IS_ALIGNED(address, P4D_SIZE) &&
- end - address >= P4D_SIZE) {
- p4d_populate(&init_mm, p4_dir,
- kasan_early_shadow_pud);
- address = (address + P4D_SIZE) & P4D_MASK;
- continue;
- }
- pu_dir = kasan_early_crst_alloc(_REGION3_ENTRY_EMPTY);
- p4d_populate(&init_mm, p4_dir, pu_dir);
- }
-
- pu_dir = pud_offset(p4_dir, address);
- if (pud_none(*pu_dir)) {
- if (mode == POPULATE_ZERO_SHADOW &&
- IS_ALIGNED(address, PUD_SIZE) &&
- end - address >= PUD_SIZE) {
- pud_populate(&init_mm, pu_dir,
- kasan_early_shadow_pmd);
- address = (address + PUD_SIZE) & PUD_MASK;
- continue;
- }
- pm_dir = kasan_early_crst_alloc(_SEGMENT_ENTRY_EMPTY);
- pud_populate(&init_mm, pu_dir, pm_dir);
- }
-
- pm_dir = pmd_offset(pu_dir, address);
- if (pmd_none(*pm_dir)) {
- if (IS_ALIGNED(address, PMD_SIZE) &&
- end - address >= PMD_SIZE) {
- if (mode == POPULATE_ZERO_SHADOW) {
- pmd_populate(&init_mm, pm_dir, kasan_early_shadow_pte);
- address = (address + PMD_SIZE) & PMD_MASK;
- continue;
- } else if (has_edat) {
- void *page = kasan_early_alloc_segment();
-
- memset(page, 0, _SEGMENT_SIZE);
- pmd = __pmd(__pa(page));
- pmd = set_pmd_bit(pmd, sgt_prot);
- set_pmd(pm_dir, pmd);
- address = (address + PMD_SIZE) & PMD_MASK;
- continue;
- }
- }
- pt_dir = kasan_early_pte_alloc();
- pmd_populate(&init_mm, pm_dir, pt_dir);
- } else if (pmd_large(*pm_dir)) {
- address = (address + PMD_SIZE) & PMD_MASK;
- continue;
- }
-
- pt_dir = pte_offset_kernel(pm_dir, address);
- if (pte_none(*pt_dir)) {
- void *page;
-
- switch (mode) {
- case POPULATE_MAP:
- page = kasan_early_alloc_pages(0);
- memset(page, 0, PAGE_SIZE);
- pte = __pte(__pa(page));
- pte = set_pte_bit(pte, pgt_prot);
- set_pte(pt_dir, pte);
- break;
- case POPULATE_ZERO_SHADOW:
- page = kasan_early_shadow_page;
- pte = __pte(__pa(page));
- pte = set_pte_bit(pte, pgt_prot_zero);
- set_pte(pt_dir, pte);
- break;
- case POPULATE_SHALLOW:
- /* should never happen */
- break;
- }
- }
- address += PAGE_SIZE;
- }
-}
-
-static void __init kasan_early_detect_facilities(void)
-{
- if (test_facility(8)) {
- has_edat = true;
- __ctl_set_bit(0, 23);
- }
- if (!noexec_disabled && test_facility(130)) {
- has_nx = true;
- __ctl_set_bit(0, 20);
- }
-}
-
-void __init kasan_early_init(void)
-{
- pte_t pte_z = __pte(__pa(kasan_early_shadow_page) | pgprot_val(PAGE_KERNEL_RO));
- pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
- pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
- p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);
- unsigned long untracked_end = MODULES_VADDR;
- unsigned long shadow_alloc_size;
- unsigned long start, end;
- int i;
-
- kasan_early_detect_facilities();
- if (!has_nx)
- pte_z = clear_pte_bit(pte_z, __pgprot(_PAGE_NOEXEC));
-
- BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE));
- BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));
-
- /* init kasan zero shadow */
- crst_table_init((unsigned long *)kasan_early_shadow_p4d, p4d_val(p4d_z));
- crst_table_init((unsigned long *)kasan_early_shadow_pud, pud_val(pud_z));
- crst_table_init((unsigned long *)kasan_early_shadow_pmd, pmd_val(pmd_z));
- memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);
-
- if (has_edat) {
- shadow_alloc_size = get_mem_detect_usable_total() >> KASAN_SHADOW_SCALE_SHIFT;
- segment_pos = round_down(pgalloc_pos, _SEGMENT_SIZE);
- segment_low = segment_pos - shadow_alloc_size;
- segment_low = round_down(segment_low, _SEGMENT_SIZE);
- pgalloc_pos = segment_low;
- }
- /*
- * Current memory layout:
- * +- 0 -------------+ +- shadow start -+
- * |1:1 ident mapping| /|1/8 of ident map|
- * | | / | |
- * +-end of ident map+ / +----------------+
- * | ... gap ... | / | kasan |
- * | | / | zero page |
- * +- vmalloc area -+ / | mapping |
- * | vmalloc_size | / | (untracked) |
- * +- modules vaddr -+ / +----------------+
- * | 2Gb |/ | unmapped | allocated per module
- * +- shadow start -+ +----------------+
- * | 1/8 addr space | | zero pg mapping| (untracked)
- * +- shadow end ----+---------+- shadow end ---+
- *
- * Current memory layout (KASAN_VMALLOC):
- * +- 0 -------------+ +- shadow start -+
- * |1:1 ident mapping| /|1/8 of ident map|
- * | | / | |
- * +-end of ident map+ / +----------------+
- * | ... gap ... | / | kasan zero page| (untracked)
- * | | / | mapping |
- * +- vmalloc area -+ / +----------------+
- * | vmalloc_size | / |shallow populate|
- * +- modules vaddr -+ / +----------------+
- * | 2Gb |/ |shallow populate|
- * +- shadow start -+ +----------------+
- * | 1/8 addr space | | zero pg mapping| (untracked)
- * +- shadow end ----+---------+- shadow end ---+
- */
- /* populate kasan shadow (for identity mapping and zero page mapping) */
- for_each_mem_detect_usable_block(i, &start, &end)
- kasan_early_pgtable_populate(__sha(start), __sha(end), POPULATE_MAP);
- if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
- untracked_end = VMALLOC_START;
- /* shallowly populate kasan shadow for vmalloc and modules */
- kasan_early_pgtable_populate(__sha(VMALLOC_START), __sha(MODULES_END),
- POPULATE_SHALLOW);
- }
- /* populate kasan shadow for untracked memory */
- kasan_early_pgtable_populate(__sha(ident_map_size), __sha(untracked_end),
- POPULATE_ZERO_SHADOW);
- kasan_early_pgtable_populate(__sha(MODULES_END), __sha(_REGION1_SIZE),
- POPULATE_ZERO_SHADOW);
- /* enable kasan */
- init_task.kasan_depth = 0;
- sclp_early_printk("KernelAddressSanitizer initialized\n");
-}
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 85195c18b2e8..5ba3bd8a7b12 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -4,6 +4,7 @@
* Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
*/
#include <linux/hugetlb.h>
+#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/facility.h>
@@ -41,7 +42,7 @@ void __storage_key_init_range(unsigned long start, unsigned long end)
}
#ifdef CONFIG_PROC_FS
-atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];
+atomic_long_t __bootdata_preserved(direct_pages_count[PG_DIRECT_MAP_MAX]);
void arch_report_meminfo(struct seq_file *m)
{
@@ -101,6 +102,14 @@ static int walk_pte_level(pmd_t *pmdp, unsigned long addr, unsigned long end,
new = set_pte_bit(new, __pgprot(_PAGE_NOEXEC));
else if (flags & SET_MEMORY_X)
new = clear_pte_bit(new, __pgprot(_PAGE_NOEXEC));
+ if (flags & SET_MEMORY_INV) {
+ new = set_pte_bit(new, __pgprot(_PAGE_INVALID));
+ } else if (flags & SET_MEMORY_DEF) {
+ new = __pte(pte_val(new) & PAGE_MASK);
+ new = set_pte_bit(new, PAGE_KERNEL);
+ if (!MACHINE_HAS_NX)
+ new = clear_pte_bit(new, __pgprot(_PAGE_NOEXEC));
+ }
pgt_set((unsigned long *)ptep, pte_val(new), addr, CRDTE_DTT_PAGE);
ptep++;
addr += PAGE_SIZE;
@@ -151,6 +160,14 @@ static void modify_pmd_page(pmd_t *pmdp, unsigned long addr,
new = set_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_NOEXEC));
else if (flags & SET_MEMORY_X)
new = clear_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_NOEXEC));
+ if (flags & SET_MEMORY_INV) {
+ new = set_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_INVALID));
+ } else if (flags & SET_MEMORY_DEF) {
+ new = __pmd(pmd_val(new) & PMD_MASK);
+ new = set_pmd_bit(new, SEGMENT_KERNEL);
+ if (!MACHINE_HAS_NX)
+ new = clear_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_NOEXEC));
+ }
pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT);
}
@@ -232,6 +249,14 @@ static void modify_pud_page(pud_t *pudp, unsigned long addr,
new = set_pud_bit(new, __pgprot(_REGION_ENTRY_NOEXEC));
else if (flags & SET_MEMORY_X)
new = clear_pud_bit(new, __pgprot(_REGION_ENTRY_NOEXEC));
+ if (flags & SET_MEMORY_INV) {
+ new = set_pud_bit(new, __pgprot(_REGION_ENTRY_INVALID));
+ } else if (flags & SET_MEMORY_DEF) {
+ new = __pud(pud_val(new) & PUD_MASK);
+ new = set_pud_bit(new, REGION3_KERNEL);
+ if (!MACHINE_HAS_NX)
+ new = clear_pud_bit(new, __pgprot(_REGION_ENTRY_NOEXEC));
+ }
pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
}
@@ -298,11 +323,6 @@ static int change_page_attr(unsigned long addr, unsigned long end,
int rc = -EINVAL;
pgd_t *pgdp;
- if (addr == end)
- return 0;
- if (end >= MODULES_END)
- return -EINVAL;
- mutex_lock(&cpa_mutex);
pgdp = pgd_offset_k(addr);
do {
if (pgd_none(*pgdp))
@@ -313,18 +333,76 @@ static int change_page_attr(unsigned long addr, unsigned long end,
break;
cond_resched();
} while (pgdp++, addr = next, addr < end && !rc);
- mutex_unlock(&cpa_mutex);
+ return rc;
+}
+
+static int change_page_attr_alias(unsigned long addr, unsigned long end,
+ unsigned long flags)
+{
+ unsigned long alias, offset, va_start, va_end;
+ struct vm_struct *area;
+ int rc = 0;
+
+ /*
+ * Changes to read-only permissions on kernel VA mappings are also
+ * applied to the kernel direct mapping. Execute permissions are
+ * intentionally not transferred to keep all allocated pages within
+ * the direct mapping non-executable.
+ */
+ flags &= SET_MEMORY_RO | SET_MEMORY_RW;
+ if (!flags)
+ return 0;
+ area = NULL;
+ while (addr < end) {
+ if (!area)
+ area = find_vm_area((void *)addr);
+ if (!area || !(area->flags & VM_ALLOC))
+ return 0;
+ va_start = (unsigned long)area->addr;
+ va_end = va_start + area->nr_pages * PAGE_SIZE;
+ offset = (addr - va_start) >> PAGE_SHIFT;
+ alias = (unsigned long)page_address(area->pages[offset]);
+ rc = change_page_attr(alias, alias + PAGE_SIZE, flags);
+ if (rc)
+ break;
+ addr += PAGE_SIZE;
+ if (addr >= va_end)
+ area = NULL;
+ }
return rc;
}
int __set_memory(unsigned long addr, int numpages, unsigned long flags)
{
+ unsigned long end;
+ int rc;
+
if (!MACHINE_HAS_NX)
flags &= ~(SET_MEMORY_NX | SET_MEMORY_X);
if (!flags)
return 0;
+ if (!numpages)
+ return 0;
addr &= PAGE_MASK;
- return change_page_attr(addr, addr + numpages * PAGE_SIZE, flags);
+ end = addr + numpages * PAGE_SIZE;
+ mutex_lock(&cpa_mutex);
+ rc = change_page_attr(addr, end, flags);
+ if (rc)
+ goto out;
+ rc = change_page_attr_alias(addr, end, flags);
+out:
+ mutex_unlock(&cpa_mutex);
+ return rc;
+}
+
+int set_direct_map_invalid_noflush(struct page *page)
+{
+ return __set_memory((unsigned long)page_to_virt(page), 1, SET_MEMORY_INV);
+}
+
+int set_direct_map_default_noflush(struct page *page)
+{
+ return __set_memory((unsigned long)page_to_virt(page), 1, SET_MEMORY_DEF);
}
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
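
set_direct_map_invalid_noflush() and set_direct_map_default_noflush() are the two hooks behind ARCH_HAS_SET_DIRECT_MAP, which this series enables. A hedged sketch of the caller-side contract, modelled on how generic users such as mm/secretmem.c treat these hooks; the _noflush suffix means the caller owns any TLB flushing it needs:

	#include <linux/set_memory.h>

	static int toggle_direct_map(struct page *page)
	{
		int rc;

		rc = set_direct_map_invalid_noflush(page); /* unmap alias */
		if (rc)
			return rc;
		/* the page is unreachable through the direct mapping here */
		return set_direct_map_default_noflush(page); /* restore */
	}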
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 2de48b2c1b04..66ab68db9842 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -33,19 +33,9 @@ static struct ctl_table page_table_sysctl[] = {
{ }
};
-static struct ctl_table page_table_sysctl_dir[] = {
- {
- .procname = "vm",
- .maxlen = 0,
- .mode = 0555,
- .child = page_table_sysctl,
- },
- { }
-};
-
static int __init page_table_register_sysctl(void)
{
- return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
+ return register_sysctl("vm", page_table_sysctl) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);
@@ -143,13 +133,7 @@ err_p4d:
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
- unsigned int old, new;
-
- do {
- old = atomic_read(v);
- new = old ^ bits;
- } while (atomic_cmpxchg(v, old, new) != old);
- return new;
+ return atomic_fetch_xor(bits, v) ^ bits;
}
#ifdef CONFIG_PGSTE
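
The cmpxchg loop in atomic_xor_bits() can be dropped because atomic_fetch_xor() returns the value the counter held before the xor, and applying the xor a second time reproduces the newly stored value: (x ^ bits) ^ bits == x, hence new == old ^ bits. Spelled out, the one-liner is equivalent to:

	static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
	{
		unsigned int old = atomic_fetch_xor(bits, v);	/* pre-xor value */

		return old ^ bits;	/* the value now stored in *v */
	}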
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 4113a7ffa149..5b22c6e24528 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -5,6 +5,7 @@
#include <linux/memory_hotplug.h>
#include <linux/memblock.h>
+#include <linux/kasan.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/init.h>
@@ -664,6 +665,9 @@ static void __init memblock_region_swap(void *a, void *b, int size)
swap(*(struct memblock_region *)a, *(struct memblock_region *)b);
}
+#ifdef CONFIG_KASAN
+#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))
+#endif
/*
* map whole physical memory to virtual memory (identity mapping)
* we reserve enough space in the vmalloc area for vmemmap to hotplug
@@ -728,23 +732,24 @@ void __init vmem_map_init(void)
memblock_region_cmp, memblock_region_swap);
__for_each_mem_range(i, &memblock.memory, &memory_rwx,
NUMA_NO_NODE, MEMBLOCK_NONE, &base, &end, NULL) {
- __set_memory((unsigned long)__va(base),
- (end - base) >> PAGE_SHIFT,
- SET_MEMORY_RW | SET_MEMORY_NX);
+ set_memory_rwnx((unsigned long)__va(base),
+ (end - base) >> PAGE_SHIFT);
}
- __set_memory((unsigned long)_stext,
- (unsigned long)(_etext - _stext) >> PAGE_SHIFT,
- SET_MEMORY_RO | SET_MEMORY_X);
- __set_memory((unsigned long)_etext,
- (unsigned long)(__end_rodata - _etext) >> PAGE_SHIFT,
- SET_MEMORY_RO);
- __set_memory((unsigned long)_sinittext,
- (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
- SET_MEMORY_RO | SET_MEMORY_X);
- __set_memory(__stext_amode31,
- (__etext_amode31 - __stext_amode31) >> PAGE_SHIFT,
- SET_MEMORY_RO | SET_MEMORY_X);
+#ifdef CONFIG_KASAN
+ for_each_mem_range(i, &base, &end) {
+ set_memory_rwnx(__sha(base),
+ (__sha(end) - __sha(base)) >> PAGE_SHIFT);
+ }
+#endif
+ set_memory_rox((unsigned long)_stext,
+ (unsigned long)(_etext - _stext) >> PAGE_SHIFT);
+ set_memory_ro((unsigned long)_etext,
+ (unsigned long)(__end_rodata - _etext) >> PAGE_SHIFT);
+ set_memory_rox((unsigned long)_sinittext,
+ (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT);
+ set_memory_rox(__stext_amode31,
+ (__etext_amode31 - __stext_amode31) >> PAGE_SHIFT);
/* lowcore must be executable for LPSWE */
if (static_key_enabled(&cpu_has_bear))
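
The __sha() helper used in the new KASAN loop wraps the generic address-to-shadow mapping, in which one shadow byte tracks each 8-byte granule of memory. For reference, the generic definition this relies on (as in include/linux/kasan.h):

	static inline void *kasan_mem_to_shadow(const void *addr)
	{
		return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
			+ KASAN_SHADOW_OFFSET;
	}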
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index e16afacc8fd1..afc3f33788da 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -874,32 +874,15 @@ bool zpci_is_device_configured(struct zpci_dev *zdev)
* @fh: The general function handle supplied by the platform
*
* Given a device in the configuration state Configured, enables, scans and
- * adds it to the common code PCI subsystem if possible. If the PCI device is
- * parked because we can not yet create a PCI bus because we have not seen
- * function 0, it is ignored but will be scanned once function 0 appears.
- * If any failure occurs, the zpci_dev is left disabled.
+ * adds it to the common code PCI subsystem if possible. If any failure occurs,
+ * the zpci_dev is left disabled.
*
* Return: 0 on success, or an error code otherwise
*/
int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh)
{
- int rc;
-
zpci_update_fh(zdev, fh);
- /* the PCI function will be scanned once function 0 appears */
- if (!zdev->zbus->bus)
- return 0;
-
- /* For function 0 on a multi-function bus scan whole bus as we might
- * have to pick up existing functions waiting for it to allow creating
- * the PCI bus
- */
- if (zdev->devfn == 0 && zdev->zbus->multifunction)
- rc = zpci_bus_scan_bus(zdev->zbus);
- else
- rc = zpci_bus_scan_device(zdev);
-
- return rc;
+ return zpci_bus_scan_device(zdev);
}
/**
diff --git a/arch/s390/pci/pci_bus.c b/arch/s390/pci/pci_bus.c
index a99926af2b69..32245b970a0c 100644
--- a/arch/s390/pci/pci_bus.c
+++ b/arch/s390/pci/pci_bus.c
@@ -85,9 +85,8 @@ int zpci_bus_scan_device(struct zpci_dev *zdev)
if (!pdev)
return -ENODEV;
- pci_bus_add_device(pdev);
pci_lock_rescan_remove();
- pci_bus_add_devices(zdev->zbus->bus);
+ pci_bus_add_device(pdev);
pci_unlock_rescan_remove();
return 0;
@@ -130,11 +129,8 @@ void zpci_bus_remove_device(struct zpci_dev *zdev, bool set_error)
* @zbus: the zbus to be scanned
*
* Enables and scans all PCI functions on the bus making them available to the
- * common PCI code. If there is no function 0 on the zbus nothing is scanned. If
- * a function does not have a slot yet because it was added to the zbus before
- * function 0 the slot is created. If a PCI function fails to be initialized
- * an error will be returned but attempts will still be made for all other
- * functions on the bus.
+ * common PCI code. If a PCI function fails to be initialized an error will be
+ * returned but attempts will still be made for all other functions on the bus.
*
* Return: 0 on success, an error value otherwise
*/
@@ -211,7 +207,6 @@ static int zpci_bus_create_pci_bus(struct zpci_bus *zbus, struct zpci_dev *fr, s
}
zbus->bus = bus;
- pci_bus_add_devices(bus);
return 0;
}
diff --git a/arch/s390/purgatory/head.S b/arch/s390/purgatory/head.S
index 6f835124ee82..e5bd1a503528 100644
--- a/arch/s390/purgatory/head.S
+++ b/arch/s390/purgatory/head.S
@@ -76,9 +76,9 @@
diag %r0,%r1,0x308
.endm
-.text
-.align PAGE_SIZE
-ENTRY(purgatory_start)
+ .text
+ .balign PAGE_SIZE
+SYM_CODE_START(purgatory_start)
/* The purgatory might be called after a diag308 so better set
* architecture and addressing mode.
*/
@@ -245,45 +245,21 @@ ENTRY(purgatory_start)
/* start crash kernel */
START_NEXT_KERNEL .base_dst 1
-
-
-load_psw_mask:
- .long 0x00080000,0x80000000
-
- .align 8
-disabled_wait_psw:
- .quad 0x0002000180000000
- .quad 0x0000000000000000 + .do_checksum_verification
-
-gprregs:
- .rept 10
- .quad 0
- .endr
-
-/* Macro to define a global variable with name and size (in bytes) to be
- * shared with C code.
- *
- * Add the .size and .type attribute to satisfy checks on the Elf_Sym during
- * purgatory load.
- */
-.macro GLOBAL_VARIABLE name,size
-\name:
- .global \name
- .size \name,\size
- .type \name,object
- .skip \size,0
-.endm
-
-GLOBAL_VARIABLE purgatory_sha256_digest,32
-GLOBAL_VARIABLE purgatory_sha_regions,16*__KEXEC_SHA_REGION_SIZE
-GLOBAL_VARIABLE kernel_entry,8
-GLOBAL_VARIABLE kernel_type,8
-GLOBAL_VARIABLE crash_start,8
-GLOBAL_VARIABLE crash_size,8
-
- .align PAGE_SIZE
-stack:
+SYM_CODE_END(purgatory_start)
+
+SYM_DATA_LOCAL(load_psw_mask, .long 0x00080000,0x80000000)
+ .balign 8
+SYM_DATA_LOCAL(disabled_wait_psw, .quad 0x0002000180000000,.do_checksum_verification)
+SYM_DATA_LOCAL(gprregs, .fill 10,8,0)
+SYM_DATA(purgatory_sha256_digest, .skip 32)
+SYM_DATA(purgatory_sha_regions, .skip 16*__KEXEC_SHA_REGION_SIZE)
+SYM_DATA(kernel_entry, .skip 8)
+SYM_DATA(kernel_type, .skip 8)
+SYM_DATA(crash_start, .skip 8)
+SYM_DATA(crash_size, .skip 8)
+ .balign PAGE_SIZE
+SYM_DATA_START_LOCAL(stack)
/* The buffer to move this code must be as big as the code. */
.skip stack-purgatory_start
- .align PAGE_SIZE
-purgatory_end:
+ .balign PAGE_SIZE
+SYM_DATA_END_LABEL(stack, SYM_L_LOCAL, purgatory_end)
diff --git a/arch/s390/purgatory/kexec-purgatory.S b/arch/s390/purgatory/kexec-purgatory.S
index 8293753100ae..25f512b1de12 100644
--- a/arch/s390/purgatory/kexec-purgatory.S
+++ b/arch/s390/purgatory/kexec-purgatory.S
@@ -1,14 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/linkage.h>
.section .rodata, "a"
- .align 8
-kexec_purgatory:
- .globl kexec_purgatory
+ .balign 8
+SYM_DATA_START(kexec_purgatory)
.incbin "arch/s390/purgatory/purgatory.ro"
-.Lkexec_purgatroy_end:
+SYM_DATA_END_LABEL(kexec_purgatory, SYM_L_LOCAL, kexec_purgatory_end)
- .align 8
-kexec_purgatory_size:
- .globl kexec_purgatory_size
- .quad .Lkexec_purgatroy_end - kexec_purgatory
+ .balign 8
+SYM_DATA(kexec_purgatory_size, .quad kexec_purgatory_end-kexec_purgatory)