From 4c78796301700bb9de82fce34c33adb1699b0ed9 Mon Sep 17 00:00:00 2001 From: Thomas Richter Date: Thu, 20 Oct 2022 10:52:24 +0200 Subject: s390/pai: move enum definition to header file Move enum definition to header file. This is done in preparation for a follow on patch where this enum will be used in another source file. Also change the enum name from paiext_mode to paievt_mode to indicate this enum is now used for several events. Make naming consistent and rename PAI_MODE_COUNTER to PAI_MODE_COUNTING. Signed-off-by: Thomas Richter Acked-by: Sumanth Korikkar Signed-off-by: Vasily Gorbik --- arch/s390/include/asm/pai.h | 6 ++++++ arch/s390/kernel/perf_pai_ext.c | 12 +++--------- 2 files changed, 9 insertions(+), 9 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/pai.h b/arch/s390/include/asm/pai.h index 1a8a6b15d121..7d1888e3dee6 100644 --- a/arch/s390/include/asm/pai.h +++ b/arch/s390/include/asm/pai.h @@ -75,4 +75,10 @@ static __always_inline void pai_kernel_exit(struct pt_regs *regs) WRITE_ONCE(S390_lowcore.ccd, S390_lowcore.ccd & ~PAI_CRYPTO_KERNEL_OFFSET); } +enum paievt_mode { + PAI_MODE_NONE, + PAI_MODE_SAMPLING, + PAI_MODE_COUNTING, +}; + #endif diff --git a/arch/s390/kernel/perf_pai_ext.c b/arch/s390/kernel/perf_pai_ext.c index d5c7c1e30c17..2da586936d95 100644 --- a/arch/s390/kernel/perf_pai_ext.c +++ b/arch/s390/kernel/perf_pai_ext.c @@ -28,12 +28,6 @@ static debug_info_t *paiext_dbg; static unsigned int paiext_cnt; /* Extracted with QPACI instruction */ -enum paiext_mode { - PAI_MODE_NONE, - PAI_MODE_SAMPLING, - PAI_MODE_COUNTER, -}; - struct pai_userdata { u16 num; u64 value; @@ -54,7 +48,7 @@ struct paiext_cb { /* PAI extension 1 control block */ struct paiext_map { unsigned long *area; /* Area for CPU to store counters */ struct pai_userdata *save; /* Area to store non-zero counters */ - enum paiext_mode mode; /* Type of event */ + enum paievt_mode mode; /* Type of event */ unsigned int active_events; /* # of PAI Extension users */ unsigned int refcnt; struct perf_event *event; /* Perf event for sampling */ @@ -192,14 +186,14 @@ static int paiext_alloc(struct perf_event_attr *a, struct perf_event *event) goto unlock; } cpump->mode = a->sample_period ? PAI_MODE_SAMPLING - : PAI_MODE_COUNTER; + : PAI_MODE_COUNTING; } else { /* Multiple invocation, check whats active. * Supported are multiple counter events or only one sampling * event concurrently at any one time. */ if (cpump->mode == PAI_MODE_SAMPLING || - (cpump->mode == PAI_MODE_COUNTER && a->sample_period)) { + (cpump->mode == PAI_MODE_COUNTING && a->sample_period)) { rc = -EBUSY; goto unlock; } -- cgit v1.2.3 From d3db4ac3c761def3d3a8e5ea6d05d1636c44c2ba Mon Sep 17 00:00:00 2001 From: Thomas Richter Date: Thu, 20 Oct 2022 11:38:05 +0200 Subject: s390/pai: rework pai_crypto mapped buffer reference count Rework the mapped buffer reference count in PMU pai_crypto to match the same technique as in PMU pai_ext. This simplifies the logic. Do not count the individual number of counter and sampling processes. Remember the type of access and the total number of references to the buffer. 
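In short: instead of separate sampler/counter counts, the per-CPU buffer now carries one
reference count plus a single mode. A condensed, stand-alone sketch of that scheme
(illustration only, names borrowed from the patch below, not the kernel code itself):

enum paievt_mode { PAI_MODE_NONE, PAI_MODE_SAMPLING, PAI_MODE_COUNTING };

struct buf_state {
	unsigned int refcnt;		/* total users of the mapped buffer */
	enum paievt_mode mode;		/* what the buffer is currently used for */
};

/* Grab a reference; returns 0 on success, -1 (-EBUSY in the kernel) on conflict. */
static int buf_get(struct buf_state *s, int wants_sampling)
{
	if (wants_sampling && s->mode != PAI_MODE_NONE)
		return -1;		/* sampling must be the only user */
	if (!wants_sampling && s->mode == PAI_MODE_SAMPLING)
		return -1;		/* counting cannot join a sampler */
	s->mode = wants_sampling ? PAI_MODE_SAMPLING : PAI_MODE_COUNTING;
	s->refcnt++;
	return 0;
}

/* Drop a reference; the last user resets the buffer state. */
static void buf_put(struct buf_state *s)
{
	if (!--s->refcnt)
		s->mode = PAI_MODE_NONE;
}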
Signed-off-by: Thomas Richter Acked-by: Sumanth Korikkar Signed-off-by: Vasily Gorbik --- arch/s390/kernel/perf_pai_crypto.c | 42 ++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 22 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/perf_pai_crypto.c b/arch/s390/kernel/perf_pai_crypto.c index 6826e2a69a21..92184611af68 100644 --- a/arch/s390/kernel/perf_pai_crypto.c +++ b/arch/s390/kernel/perf_pai_crypto.c @@ -36,8 +36,8 @@ struct paicrypt_map { unsigned long *page; /* Page for CPU to store counters */ struct pai_userdata *save; /* Page to store no-zero counters */ unsigned int users; /* # of PAI crypto users */ - unsigned int sampler; /* # of PAI crypto samplers */ - unsigned int counter; /* # of PAI crypto counters */ + unsigned int refcnt; /* Reference count mapped buffers */ + enum paievt_mode mode; /* Type of event */ struct perf_event *event; /* Perf event for sampling */ }; @@ -56,15 +56,11 @@ static void paicrypt_event_destroy(struct perf_event *event) cpump->event = NULL; static_branch_dec(&pai_key); mutex_lock(&pai_reserve_mutex); - if (event->attr.sample_period) - cpump->sampler -= 1; - else - cpump->counter -= 1; - debug_sprintf_event(cfm_dbg, 5, "%s event %#llx cpu %d" - " sampler %d counter %d\n", __func__, - event->attr.config, event->cpu, cpump->sampler, - cpump->counter); - if (!cpump->counter && !cpump->sampler) { + debug_sprintf_event(cfm_dbg, 5, "%s event %#llx cpu %d users %d" + " mode %d refcnt %d\n", __func__, + event->attr.config, event->cpu, cpump->users, + cpump->mode, cpump->refcnt); + if (!--cpump->refcnt) { debug_sprintf_event(cfm_dbg, 4, "%s page %#lx save %p\n", __func__, (unsigned long)cpump->page, cpump->save); @@ -72,6 +68,7 @@ static void paicrypt_event_destroy(struct perf_event *event) cpump->page = NULL; kvfree(cpump->save); cpump->save = NULL; + cpump->mode = PAI_MODE_NONE; } mutex_unlock(&pai_reserve_mutex); } @@ -136,17 +133,14 @@ static u64 paicrypt_getall(struct perf_event *event) */ static int paicrypt_busy(struct perf_event_attr *a, struct paicrypt_map *cpump) { - unsigned int *use_ptr; int rc = 0; mutex_lock(&pai_reserve_mutex); if (a->sample_period) { /* Sampling requested */ - use_ptr = &cpump->sampler; - if (cpump->counter || cpump->sampler) + if (cpump->mode != PAI_MODE_NONE) rc = -EBUSY; /* ... sampling/counting active */ } else { /* Counting requested */ - use_ptr = &cpump->counter; - if (cpump->sampler) + if (cpump->mode == PAI_MODE_SAMPLING) rc = -EBUSY; /* ... and sampling active */ } if (rc) @@ -172,12 +166,16 @@ static int paicrypt_busy(struct perf_event_attr *a, struct paicrypt_map *cpump) rc = 0; unlock: - /* If rc is non-zero, do not increment counter/sampler. */ - if (!rc) - *use_ptr += 1; - debug_sprintf_event(cfm_dbg, 5, "%s sample_period %#llx sampler %d" - " counter %d page %#lx save %p rc %d\n", __func__, - a->sample_period, cpump->sampler, cpump->counter, + /* If rc is non-zero, do not set mode and reference count */ + if (!rc) { + cpump->refcnt++; + cpump->mode = a->sample_period ? 
PAI_MODE_SAMPLING + : PAI_MODE_COUNTING; + } + debug_sprintf_event(cfm_dbg, 5, "%s sample_period %#llx users %d" + " mode %d refcnt %d page %#lx save %p rc %d\n", + __func__, a->sample_period, cpump->users, + cpump->mode, cpump->refcnt, (unsigned long)cpump->page, cpump->save, rc); mutex_unlock(&pai_reserve_mutex); return rc; -- cgit v1.2.3 From 58354c7d35d35dd119ada18ff84a6686ccc8743f Mon Sep 17 00:00:00 2001 From: Thomas Richter Date: Thu, 20 Oct 2022 11:55:52 +0200 Subject: s390/pai: rename structure member users to active_events Rename structure member users to active_events to make it consistent with PMU pai_ext. Also use the same prefix syntax for increment and decrement operators in both PMUs. Signed-off-by: Thomas Richter Acked-by: Sumanth Korikkar Signed-off-by: Vasily Gorbik --- arch/s390/kernel/perf_pai_crypto.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/perf_pai_crypto.c b/arch/s390/kernel/perf_pai_crypto.c index 92184611af68..529a2fee4ea5 100644 --- a/arch/s390/kernel/perf_pai_crypto.c +++ b/arch/s390/kernel/perf_pai_crypto.c @@ -35,7 +35,7 @@ struct pai_userdata { struct paicrypt_map { unsigned long *page; /* Page for CPU to store counters */ struct pai_userdata *save; /* Page to store no-zero counters */ - unsigned int users; /* # of PAI crypto users */ + unsigned int active_events; /* # of PAI crypto users */ unsigned int refcnt; /* Reference count mapped buffers */ enum paievt_mode mode; /* Type of event */ struct perf_event *event; /* Perf event for sampling */ @@ -58,8 +58,8 @@ static void paicrypt_event_destroy(struct perf_event *event) mutex_lock(&pai_reserve_mutex); debug_sprintf_event(cfm_dbg, 5, "%s event %#llx cpu %d users %d" " mode %d refcnt %d\n", __func__, - event->attr.config, event->cpu, cpump->users, - cpump->mode, cpump->refcnt); + event->attr.config, event->cpu, + cpump->active_events, cpump->mode, cpump->refcnt); if (!--cpump->refcnt) { debug_sprintf_event(cfm_dbg, 4, "%s page %#lx save %p\n", __func__, (unsigned long)cpump->page, @@ -174,7 +174,7 @@ unlock: } debug_sprintf_event(cfm_dbg, 5, "%s sample_period %#llx users %d" " mode %d refcnt %d page %#lx save %p rc %d\n", - __func__, a->sample_period, cpump->users, + __func__, a->sample_period, cpump->active_events, cpump->mode, cpump->refcnt, (unsigned long)cpump->page, cpump->save, rc); mutex_unlock(&pai_reserve_mutex); @@ -260,7 +260,7 @@ static int paicrypt_add(struct perf_event *event, int flags) struct paicrypt_map *cpump = this_cpu_ptr(&paicrypt_map); unsigned long ccd; - if (cpump->users++ == 0) { + if (++cpump->active_events == 1) { ccd = virt_to_phys(cpump->page) | PAI_CRYPTO_KERNEL_OFFSET; WRITE_ONCE(S390_lowcore.ccd, ccd); __ctl_set_bit(0, 50); @@ -291,7 +291,7 @@ static void paicrypt_del(struct perf_event *event, int flags) if (!event->attr.sample_period) /* Only counting needs to read counter */ paicrypt_stop(event, PERF_EF_UPDATE); - if (cpump->users-- == 1) { + if (--cpump->active_events == 0) { __ctl_clear_bit(0, 50); WRITE_ONCE(S390_lowcore.ccd, 0); } -- cgit v1.2.3 From 00a34d5a99c0631bd780b14cbe3813d0b39c3886 Mon Sep 17 00:00:00 2001 From: Gerald Schaefer Date: Tue, 19 Jul 2022 11:08:37 +0200 Subject: s390: select ARCH_WANT_HUGETLB_PAGE_OPTIMIZE_VMEMMAP Enable HUGETLB_PAGE_OPTIMIZE_VMEMMAP for s390. With this, vmemmap pages used to back struct pages for compound tail pages of hugetlb pages are freed and remapped to compound head page frame as RO, see also Documentation/vm/vmemmap_dedup.rst. 
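The savings quoted below can be reproduced from the vmemmap geometry (assuming 4K base
pages and the usual 64-byte struct page):

  1M hugetlb page:  256 base pages * 64 bytes = 16K of struct pages = 4 vmemmap
                    pages; 3 of 4 can be freed -> 12K saved, 12K/1M ~ 1.2%.
  2G hugetlb page:  524288 base pages * 64 bytes = 32M = 8192 vmemmap pages;
                    8191 of 8192 can be freed -> 32764K saved, 32764K/2G ~ 1.6%.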
For 1M hugetlb pages, this results in freeing 3 of 4 vmemmap pages, saving 12K of memory for each 1M hugetlb page (~1.2%). /sys/kernel/debug/kernel_page_tables will show the impact: ---[ vmemmap Area Start ]--- [...] 0x0000037202d84000-0x0000037202d85000 4K PTE RW NX 0x0000037202d85000-0x0000037202d88000 12K PTE RO NX For 2G hugetlb pages, this results in freeing 8191 of 8192 vmemmap pages, saving 32764K of memory for each 2G hugetlb page (~1.6%) /sys/kernel/debug/kernel_page_tables will show the impact: ---[ vmemmap Area Start ]--- [...] 0x000003720a000000-0x000003720a001000 4K PTE RW NX 0x000003720a001000-0x000003720c000000 32764K PTE RO NX The memory savings come with some costs: - vmemmap mapping for compound hugetlb pages is not a PMD mapping any more, but split to 4K PTE mappings, and it will not be coalesced back to PMD mapping after freeing hugetlb pages from the pool. Apart from theoretical performance impact, this will also (slightly) relativize the memory savings because of additional 2K PTE pagetable allocations. - Workload using "on the fly" hugetlb allocations via "nr_overcommit_hugepages" instead of using the hugetlb pool via "nr_hugepages" will suffer from considerably increased fault handling time, see also description from commit 78f39084b41d ("mm: hugetlb_vmemmap: add hugetlb_optimize_vmemmap sysctl"). - Freeing hugetlb pages from the pool will require re-allocation of the freed struct pages, and therefore needs some memory available to the kernel. This might fail in memory constrained scenarios. - For the same reason, memory offline might fail even for ZONE_MOVABLE when hugetlb pages are present (but not for s390, since we do not support ARCH_ENABLE_HUGEPAGE_MIGRATION, and therefore cannot have hugetlb pages in ZONE_MOVABLE). - General increased complexity and overhead in kernel handling of compound (head) pages. Therefore, this feature is disabled by default, and has to be enabled explicitly either by adding "hugetlb_free_vmemmap=on" kernel parameter, or during run-time via "/proc/sys/vm/hugetlb_optimize_vmemmap" sysctl. Acked-by: Heiko Carstens Signed-off-by: Gerald Schaefer Signed-off-by: Alexander Gordeev --- arch/s390/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/s390') diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 318fce77601d..a006dbb44890 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -121,6 +121,7 @@ config S390 select ARCH_WANTS_NO_INSTR select ARCH_WANT_DEFAULT_BPF_JIT select ARCH_WANT_IPC_PARSE_VERSION + select ARCH_WANT_HUGETLB_PAGE_OPTIMIZE_VMEMMAP select BUILDTIME_TABLE_SORT select CLONE_BACKWARDS2 select DMA_OPS if PCI -- cgit v1.2.3 From b381d047aad80ed6d0b41fc83180061cde0fc705 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 11 Nov 2022 13:14:18 +0100 Subject: s390: use generic serial.h header file There is no serial driver on s390, especially none that relies on a bogus BASE_BAUD define. Therefore use the generic header file. 
Signed-off-by: Heiko Carstens Signed-off-by: Alexander Gordeev --- arch/s390/include/asm/serial.h | 7 ------- 1 file changed, 7 deletions(-) delete mode 100644 arch/s390/include/asm/serial.h (limited to 'arch/s390') diff --git a/arch/s390/include/asm/serial.h b/arch/s390/include/asm/serial.h deleted file mode 100644 index aaf85a69061c..000000000000 --- a/arch/s390/include/asm/serial.h +++ /dev/null @@ -1,7 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_S390_SERIAL_H -#define _ASM_S390_SERIAL_H - -#define BASE_BAUD 0 - -#endif /* _ASM_S390_SERIAL_H */ -- cgit v1.2.3 From a3a6f55cc6fed675989647ab37bf9812a367f5f1 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 11 Nov 2022 13:15:54 +0100 Subject: s390: use generic bugs.h header file Use the generic bugs.h header file. Except for an excellent comment the header files are identical. Signed-off-by: Heiko Carstens Signed-off-by: Alexander Gordeev --- arch/s390/include/asm/bugs.h | 21 --------------------- 1 file changed, 21 deletions(-) delete mode 100644 arch/s390/include/asm/bugs.h (limited to 'arch/s390') diff --git a/arch/s390/include/asm/bugs.h b/arch/s390/include/asm/bugs.h deleted file mode 100644 index aa42a179be33..000000000000 --- a/arch/s390/include/asm/bugs.h +++ /dev/null @@ -1,21 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * S390 version - * Copyright IBM Corp. 1999 - * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) - * - * Derived from "include/asm-i386/bugs.h" - * Copyright (C) 1994 Linus Torvalds - */ - -/* - * This is included by init/main.c to check for architecture-dependent bugs. - * - * Needs: - * void check_bugs(void); - */ - -static inline void check_bugs(void) -{ - /* s390 has no bugs ... */ -} -- cgit v1.2.3 From 438d43d252e00d0eaac6e36b0616fe41de5e0d35 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 11 Nov 2022 13:18:21 +0100 Subject: s390: use generic shmparam.h header file Use generic shmparam.h header file since the contents are identical. Signed-off-by: Heiko Carstens Signed-off-by: Alexander Gordeev --- arch/s390/include/asm/shmparam.h | 12 ------------ 1 file changed, 12 deletions(-) delete mode 100644 arch/s390/include/asm/shmparam.h (limited to 'arch/s390') diff --git a/arch/s390/include/asm/shmparam.h b/arch/s390/include/asm/shmparam.h deleted file mode 100644 index e75d45649c54..000000000000 --- a/arch/s390/include/asm/shmparam.h +++ /dev/null @@ -1,12 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * S390 version - * - * Derived from "include/asm-i386/shmparam.h" - */ -#ifndef _ASM_S390_SHMPARAM_H -#define _ASM_S390_SHMPARAM_H - -#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */ - -#endif /* _ASM_S390_SHMPARAM_H */ -- cgit v1.2.3 From 2e71df94695cdc930fb373575ea4edabd4e4bcc7 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 11 Nov 2022 13:20:28 +0100 Subject: s390: use generic vga.h header file The generic vga.h contains a couple of defines, which do no harm on s390. Therefore use the generic version and git rid of the s390 specific empty header file. 
Suggested-by: Sven Schnelle Signed-off-by: Heiko Carstens Signed-off-by: Alexander Gordeev --- arch/s390/include/asm/vga.h | 7 ------- 1 file changed, 7 deletions(-) delete mode 100644 arch/s390/include/asm/vga.h (limited to 'arch/s390') diff --git a/arch/s390/include/asm/vga.h b/arch/s390/include/asm/vga.h deleted file mode 100644 index 605dc46bac5e..000000000000 --- a/arch/s390/include/asm/vga.h +++ /dev/null @@ -1,7 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_S390_VGA_H -#define _ASM_S390_VGA_H - -/* Avoid compile errors due to missing asm/vga.h */ - -#endif /* _ASM_S390_VGA_H */ -- cgit v1.2.3 From 254b2fd02e34a5761cd2a3aad8b24a7ddd8962e1 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Mon, 31 Oct 2022 14:11:57 +0100 Subject: s390/mm: provide minimal setup_per_cpu_areas() implementation s390 allows to enable CONFIG_NUMA, mainly to enable a couple of system calls which are only present if NUMA is enabled. The NUMA specific system calls are required by a couple of applications, which wouldn't work if the system calls wouldn't be present. The NUMA implementation itself maps all CPUs and memory to node 0. A special case is the generic percpu setup code, which doesn't expect an s390 like implementation and therefore emits a message/warning: "percpu: cpu 0 has no node -1 or node-local memory". In order to get rid of this message, and also to provide sane CPU to node and CPU distance mappings implement a minimal setup_per_cpu_areas() function, which is very close to the generic variant. Signed-off-by: Heiko Carstens Signed-off-by: Alexander Gordeev --- arch/s390/Kconfig | 2 ++ arch/s390/mm/init.c | 36 ++++++++++++++++++++++++++++++++++++ 2 files changed, 38 insertions(+) (limited to 'arch/s390') diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index a006dbb44890..0c154e8bc4f5 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -197,6 +197,7 @@ config S390 select HAVE_RSEQ select HAVE_SAMPLE_FTRACE_DIRECT select HAVE_SAMPLE_FTRACE_DIRECT_MULTI + select HAVE_SETUP_PER_CPU_AREA select HAVE_SOFTIRQ_ON_OWN_STACK select HAVE_SYSCALL_TRACEPOINTS select HAVE_VIRT_CPU_ACCOUNTING @@ -208,6 +209,7 @@ config S390 select MMU_GATHER_MERGE_VMAS select MODULES_USE_ELF_RELA select NEED_DMA_MAP_STATE if PCI + select NEED_PER_CPU_EMBED_FIRST_CHUNK select NEED_SG_DMA_LENGTH if PCI select OLD_SIGACTION select OLD_SIGSUSPEND3 diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 97d66a3e60fb..a28832eefb06 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include @@ -222,6 +223,41 @@ unsigned long memory_block_size_bytes(void) return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp.rzm); } +unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; +EXPORT_SYMBOL(__per_cpu_offset); + +static int __init pcpu_cpu_distance(unsigned int from, unsigned int to) +{ + return LOCAL_DISTANCE; +} + +static int __init pcpu_cpu_to_node(int cpu) +{ + return 0; +} + +void __init setup_per_cpu_areas(void) +{ + unsigned long delta; + unsigned int cpu; + int rc; + + /* + * Always reserve area for module percpu variables. That's + * what the legacy allocator did. 
+ */ + rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, + PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, + pcpu_cpu_distance, + pcpu_cpu_to_node); + if (rc < 0) + panic("Failed to initialize percpu areas."); + + delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; + for_each_possible_cpu(cpu) + __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; +} + #ifdef CONFIG_MEMORY_HOTPLUG #ifdef CONFIG_CMA -- cgit v1.2.3 From d9b25bdf57e4ab2a3084f9649318738ac9c006d3 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Tue, 1 Nov 2022 22:14:14 +0100 Subject: s390/ipl: Use kstrtobool() instead of strtobool() strtobool() is the same as kstrtobool(). However, the latter is more used within the kernel. In order to remove strtobool() and slightly simplify kstrtox.h, switch to the other function name. While at it, include the corresponding header file () Link: https://lore.kernel.org/all/cover.1667336095.git.christophe.jaillet@wanadoo.fr/ Signed-off-by: Christophe JAILLET Signed-off-by: Alexander Gordeev --- arch/s390/kernel/ipl.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 325cbf69ebbd..cdd575d7a91a 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -779,7 +780,7 @@ static ssize_t reipl_fcp_clear_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t len) { - if (strtobool(buf, &reipl_fcp_clear) < 0) + if (kstrtobool(buf, &reipl_fcp_clear) < 0) return -EINVAL; return len; } @@ -899,7 +900,7 @@ static ssize_t reipl_nvme_clear_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t len) { - if (strtobool(buf, &reipl_nvme_clear) < 0) + if (kstrtobool(buf, &reipl_nvme_clear) < 0) return -EINVAL; return len; } @@ -952,7 +953,7 @@ static ssize_t reipl_ccw_clear_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t len) { - if (strtobool(buf, &reipl_ccw_clear) < 0) + if (kstrtobool(buf, &reipl_ccw_clear) < 0) return -EINVAL; return len; } -- cgit v1.2.3 From b9ea48e9e8c62bfce0953d843689bec2777d26c8 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 12 Nov 2022 19:47:52 +0100 Subject: s390/pci: Use irq_data_get_msi_desc() No point in doing another lookup of irq_data, it's already provided as an argument. 
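For reference, the change boils down to using the accessor that takes the irq_data the
callback already receives, instead of resolving the irq number back to its descriptor
first. A minimal sketch (hypothetical helper names; headers assumed to be <linux/irq.h>
and <linux/msi.h>):

static struct msi_desc *get_desc_via_lookup(struct irq_data *data)
{
	return irq_get_msi_desc(data->irq);	/* extra descriptor lookup by irq number */
}

static struct msi_desc *get_desc_direct(struct irq_data *data)
{
	return irq_data_get_msi_desc(data);	/* uses the irq_data already at hand */
}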
Signed-off-by: Thomas Gleixner Reviewed-by: Alexander Gordeev Reviewed-by: Niklas Schnelle Signed-off-by: Niklas Schnelle Link: https://lore.kernel.org/linux-s390/8735aoui07.ffs@tglx/ [agordeev@linux.ibm.com added Link tag] Signed-off-by: Alexander Gordeev --- arch/s390/pci/pci_irq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/pci/pci_irq.c b/arch/s390/pci/pci_irq.c index a2b42a63a53b..4ab0cf829999 100644 --- a/arch/s390/pci/pci_irq.c +++ b/arch/s390/pci/pci_irq.c @@ -132,7 +132,7 @@ static int zpci_clear_irq(struct zpci_dev *zdev) static int zpci_set_irq_affinity(struct irq_data *data, const struct cpumask *dest, bool force) { - struct msi_desc *entry = irq_get_msi_desc(data->irq); + struct msi_desc *entry = irq_data_get_msi_desc(data); struct msi_msg msg = entry->msg; int cpu_addr = smp_cpu_get_cpu_address(cpumask_first(dest)); -- cgit v1.2.3 From a78c2e31f6c769d7746989273cf9348944cf504d Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 18 Nov 2022 15:21:02 +0100 Subject: s390/debug: remove function type cast clang warns about an incompatible function type cast: CC arch/s390/kernel/debug.o arch/s390/kernel/debug.c:142:2: error: cast from 'int (*)(debug_info_t *, struct debug_view *, char *, debug_sprintf_entry_t *)' (aka 'int (*)(struct debug_info *, struct debug_view *, char *, debug_sprintf_entry_t *)') to 'debug_format_proc_t *' (aka 'int (*)(struct debug_info *, struct debug_view *, char *, const char *)') converts to incompatible function type [-Werror,-Wcast-function-type-strict] (debug_format_proc_t *)&debug_sprintf_format_fn, ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Get rid of this warning by changing debug_sprintf_format_fn() so it matches the debug_format_proc_t function type, and do the cast of the last parameter within the function itself. This is the standard way of handling such cases anyway. 
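The same pattern as a self-contained userspace example (hypothetical names): keep the
callback's prototype identical to the typedef and cast the opaque data pointer inside the
function, rather than casting the function pointer at registration time.

#include <stdio.h>

struct event { int id; };

typedef int (*format_fn_t)(char *out, const char *inbuf);

/* Prototype matches format_fn_t exactly -> no function-pointer cast needed. */
static int event_format(char *out, const char *inbuf)
{
	const struct event *ev = (const struct event *)inbuf;	/* cast the data instead */

	return sprintf(out, "event %d", ev->id);
}

static format_fn_t view_format = event_format;	/* assignment is now warning-free */

int main(void)
{
	char buf[32];
	struct event ev = { .id = 7 };

	view_format(buf, (const char *)&ev);
	puts(buf);
	return 0;
}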
Signed-off-by: Heiko Carstens Signed-off-by: Alexander Gordeev --- arch/s390/kernel/debug.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c index d7a82066a638..b376f0377a2c 100644 --- a/arch/s390/kernel/debug.c +++ b/arch/s390/kernel/debug.c @@ -92,7 +92,7 @@ static int debug_input_flush_fn(debug_info_t *id, struct debug_view *view, static int debug_hex_ascii_format_fn(debug_info_t *id, struct debug_view *view, char *out_buf, const char *in_buf); static int debug_sprintf_format_fn(debug_info_t *id, struct debug_view *view, - char *out_buf, debug_sprintf_entry_t *curr_event); + char *out_buf, const char *inbuf); static void debug_areas_swap(debug_info_t *a, debug_info_t *b); static void debug_events_append(debug_info_t *dest, debug_info_t *src); @@ -139,7 +139,7 @@ struct debug_view debug_sprintf_view = { "sprintf", NULL, &debug_dflt_header_fn, - (debug_format_proc_t *)&debug_sprintf_format_fn, + &debug_sprintf_format_fn, NULL, NULL }; @@ -1532,8 +1532,9 @@ EXPORT_SYMBOL(debug_dflt_header_fn); #define DEBUG_SPRINTF_MAX_ARGS 10 static int debug_sprintf_format_fn(debug_info_t *id, struct debug_view *view, - char *out_buf, debug_sprintf_entry_t *curr_event) + char *out_buf, const char *inbuf) { + debug_sprintf_entry_t *curr_event = (debug_sprintf_entry_t *)inbuf; int num_longs, num_used_args = 0, i, rc = 0; int index[DEBUG_SPRINTF_MAX_ARGS]; -- cgit v1.2.3 From 12a6c2c182c1160c46500d80c2f8922cd2747f41 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 18 Nov 2022 15:21:49 +0100 Subject: s390/hypfs: remove unused info_blk_hdr__pcpus() function Remove unused info_blk_hdr__pcpus() function: arch/s390/hypfs/hypfs_diag.c:71:21: error: unused function 'info_blk_hdr__pcpus' [-Werror,-Wunused-function] Signed-off-by: Heiko Carstens Signed-off-by: Alexander Gordeev --- arch/s390/hypfs/hypfs_diag.c | 8 -------- 1 file changed, 8 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c index 6511d15ace45..c3be533c4cd3 100644 --- a/arch/s390/hypfs/hypfs_diag.c +++ b/arch/s390/hypfs/hypfs_diag.c @@ -68,14 +68,6 @@ static inline __u8 info_blk_hdr__flags(enum diag204_format type, void *hdr) return ((struct diag204_x_info_blk_hdr *)hdr)->flags; } -static inline __u16 info_blk_hdr__pcpus(enum diag204_format type, void *hdr) -{ - if (type == DIAG204_INFO_SIMPLE) - return ((struct diag204_info_blk_hdr *)hdr)->phys_cpus; - else /* DIAG204_INFO_EXT */ - return ((struct diag204_x_info_blk_hdr *)hdr)->phys_cpus; -} - /* Partition header */ static inline int part_hdr__size(enum diag204_format type) -- cgit v1.2.3 From 17ca7df6fcc44470199b66752b8d8459e17aa077 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 18 Nov 2022 15:22:14 +0100 Subject: s390/mm: remove unused get_page_state() function Remove unused get_page_state() function: arch/s390/mm/page-states.c:61:29: error: unused function 'get_page_state' [-Werror,-Wunused-function] Signed-off-by: Heiko Carstens Signed-off-by: Alexander Gordeev --- arch/s390/mm/page-states.c | 11 ----------- 1 file changed, 11 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c index d5ea09d78938..1e2ea706aa22 100644 --- a/arch/s390/mm/page-states.c +++ b/arch/s390/mm/page-states.c @@ -58,17 +58,6 @@ void __init cmma_init(void) cmma_flag = 2; } -static inline unsigned char get_page_state(struct page *page) -{ - unsigned char state; - - asm volatile(" .insn 
rrf,0xb9ab0000,%0,%1,%2,0" - : "=&d" (state) - : "a" (page_to_phys(page)), - "i" (ESSA_GET_STATE)); - return state & 0x3f; -} - static inline void set_page_unused(struct page *page, int order) { int i, rc; -- cgit v1.2.3 From 9a435b7bc911d971490f0ce37aa0c6a1308722f4 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 18 Nov 2022 15:22:51 +0100 Subject: s390/kprobes: define insn cache ops within private header file clang warns about an unused insn cache ops function: arch/s390/kernel/kprobes.c:34:1: error: unused function 'is_kprobe_s390_insn_slot' [-Werror,-Wunused-function] DEFINE_INSN_CACHE_OPS(s390_insn); ^ ./include/linux/kprobes.h:335:20: note: expanded from macro 'DEFINE_INSN_CACHE_OPS' static inline bool is_kprobe_##__name##_slot(unsigned long addr) \ ^ :88:1: note: expanded from here is_kprobe_s390_insn_slot ^ Move the definition to a private header file, which is also similar to the generic insn cache ops. Signed-off-by: Heiko Carstens Signed-off-by: Alexander Gordeev --- arch/s390/kernel/kprobes.c | 3 +-- arch/s390/kernel/kprobes.h | 9 +++++++++ 2 files changed, 10 insertions(+), 2 deletions(-) create mode 100644 arch/s390/kernel/kprobes.h (limited to 'arch/s390') diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index 0032bdbe8e3f..401f9c933ff9 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c @@ -24,6 +24,7 @@ #include #include #include +#include "kprobes.h" #include "entry.h" DEFINE_PER_CPU(struct kprobe *, current_kprobe); @@ -31,8 +32,6 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); struct kretprobe_blackpoint kretprobe_blacklist[] = { }; -DEFINE_INSN_CACHE_OPS(s390_insn); - static int insn_page_in_use; void *alloc_insn_page(void) diff --git a/arch/s390/kernel/kprobes.h b/arch/s390/kernel/kprobes.h new file mode 100644 index 000000000000..dc3ed5098ee7 --- /dev/null +++ b/arch/s390/kernel/kprobes.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +#ifndef _ARCH_S390_KPROBES_H +#define _ARCH_S390_KPROBES_H + +#include + +DEFINE_INSN_CACHE_OPS(s390_insn); + +#endif -- cgit v1.2.3 From 78c045c08a393285e615260c50cedf9cea5a7fa1 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Tue, 29 Nov 2022 17:31:45 +0100 Subject: s390/appldata: remove power management callbacks Support for power managemant has been removed from s390 since quite some time. Therefore remove unused power managemant code from the appldata device driver. Reviewed-by: Gerald Schaefer Signed-off-by: Heiko Carstens Signed-off-by: Alexander Gordeev --- arch/s390/appldata/appldata_base.c | 113 +------------------------------------ 1 file changed, 2 insertions(+), 111 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c index d74a4c7d5df6..c0fd29133f27 100644 --- a/arch/s390/appldata/appldata_base.c +++ b/arch/s390/appldata/appldata_base.c @@ -26,8 +26,6 @@ #include #include #include -#include -#include #include #include #include @@ -44,8 +42,6 @@ #define TOD_MICRO 0x01000 /* nr. 
of TOD clock units for 1 microsecond */ -static struct platform_device *appldata_pdev; - /* * /proc entries (sysctl) */ @@ -88,7 +84,6 @@ static struct vtimer_list appldata_timer; static DEFINE_SPINLOCK(appldata_timer_lock); static int appldata_interval = APPLDATA_CPU_INTERVAL; static int appldata_timer_active; -static int appldata_timer_suspended = 0; /* * Work queue @@ -412,88 +407,6 @@ void appldata_unregister_ops(struct appldata_ops *ops) /********************** module-ops management **************************/ -/**************************** suspend / resume *******************************/ -static int appldata_freeze(struct device *dev) -{ - struct appldata_ops *ops; - int rc; - struct list_head *lh; - - spin_lock(&appldata_timer_lock); - if (appldata_timer_active) { - __appldata_vtimer_setup(APPLDATA_DEL_TIMER); - appldata_timer_suspended = 1; - } - spin_unlock(&appldata_timer_lock); - - mutex_lock(&appldata_ops_mutex); - list_for_each(lh, &appldata_ops_list) { - ops = list_entry(lh, struct appldata_ops, list); - if (ops->active == 1) { - rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC, - (unsigned long) ops->data, ops->size, - ops->mod_lvl); - if (rc != 0) - pr_err("Stopping the data collection for %s " - "failed with rc=%d\n", ops->name, rc); - } - } - mutex_unlock(&appldata_ops_mutex); - return 0; -} - -static int appldata_restore(struct device *dev) -{ - struct appldata_ops *ops; - int rc; - struct list_head *lh; - - spin_lock(&appldata_timer_lock); - if (appldata_timer_suspended) { - __appldata_vtimer_setup(APPLDATA_ADD_TIMER); - appldata_timer_suspended = 0; - } - spin_unlock(&appldata_timer_lock); - - mutex_lock(&appldata_ops_mutex); - list_for_each(lh, &appldata_ops_list) { - ops = list_entry(lh, struct appldata_ops, list); - if (ops->active == 1) { - ops->callback(ops->data); // init record - rc = appldata_diag(ops->record_nr, - APPLDATA_START_INTERVAL_REC, - (unsigned long) ops->data, ops->size, - ops->mod_lvl); - if (rc != 0) { - pr_err("Starting the data collection for %s " - "failed with rc=%d\n", ops->name, rc); - } - } - } - mutex_unlock(&appldata_ops_mutex); - return 0; -} - -static int appldata_thaw(struct device *dev) -{ - return appldata_restore(dev); -} - -static const struct dev_pm_ops appldata_pm_ops = { - .freeze = appldata_freeze, - .thaw = appldata_thaw, - .restore = appldata_restore, -}; - -static struct platform_driver appldata_pdrv = { - .driver = { - .name = "appldata", - .pm = &appldata_pm_ops, - }, -}; -/************************* suspend / resume ****************************/ - - /******************************* init / exit *********************************/ /* @@ -503,36 +416,14 @@ static struct platform_driver appldata_pdrv = { */ static int __init appldata_init(void) { - int rc; - init_virt_timer(&appldata_timer); appldata_timer.function = appldata_timer_function; appldata_timer.data = (unsigned long) &appldata_work; - - rc = platform_driver_register(&appldata_pdrv); - if (rc) - return rc; - - appldata_pdev = platform_device_register_simple("appldata", -1, NULL, - 0); - if (IS_ERR(appldata_pdev)) { - rc = PTR_ERR(appldata_pdev); - goto out_driver; - } appldata_wq = alloc_ordered_workqueue("appldata", 0); - if (!appldata_wq) { - rc = -ENOMEM; - goto out_device; - } - + if (!appldata_wq) + return -ENOMEM; appldata_sysctl_header = register_sysctl_table(appldata_dir_table); return 0; - -out_device: - platform_device_unregister(appldata_pdev); -out_driver: - platform_driver_unregister(&appldata_pdrv); - return rc; } __initcall(appldata_init); -- cgit 
v1.2.3 From dfe843dce775f16e3d15a1bf14e5363bff2321f3 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 30 Nov 2022 10:40:34 +0100 Subject: s390/checksum: support GENERIC_CSUM, enable it for KASAN This is the s390 variant of commit d911c67e10b4 ("x86: kasan: kmsan: support CONFIG_GENERIC_CSUM on x86, enable it for KASAN/KMSAN"). Even though most of the s390 specific checksum code is written in C there is still the csum_partial() inline assembly which could prevent KASAN and KMSAN from seeing all memory accesses. Therefore switch to GENERIC_CSUM if KASAN is enabled just like x86. Reviewed-by: Vasily Gorbik Signed-off-by: Heiko Carstens Signed-off-by: Alexander Gordeev --- arch/s390/Kconfig | 4 ++++ arch/s390/include/asm/checksum.h | 7 +++++++ 2 files changed, 11 insertions(+) (limited to 'arch/s390') diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 0c154e8bc4f5..72d20d3f35cd 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -26,6 +26,10 @@ config GENERIC_BUG config GENERIC_BUG_RELATIVE_POINTERS def_bool y +config GENERIC_CSUM + bool + default y if KASAN + config GENERIC_LOCKBREAK def_bool y if PREEMPTION diff --git a/arch/s390/include/asm/checksum.h b/arch/s390/include/asm/checksum.h index cdd19d326345..d977a3a2f619 100644 --- a/arch/s390/include/asm/checksum.h +++ b/arch/s390/include/asm/checksum.h @@ -12,6 +12,12 @@ #ifndef _S390_CHECKSUM_H #define _S390_CHECKSUM_H +#ifdef CONFIG_GENERIC_CSUM + +#include + +#else /* CONFIG_GENERIC_CSUM */ + #include #include @@ -129,4 +135,5 @@ static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr, return csum_fold((__force __wsum)(sum >> 32)); } +#endif /* CONFIG_GENERIC_CSUM */ #endif /* _S390_CHECKSUM_H */ -- cgit v1.2.3 From 87fd22e0ae9239f695266d3181b53ad9f758bd74 Mon Sep 17 00:00:00 2001 From: Sven Schnelle Date: Wed, 5 Oct 2022 10:17:40 +0200 Subject: s390/ipl: add eckd support This adds support to IPL from ECKD DASDs to linux. It introduces a few sysfs files in /sys/firmware/reipl/eckd: bootprog: the boot program selector clear: whether to issue a diag308 LOAD_NORMAL or LOAD_CLEAR device: the device to ipl from br_chr: Cylinder/Head/Record number to read the bootrecord from. Might be '0' or 'auto' if it should be read from the volume label. scpdata: data to be passed to the ipl'd program. The new ipl type is called 'eckd'. 
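A condensed sketch of how the new parameter block is populated, mirroring the
reipl_eckd_init() hunk further down (illustration only; it relies on the definitions this
patch adds to asm/ipl.h and uapi/asm/ipl.h):

static void init_eckd_reipl_block(struct ipl_parameter_block *ipb)
{
	ipb->hdr.len     = IPL_BP_ECKD_LEN;
	ipb->hdr.version = IPL_PARM_BLOCK_VERSION;
	ipb->eckd.len    = IPL_BP0_ECKD_LEN;
	ipb->eckd.pbt    = IPL_PBT_ECKD;
	ipb->eckd.opt    = IPL_PB0_ECKD_OPT_IPL;
	/*
	 * device, bootprog and br_chr are filled in later through the new
	 * sysfs files; a br_chr of 0/0/0 means "auto", i.e. the boot record
	 * location is taken from the volume label.
	 */
}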
Signed-off-by: Sven Schnelle Reviewed-by: Vasily Gorbik Signed-off-by: Alexander Gordeev --- arch/s390/boot/ipl_parm.c | 6 + arch/s390/include/asm/ipl.h | 9 ++ arch/s390/include/asm/sclp.h | 1 + arch/s390/include/uapi/asm/ipl.h | 28 ++++ arch/s390/kernel/ipl.c | 282 +++++++++++++++++++++++++++++++++++++++ drivers/s390/char/sclp_early.c | 4 +- 6 files changed, 329 insertions(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/boot/ipl_parm.c b/arch/s390/boot/ipl_parm.c index ca78d6162245..c358f51ed3e5 100644 --- a/arch/s390/boot/ipl_parm.c +++ b/arch/s390/boot/ipl_parm.c @@ -108,6 +108,11 @@ static size_t ipl_block_get_ascii_scpdata(char *dest, size_t size, scp_data_len = ipb->nvme.scp_data_len; scp_data = ipb->nvme.scp_data; break; + case IPL_PBT_ECKD: + scp_data_len = ipb->eckd.scp_data_len; + scp_data = ipb->eckd.scp_data; + break; + default: goto out; } @@ -153,6 +158,7 @@ static void append_ipl_block_parm(void) break; case IPL_PBT_FCP: case IPL_PBT_NVME: + case IPL_PBT_ECKD: rc = ipl_block_get_ascii_scpdata( parm, COMMAND_LINE_SIZE - len - 1, &ipl_block); break; diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h index a405b6bb89fb..1396ed05c6aa 100644 --- a/arch/s390/include/asm/ipl.h +++ b/arch/s390/include/asm/ipl.h @@ -22,6 +22,7 @@ struct ipl_parameter_block { struct ipl_pb0_common common; struct ipl_pb0_fcp fcp; struct ipl_pb0_ccw ccw; + struct ipl_pb0_eckd eckd; struct ipl_pb0_nvme nvme; char raw[PAGE_SIZE - sizeof(struct ipl_pl_hdr)]; }; @@ -41,6 +42,10 @@ struct ipl_parameter_block { sizeof(struct ipl_pb0_ccw)) #define IPL_BP0_CCW_LEN (sizeof(struct ipl_pb0_ccw)) +#define IPL_BP_ECKD_LEN (sizeof(struct ipl_pl_hdr) + \ + sizeof(struct ipl_pb0_eckd)) +#define IPL_BP0_ECKD_LEN (sizeof(struct ipl_pb0_eckd)) + #define IPL_MAX_SUPPORTED_VERSION (0) #define IPL_RB_CERT_UNKNOWN ((unsigned short)-1) @@ -68,6 +73,7 @@ enum ipl_type { IPL_TYPE_NSS = 16, IPL_TYPE_NVME = 32, IPL_TYPE_NVME_DUMP = 64, + IPL_TYPE_ECKD = 128, }; struct ipl_info @@ -77,6 +83,9 @@ struct ipl_info struct { struct ccw_dev_id dev_id; } ccw; + struct { + struct ccw_dev_id dev_id; + } eckd; struct { struct ccw_dev_id dev_id; u64 wwpn; diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h index 9d4c7f71e070..67a24dda17b6 100644 --- a/arch/s390/include/asm/sclp.h +++ b/arch/s390/include/asm/sclp.h @@ -87,6 +87,7 @@ struct sclp_info { unsigned char has_gisaf : 1; unsigned char has_diag318 : 1; unsigned char has_sipl : 1; + unsigned char has_sipl_eckd : 1; unsigned char has_dirq : 1; unsigned char has_iplcc : 1; unsigned char has_zpci_lsi : 1; diff --git a/arch/s390/include/uapi/asm/ipl.h b/arch/s390/include/uapi/asm/ipl.h index d1ecd5d722a0..3eb71a594590 100644 --- a/arch/s390/include/uapi/asm/ipl.h +++ b/arch/s390/include/uapi/asm/ipl.h @@ -27,6 +27,7 @@ enum ipl_pbt { IPL_PBT_FCP = 0, IPL_PBT_SCP_DATA = 1, IPL_PBT_CCW = 2, + IPL_PBT_ECKD = 3, IPL_PBT_NVME = 4, }; @@ -111,6 +112,33 @@ struct ipl_pb0_ccw { __u8 reserved5[8]; } __packed; +/* IPL Parameter Block 0 for ECKD */ +struct ipl_pb0_eckd { + __u32 len; + __u8 pbt; + __u8 reserved1[3]; + __u32 reserved2[78]; + __u8 opt; + __u8 reserved4[4]; + __u8 reserved5:5; + __u8 ssid:3; + __u16 devno; + __u32 reserved6[5]; + __u32 bootprog; + __u8 reserved7[12]; + struct { + __u16 cyl; + __u8 head; + __u8 record; + __u32 reserved; + } br_chr __packed; + __u32 scp_data_len; + __u8 reserved8[260]; + __u8 scp_data[]; +} __packed; + +#define IPL_PB0_ECKD_OPT_IPL 0x10 + #define IPL_PB0_CCW_VM_FLAG_NSS 0x80 #define 
IPL_PB0_CCW_VM_FLAG_VP 0x40 diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index cdd575d7a91a..7d8ddd8b79d5 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -40,6 +40,7 @@ #define IPL_UNKNOWN_STR "unknown" #define IPL_CCW_STR "ccw" +#define IPL_ECKD_STR "eckd" #define IPL_FCP_STR "fcp" #define IPL_FCP_DUMP_STR "fcp_dump" #define IPL_NVME_STR "nvme" @@ -93,6 +94,8 @@ static char *ipl_type_str(enum ipl_type type) switch (type) { case IPL_TYPE_CCW: return IPL_CCW_STR; + case IPL_TYPE_ECKD: + return IPL_ECKD_STR; case IPL_TYPE_FCP: return IPL_FCP_STR; case IPL_TYPE_FCP_DUMP: @@ -148,6 +151,7 @@ static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN; static struct ipl_parameter_block *reipl_block_fcp; static struct ipl_parameter_block *reipl_block_nvme; static struct ipl_parameter_block *reipl_block_ccw; +static struct ipl_parameter_block *reipl_block_eckd; static struct ipl_parameter_block *reipl_block_nss; static struct ipl_parameter_block *reipl_block_actual; @@ -162,6 +166,7 @@ static struct sclp_ipl_info sclp_ipl_info; static bool reipl_nvme_clear; static bool reipl_fcp_clear; static bool reipl_ccw_clear; +static bool reipl_eckd_clear; static inline int __diag308(unsigned long subcode, void *addr) { @@ -282,6 +287,8 @@ static __init enum ipl_type get_ipl_type(void) return IPL_TYPE_NVME_DUMP; else return IPL_TYPE_NVME; + case IPL_PBT_ECKD: + return IPL_TYPE_ECKD; } return IPL_TYPE_UNKNOWN; } @@ -335,6 +342,9 @@ static ssize_t sys_ipl_device_show(struct kobject *kobj, case IPL_TYPE_CCW: return sprintf(page, "0.%x.%04x\n", ipl_block.ccw.ssid, ipl_block.ccw.devno); + case IPL_TYPE_ECKD: + return sprintf(page, "0.%x.%04x\n", ipl_block.eckd.ssid, + ipl_block.eckd.devno); case IPL_TYPE_FCP: case IPL_TYPE_FCP_DUMP: return sprintf(page, "0.0.%04x\n", ipl_block.fcp.devno); @@ -380,12 +390,25 @@ static ssize_t ipl_nvme_scp_data_read(struct file *filp, struct kobject *kobj, return memory_read_from_buffer(buf, count, &off, scp_data, size); } +static ssize_t ipl_eckd_scp_data_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t off, size_t count) +{ + unsigned int size = ipl_block.eckd.scp_data_len; + void *scp_data = &ipl_block.eckd.scp_data; + + return memory_read_from_buffer(buf, count, &off, scp_data, size); +} + static struct bin_attribute ipl_scp_data_attr = __BIN_ATTR(scp_data, S_IRUGO, ipl_scp_data_read, NULL, PAGE_SIZE); static struct bin_attribute ipl_nvme_scp_data_attr = __BIN_ATTR(scp_data, S_IRUGO, ipl_nvme_scp_data_read, NULL, PAGE_SIZE); +static struct bin_attribute ipl_eckd_scp_data_attr = + __BIN_ATTR(scp_data, S_IRUGO, ipl_eckd_scp_data_read, NULL, PAGE_SIZE); + static struct bin_attribute *ipl_fcp_bin_attrs[] = { &ipl_parameter_attr, &ipl_scp_data_attr, @@ -398,6 +421,12 @@ static struct bin_attribute *ipl_nvme_bin_attrs[] = { NULL, }; +static struct bin_attribute *ipl_eckd_bin_attrs[] = { + &ipl_parameter_attr, + &ipl_eckd_scp_data_attr, + NULL, +}; + /* FCP ipl device attributes */ DEFINE_IPL_ATTR_RO(ipl_fcp, wwpn, "0x%016llx\n", @@ -419,6 +448,88 @@ DEFINE_IPL_ATTR_RO(ipl_nvme, bootprog, "%lld\n", DEFINE_IPL_ATTR_RO(ipl_nvme, br_lba, "%lld\n", (unsigned long long)ipl_block.nvme.br_lba); +/* ECKD ipl device attributes */ +DEFINE_IPL_ATTR_RO(ipl_eckd, bootprog, "%lld\n", + (unsigned long long)ipl_block.eckd.bootprog); + +#define IPL_ATTR_BR_CHR_SHOW_FN(_name, _ipb) \ +static ssize_t eckd_##_name##_br_chr_show(struct kobject *kobj, \ + struct kobj_attribute *attr, \ + char *buf) \ +{ \ + struct ipl_pb0_eckd *ipb = 
&(_ipb); \ + \ + if (!ipb->br_chr.cyl && \ + !ipb->br_chr.head && \ + !ipb->br_chr.record) \ + return sprintf(buf, "auto\n"); \ + \ + return sprintf(buf, "0x%x,0x%x,0x%x\n", \ + ipb->br_chr.cyl, \ + ipb->br_chr.head, \ + ipb->br_chr.record); \ +} + +#define IPL_ATTR_BR_CHR_STORE_FN(_name, _ipb) \ +static ssize_t eckd_##_name##_br_chr_store(struct kobject *kobj, \ + struct kobj_attribute *attr, \ + const char *buf, size_t len) \ +{ \ + struct ipl_pb0_eckd *ipb = &(_ipb); \ + unsigned long args[3] = { 0 }; \ + char *p, *p1, *tmp = NULL; \ + int i, rc; \ + \ + if (!strncmp(buf, "auto", 4)) \ + goto out; \ + \ + tmp = kstrdup(buf, GFP_KERNEL); \ + p = tmp; \ + for (i = 0; i < 3; i++) { \ + p1 = strsep(&p, ", "); \ + if (!p1) { \ + rc = -EINVAL; \ + goto err; \ + } \ + rc = kstrtoul(p1, 0, args + i); \ + if (rc) \ + goto err; \ + } \ + \ + rc = -EINVAL; \ + if (i != 3) \ + goto err; \ + \ + if ((args[0] || args[1]) && !args[2]) \ + goto err; \ + \ + if (args[0] > UINT_MAX || args[1] > 255 || args[2] > 255) \ + goto err; \ + \ +out: \ + ipb->br_chr.cyl = args[0]; \ + ipb->br_chr.head = args[1]; \ + ipb->br_chr.record = args[2]; \ + rc = len; \ +err: \ + kfree(tmp); \ + return rc; \ +} + +IPL_ATTR_BR_CHR_SHOW_FN(ipl, ipl_block.eckd); +static struct kobj_attribute sys_ipl_eckd_br_chr_attr = + __ATTR(br_chr, (S_IRUGO | S_IWUSR), + eckd_ipl_br_chr_show, + NULL); + +IPL_ATTR_BR_CHR_SHOW_FN(reipl, reipl_block_eckd->eckd); +IPL_ATTR_BR_CHR_STORE_FN(reipl, reipl_block_eckd->eckd); + +static struct kobj_attribute sys_reipl_eckd_br_chr_attr = + __ATTR(br_chr, (S_IRUGO | S_IWUSR), + eckd_reipl_br_chr_show, + eckd_reipl_br_chr_store); + static ssize_t ipl_ccw_loadparm_show(struct kobject *kobj, struct kobj_attribute *attr, char *page) { @@ -470,6 +581,20 @@ static struct attribute_group ipl_nvme_attr_group = { .bin_attrs = ipl_nvme_bin_attrs, }; +static struct attribute *ipl_eckd_attrs[] = { + &sys_ipl_type_attr.attr, + &sys_ipl_eckd_bootprog_attr.attr, + &sys_ipl_eckd_br_chr_attr.attr, + &sys_ipl_device_attr.attr, + &sys_ipl_secure_attr.attr, + &sys_ipl_has_secure_attr.attr, + NULL, +}; + +static struct attribute_group ipl_eckd_attr_group = { + .attrs = ipl_eckd_attrs, + .bin_attrs = ipl_eckd_bin_attrs, +}; /* CCW ipl device attributes */ @@ -542,6 +667,9 @@ static int __init ipl_init(void) rc = sysfs_create_group(&ipl_kset->kobj, &ipl_ccw_attr_group_lpar); break; + case IPL_TYPE_ECKD: + rc = sysfs_create_group(&ipl_kset->kobj, &ipl_eckd_attr_group); + break; case IPL_TYPE_FCP: case IPL_TYPE_FCP_DUMP: rc = sysfs_create_group(&ipl_kset->kobj, &ipl_fcp_attr_group); @@ -986,6 +1114,85 @@ static struct attribute_group reipl_ccw_attr_group_lpar = { .attrs = reipl_ccw_attrs_lpar, }; +/* ECKD reipl device attributes */ + +static ssize_t reipl_eckd_scpdata_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + size_t size = reipl_block_eckd->eckd.scp_data_len; + void *scp_data = reipl_block_eckd->eckd.scp_data; + + return memory_read_from_buffer(buf, count, &off, scp_data, size); +} + +static ssize_t reipl_eckd_scpdata_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + size_t scpdata_len = count; + size_t padding; + + if (off) + return -EINVAL; + + memcpy(reipl_block_eckd->eckd.scp_data, buf, count); + if (scpdata_len % 8) { + padding = 8 - (scpdata_len % 8); + memset(reipl_block_eckd->eckd.scp_data + scpdata_len, + 0, padding); + scpdata_len += padding; + } + + 
reipl_block_eckd->hdr.len = IPL_BP_ECKD_LEN + scpdata_len; + reipl_block_eckd->eckd.len = IPL_BP0_ECKD_LEN + scpdata_len; + reipl_block_eckd->eckd.scp_data_len = scpdata_len; + + return count; +} + +static struct bin_attribute sys_reipl_eckd_scp_data_attr = + __BIN_ATTR(scp_data, (S_IRUGO | S_IWUSR), reipl_eckd_scpdata_read, + reipl_eckd_scpdata_write, DIAG308_SCPDATA_SIZE); + +static struct bin_attribute *reipl_eckd_bin_attrs[] = { + &sys_reipl_eckd_scp_data_attr, + NULL, +}; + +DEFINE_IPL_CCW_ATTR_RW(reipl_eckd, device, reipl_block_eckd->eckd); +DEFINE_IPL_ATTR_RW(reipl_eckd, bootprog, "%lld\n", "%lld\n", + reipl_block_eckd->eckd.bootprog); + +static struct attribute *reipl_eckd_attrs[] = { + &sys_reipl_eckd_device_attr.attr, + &sys_reipl_eckd_bootprog_attr.attr, + &sys_reipl_eckd_br_chr_attr.attr, + NULL, +}; + +static struct attribute_group reipl_eckd_attr_group = { + .attrs = reipl_eckd_attrs, + .bin_attrs = reipl_eckd_bin_attrs +}; + +static ssize_t reipl_eckd_clear_show(struct kobject *kobj, + struct kobj_attribute *attr, char *page) +{ + return sprintf(page, "%u\n", reipl_eckd_clear); +} + +static ssize_t reipl_eckd_clear_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t len) +{ + if (strtobool(buf, &reipl_eckd_clear) < 0) + return -EINVAL; + return len; +} + +static struct kobj_attribute sys_reipl_eckd_clear_attr = + __ATTR(clear, 0644, reipl_eckd_clear_show, reipl_eckd_clear_store); /* NSS reipl device attributes */ static void reipl_get_ascii_nss_name(char *dst, @@ -1069,6 +1276,9 @@ static int reipl_set_type(enum ipl_type type) case IPL_TYPE_CCW: reipl_block_actual = reipl_block_ccw; break; + case IPL_TYPE_ECKD: + reipl_block_actual = reipl_block_eckd; + break; case IPL_TYPE_FCP: reipl_block_actual = reipl_block_fcp; break; @@ -1099,6 +1309,8 @@ static ssize_t reipl_type_store(struct kobject *kobj, if (strncmp(buf, IPL_CCW_STR, strlen(IPL_CCW_STR)) == 0) rc = reipl_set_type(IPL_TYPE_CCW); + else if (strncmp(buf, IPL_ECKD_STR, strlen(IPL_ECKD_STR)) == 0) + rc = reipl_set_type(IPL_TYPE_ECKD); else if (strncmp(buf, IPL_FCP_STR, strlen(IPL_FCP_STR)) == 0) rc = reipl_set_type(IPL_TYPE_FCP); else if (strncmp(buf, IPL_NVME_STR, strlen(IPL_NVME_STR)) == 0) @@ -1114,6 +1326,7 @@ static struct kobj_attribute reipl_type_attr = static struct kset *reipl_kset; static struct kset *reipl_fcp_kset; static struct kset *reipl_nvme_kset; +static struct kset *reipl_eckd_kset; static void __reipl_run(void *unused) { @@ -1125,6 +1338,13 @@ static void __reipl_run(void *unused) else diag308(DIAG308_LOAD_NORMAL_DUMP, NULL); break; + case IPL_TYPE_ECKD: + diag308(DIAG308_SET, reipl_block_eckd); + if (reipl_eckd_clear) + diag308(DIAG308_LOAD_CLEAR, NULL); + else + diag308(DIAG308_LOAD_NORMAL, NULL); + break; case IPL_TYPE_FCP: diag308(DIAG308_SET, reipl_block_fcp); if (reipl_fcp_clear) @@ -1345,6 +1565,58 @@ out1: return rc; } +static int __init reipl_eckd_init(void) +{ + int rc; + + if (!sclp.has_sipl_eckd) + return 0; + + reipl_block_eckd = (void *)get_zeroed_page(GFP_KERNEL); + if (!reipl_block_eckd) + return -ENOMEM; + + /* sysfs: create kset for mixing attr group and bin attrs */ + reipl_eckd_kset = kset_create_and_add(IPL_ECKD_STR, NULL, + &reipl_kset->kobj); + if (!reipl_eckd_kset) { + free_page((unsigned long)reipl_block_eckd); + return -ENOMEM; + } + + rc = sysfs_create_group(&reipl_eckd_kset->kobj, &reipl_eckd_attr_group); + if (rc) + goto out1; + + if (test_facility(141)) { + rc = sysfs_create_file(&reipl_eckd_kset->kobj, + &sys_reipl_eckd_clear_attr.attr); 
+ if (rc) + goto out2; + } else { + reipl_eckd_clear = true; + } + + if (ipl_info.type == IPL_TYPE_ECKD) { + memcpy(reipl_block_eckd, &ipl_block, sizeof(ipl_block)); + } else { + reipl_block_eckd->hdr.len = IPL_BP_ECKD_LEN; + reipl_block_eckd->hdr.version = IPL_PARM_BLOCK_VERSION; + reipl_block_eckd->eckd.len = IPL_BP0_ECKD_LEN; + reipl_block_eckd->eckd.pbt = IPL_PBT_ECKD; + reipl_block_eckd->eckd.opt = IPL_PB0_ECKD_OPT_IPL; + } + reipl_capabilities |= IPL_TYPE_ECKD; + return 0; + +out2: + sysfs_remove_group(&reipl_eckd_kset->kobj, &reipl_eckd_attr_group); +out1: + kset_unregister(reipl_eckd_kset); + free_page((unsigned long)reipl_block_eckd); + return rc; +} + static int __init reipl_type_init(void) { enum ipl_type reipl_type = ipl_info.type; @@ -1366,6 +1638,9 @@ static int __init reipl_type_init(void) } else if (reipl_block->pb0_hdr.pbt == IPL_PBT_CCW) { memcpy(reipl_block_ccw, reipl_block, size); reipl_type = IPL_TYPE_CCW; + } else if (reipl_block->pb0_hdr.pbt == IPL_PBT_ECKD) { + memcpy(reipl_block_eckd, reipl_block, size); + reipl_type = IPL_TYPE_ECKD; } out: return reipl_set_type(reipl_type); @@ -1384,6 +1659,9 @@ static int __init reipl_init(void) return rc; } rc = reipl_ccw_init(); + if (rc) + return rc; + rc = reipl_eckd_init(); if (rc) return rc; rc = reipl_fcp_init(); @@ -2058,6 +2336,10 @@ void __init setup_ipl(void) ipl_info.data.ccw.dev_id.ssid = ipl_block.ccw.ssid; ipl_info.data.ccw.dev_id.devno = ipl_block.ccw.devno; break; + case IPL_TYPE_ECKD: + ipl_info.data.eckd.dev_id.ssid = ipl_block.eckd.ssid; + ipl_info.data.eckd.dev_id.devno = ipl_block.eckd.devno; + break; case IPL_TYPE_FCP: case IPL_TYPE_FCP_DUMP: ipl_info.data.fcp.dev_id.ssid = 0; diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c index d15b0d541de3..c1c70a161c0e 100644 --- a/drivers/s390/char/sclp_early.c +++ b/drivers/s390/char/sclp_early.c @@ -57,8 +57,10 @@ static void __init sclp_early_facilities_detect(void) sclp.has_diag318 = !!(sccb->byte_134 & 0x80); sclp.has_iplcc = !!(sccb->byte_134 & 0x02); } - if (sccb->cpuoff > 137) + if (sccb->cpuoff > 137) { sclp.has_sipl = !!(sccb->cbl & 0x4000); + sclp.has_sipl_eckd = !!(sccb->cbl & 0x2000); + } sclp.rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2; sclp.rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2; sclp.rzm <<= 20; -- cgit v1.2.3 From e2d2a2968f2abe1b8215fd99bfc68d6284d51ac2 Mon Sep 17 00:00:00 2001 From: Sven Schnelle Date: Wed, 5 Oct 2022 10:17:41 +0200 Subject: s390/ipl: add eckd dump support This adds support to use ECKD disks as dump device to linux. The new dump type is called 'eckd_dump', parameters are the same as for eckd ipl. 
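The dump case reuses the ECKD parameter block and is distinguished from a regular IPL
only by the opt field; a minimal sketch of that decision, mirroring the get_ipl_type()
hunk below (illustration only, using the definitions from asm/ipl.h):

static enum ipl_type eckd_block_type(const struct ipl_parameter_block *ipb)
{
	if (ipb->eckd.opt == IPL_PB0_ECKD_OPT_DUMP)
		return IPL_TYPE_ECKD_DUMP;	/* shows up as "eckd_dump" */
	return IPL_TYPE_ECKD;			/* regular "eckd" IPL */
}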
Signed-off-by: Sven Schnelle Reviewed-by: Vasily Gorbik Signed-off-by: Alexander Gordeev --- arch/s390/boot/ipl_parm.c | 3 ++ arch/s390/include/asm/ipl.h | 2 ++ arch/s390/include/uapi/asm/ipl.h | 1 + arch/s390/kernel/ipl.c | 72 +++++++++++++++++++++++++++++++++++++++- drivers/s390/char/zcore.c | 4 +++ 5 files changed, 81 insertions(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/boot/ipl_parm.c b/arch/s390/boot/ipl_parm.c index c358f51ed3e5..c1f8f7999fed 100644 --- a/arch/s390/boot/ipl_parm.c +++ b/arch/s390/boot/ipl_parm.c @@ -77,6 +77,9 @@ bool is_ipl_block_dump(void) if (ipl_block.pb0_hdr.pbt == IPL_PBT_NVME && ipl_block.nvme.opt == IPL_PB0_NVME_OPT_DUMP) return true; + if (ipl_block.pb0_hdr.pbt == IPL_PBT_ECKD && + ipl_block.eckd.opt == IPL_PB0_ECKD_OPT_DUMP) + return true; return false; } diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h index 1396ed05c6aa..b0d00032479d 100644 --- a/arch/s390/include/asm/ipl.h +++ b/arch/s390/include/asm/ipl.h @@ -74,6 +74,7 @@ enum ipl_type { IPL_TYPE_NVME = 32, IPL_TYPE_NVME_DUMP = 64, IPL_TYPE_ECKD = 128, + IPL_TYPE_ECKD_DUMP = 256, }; struct ipl_info @@ -108,6 +109,7 @@ extern void set_os_info_reipl_block(void); static inline bool is_ipl_type_dump(void) { return (ipl_info.type == IPL_TYPE_FCP_DUMP) || + (ipl_info.type == IPL_TYPE_ECKD_DUMP) || (ipl_info.type == IPL_TYPE_NVME_DUMP); } diff --git a/arch/s390/include/uapi/asm/ipl.h b/arch/s390/include/uapi/asm/ipl.h index 3eb71a594590..2cd28af50dd4 100644 --- a/arch/s390/include/uapi/asm/ipl.h +++ b/arch/s390/include/uapi/asm/ipl.h @@ -138,6 +138,7 @@ struct ipl_pb0_eckd { } __packed; #define IPL_PB0_ECKD_OPT_IPL 0x10 +#define IPL_PB0_ECKD_OPT_DUMP 0x20 #define IPL_PB0_CCW_VM_FLAG_NSS 0x80 #define IPL_PB0_CCW_VM_FLAG_VP 0x40 diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 7d8ddd8b79d5..b8d4f075e4ad 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -41,6 +41,7 @@ #define IPL_UNKNOWN_STR "unknown" #define IPL_CCW_STR "ccw" #define IPL_ECKD_STR "eckd" +#define IPL_ECKD_DUMP_STR "eckd_dump" #define IPL_FCP_STR "fcp" #define IPL_FCP_DUMP_STR "fcp_dump" #define IPL_NVME_STR "nvme" @@ -48,6 +49,7 @@ #define IPL_NSS_STR "nss" #define DUMP_CCW_STR "ccw" +#define DUMP_ECKD_STR "eckd" #define DUMP_FCP_STR "fcp" #define DUMP_NVME_STR "nvme" #define DUMP_NONE_STR "none" @@ -96,6 +98,8 @@ static char *ipl_type_str(enum ipl_type type) return IPL_CCW_STR; case IPL_TYPE_ECKD: return IPL_ECKD_STR; + case IPL_TYPE_ECKD_DUMP: + return IPL_ECKD_DUMP_STR; case IPL_TYPE_FCP: return IPL_FCP_STR; case IPL_TYPE_FCP_DUMP: @@ -117,6 +121,7 @@ enum dump_type { DUMP_TYPE_CCW = 2, DUMP_TYPE_FCP = 4, DUMP_TYPE_NVME = 8, + DUMP_TYPE_ECKD = 16, }; static char *dump_type_str(enum dump_type type) @@ -126,6 +131,8 @@ static char *dump_type_str(enum dump_type type) return DUMP_NONE_STR; case DUMP_TYPE_CCW: return DUMP_CCW_STR; + case DUMP_TYPE_ECKD: + return DUMP_ECKD_STR; case DUMP_TYPE_FCP: return DUMP_FCP_STR; case DUMP_TYPE_NVME: @@ -160,6 +167,7 @@ static enum dump_type dump_type = DUMP_TYPE_NONE; static struct ipl_parameter_block *dump_block_fcp; static struct ipl_parameter_block *dump_block_nvme; static struct ipl_parameter_block *dump_block_ccw; +static struct ipl_parameter_block *dump_block_eckd; static struct sclp_ipl_info sclp_ipl_info; @@ -288,7 +296,10 @@ static __init enum ipl_type get_ipl_type(void) else return IPL_TYPE_NVME; case IPL_PBT_ECKD: - return IPL_TYPE_ECKD; + if (ipl_block.eckd.opt == IPL_PB0_ECKD_OPT_DUMP) + return IPL_TYPE_ECKD_DUMP; + 
else + return IPL_TYPE_ECKD; } return IPL_TYPE_UNKNOWN; } @@ -343,6 +354,7 @@ static ssize_t sys_ipl_device_show(struct kobject *kobj, return sprintf(page, "0.%x.%04x\n", ipl_block.ccw.ssid, ipl_block.ccw.devno); case IPL_TYPE_ECKD: + case IPL_TYPE_ECKD_DUMP: return sprintf(page, "0.%x.%04x\n", ipl_block.eckd.ssid, ipl_block.eckd.devno); case IPL_TYPE_FCP: @@ -1368,6 +1380,7 @@ static void __reipl_run(void *unused) break; case IPL_TYPE_FCP_DUMP: case IPL_TYPE_NVME_DUMP: + case IPL_TYPE_ECKD_DUMP: break; } disabled_wait(); @@ -1736,6 +1749,31 @@ static struct attribute_group dump_nvme_attr_group = { .attrs = dump_nvme_attrs, }; +/* ECKD dump device attributes */ +DEFINE_IPL_CCW_ATTR_RW(dump_eckd, device, dump_block_eckd->eckd); +DEFINE_IPL_ATTR_RW(dump_eckd, bootprog, "%lld\n", "%llx\n", + dump_block_eckd->eckd.bootprog); + +IPL_ATTR_BR_CHR_SHOW_FN(dump, dump_block_eckd->eckd); +IPL_ATTR_BR_CHR_STORE_FN(dump, dump_block_eckd->eckd); + +static struct kobj_attribute sys_dump_eckd_br_chr_attr = + __ATTR(br_chr, (S_IRUGO | S_IWUSR), + eckd_dump_br_chr_show, + eckd_dump_br_chr_store); + +static struct attribute *dump_eckd_attrs[] = { + &sys_dump_eckd_device_attr.attr, + &sys_dump_eckd_bootprog_attr.attr, + &sys_dump_eckd_br_chr_attr.attr, + NULL, +}; + +static struct attribute_group dump_eckd_attr_group = { + .name = IPL_ECKD_STR, + .attrs = dump_eckd_attrs, +}; + /* CCW dump device attributes */ DEFINE_IPL_CCW_ATTR_RW(dump_ccw, device, dump_block_ccw->ccw); @@ -1775,6 +1813,8 @@ static ssize_t dump_type_store(struct kobject *kobj, rc = dump_set_type(DUMP_TYPE_NONE); else if (strncmp(buf, DUMP_CCW_STR, strlen(DUMP_CCW_STR)) == 0) rc = dump_set_type(DUMP_TYPE_CCW); + else if (strncmp(buf, DUMP_ECKD_STR, strlen(DUMP_ECKD_STR)) == 0) + rc = dump_set_type(DUMP_TYPE_ECKD); else if (strncmp(buf, DUMP_FCP_STR, strlen(DUMP_FCP_STR)) == 0) rc = dump_set_type(DUMP_TYPE_FCP); else if (strncmp(buf, DUMP_NVME_STR, strlen(DUMP_NVME_STR)) == 0) @@ -1803,6 +1843,9 @@ static void __dump_run(void *unused) case DUMP_TYPE_CCW: diag308_dump(dump_block_ccw); break; + case DUMP_TYPE_ECKD: + diag308_dump(dump_block_eckd); + break; case DUMP_TYPE_FCP: diag308_dump(dump_block_fcp); break; @@ -1888,6 +1931,29 @@ static int __init dump_nvme_init(void) return 0; } +static int __init dump_eckd_init(void) +{ + int rc; + + if (!sclp_ipl_info.has_dump || !sclp.has_sipl_eckd) + return 0; /* LDIPL DUMP is not installed */ + dump_block_eckd = (void *)get_zeroed_page(GFP_KERNEL); + if (!dump_block_eckd) + return -ENOMEM; + rc = sysfs_create_group(&dump_kset->kobj, &dump_eckd_attr_group); + if (rc) { + free_page((unsigned long)dump_block_eckd); + return rc; + } + dump_block_eckd->hdr.len = IPL_BP_ECKD_LEN; + dump_block_eckd->hdr.version = IPL_PARM_BLOCK_VERSION; + dump_block_eckd->eckd.len = IPL_BP0_ECKD_LEN; + dump_block_eckd->eckd.pbt = IPL_PBT_ECKD; + dump_block_eckd->eckd.opt = IPL_PB0_ECKD_OPT_DUMP; + dump_capabilities |= DUMP_TYPE_ECKD; + return 0; +} + static int __init dump_init(void) { int rc; @@ -1901,6 +1967,9 @@ static int __init dump_init(void) return rc; } rc = dump_ccw_init(); + if (rc) + return rc; + rc = dump_eckd_init(); if (rc) return rc; rc = dump_fcp_init(); @@ -2337,6 +2406,7 @@ void __init setup_ipl(void) ipl_info.data.ccw.dev_id.devno = ipl_block.ccw.devno; break; case IPL_TYPE_ECKD: + case IPL_TYPE_ECKD_DUMP: ipl_info.data.eckd.dev_id.ssid = ipl_block.eckd.ssid; ipl_info.data.eckd.dev_id.devno = ipl_block.eckd.devno; break; diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index 
6165e6aae762..b30670ca6e5d 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c @@ -282,6 +282,10 @@ static int __init zcore_init(void) TRACE("type: nvme\n"); TRACE("fid: %x\n", ipl_info.data.nvme.fid); TRACE("nsid: %x\n", ipl_info.data.nvme.nsid); + } else if (ipl_info.type == IPL_TYPE_ECKD_DUMP) { + TRACE("type: eckd\n"); + TRACE("devno: %x\n", ipl_info.data.eckd.dev_id.devno); + TRACE("ssid: %x\n", ipl_info.data.eckd.dev_id.ssid); } rc = sclp_sdias_init(); -- cgit v1.2.3 From a70f72767fa17d76c48d1123213dbe631556ec06 Mon Sep 17 00:00:00 2001 From: Sven Schnelle Date: Thu, 1 Dec 2022 16:05:43 +0100 Subject: s390/ipl: use octal values instead of S_* macros octal values are easier to read and checkpatch also recommends to use them, so replace all the S_* macros with their counterparts. Signed-off-by: Sven Schnelle Reviewed-by: Heiko Carstens Signed-off-by: Alexander Gordeev --- arch/s390/kernel/ipl.c | 66 +++++++++++++++++++++++--------------------------- 1 file changed, 30 insertions(+), 36 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index b8d4f075e4ad..fbd646dbf440 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -232,14 +232,14 @@ IPL_ATTR_SHOW_FN(_prefix, _name, "0.%x.%04x\n", \ _ipl_blk.ssid, _ipl_blk.devno); \ IPL_ATTR_CCW_STORE_FN(_prefix, _name, _ipl_blk); \ static struct kobj_attribute sys_##_prefix##_##_name##_attr = \ - __ATTR(_name, (S_IRUGO | S_IWUSR), \ + __ATTR(_name, 0644, \ sys_##_prefix##_##_name##_show, \ sys_##_prefix##_##_name##_store) \ #define DEFINE_IPL_ATTR_RO(_prefix, _name, _format, _value) \ IPL_ATTR_SHOW_FN(_prefix, _name, _format, _value) \ static struct kobj_attribute sys_##_prefix##_##_name##_attr = \ - __ATTR(_name, S_IRUGO, sys_##_prefix##_##_name##_show, NULL) + __ATTR(_name, 0444, sys_##_prefix##_##_name##_show, NULL) #define DEFINE_IPL_ATTR_RW(_prefix, _name, _fmt_out, _fmt_in, _value) \ IPL_ATTR_SHOW_FN(_prefix, _name, _fmt_out, (unsigned long long) _value) \ @@ -254,7 +254,7 @@ static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \ return len; \ } \ static struct kobj_attribute sys_##_prefix##_##_name##_attr = \ - __ATTR(_name,(S_IRUGO | S_IWUSR), \ + __ATTR(_name, 0644, \ sys_##_prefix##_##_name##_show, \ sys_##_prefix##_##_name##_store) @@ -269,7 +269,7 @@ static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \ return len; \ } \ static struct kobj_attribute sys_##_prefix##_##_name##_attr = \ - __ATTR(_name,(S_IRUGO | S_IWUSR), \ + __ATTR(_name, 0644, \ sys_##_prefix##_##_name##_show, \ sys_##_prefix##_##_name##_store) @@ -344,7 +344,7 @@ static ssize_t ipl_vm_parm_show(struct kobject *kobj, } static struct kobj_attribute sys_ipl_vm_parm_attr = - __ATTR(parm, S_IRUGO, ipl_vm_parm_show, NULL); + __ATTR(parm, 0444, ipl_vm_parm_show, NULL); static ssize_t sys_ipl_device_show(struct kobject *kobj, struct kobj_attribute *attr, char *page) @@ -369,7 +369,7 @@ static ssize_t sys_ipl_device_show(struct kobject *kobj, } static struct kobj_attribute sys_ipl_device_attr = - __ATTR(device, S_IRUGO, sys_ipl_device_show, NULL); + __ATTR(device, 0444, sys_ipl_device_show, NULL); static ssize_t ipl_parameter_read(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, @@ -379,7 +379,7 @@ static ssize_t ipl_parameter_read(struct file *filp, struct kobject *kobj, ipl_block.hdr.len); } static struct bin_attribute ipl_parameter_attr = - __BIN_ATTR(binary_parameter, S_IRUGO, ipl_parameter_read, NULL, + 
__BIN_ATTR(binary_parameter, 0444, ipl_parameter_read, NULL, PAGE_SIZE); static ssize_t ipl_scp_data_read(struct file *filp, struct kobject *kobj, @@ -413,13 +413,13 @@ static ssize_t ipl_eckd_scp_data_read(struct file *filp, struct kobject *kobj, } static struct bin_attribute ipl_scp_data_attr = - __BIN_ATTR(scp_data, S_IRUGO, ipl_scp_data_read, NULL, PAGE_SIZE); + __BIN_ATTR(scp_data, 0444, ipl_scp_data_read, NULL, PAGE_SIZE); static struct bin_attribute ipl_nvme_scp_data_attr = - __BIN_ATTR(scp_data, S_IRUGO, ipl_nvme_scp_data_read, NULL, PAGE_SIZE); + __BIN_ATTR(scp_data, 0444, ipl_nvme_scp_data_read, NULL, PAGE_SIZE); static struct bin_attribute ipl_eckd_scp_data_attr = - __BIN_ATTR(scp_data, S_IRUGO, ipl_eckd_scp_data_read, NULL, PAGE_SIZE); + __BIN_ATTR(scp_data, 0444, ipl_eckd_scp_data_read, NULL, PAGE_SIZE); static struct bin_attribute *ipl_fcp_bin_attrs[] = { &ipl_parameter_attr, @@ -530,17 +530,13 @@ err: \ IPL_ATTR_BR_CHR_SHOW_FN(ipl, ipl_block.eckd); static struct kobj_attribute sys_ipl_eckd_br_chr_attr = - __ATTR(br_chr, (S_IRUGO | S_IWUSR), - eckd_ipl_br_chr_show, - NULL); + __ATTR(br_chr, 0644, eckd_ipl_br_chr_show, NULL); IPL_ATTR_BR_CHR_SHOW_FN(reipl, reipl_block_eckd->eckd); IPL_ATTR_BR_CHR_STORE_FN(reipl, reipl_block_eckd->eckd); static struct kobj_attribute sys_reipl_eckd_br_chr_attr = - __ATTR(br_chr, (S_IRUGO | S_IWUSR), - eckd_reipl_br_chr_show, - eckd_reipl_br_chr_store); + __ATTR(br_chr, 0644, eckd_reipl_br_chr_show, eckd_reipl_br_chr_store); static ssize_t ipl_ccw_loadparm_show(struct kobject *kobj, struct kobj_attribute *attr, char *page) @@ -783,11 +779,11 @@ static ssize_t reipl_ccw_vmparm_store(struct kobject *kobj, } static struct kobj_attribute sys_reipl_nss_vmparm_attr = - __ATTR(parm, S_IRUGO | S_IWUSR, reipl_nss_vmparm_show, - reipl_nss_vmparm_store); + __ATTR(parm, 0644, reipl_nss_vmparm_show, + reipl_nss_vmparm_store); static struct kobj_attribute sys_reipl_ccw_vmparm_attr = - __ATTR(parm, S_IRUGO | S_IWUSR, reipl_ccw_vmparm_show, - reipl_ccw_vmparm_store); + __ATTR(parm, 0644, reipl_ccw_vmparm_show, + reipl_ccw_vmparm_store); /* FCP reipl device attributes */ @@ -827,7 +823,7 @@ static ssize_t reipl_fcp_scpdata_write(struct file *filp, struct kobject *kobj, return count; } static struct bin_attribute sys_reipl_fcp_scp_data_attr = - __BIN_ATTR(scp_data, (S_IRUGO | S_IWUSR), reipl_fcp_scpdata_read, + __BIN_ATTR(scp_data, 0644, reipl_fcp_scpdata_read, reipl_fcp_scpdata_write, DIAG308_SCPDATA_SIZE); static struct bin_attribute *reipl_fcp_bin_attrs[] = { @@ -907,8 +903,8 @@ static ssize_t reipl_fcp_loadparm_store(struct kobject *kobj, } static struct kobj_attribute sys_reipl_fcp_loadparm_attr = - __ATTR(loadparm, S_IRUGO | S_IWUSR, reipl_fcp_loadparm_show, - reipl_fcp_loadparm_store); + __ATTR(loadparm, 0644, reipl_fcp_loadparm_show, + reipl_fcp_loadparm_store); static ssize_t reipl_fcp_clear_show(struct kobject *kobj, struct kobj_attribute *attr, char *page) @@ -981,7 +977,7 @@ static ssize_t reipl_nvme_scpdata_write(struct file *filp, struct kobject *kobj, } static struct bin_attribute sys_reipl_nvme_scp_data_attr = - __BIN_ATTR(scp_data, (S_IRUGO | S_IWUSR), reipl_nvme_scpdata_read, + __BIN_ATTR(scp_data, 0644, reipl_nvme_scpdata_read, reipl_nvme_scpdata_write, DIAG308_SCPDATA_SIZE); static struct bin_attribute *reipl_nvme_bin_attrs[] = { @@ -1013,8 +1009,8 @@ static ssize_t reipl_nvme_loadparm_store(struct kobject *kobj, } static struct kobj_attribute sys_reipl_nvme_loadparm_attr = - __ATTR(loadparm, S_IRUGO | S_IWUSR, reipl_nvme_loadparm_show, - 
reipl_nvme_loadparm_store); + __ATTR(loadparm, 0644, reipl_nvme_loadparm_show, + reipl_nvme_loadparm_store); static struct attribute *reipl_nvme_attrs[] = { &sys_reipl_nvme_fid_attr.attr, @@ -1080,8 +1076,8 @@ static ssize_t reipl_ccw_loadparm_store(struct kobject *kobj, } static struct kobj_attribute sys_reipl_ccw_loadparm_attr = - __ATTR(loadparm, S_IRUGO | S_IWUSR, reipl_ccw_loadparm_show, - reipl_ccw_loadparm_store); + __ATTR(loadparm, 0644, reipl_ccw_loadparm_show, + reipl_ccw_loadparm_store); static ssize_t reipl_ccw_clear_show(struct kobject *kobj, struct kobj_attribute *attr, char *page) @@ -1164,7 +1160,7 @@ static ssize_t reipl_eckd_scpdata_write(struct file *filp, struct kobject *kobj, } static struct bin_attribute sys_reipl_eckd_scp_data_attr = - __BIN_ATTR(scp_data, (S_IRUGO | S_IWUSR), reipl_eckd_scpdata_read, + __BIN_ATTR(scp_data, 0644, reipl_eckd_scpdata_read, reipl_eckd_scpdata_write, DIAG308_SCPDATA_SIZE); static struct bin_attribute *reipl_eckd_bin_attrs[] = { @@ -1252,12 +1248,12 @@ static ssize_t reipl_nss_name_store(struct kobject *kobj, } static struct kobj_attribute sys_reipl_nss_name_attr = - __ATTR(name, S_IRUGO | S_IWUSR, reipl_nss_name_show, - reipl_nss_name_store); + __ATTR(name, 0644, reipl_nss_name_show, + reipl_nss_name_store); static struct kobj_attribute sys_reipl_nss_loadparm_attr = - __ATTR(loadparm, S_IRUGO | S_IWUSR, reipl_nss_loadparm_show, - reipl_nss_loadparm_store); + __ATTR(loadparm, 0644, reipl_nss_loadparm_show, + reipl_nss_loadparm_store); static struct attribute *reipl_nss_attrs[] = { &sys_reipl_nss_name_attr.attr, @@ -1758,9 +1754,7 @@ IPL_ATTR_BR_CHR_SHOW_FN(dump, dump_block_eckd->eckd); IPL_ATTR_BR_CHR_STORE_FN(dump, dump_block_eckd->eckd); static struct kobj_attribute sys_dump_eckd_br_chr_attr = - __ATTR(br_chr, (S_IRUGO | S_IWUSR), - eckd_dump_br_chr_show, - eckd_dump_br_chr_store); + __ATTR(br_chr, 0644, eckd_dump_br_chr_show, eckd_dump_br_chr_store); static struct attribute *dump_eckd_attrs[] = { &sys_dump_eckd_device_attr.attr, -- cgit v1.2.3 From 706f2ada822280a1f8f64bbe03ec5362ef46dd78 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Thu, 1 Dec 2022 17:24:54 +0100 Subject: s390/vx: add vx-insn.h wrapper include file The vector instruction macros can also be used in inline assemblies. For this the magic asm(".include \"asm/vx-insn.h\"\n"); must be added to C files in order to avoid that the pre-processor eliminates the __ASSEMBLY__ guarded macros. This however comes with the problem that changes to asm/vx-insn.h do not cause a recompile of C files which have only this magic statement instead of a proper include statement. This can be observed with the arch/s390/kernel/fpu.c file. In order to fix this problem and also to avoid that the include must be specified twice, add a wrapper include header file which will do all necessary steps. This way only the vx-insn.h header file needs to be included and changes to the new vx-insn-asm.h header file cause a recompile of all dependent files like it should. 
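To make the dependency problem concrete, here is a minimal before/after sketch of what a C translation unit does under the old and the new scheme. This is illustrative only, not part of the patch; note that the include targets in this dump appear to have lost their angle-bracket arguments, and the wrapper include is presumably <asm/vx-insn.h>.

	/* before: the macros are pulled in behind the preprocessor's back, so
	 * a change to asm/vx-insn.h does not trigger a rebuild of this file
	 */
	asm(".include \"asm/vx-insn.h\"\n");

	/* after: one ordinary include of the wrapper header; for C files the
	 * wrapper emits the asm .include itself and the dependency is tracked
	 */
	#include <asm/vx-insn.h>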
Signed-off-by: Heiko Carstens Signed-off-by: Alexander Gordeev --- arch/s390/include/asm/vx-insn-asm.h | 681 ++++++++++++++++++++++++++++++++++++ arch/s390/include/asm/vx-insn.h | 671 +---------------------------------- arch/s390/kernel/fpu.c | 3 +- lib/raid6/s390vx.uc | 3 +- 4 files changed, 689 insertions(+), 669 deletions(-) create mode 100644 arch/s390/include/asm/vx-insn-asm.h (limited to 'arch/s390') diff --git a/arch/s390/include/asm/vx-insn-asm.h b/arch/s390/include/asm/vx-insn-asm.h new file mode 100644 index 000000000000..360f8b36d962 --- /dev/null +++ b/arch/s390/include/asm/vx-insn-asm.h @@ -0,0 +1,681 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Support for Vector Instructions + * + * Assembler macros to generate .byte/.word code for particular + * vector instructions that are supported by recent binutils (>= 2.26) only. + * + * Copyright IBM Corp. 2015 + * Author(s): Hendrik Brueckner + */ + +#ifndef __ASM_S390_VX_INSN_INTERNAL_H +#define __ASM_S390_VX_INSN_INTERNAL_H + +#ifndef __ASM_S390_VX_INSN_H +#error only can be included directly +#endif + +#ifdef __ASSEMBLY__ + +/* Macros to generate vector instruction byte code */ + +/* GR_NUM - Retrieve general-purpose register number + * + * @opd: Operand to store register number + * @r64: String designation register in the format "%rN" + */ +.macro GR_NUM opd gr + \opd = 255 + .ifc \gr,%r0 + \opd = 0 + .endif + .ifc \gr,%r1 + \opd = 1 + .endif + .ifc \gr,%r2 + \opd = 2 + .endif + .ifc \gr,%r3 + \opd = 3 + .endif + .ifc \gr,%r4 + \opd = 4 + .endif + .ifc \gr,%r5 + \opd = 5 + .endif + .ifc \gr,%r6 + \opd = 6 + .endif + .ifc \gr,%r7 + \opd = 7 + .endif + .ifc \gr,%r8 + \opd = 8 + .endif + .ifc \gr,%r9 + \opd = 9 + .endif + .ifc \gr,%r10 + \opd = 10 + .endif + .ifc \gr,%r11 + \opd = 11 + .endif + .ifc \gr,%r12 + \opd = 12 + .endif + .ifc \gr,%r13 + \opd = 13 + .endif + .ifc \gr,%r14 + \opd = 14 + .endif + .ifc \gr,%r15 + \opd = 15 + .endif + .if \opd == 255 + \opd = \gr + .endif +.endm + +/* VX_NUM - Retrieve vector register number + * + * @opd: Operand to store register number + * @vxr: String designation register in the format "%vN" + * + * The vector register number is used for as input number to the + * instruction and, as well as, to compute the RXB field of the + * instruction. 
+ */ +.macro VX_NUM opd vxr + \opd = 255 + .ifc \vxr,%v0 + \opd = 0 + .endif + .ifc \vxr,%v1 + \opd = 1 + .endif + .ifc \vxr,%v2 + \opd = 2 + .endif + .ifc \vxr,%v3 + \opd = 3 + .endif + .ifc \vxr,%v4 + \opd = 4 + .endif + .ifc \vxr,%v5 + \opd = 5 + .endif + .ifc \vxr,%v6 + \opd = 6 + .endif + .ifc \vxr,%v7 + \opd = 7 + .endif + .ifc \vxr,%v8 + \opd = 8 + .endif + .ifc \vxr,%v9 + \opd = 9 + .endif + .ifc \vxr,%v10 + \opd = 10 + .endif + .ifc \vxr,%v11 + \opd = 11 + .endif + .ifc \vxr,%v12 + \opd = 12 + .endif + .ifc \vxr,%v13 + \opd = 13 + .endif + .ifc \vxr,%v14 + \opd = 14 + .endif + .ifc \vxr,%v15 + \opd = 15 + .endif + .ifc \vxr,%v16 + \opd = 16 + .endif + .ifc \vxr,%v17 + \opd = 17 + .endif + .ifc \vxr,%v18 + \opd = 18 + .endif + .ifc \vxr,%v19 + \opd = 19 + .endif + .ifc \vxr,%v20 + \opd = 20 + .endif + .ifc \vxr,%v21 + \opd = 21 + .endif + .ifc \vxr,%v22 + \opd = 22 + .endif + .ifc \vxr,%v23 + \opd = 23 + .endif + .ifc \vxr,%v24 + \opd = 24 + .endif + .ifc \vxr,%v25 + \opd = 25 + .endif + .ifc \vxr,%v26 + \opd = 26 + .endif + .ifc \vxr,%v27 + \opd = 27 + .endif + .ifc \vxr,%v28 + \opd = 28 + .endif + .ifc \vxr,%v29 + \opd = 29 + .endif + .ifc \vxr,%v30 + \opd = 30 + .endif + .ifc \vxr,%v31 + \opd = 31 + .endif + .if \opd == 255 + \opd = \vxr + .endif +.endm + +/* RXB - Compute most significant bit used vector registers + * + * @rxb: Operand to store computed RXB value + * @v1: First vector register designated operand + * @v2: Second vector register designated operand + * @v3: Third vector register designated operand + * @v4: Fourth vector register designated operand + */ +.macro RXB rxb v1 v2=0 v3=0 v4=0 + \rxb = 0 + .if \v1 & 0x10 + \rxb = \rxb | 0x08 + .endif + .if \v2 & 0x10 + \rxb = \rxb | 0x04 + .endif + .if \v3 & 0x10 + \rxb = \rxb | 0x02 + .endif + .if \v4 & 0x10 + \rxb = \rxb | 0x01 + .endif +.endm + +/* MRXB - Generate Element Size Control and RXB value + * + * @m: Element size control + * @v1: First vector register designated operand (for RXB) + * @v2: Second vector register designated operand (for RXB) + * @v3: Third vector register designated operand (for RXB) + * @v4: Fourth vector register designated operand (for RXB) + */ +.macro MRXB m v1 v2=0 v3=0 v4=0 + rxb = 0 + RXB rxb, \v1, \v2, \v3, \v4 + .byte (\m << 4) | rxb +.endm + +/* MRXBOPC - Generate Element Size Control, RXB, and final Opcode fields + * + * @m: Element size control + * @opc: Opcode + * @v1: First vector register designated operand (for RXB) + * @v2: Second vector register designated operand (for RXB) + * @v3: Third vector register designated operand (for RXB) + * @v4: Fourth vector register designated operand (for RXB) + */ +.macro MRXBOPC m opc v1 v2=0 v3=0 v4=0 + MRXB \m, \v1, \v2, \v3, \v4 + .byte \opc +.endm + +/* Vector support instructions */ + +/* VECTOR GENERATE BYTE MASK */ +.macro VGBM vr imm2 + VX_NUM v1, \vr + .word (0xE700 | ((v1&15) << 4)) + .word \imm2 + MRXBOPC 0, 0x44, v1 +.endm +.macro VZERO vxr + VGBM \vxr, 0 +.endm +.macro VONE vxr + VGBM \vxr, 0xFFFF +.endm + +/* VECTOR LOAD VR ELEMENT FROM GR */ +.macro VLVG v, gr, disp, m + VX_NUM v1, \v + GR_NUM b2, "%r0" + GR_NUM r3, \gr + .word 0xE700 | ((v1&15) << 4) | r3 + .word (b2 << 12) | (\disp) + MRXBOPC \m, 0x22, v1 +.endm +.macro VLVGB v, gr, index, base + VLVG \v, \gr, \index, \base, 0 +.endm +.macro VLVGH v, gr, index + VLVG \v, \gr, \index, 1 +.endm +.macro VLVGF v, gr, index + VLVG \v, \gr, \index, 2 +.endm +.macro VLVGG v, gr, index + VLVG \v, \gr, \index, 3 +.endm + +/* VECTOR LOAD REGISTER */ +.macro VLR v1, v2 + VX_NUM v1, \v1 
+ VX_NUM v2, \v2 + .word 0xE700 | ((v1&15) << 4) | (v2&15) + .word 0 + MRXBOPC 0, 0x56, v1, v2 +.endm + +/* VECTOR LOAD */ +.macro VL v, disp, index="%r0", base + VX_NUM v1, \v + GR_NUM x2, \index + GR_NUM b2, \base + .word 0xE700 | ((v1&15) << 4) | x2 + .word (b2 << 12) | (\disp) + MRXBOPC 0, 0x06, v1 +.endm + +/* VECTOR LOAD ELEMENT */ +.macro VLEx vr1, disp, index="%r0", base, m3, opc + VX_NUM v1, \vr1 + GR_NUM x2, \index + GR_NUM b2, \base + .word 0xE700 | ((v1&15) << 4) | x2 + .word (b2 << 12) | (\disp) + MRXBOPC \m3, \opc, v1 +.endm +.macro VLEB vr1, disp, index="%r0", base, m3 + VLEx \vr1, \disp, \index, \base, \m3, 0x00 +.endm +.macro VLEH vr1, disp, index="%r0", base, m3 + VLEx \vr1, \disp, \index, \base, \m3, 0x01 +.endm +.macro VLEF vr1, disp, index="%r0", base, m3 + VLEx \vr1, \disp, \index, \base, \m3, 0x03 +.endm +.macro VLEG vr1, disp, index="%r0", base, m3 + VLEx \vr1, \disp, \index, \base, \m3, 0x02 +.endm + +/* VECTOR LOAD ELEMENT IMMEDIATE */ +.macro VLEIx vr1, imm2, m3, opc + VX_NUM v1, \vr1 + .word 0xE700 | ((v1&15) << 4) + .word \imm2 + MRXBOPC \m3, \opc, v1 +.endm +.macro VLEIB vr1, imm2, index + VLEIx \vr1, \imm2, \index, 0x40 +.endm +.macro VLEIH vr1, imm2, index + VLEIx \vr1, \imm2, \index, 0x41 +.endm +.macro VLEIF vr1, imm2, index + VLEIx \vr1, \imm2, \index, 0x43 +.endm +.macro VLEIG vr1, imm2, index + VLEIx \vr1, \imm2, \index, 0x42 +.endm + +/* VECTOR LOAD GR FROM VR ELEMENT */ +.macro VLGV gr, vr, disp, base="%r0", m + GR_NUM r1, \gr + GR_NUM b2, \base + VX_NUM v3, \vr + .word 0xE700 | (r1 << 4) | (v3&15) + .word (b2 << 12) | (\disp) + MRXBOPC \m, 0x21, v3 +.endm +.macro VLGVB gr, vr, disp, base="%r0" + VLGV \gr, \vr, \disp, \base, 0 +.endm +.macro VLGVH gr, vr, disp, base="%r0" + VLGV \gr, \vr, \disp, \base, 1 +.endm +.macro VLGVF gr, vr, disp, base="%r0" + VLGV \gr, \vr, \disp, \base, 2 +.endm +.macro VLGVG gr, vr, disp, base="%r0" + VLGV \gr, \vr, \disp, \base, 3 +.endm + +/* VECTOR LOAD MULTIPLE */ +.macro VLM vfrom, vto, disp, base, hint=3 + VX_NUM v1, \vfrom + VX_NUM v3, \vto + GR_NUM b2, \base + .word 0xE700 | ((v1&15) << 4) | (v3&15) + .word (b2 << 12) | (\disp) + MRXBOPC \hint, 0x36, v1, v3 +.endm + +/* VECTOR STORE */ +.macro VST vr1, disp, index="%r0", base + VX_NUM v1, \vr1 + GR_NUM x2, \index + GR_NUM b2, \base + .word 0xE700 | ((v1&15) << 4) | (x2&15) + .word (b2 << 12) | (\disp) + MRXBOPC 0, 0x0E, v1 +.endm + +/* VECTOR STORE MULTIPLE */ +.macro VSTM vfrom, vto, disp, base, hint=3 + VX_NUM v1, \vfrom + VX_NUM v3, \vto + GR_NUM b2, \base + .word 0xE700 | ((v1&15) << 4) | (v3&15) + .word (b2 << 12) | (\disp) + MRXBOPC \hint, 0x3E, v1, v3 +.endm + +/* VECTOR PERMUTE */ +.macro VPERM vr1, vr2, vr3, vr4 + VX_NUM v1, \vr1 + VX_NUM v2, \vr2 + VX_NUM v3, \vr3 + VX_NUM v4, \vr4 + .word 0xE700 | ((v1&15) << 4) | (v2&15) + .word ((v3&15) << 12) + MRXBOPC (v4&15), 0x8C, v1, v2, v3, v4 +.endm + +/* VECTOR UNPACK LOGICAL LOW */ +.macro VUPLL vr1, vr2, m3 + VX_NUM v1, \vr1 + VX_NUM v2, \vr2 + .word 0xE700 | ((v1&15) << 4) | (v2&15) + .word 0x0000 + MRXBOPC \m3, 0xD4, v1, v2 +.endm +.macro VUPLLB vr1, vr2 + VUPLL \vr1, \vr2, 0 +.endm +.macro VUPLLH vr1, vr2 + VUPLL \vr1, \vr2, 1 +.endm +.macro VUPLLF vr1, vr2 + VUPLL \vr1, \vr2, 2 +.endm + +/* VECTOR PERMUTE DOUBLEWORD IMMEDIATE */ +.macro VPDI vr1, vr2, vr3, m4 + VX_NUM v1, \vr1 + VX_NUM v2, \vr2 + VX_NUM v3, \vr3 + .word 0xE700 | ((v1&15) << 4) | (v2&15) + .word ((v3&15) << 12) + MRXBOPC \m4, 0x84, v1, v2, v3 +.endm + +/* VECTOR REPLICATE */ +.macro VREP vr1, vr3, imm2, m4 + VX_NUM v1, \vr1 + VX_NUM v3, 
\vr3 + .word 0xE700 | ((v1&15) << 4) | (v3&15) + .word \imm2 + MRXBOPC \m4, 0x4D, v1, v3 +.endm +.macro VREPB vr1, vr3, imm2 + VREP \vr1, \vr3, \imm2, 0 +.endm +.macro VREPH vr1, vr3, imm2 + VREP \vr1, \vr3, \imm2, 1 +.endm +.macro VREPF vr1, vr3, imm2 + VREP \vr1, \vr3, \imm2, 2 +.endm +.macro VREPG vr1, vr3, imm2 + VREP \vr1, \vr3, \imm2, 3 +.endm + +/* VECTOR MERGE HIGH */ +.macro VMRH vr1, vr2, vr3, m4 + VX_NUM v1, \vr1 + VX_NUM v2, \vr2 + VX_NUM v3, \vr3 + .word 0xE700 | ((v1&15) << 4) | (v2&15) + .word ((v3&15) << 12) + MRXBOPC \m4, 0x61, v1, v2, v3 +.endm +.macro VMRHB vr1, vr2, vr3 + VMRH \vr1, \vr2, \vr3, 0 +.endm +.macro VMRHH vr1, vr2, vr3 + VMRH \vr1, \vr2, \vr3, 1 +.endm +.macro VMRHF vr1, vr2, vr3 + VMRH \vr1, \vr2, \vr3, 2 +.endm +.macro VMRHG vr1, vr2, vr3 + VMRH \vr1, \vr2, \vr3, 3 +.endm + +/* VECTOR MERGE LOW */ +.macro VMRL vr1, vr2, vr3, m4 + VX_NUM v1, \vr1 + VX_NUM v2, \vr2 + VX_NUM v3, \vr3 + .word 0xE700 | ((v1&15) << 4) | (v2&15) + .word ((v3&15) << 12) + MRXBOPC \m4, 0x60, v1, v2, v3 +.endm +.macro VMRLB vr1, vr2, vr3 + VMRL \vr1, \vr2, \vr3, 0 +.endm +.macro VMRLH vr1, vr2, vr3 + VMRL \vr1, \vr2, \vr3, 1 +.endm +.macro VMRLF vr1, vr2, vr3 + VMRL \vr1, \vr2, \vr3, 2 +.endm +.macro VMRLG vr1, vr2, vr3 + VMRL \vr1, \vr2, \vr3, 3 +.endm + + +/* Vector integer instructions */ + +/* VECTOR AND */ +.macro VN vr1, vr2, vr3 + VX_NUM v1, \vr1 + VX_NUM v2, \vr2 + VX_NUM v3, \vr3 + .word 0xE700 | ((v1&15) << 4) | (v2&15) + .word ((v3&15) << 12) + MRXBOPC 0, 0x68, v1, v2, v3 +.endm + +/* VECTOR EXCLUSIVE OR */ +.macro VX vr1, vr2, vr3 + VX_NUM v1, \vr1 + VX_NUM v2, \vr2 + VX_NUM v3, \vr3 + .word 0xE700 | ((v1&15) << 4) | (v2&15) + .word ((v3&15) << 12) + MRXBOPC 0, 0x6D, v1, v2, v3 +.endm + +/* VECTOR GALOIS FIELD MULTIPLY SUM */ +.macro VGFM vr1, vr2, vr3, m4 + VX_NUM v1, \vr1 + VX_NUM v2, \vr2 + VX_NUM v3, \vr3 + .word 0xE700 | ((v1&15) << 4) | (v2&15) + .word ((v3&15) << 12) + MRXBOPC \m4, 0xB4, v1, v2, v3 +.endm +.macro VGFMB vr1, vr2, vr3 + VGFM \vr1, \vr2, \vr3, 0 +.endm +.macro VGFMH vr1, vr2, vr3 + VGFM \vr1, \vr2, \vr3, 1 +.endm +.macro VGFMF vr1, vr2, vr3 + VGFM \vr1, \vr2, \vr3, 2 +.endm +.macro VGFMG vr1, vr2, vr3 + VGFM \vr1, \vr2, \vr3, 3 +.endm + +/* VECTOR GALOIS FIELD MULTIPLY SUM AND ACCUMULATE */ +.macro VGFMA vr1, vr2, vr3, vr4, m5 + VX_NUM v1, \vr1 + VX_NUM v2, \vr2 + VX_NUM v3, \vr3 + VX_NUM v4, \vr4 + .word 0xE700 | ((v1&15) << 4) | (v2&15) + .word ((v3&15) << 12) | (\m5 << 8) + MRXBOPC (v4&15), 0xBC, v1, v2, v3, v4 +.endm +.macro VGFMAB vr1, vr2, vr3, vr4 + VGFMA \vr1, \vr2, \vr3, \vr4, 0 +.endm +.macro VGFMAH vr1, vr2, vr3, vr4 + VGFMA \vr1, \vr2, \vr3, \vr4, 1 +.endm +.macro VGFMAF vr1, vr2, vr3, vr4 + VGFMA \vr1, \vr2, \vr3, \vr4, 2 +.endm +.macro VGFMAG vr1, vr2, vr3, vr4 + VGFMA \vr1, \vr2, \vr3, \vr4, 3 +.endm + +/* VECTOR SHIFT RIGHT LOGICAL BY BYTE */ +.macro VSRLB vr1, vr2, vr3 + VX_NUM v1, \vr1 + VX_NUM v2, \vr2 + VX_NUM v3, \vr3 + .word 0xE700 | ((v1&15) << 4) | (v2&15) + .word ((v3&15) << 12) + MRXBOPC 0, 0x7D, v1, v2, v3 +.endm + +/* VECTOR REPLICATE IMMEDIATE */ +.macro VREPI vr1, imm2, m3 + VX_NUM v1, \vr1 + .word 0xE700 | ((v1&15) << 4) + .word \imm2 + MRXBOPC \m3, 0x45, v1 +.endm +.macro VREPIB vr1, imm2 + VREPI \vr1, \imm2, 0 +.endm +.macro VREPIH vr1, imm2 + VREPI \vr1, \imm2, 1 +.endm +.macro VREPIF vr1, imm2 + VREPI \vr1, \imm2, 2 +.endm +.macro VREPIG vr1, imm2 + VREP \vr1, \imm2, 3 +.endm + +/* VECTOR ADD */ +.macro VA vr1, vr2, vr3, m4 + VX_NUM v1, \vr1 + VX_NUM v2, \vr2 + VX_NUM v3, \vr3 + .word 0xE700 | ((v1&15) << 4) | 
(v2&15) + .word ((v3&15) << 12) + MRXBOPC \m4, 0xF3, v1, v2, v3 +.endm +.macro VAB vr1, vr2, vr3 + VA \vr1, \vr2, \vr3, 0 +.endm +.macro VAH vr1, vr2, vr3 + VA \vr1, \vr2, \vr3, 1 +.endm +.macro VAF vr1, vr2, vr3 + VA \vr1, \vr2, \vr3, 2 +.endm +.macro VAG vr1, vr2, vr3 + VA \vr1, \vr2, \vr3, 3 +.endm +.macro VAQ vr1, vr2, vr3 + VA \vr1, \vr2, \vr3, 4 +.endm + +/* VECTOR ELEMENT SHIFT RIGHT ARITHMETIC */ +.macro VESRAV vr1, vr2, vr3, m4 + VX_NUM v1, \vr1 + VX_NUM v2, \vr2 + VX_NUM v3, \vr3 + .word 0xE700 | ((v1&15) << 4) | (v2&15) + .word ((v3&15) << 12) + MRXBOPC \m4, 0x7A, v1, v2, v3 +.endm + +.macro VESRAVB vr1, vr2, vr3 + VESRAV \vr1, \vr2, \vr3, 0 +.endm +.macro VESRAVH vr1, vr2, vr3 + VESRAV \vr1, \vr2, \vr3, 1 +.endm +.macro VESRAVF vr1, vr2, vr3 + VESRAV \vr1, \vr2, \vr3, 2 +.endm +.macro VESRAVG vr1, vr2, vr3 + VESRAV \vr1, \vr2, \vr3, 3 +.endm + +/* VECTOR ELEMENT ROTATE LEFT LOGICAL */ +.macro VERLL vr1, vr3, disp, base="%r0", m4 + VX_NUM v1, \vr1 + VX_NUM v3, \vr3 + GR_NUM b2, \base + .word 0xE700 | ((v1&15) << 4) | (v3&15) + .word (b2 << 12) | (\disp) + MRXBOPC \m4, 0x33, v1, v3 +.endm +.macro VERLLB vr1, vr3, disp, base="%r0" + VERLL \vr1, \vr3, \disp, \base, 0 +.endm +.macro VERLLH vr1, vr3, disp, base="%r0" + VERLL \vr1, \vr3, \disp, \base, 1 +.endm +.macro VERLLF vr1, vr3, disp, base="%r0" + VERLL \vr1, \vr3, \disp, \base, 2 +.endm +.macro VERLLG vr1, vr3, disp, base="%r0" + VERLL \vr1, \vr3, \disp, \base, 3 +.endm + +/* VECTOR SHIFT LEFT DOUBLE BY BYTE */ +.macro VSLDB vr1, vr2, vr3, imm4 + VX_NUM v1, \vr1 + VX_NUM v2, \vr2 + VX_NUM v3, \vr3 + .word 0xE700 | ((v1&15) << 4) | (v2&15) + .word ((v3&15) << 12) | (\imm4) + MRXBOPC 0, 0x77, v1, v2, v3 +.endm + +#endif /* __ASSEMBLY__ */ +#endif /* __ASM_S390_VX_INSN_INTERNAL_H */ diff --git a/arch/s390/include/asm/vx-insn.h b/arch/s390/include/asm/vx-insn.h index 95480ed9149e..8c188f1c6d27 100644 --- a/arch/s390/include/asm/vx-insn.h +++ b/arch/s390/include/asm/vx-insn.h @@ -2,677 +2,18 @@ /* * Support for Vector Instructions * - * Assembler macros to generate .byte/.word code for particular - * vector instructions that are supported by recent binutils (>= 2.26) only. - * - * Copyright IBM Corp. 2015 - * Author(s): Hendrik Brueckner + * This wrapper header file allows to use the vector instruction macros in + * both assembler files as well as in inline assemblies in C files. 
*/ #ifndef __ASM_S390_VX_INSN_H #define __ASM_S390_VX_INSN_H -#ifdef __ASSEMBLY__ - - -/* Macros to generate vector instruction byte code */ - -/* GR_NUM - Retrieve general-purpose register number - * - * @opd: Operand to store register number - * @r64: String designation register in the format "%rN" - */ -.macro GR_NUM opd gr - \opd = 255 - .ifc \gr,%r0 - \opd = 0 - .endif - .ifc \gr,%r1 - \opd = 1 - .endif - .ifc \gr,%r2 - \opd = 2 - .endif - .ifc \gr,%r3 - \opd = 3 - .endif - .ifc \gr,%r4 - \opd = 4 - .endif - .ifc \gr,%r5 - \opd = 5 - .endif - .ifc \gr,%r6 - \opd = 6 - .endif - .ifc \gr,%r7 - \opd = 7 - .endif - .ifc \gr,%r8 - \opd = 8 - .endif - .ifc \gr,%r9 - \opd = 9 - .endif - .ifc \gr,%r10 - \opd = 10 - .endif - .ifc \gr,%r11 - \opd = 11 - .endif - .ifc \gr,%r12 - \opd = 12 - .endif - .ifc \gr,%r13 - \opd = 13 - .endif - .ifc \gr,%r14 - \opd = 14 - .endif - .ifc \gr,%r15 - \opd = 15 - .endif - .if \opd == 255 - \opd = \gr - .endif -.endm - -/* VX_NUM - Retrieve vector register number - * - * @opd: Operand to store register number - * @vxr: String designation register in the format "%vN" - * - * The vector register number is used for as input number to the - * instruction and, as well as, to compute the RXB field of the - * instruction. - */ -.macro VX_NUM opd vxr - \opd = 255 - .ifc \vxr,%v0 - \opd = 0 - .endif - .ifc \vxr,%v1 - \opd = 1 - .endif - .ifc \vxr,%v2 - \opd = 2 - .endif - .ifc \vxr,%v3 - \opd = 3 - .endif - .ifc \vxr,%v4 - \opd = 4 - .endif - .ifc \vxr,%v5 - \opd = 5 - .endif - .ifc \vxr,%v6 - \opd = 6 - .endif - .ifc \vxr,%v7 - \opd = 7 - .endif - .ifc \vxr,%v8 - \opd = 8 - .endif - .ifc \vxr,%v9 - \opd = 9 - .endif - .ifc \vxr,%v10 - \opd = 10 - .endif - .ifc \vxr,%v11 - \opd = 11 - .endif - .ifc \vxr,%v12 - \opd = 12 - .endif - .ifc \vxr,%v13 - \opd = 13 - .endif - .ifc \vxr,%v14 - \opd = 14 - .endif - .ifc \vxr,%v15 - \opd = 15 - .endif - .ifc \vxr,%v16 - \opd = 16 - .endif - .ifc \vxr,%v17 - \opd = 17 - .endif - .ifc \vxr,%v18 - \opd = 18 - .endif - .ifc \vxr,%v19 - \opd = 19 - .endif - .ifc \vxr,%v20 - \opd = 20 - .endif - .ifc \vxr,%v21 - \opd = 21 - .endif - .ifc \vxr,%v22 - \opd = 22 - .endif - .ifc \vxr,%v23 - \opd = 23 - .endif - .ifc \vxr,%v24 - \opd = 24 - .endif - .ifc \vxr,%v25 - \opd = 25 - .endif - .ifc \vxr,%v26 - \opd = 26 - .endif - .ifc \vxr,%v27 - \opd = 27 - .endif - .ifc \vxr,%v28 - \opd = 28 - .endif - .ifc \vxr,%v29 - \opd = 29 - .endif - .ifc \vxr,%v30 - \opd = 30 - .endif - .ifc \vxr,%v31 - \opd = 31 - .endif - .if \opd == 255 - \opd = \vxr - .endif -.endm - -/* RXB - Compute most significant bit used vector registers - * - * @rxb: Operand to store computed RXB value - * @v1: First vector register designated operand - * @v2: Second vector register designated operand - * @v3: Third vector register designated operand - * @v4: Fourth vector register designated operand - */ -.macro RXB rxb v1 v2=0 v3=0 v4=0 - \rxb = 0 - .if \v1 & 0x10 - \rxb = \rxb | 0x08 - .endif - .if \v2 & 0x10 - \rxb = \rxb | 0x04 - .endif - .if \v3 & 0x10 - \rxb = \rxb | 0x02 - .endif - .if \v4 & 0x10 - \rxb = \rxb | 0x01 - .endif -.endm - -/* MRXB - Generate Element Size Control and RXB value - * - * @m: Element size control - * @v1: First vector register designated operand (for RXB) - * @v2: Second vector register designated operand (for RXB) - * @v3: Third vector register designated operand (for RXB) - * @v4: Fourth vector register designated operand (for RXB) - */ -.macro MRXB m v1 v2=0 v3=0 v4=0 - rxb = 0 - RXB rxb, \v1, \v2, \v3, \v4 - .byte (\m << 4) | rxb -.endm - 
-/* MRXBOPC - Generate Element Size Control, RXB, and final Opcode fields - * - * @m: Element size control - * @opc: Opcode - * @v1: First vector register designated operand (for RXB) - * @v2: Second vector register designated operand (for RXB) - * @v3: Third vector register designated operand (for RXB) - * @v4: Fourth vector register designated operand (for RXB) - */ -.macro MRXBOPC m opc v1 v2=0 v3=0 v4=0 - MRXB \m, \v1, \v2, \v3, \v4 - .byte \opc -.endm - -/* Vector support instructions */ - -/* VECTOR GENERATE BYTE MASK */ -.macro VGBM vr imm2 - VX_NUM v1, \vr - .word (0xE700 | ((v1&15) << 4)) - .word \imm2 - MRXBOPC 0, 0x44, v1 -.endm -.macro VZERO vxr - VGBM \vxr, 0 -.endm -.macro VONE vxr - VGBM \vxr, 0xFFFF -.endm - -/* VECTOR LOAD VR ELEMENT FROM GR */ -.macro VLVG v, gr, disp, m - VX_NUM v1, \v - GR_NUM b2, "%r0" - GR_NUM r3, \gr - .word 0xE700 | ((v1&15) << 4) | r3 - .word (b2 << 12) | (\disp) - MRXBOPC \m, 0x22, v1 -.endm -.macro VLVGB v, gr, index, base - VLVG \v, \gr, \index, \base, 0 -.endm -.macro VLVGH v, gr, index - VLVG \v, \gr, \index, 1 -.endm -.macro VLVGF v, gr, index - VLVG \v, \gr, \index, 2 -.endm -.macro VLVGG v, gr, index - VLVG \v, \gr, \index, 3 -.endm - -/* VECTOR LOAD REGISTER */ -.macro VLR v1, v2 - VX_NUM v1, \v1 - VX_NUM v2, \v2 - .word 0xE700 | ((v1&15) << 4) | (v2&15) - .word 0 - MRXBOPC 0, 0x56, v1, v2 -.endm - -/* VECTOR LOAD */ -.macro VL v, disp, index="%r0", base - VX_NUM v1, \v - GR_NUM x2, \index - GR_NUM b2, \base - .word 0xE700 | ((v1&15) << 4) | x2 - .word (b2 << 12) | (\disp) - MRXBOPC 0, 0x06, v1 -.endm - -/* VECTOR LOAD ELEMENT */ -.macro VLEx vr1, disp, index="%r0", base, m3, opc - VX_NUM v1, \vr1 - GR_NUM x2, \index - GR_NUM b2, \base - .word 0xE700 | ((v1&15) << 4) | x2 - .word (b2 << 12) | (\disp) - MRXBOPC \m3, \opc, v1 -.endm -.macro VLEB vr1, disp, index="%r0", base, m3 - VLEx \vr1, \disp, \index, \base, \m3, 0x00 -.endm -.macro VLEH vr1, disp, index="%r0", base, m3 - VLEx \vr1, \disp, \index, \base, \m3, 0x01 -.endm -.macro VLEF vr1, disp, index="%r0", base, m3 - VLEx \vr1, \disp, \index, \base, \m3, 0x03 -.endm -.macro VLEG vr1, disp, index="%r0", base, m3 - VLEx \vr1, \disp, \index, \base, \m3, 0x02 -.endm - -/* VECTOR LOAD ELEMENT IMMEDIATE */ -.macro VLEIx vr1, imm2, m3, opc - VX_NUM v1, \vr1 - .word 0xE700 | ((v1&15) << 4) - .word \imm2 - MRXBOPC \m3, \opc, v1 -.endm -.macro VLEIB vr1, imm2, index - VLEIx \vr1, \imm2, \index, 0x40 -.endm -.macro VLEIH vr1, imm2, index - VLEIx \vr1, \imm2, \index, 0x41 -.endm -.macro VLEIF vr1, imm2, index - VLEIx \vr1, \imm2, \index, 0x43 -.endm -.macro VLEIG vr1, imm2, index - VLEIx \vr1, \imm2, \index, 0x42 -.endm - -/* VECTOR LOAD GR FROM VR ELEMENT */ -.macro VLGV gr, vr, disp, base="%r0", m - GR_NUM r1, \gr - GR_NUM b2, \base - VX_NUM v3, \vr - .word 0xE700 | (r1 << 4) | (v3&15) - .word (b2 << 12) | (\disp) - MRXBOPC \m, 0x21, v3 -.endm -.macro VLGVB gr, vr, disp, base="%r0" - VLGV \gr, \vr, \disp, \base, 0 -.endm -.macro VLGVH gr, vr, disp, base="%r0" - VLGV \gr, \vr, \disp, \base, 1 -.endm -.macro VLGVF gr, vr, disp, base="%r0" - VLGV \gr, \vr, \disp, \base, 2 -.endm -.macro VLGVG gr, vr, disp, base="%r0" - VLGV \gr, \vr, \disp, \base, 3 -.endm - -/* VECTOR LOAD MULTIPLE */ -.macro VLM vfrom, vto, disp, base, hint=3 - VX_NUM v1, \vfrom - VX_NUM v3, \vto - GR_NUM b2, \base - .word 0xE700 | ((v1&15) << 4) | (v3&15) - .word (b2 << 12) | (\disp) - MRXBOPC \hint, 0x36, v1, v3 -.endm - -/* VECTOR STORE */ -.macro VST vr1, disp, index="%r0", base - VX_NUM v1, \vr1 - GR_NUM x2, \index - GR_NUM 
b2, \base - .word 0xE700 | ((v1&15) << 4) | (x2&15) - .word (b2 << 12) | (\disp) - MRXBOPC 0, 0x0E, v1 -.endm - -/* VECTOR STORE MULTIPLE */ -.macro VSTM vfrom, vto, disp, base, hint=3 - VX_NUM v1, \vfrom - VX_NUM v3, \vto - GR_NUM b2, \base - .word 0xE700 | ((v1&15) << 4) | (v3&15) - .word (b2 << 12) | (\disp) - MRXBOPC \hint, 0x3E, v1, v3 -.endm - -/* VECTOR PERMUTE */ -.macro VPERM vr1, vr2, vr3, vr4 - VX_NUM v1, \vr1 - VX_NUM v2, \vr2 - VX_NUM v3, \vr3 - VX_NUM v4, \vr4 - .word 0xE700 | ((v1&15) << 4) | (v2&15) - .word ((v3&15) << 12) - MRXBOPC (v4&15), 0x8C, v1, v2, v3, v4 -.endm - -/* VECTOR UNPACK LOGICAL LOW */ -.macro VUPLL vr1, vr2, m3 - VX_NUM v1, \vr1 - VX_NUM v2, \vr2 - .word 0xE700 | ((v1&15) << 4) | (v2&15) - .word 0x0000 - MRXBOPC \m3, 0xD4, v1, v2 -.endm -.macro VUPLLB vr1, vr2 - VUPLL \vr1, \vr2, 0 -.endm -.macro VUPLLH vr1, vr2 - VUPLL \vr1, \vr2, 1 -.endm -.macro VUPLLF vr1, vr2 - VUPLL \vr1, \vr2, 2 -.endm - -/* VECTOR PERMUTE DOUBLEWORD IMMEDIATE */ -.macro VPDI vr1, vr2, vr3, m4 - VX_NUM v1, \vr1 - VX_NUM v2, \vr2 - VX_NUM v3, \vr3 - .word 0xE700 | ((v1&15) << 4) | (v2&15) - .word ((v3&15) << 12) - MRXBOPC \m4, 0x84, v1, v2, v3 -.endm - -/* VECTOR REPLICATE */ -.macro VREP vr1, vr3, imm2, m4 - VX_NUM v1, \vr1 - VX_NUM v3, \vr3 - .word 0xE700 | ((v1&15) << 4) | (v3&15) - .word \imm2 - MRXBOPC \m4, 0x4D, v1, v3 -.endm -.macro VREPB vr1, vr3, imm2 - VREP \vr1, \vr3, \imm2, 0 -.endm -.macro VREPH vr1, vr3, imm2 - VREP \vr1, \vr3, \imm2, 1 -.endm -.macro VREPF vr1, vr3, imm2 - VREP \vr1, \vr3, \imm2, 2 -.endm -.macro VREPG vr1, vr3, imm2 - VREP \vr1, \vr3, \imm2, 3 -.endm - -/* VECTOR MERGE HIGH */ -.macro VMRH vr1, vr2, vr3, m4 - VX_NUM v1, \vr1 - VX_NUM v2, \vr2 - VX_NUM v3, \vr3 - .word 0xE700 | ((v1&15) << 4) | (v2&15) - .word ((v3&15) << 12) - MRXBOPC \m4, 0x61, v1, v2, v3 -.endm -.macro VMRHB vr1, vr2, vr3 - VMRH \vr1, \vr2, \vr3, 0 -.endm -.macro VMRHH vr1, vr2, vr3 - VMRH \vr1, \vr2, \vr3, 1 -.endm -.macro VMRHF vr1, vr2, vr3 - VMRH \vr1, \vr2, \vr3, 2 -.endm -.macro VMRHG vr1, vr2, vr3 - VMRH \vr1, \vr2, \vr3, 3 -.endm - -/* VECTOR MERGE LOW */ -.macro VMRL vr1, vr2, vr3, m4 - VX_NUM v1, \vr1 - VX_NUM v2, \vr2 - VX_NUM v3, \vr3 - .word 0xE700 | ((v1&15) << 4) | (v2&15) - .word ((v3&15) << 12) - MRXBOPC \m4, 0x60, v1, v2, v3 -.endm -.macro VMRLB vr1, vr2, vr3 - VMRL \vr1, \vr2, \vr3, 0 -.endm -.macro VMRLH vr1, vr2, vr3 - VMRL \vr1, \vr2, \vr3, 1 -.endm -.macro VMRLF vr1, vr2, vr3 - VMRL \vr1, \vr2, \vr3, 2 -.endm -.macro VMRLG vr1, vr2, vr3 - VMRL \vr1, \vr2, \vr3, 3 -.endm - - -/* Vector integer instructions */ - -/* VECTOR AND */ -.macro VN vr1, vr2, vr3 - VX_NUM v1, \vr1 - VX_NUM v2, \vr2 - VX_NUM v3, \vr3 - .word 0xE700 | ((v1&15) << 4) | (v2&15) - .word ((v3&15) << 12) - MRXBOPC 0, 0x68, v1, v2, v3 -.endm - -/* VECTOR EXCLUSIVE OR */ -.macro VX vr1, vr2, vr3 - VX_NUM v1, \vr1 - VX_NUM v2, \vr2 - VX_NUM v3, \vr3 - .word 0xE700 | ((v1&15) << 4) | (v2&15) - .word ((v3&15) << 12) - MRXBOPC 0, 0x6D, v1, v2, v3 -.endm - -/* VECTOR GALOIS FIELD MULTIPLY SUM */ -.macro VGFM vr1, vr2, vr3, m4 - VX_NUM v1, \vr1 - VX_NUM v2, \vr2 - VX_NUM v3, \vr3 - .word 0xE700 | ((v1&15) << 4) | (v2&15) - .word ((v3&15) << 12) - MRXBOPC \m4, 0xB4, v1, v2, v3 -.endm -.macro VGFMB vr1, vr2, vr3 - VGFM \vr1, \vr2, \vr3, 0 -.endm -.macro VGFMH vr1, vr2, vr3 - VGFM \vr1, \vr2, \vr3, 1 -.endm -.macro VGFMF vr1, vr2, vr3 - VGFM \vr1, \vr2, \vr3, 2 -.endm -.macro VGFMG vr1, vr2, vr3 - VGFM \vr1, \vr2, \vr3, 3 -.endm - -/* VECTOR GALOIS FIELD MULTIPLY SUM AND ACCUMULATE */ -.macro VGFMA vr1, 
vr2, vr3, vr4, m5 - VX_NUM v1, \vr1 - VX_NUM v2, \vr2 - VX_NUM v3, \vr3 - VX_NUM v4, \vr4 - .word 0xE700 | ((v1&15) << 4) | (v2&15) - .word ((v3&15) << 12) | (\m5 << 8) - MRXBOPC (v4&15), 0xBC, v1, v2, v3, v4 -.endm -.macro VGFMAB vr1, vr2, vr3, vr4 - VGFMA \vr1, \vr2, \vr3, \vr4, 0 -.endm -.macro VGFMAH vr1, vr2, vr3, vr4 - VGFMA \vr1, \vr2, \vr3, \vr4, 1 -.endm -.macro VGFMAF vr1, vr2, vr3, vr4 - VGFMA \vr1, \vr2, \vr3, \vr4, 2 -.endm -.macro VGFMAG vr1, vr2, vr3, vr4 - VGFMA \vr1, \vr2, \vr3, \vr4, 3 -.endm - -/* VECTOR SHIFT RIGHT LOGICAL BY BYTE */ -.macro VSRLB vr1, vr2, vr3 - VX_NUM v1, \vr1 - VX_NUM v2, \vr2 - VX_NUM v3, \vr3 - .word 0xE700 | ((v1&15) << 4) | (v2&15) - .word ((v3&15) << 12) - MRXBOPC 0, 0x7D, v1, v2, v3 -.endm - -/* VECTOR REPLICATE IMMEDIATE */ -.macro VREPI vr1, imm2, m3 - VX_NUM v1, \vr1 - .word 0xE700 | ((v1&15) << 4) - .word \imm2 - MRXBOPC \m3, 0x45, v1 -.endm -.macro VREPIB vr1, imm2 - VREPI \vr1, \imm2, 0 -.endm -.macro VREPIH vr1, imm2 - VREPI \vr1, \imm2, 1 -.endm -.macro VREPIF vr1, imm2 - VREPI \vr1, \imm2, 2 -.endm -.macro VREPIG vr1, imm2 - VREP \vr1, \imm2, 3 -.endm - -/* VECTOR ADD */ -.macro VA vr1, vr2, vr3, m4 - VX_NUM v1, \vr1 - VX_NUM v2, \vr2 - VX_NUM v3, \vr3 - .word 0xE700 | ((v1&15) << 4) | (v2&15) - .word ((v3&15) << 12) - MRXBOPC \m4, 0xF3, v1, v2, v3 -.endm -.macro VAB vr1, vr2, vr3 - VA \vr1, \vr2, \vr3, 0 -.endm -.macro VAH vr1, vr2, vr3 - VA \vr1, \vr2, \vr3, 1 -.endm -.macro VAF vr1, vr2, vr3 - VA \vr1, \vr2, \vr3, 2 -.endm -.macro VAG vr1, vr2, vr3 - VA \vr1, \vr2, \vr3, 3 -.endm -.macro VAQ vr1, vr2, vr3 - VA \vr1, \vr2, \vr3, 4 -.endm - -/* VECTOR ELEMENT SHIFT RIGHT ARITHMETIC */ -.macro VESRAV vr1, vr2, vr3, m4 - VX_NUM v1, \vr1 - VX_NUM v2, \vr2 - VX_NUM v3, \vr3 - .word 0xE700 | ((v1&15) << 4) | (v2&15) - .word ((v3&15) << 12) - MRXBOPC \m4, 0x7A, v1, v2, v3 -.endm - -.macro VESRAVB vr1, vr2, vr3 - VESRAV \vr1, \vr2, \vr3, 0 -.endm -.macro VESRAVH vr1, vr2, vr3 - VESRAV \vr1, \vr2, \vr3, 1 -.endm -.macro VESRAVF vr1, vr2, vr3 - VESRAV \vr1, \vr2, \vr3, 2 -.endm -.macro VESRAVG vr1, vr2, vr3 - VESRAV \vr1, \vr2, \vr3, 3 -.endm +#include -/* VECTOR ELEMENT ROTATE LEFT LOGICAL */ -.macro VERLL vr1, vr3, disp, base="%r0", m4 - VX_NUM v1, \vr1 - VX_NUM v3, \vr3 - GR_NUM b2, \base - .word 0xE700 | ((v1&15) << 4) | (v3&15) - .word (b2 << 12) | (\disp) - MRXBOPC \m4, 0x33, v1, v3 -.endm -.macro VERLLB vr1, vr3, disp, base="%r0" - VERLL \vr1, \vr3, \disp, \base, 0 -.endm -.macro VERLLH vr1, vr3, disp, base="%r0" - VERLL \vr1, \vr3, \disp, \base, 1 -.endm -.macro VERLLF vr1, vr3, disp, base="%r0" - VERLL \vr1, \vr3, \disp, \base, 2 -.endm -.macro VERLLG vr1, vr3, disp, base="%r0" - VERLL \vr1, \vr3, \disp, \base, 3 -.endm +#ifndef __ASSEMBLY__ -/* VECTOR SHIFT LEFT DOUBLE BY BYTE */ -.macro VSLDB vr1, vr2, vr3, imm4 - VX_NUM v1, \vr1 - VX_NUM v2, \vr2 - VX_NUM v3, \vr3 - .word 0xE700 | ((v1&15) << 4) | (v2&15) - .word ((v3&15) << 12) | (\imm4) - MRXBOPC 0, 0x77, v1, v2, v3 -.endm +asm(".include \"asm/vx-insn-asm.h\"\n"); -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLY__ */ #endif /* __ASM_S390_VX_INSN_H */ diff --git a/arch/s390/kernel/fpu.c b/arch/s390/kernel/fpu.c index d864c9a325e2..4666b29ac8a1 100644 --- a/arch/s390/kernel/fpu.c +++ b/arch/s390/kernel/fpu.c @@ -10,8 +10,7 @@ #include #include #include - -asm(".include \"asm/vx-insn.h\"\n"); +#include void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags) { diff --git a/lib/raid6/s390vx.uc b/lib/raid6/s390vx.uc index 9e597e1f91a4..b25dfc9c7759 100644 --- a/lib/raid6/s390vx.uc 
+++ b/lib/raid6/s390vx.uc @@ -13,8 +13,7 @@ #include #include - -asm(".include \"asm/vx-insn.h\"\n"); +#include #define NSIZE 16 -- cgit v1.2.3 From 5720aab289e138463bb499a42681c1c526030756 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Thu, 1 Dec 2022 18:01:36 +0100 Subject: s390/nmi: use vector instruction macros instead of byte patterns Use vector instruction macros instead of byte patterns to increase readability. The generated code is nearly identical: - 1e8: e7 0f 10 00 00 36 vlm %v0,%v15,0(%r1) - 1ee: e7 0f 11 00 0c 36 vlm %v16,%v31,256(%r1) + 1e8: e7 0f 10 00 30 36 vlm %v0,%v15,0(%r1),3 + 1ee: e7 0f 11 00 3c 36 vlm %v16,%v31,256(%r1),3 By using the VLM macro the alignment hint is automatically specified too. Even though from a performance perspective it doesn't matter at all for the machine check code, this shows yet another benefit when using the macros. Signed-off-by: Heiko Carstens Signed-off-by: Alexander Gordeev --- arch/s390/kernel/nmi.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c index 31cb9b00a36b..f384f2dc4f23 100644 --- a/arch/s390/kernel/nmi.c +++ b/arch/s390/kernel/nmi.c @@ -19,7 +19,7 @@ #include #include #include - +#include #include #include #include @@ -31,8 +31,7 @@ #include #include #include - -#include +#include struct mcck_struct { unsigned int kill_task : 1; @@ -293,8 +292,8 @@ static int notrace s390_validate_registers(union mci mci, int umode) __ctl_load(cr0.val, 0, 0); asm volatile( " la 1,%0\n" - " .word 0xe70f,0x1000,0x0036\n" /* vlm 0,15,0(1) */ - " .word 0xe70f,0x1100,0x0c36\n" /* vlm 16,31,256(1) */ + " VLM 0,15,0,1\n" + " VLM 16,31,256,1\n" : : "Q" (*(struct vx_array *)mcesa->vector_save_area) : "1"); -- cgit v1.2.3 From f9e5938ace2cc8ecf15815649481266017135757 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Sun, 27 Nov 2022 09:45:26 +0100 Subject: s390/nmi: rework register validation handling If a machine check happens in kernel mode, and the machine check interruption code indicates that e.g. vector register contents in the machine check area are not valid, the logic is to kill current. The idea behind this was that if within kernel context vector registers are not used then it is sufficient to kill the current user space process to avoid that it continues with potentially corrupt register contents. This however does not necessarily work, since the current code does not take into account that a machine check can also happen when a kernel thread is running (= no user space context), and in addition there is no way to distinguish between the "previous" and "next" user process task, if the machine check happens when a task switch happens. Given that machine checks with invalid saved register contents in the machine check save area are extremely rare, simplify the logic: if register contents are invalid and the previous context was kernel mode, stop the whole machine. If the previous context was user mode, kill the corresponding task. 
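Condensed into a sketch, the decision made by the reworked s390_do_machine_check() (see the hunks below) is simply:

	/* sketch only; the actual termination path is omitted */
	if (s390_validate_registers(mci)) {
		/* saved register contents could not be validated */
		if (!user_mode(regs))
			s390_handle_damage();	/* kernel context: stop the machine */
		/* user context: mark the current task for termination */
	}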
Signed-off-by: Heiko Carstens Signed-off-by: Alexander Gordeev --- arch/s390/kernel/nmi.c | 64 ++++++++------------------------------------------ 1 file changed, 10 insertions(+), 54 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c index f384f2dc4f23..3518ba638a85 100644 --- a/arch/s390/kernel/nmi.c +++ b/arch/s390/kernel/nmi.c @@ -180,10 +180,10 @@ void noinstr s390_handle_mcck(struct pt_regs *regs) trace_hardirqs_on(); } /* - * returns 0 if all required registers are available + * returns 0 if register contents could be validated * returns 1 otherwise */ -static int notrace s390_validate_registers(union mci mci, int umode) +static int notrace s390_validate_registers(union mci mci) { struct mcesa *mcesa; void *fpt_save_area; @@ -194,45 +194,15 @@ static int notrace s390_validate_registers(union mci mci, int umode) kill_task = 0; zero = 0; - if (!mci.gr) { - /* - * General purpose registers couldn't be restored and have - * unknown contents. Stop system or terminate process. - */ - if (!umode) - s390_handle_damage(); + if (!mci.gr || !mci.fp) kill_task = 1; - } - if (!mci.fp) { - /* - * Floating point registers can't be restored. If the - * kernel currently uses floating point registers the - * system is stopped. If the process has its floating - * pointer registers loaded it is terminated. - */ - if (S390_lowcore.fpu_flags & KERNEL_VXR_V0V7) - s390_handle_damage(); - if (!test_cpu_flag(CIF_FPU)) - kill_task = 1; - } fpt_save_area = &S390_lowcore.floating_pt_save_area; if (!mci.fc) { - /* - * Floating point control register can't be restored. - * If the kernel currently uses the floating pointer - * registers and needs the FPC register the system is - * stopped. If the process has its floating pointer - * registers loaded it is terminated. Otherwise the - * FPC is just validated. - */ - if (S390_lowcore.fpu_flags & KERNEL_FPC) - s390_handle_damage(); + kill_task = 1; asm volatile( " lfpc %0\n" : : "Q" (zero)); - if (!test_cpu_flag(CIF_FPU)) - kill_task = 1; } else { asm volatile( " lfpc %0\n" @@ -274,19 +244,8 @@ static int notrace s390_validate_registers(union mci mci, int umode) * appropriate actions. The host vector or FPU values have been * saved by KVM and will be restored by KVM. */ - if (!mci.vr && !test_cpu_flag(CIF_MCCK_GUEST)) { - /* - * Vector registers can't be restored. If the kernel - * currently uses vector registers the system is - * stopped. If the process has its vector registers - * loaded it is terminated. Otherwise just validate - * the registers. - */ - if (S390_lowcore.fpu_flags & KERNEL_VXR) - s390_handle_damage(); - if (!test_cpu_flag(CIF_FPU)) - kill_task = 1; - } + if (!mci.vr && !test_cpu_flag(CIF_MCCK_GUEST)) + kill_task = 1; cr0.val = S390_lowcore.cregs_save_area[0]; cr0.afp = cr0.vx = 1; __ctl_load(cr0.val, 0, 0); @@ -305,13 +264,8 @@ static int notrace s390_validate_registers(union mci mci, int umode) : : "a" (&S390_lowcore.access_regs_save_area) : "memory"); - if (!mci.ar) { - /* - * Access registers have unknown contents. - * Terminating task. 
- */ + if (!mci.ar) kill_task = 1; - } /* Validate guarded storage registers */ cr2.val = S390_lowcore.cregs_save_area[2]; if (cr2.gse) { @@ -450,7 +404,9 @@ int notrace s390_do_machine_check(struct pt_regs *regs) s390_handle_damage(); } } - if (s390_validate_registers(mci, user_mode(regs))) { + if (s390_validate_registers(mci)) { + if (!user_mode(regs)) + s390_handle_damage(); /* * Couldn't restore all register contents for the * user space process -> mark task for termination. -- cgit v1.2.3 From a4c41fe3985d4c1c31d84833b30fd9ac48fe6f84 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Mon, 28 Nov 2022 14:14:27 +0100 Subject: s390/sclp: keep sclp_early_sccb Keep sclp_early_sccb so it can also be used after initdata has been freed. This is a prerequisite to allow printing a message from the machine check handler. Reviewed-by: Peter Oberparleiter Reviewed-by: Alexander Gordeev Signed-off-by: Heiko Carstens Signed-off-by: Alexander Gordeev --- arch/s390/mm/init.c | 3 --- drivers/s390/char/sclp_early_core.c | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index a28832eefb06..1a25d456d865 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -208,9 +208,6 @@ void free_initmem(void) __set_memory((unsigned long)_sinittext, (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT, SET_MEMORY_RW | SET_MEMORY_NX); - free_reserved_area(sclp_early_sccb, - sclp_early_sccb + EXT_SCCB_READ_SCP, - POISON_FREE_INITMEM, "unused early sccb"); free_initmem_default(POISON_FREE_INITMEM); } diff --git a/drivers/s390/char/sclp_early_core.c b/drivers/s390/char/sclp_early_core.c index 676634de65a8..baf1cd2b0854 100644 --- a/drivers/s390/char/sclp_early_core.c +++ b/drivers/s390/char/sclp_early_core.c @@ -17,7 +17,7 @@ static struct read_info_sccb __bootdata(sclp_info_sccb); static int __bootdata(sclp_info_sccb_valid); -char *__bootdata(sclp_early_sccb); +char *__bootdata_preserved(sclp_early_sccb); int sclp_init_state = sclp_init_state_uninitialized; /* * Used to keep track of the size of the event masks. Qemu until version 2.11 -- cgit v1.2.3 From b64d7254ffe8b11010150fa97a4b235ec36e7a90 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Mon, 28 Nov 2022 14:16:06 +0100 Subject: s390/sclp: introduce sclp_emergency_printk() Introduce sclp_emergency_printk() which can be used to emit a message in emergency cases. sclp_emergency_printk() is only supposed to be used in cases where it can be assumed that regular console device drivers may not work anymore. For example this may be the case for unrecoverable machine checks. 
Reviewed-by: Peter Oberparleiter Signed-off-by: Heiko Carstens Signed-off-by: Alexander Gordeev --- arch/s390/include/asm/sclp.h | 1 + drivers/s390/char/sclp_early_core.c | 24 ++++++++++++++++++++++++ 2 files changed, 25 insertions(+) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h index 67a24dda17b6..dac7da88f61f 100644 --- a/arch/s390/include/asm/sclp.h +++ b/arch/s390/include/asm/sclp.h @@ -132,6 +132,7 @@ void sclp_early_get_ipl_info(struct sclp_ipl_info *info); void sclp_early_detect(void); void sclp_early_printk(const char *s); void __sclp_early_printk(const char *s, unsigned int len); +void sclp_emergency_printk(const char *s); int sclp_early_get_memsize(unsigned long *mem); int sclp_early_get_hsa_size(unsigned long *hsa_size); diff --git a/drivers/s390/char/sclp_early_core.c b/drivers/s390/char/sclp_early_core.c index baf1cd2b0854..ac1d00980fa6 100644 --- a/drivers/s390/char/sclp_early_core.c +++ b/drivers/s390/char/sclp_early_core.c @@ -240,6 +240,30 @@ void sclp_early_printk(const char *str) __sclp_early_printk(str, strlen(str)); } +/* + * Use sclp_emergency_printk() to print a string when the system is in a + * state where regular console drivers cannot be assumed to work anymore. + * + * Callers must make sure that no concurrent SCLP requests are outstanding + * and all other CPUs are stopped, or at least disabled for external + * interrupts. + */ +void sclp_emergency_printk(const char *str) +{ + int have_linemode, have_vt220; + unsigned int len; + + len = strlen(str); + /* + * Don't care about return values; if requests fail, just ignore and + * continue to have a rather high chance that anything is printed. + */ + sclp_early_setup(0, &have_linemode, &have_vt220); + sclp_early_print_lm(str, len); + sclp_early_print_vt220(str, len); + sclp_early_setup(1, &have_linemode, &have_vt220); +} + /* * We can't pass sclp_info_sccb to sclp_early_cmd() here directly, * because it might not fulfil the requiremets for a SCLP communication buffer: -- cgit v1.2.3 From 506faa5b9b4f05e667b39252a28dace17a5017a1 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Mon, 28 Nov 2022 14:16:33 +0100 Subject: s390/nmi: print machine check interruption code before stopping system In case a system will be stopped because of e.g. missing validity bits print the machine check interruption code before the system is stopped. This is helpful, since up to now no message was printed in such a case. Only a disabled wait PSW was loaded, which doesn't give a hint of what went wrong. Improve this by printing a message with debug information. 
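For context, the message assembled by the hunk below ends up on the SCLP console looking roughly like this (the code value shown is made up; u64_to_hex() always emits 16 upper-case hex digits):

	System stopped due to unrecoverable machine check, code: 0x0000000000409000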
Reviewed-by: Peter Oberparleiter Reviewed-by: Alexander Gordeev Signed-off-by: Heiko Carstens Signed-off-by: Alexander Gordeev --- arch/s390/kernel/nmi.c | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) (limited to 'arch/s390') diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c index 3518ba638a85..b1579c85b869 100644 --- a/arch/s390/kernel/nmi.c +++ b/arch/s390/kernel/nmi.c @@ -114,9 +114,61 @@ void nmi_free_mcesa(u64 *mcesad) kmem_cache_free(mcesa_cache, __va(*mcesad & MCESA_ORIGIN_MASK)); } +static __always_inline char *nmi_puts(char *dest, const char *src) +{ + while (*src) + *dest++ = *src++; + *dest = 0; + return dest; +} + +static __always_inline char *u64_to_hex(char *dest, u64 val) +{ + int i, num; + + for (i = 1; i <= 16; i++) { + num = (val >> (64 - 4 * i)) & 0xf; + if (num >= 10) + *dest++ = 'A' + num - 10; + else + *dest++ = '0' + num; + } + *dest = 0; + return dest; +} + static notrace void s390_handle_damage(void) { + union ctlreg0 cr0, cr0_new; + char message[100]; + psw_t psw_save; + char *ptr; + smp_emergency_stop(); + diag_amode31_ops.diag308_reset(); + ptr = nmi_puts(message, "System stopped due to unrecoverable machine check, code: 0x"); + u64_to_hex(ptr, S390_lowcore.mcck_interruption_code); + + /* + * Disable low address protection and make machine check new PSW a + * disabled wait PSW. Any additional machine check cannot be handled. + */ + __ctl_store(cr0.val, 0, 0); + cr0_new = cr0; + cr0_new.lap = 0; + __ctl_load(cr0_new.val, 0, 0); + psw_save = S390_lowcore.mcck_new_psw; + psw_bits(S390_lowcore.mcck_new_psw).io = 0; + psw_bits(S390_lowcore.mcck_new_psw).ext = 0; + psw_bits(S390_lowcore.mcck_new_psw).wait = 1; + sclp_emergency_printk(message); + + /* + * Restore machine check new PSW and control register 0 to original + * values. This makes possible system dump analysis easier. + */ + S390_lowcore.mcck_new_psw = psw_save; + __ctl_load(cr0.val, 0, 0); disabled_wait(); while (1); } -- cgit v1.2.3 From 742aed05af97dc5ba6c53a4b6cb6a7b31e32f9e9 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Tue, 29 Nov 2022 17:22:14 +0100 Subject: s390/nmi: move storage error checking back to C, enter with DAT on Checking for storage errors in machine check entry code was done in order to handle also storage errors on kernel page tables. However this is extremely unlikely and some basic assumptions what works on machine check entry are necessary anyway. In order to simplify machine check handling delay checking for storage errors to C code. With this also change the machine check new PSW to have DAT on, which simplifies the entry code even further. 
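Condensed, the storage error check that moves into C looks like the sketch below (the real hunk further down keeps the three conditions separate; errors that hit while a KVM guest was running are left for reinjection into the guest):

	if (!test_cpu_flag(CIF_MCCK_GUEST)) {
		/* storage error / storage key error uncorrected, or storage
		 * degradation with a valid failing-storage address
		 */
		if (mci.se || mci.ke || (mci.ds && mci.fa))
			s390_handle_damage();
	}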
Reviewed-by: Alexander Gordeev Signed-off-by: Heiko Carstens Signed-off-by: Alexander Gordeev --- arch/s390/kernel/entry.S | 34 ++++------------------------------ arch/s390/kernel/nmi.c | 16 +++++++++++++++- arch/s390/kernel/setup.c | 3 ++- 3 files changed, 21 insertions(+), 32 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index d2a1f2f4f5b8..e0d11f3adfcc 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -122,24 +122,6 @@ _LPP_OFFSET = __LC_LPP "jnz .+8; .insn rrf,0xb2e80000,0,0,13,0", 82 .endm - /* - * The CHKSTG macro jumps to the provided label in case the - * machine check interruption code reports one of unrecoverable - * storage errors: - * - Storage error uncorrected - * - Storage key error uncorrected - * - Storage degradation with Failing-storage-address validity - */ - .macro CHKSTG errlabel - TSTMSK __LC_MCCK_CODE,(MCCK_CODE_STG_ERROR|MCCK_CODE_STG_KEY_ERROR) - jnz \errlabel - TSTMSK __LC_MCCK_CODE,MCCK_CODE_STG_DEGRAD - jz .Loklabel\@ - TSTMSK __LC_MCCK_CODE,MCCK_CODE_STG_FAIL_ADDR - jnz \errlabel -.Loklabel\@: - .endm - #if IS_ENABLED(CONFIG_KVM) /* * The OUTSIDE macro jumps to the provided label in case the value @@ -546,26 +528,18 @@ ENTRY(mcck_int_handler) 3: TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID jno .Lmcck_panic tmhh %r8,0x0001 # interrupting from user ? - jnz 6f + jnz .Lmcck_user TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID jno .Lmcck_panic #if IS_ENABLED(CONFIG_KVM) - OUTSIDE %r9,.Lsie_gmap,.Lsie_done,6f + OUTSIDE %r9,.Lsie_gmap,.Lsie_done,.Lmcck_stack OUTSIDE %r9,.Lsie_entry,.Lsie_leave,4f oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST - j 5f -4: CHKSTG .Lmcck_panic -5: larl %r14,.Lstosm_tmp - stosm 0(%r14),0x04 # turn dat on, keep irqs off - BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST) +4: BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST) SIEEXIT j .Lmcck_stack #endif -6: CHKSTG .Lmcck_panic - larl %r14,.Lstosm_tmp - stosm 0(%r14),0x04 # turn dat on, keep irqs off - tmhh %r8,0x0001 # interrupting from user ? - jz .Lmcck_stack +.Lmcck_user: BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP .Lmcck_stack: lg %r15,__LC_MCCK_STACK diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c index b1579c85b869..b54d4aa33851 100644 --- a/arch/s390/kernel/nmi.c +++ b/arch/s390/kernel/nmi.c @@ -487,7 +487,21 @@ int notrace s390_do_machine_check(struct pt_regs *regs) mcck->stp_queue |= stp_island_check(); mcck_pending = 1; } - + /* + * Reinject storage related machine checks into the guest if they + * happen when the guest is running. 
+ */ + if (!test_cpu_flag(CIF_MCCK_GUEST)) { + /* Storage error uncorrected */ + if (mci.se) + s390_handle_damage(); + /* Storage key-error uncorrected */ + if (mci.ke) + s390_handle_damage(); + /* Storage degradation */ + if (mci.ds && mci.fa) + s390_handle_damage(); + } if (mci.cp) { /* Channel report word pending */ mcck->channel_report = 1; diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index ab19ddb09d65..2094f575c532 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -437,7 +437,7 @@ static void __init setup_lowcore_dat_off(void) lc->svc_new_psw.addr = (unsigned long) system_call; lc->program_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK; lc->program_new_psw.addr = (unsigned long) pgm_check_handler; - lc->mcck_new_psw.mask = PSW_KERNEL_BITS; + lc->mcck_new_psw.mask = int_psw_mask; lc->mcck_new_psw.addr = (unsigned long) mcck_int_handler; lc->io_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK; lc->io_new_psw.addr = (unsigned long) io_int_handler; @@ -512,6 +512,7 @@ static void __init setup_lowcore_dat_on(void) S390_lowcore.external_new_psw.mask |= PSW_MASK_DAT; S390_lowcore.svc_new_psw.mask |= PSW_MASK_DAT; S390_lowcore.program_new_psw.mask |= PSW_MASK_DAT; + S390_lowcore.mcck_new_psw.mask |= PSW_MASK_DAT; S390_lowcore.io_new_psw.mask |= PSW_MASK_DAT; __ctl_set_bit(0, 28); __ctl_store(S390_lowcore.cregs_save_area, 0, 15); -- cgit v1.2.3 From bb3860cc02c660b409a1e02521b84b1d7d4e84cd Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 30 Nov 2022 19:43:28 +0100 Subject: s390/nmi: get rid of private slab cache Get rid of private "nmi_save_areas" slab cache. The only reason this was introduced years ago was that with some slab debugging options allocations would only guarantee a minimum alignment of ARCH_KMALLOC_MINALIGN, which was eight bytes back then. This is not sufficient for the extended machine check save area. However since commit 59bb47985c1d ("mm, sl[aou]b: guarantee natural alignment for kmalloc(power-of-two)") kmalloc guarantees a power-of-two alignment even with debugging options enabled. Therefore the private slab cache can be removed. Reviewed-by: Alexander Gordeev Signed-off-by: Heiko Carstens Signed-off-by: Alexander Gordeev --- arch/s390/kernel/nmi.c | 40 +++++++++------------------------------- 1 file changed, 9 insertions(+), 31 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c index b54d4aa33851..5dbf274719a9 100644 --- a/arch/s390/kernel/nmi.c +++ b/arch/s390/kernel/nmi.c @@ -42,21 +42,12 @@ struct mcck_struct { }; static DEFINE_PER_CPU(struct mcck_struct, cpu_mcck); -static struct kmem_cache *mcesa_cache; -static unsigned long mcesa_origin_lc; static inline int nmi_needs_mcesa(void) { return MACHINE_HAS_VX || MACHINE_HAS_GS; } -static inline unsigned long nmi_get_mcesa_size(void) -{ - if (MACHINE_HAS_GS) - return MCESA_MAX_SIZE; - return MCESA_MIN_SIZE; -} - /* * The initial machine check extended save area for the boot CPU. 
* It will be replaced on the boot CPU reinit with an allocated @@ -74,36 +65,23 @@ void __init nmi_alloc_mcesa_early(u64 *mcesad) *mcesad |= ilog2(MCESA_MAX_SIZE); } -static void __init nmi_alloc_cache(void) +int nmi_alloc_mcesa(u64 *mcesad) { unsigned long size; - - if (!nmi_needs_mcesa()) - return; - size = nmi_get_mcesa_size(); - if (size > MCESA_MIN_SIZE) - mcesa_origin_lc = ilog2(size); - /* create slab cache for the machine-check-extended-save-areas */ - mcesa_cache = kmem_cache_create("nmi_save_areas", size, size, 0, NULL); - if (!mcesa_cache) - panic("Couldn't create nmi save area cache"); -} - -int __ref nmi_alloc_mcesa(u64 *mcesad) -{ - unsigned long origin; + void *origin; *mcesad = 0; if (!nmi_needs_mcesa()) return 0; - if (!mcesa_cache) - nmi_alloc_cache(); - origin = (unsigned long) kmem_cache_alloc(mcesa_cache, GFP_KERNEL); + size = MACHINE_HAS_GS ? MCESA_MAX_SIZE : MCESA_MIN_SIZE; + origin = kmalloc(size, GFP_KERNEL); if (!origin) return -ENOMEM; /* The pointer is stored with mcesa_bits ORed in */ - kmemleak_not_leak((void *) origin); - *mcesad = __pa(origin) | mcesa_origin_lc; + kmemleak_not_leak(origin); + *mcesad = __pa(origin); + if (MACHINE_HAS_GS) + *mcesad |= ilog2(MCESA_MAX_SIZE); return 0; } @@ -111,7 +89,7 @@ void nmi_free_mcesa(u64 *mcesad) { if (!nmi_needs_mcesa()) return; - kmem_cache_free(mcesa_cache, __va(*mcesad & MCESA_ORIGIN_MASK)); + kfree(__va(*mcesad & MCESA_ORIGIN_MASK)); } static __always_inline char *nmi_puts(char *dest, const char *src) -- cgit v1.2.3
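As a closing illustration of the alignment guarantee this last patch relies on: since commit 59bb47985c1d, kmalloc() of a power-of-two size returns naturally aligned memory even with slab debugging options enabled. The check below is purely hypothetical and not part of the series:

	#include <linux/slab.h>

	static void check_kmalloc_alignment(void)
	{
		void *p = kmalloc(2048, GFP_KERNEL);

		/* a 2048-byte allocation is 2048-byte aligned */
		WARN_ON(p && ((unsigned long)p & (2048 - 1)));
		kfree(p);
	}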