Diffstat (limited to 'arch/s390'): 60 files changed, 519 insertions, 698 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 199ac3e4da1d..baed39772c84 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -35,9 +35,6 @@ config GENERIC_BUG config GENERIC_BUG_RELATIVE_POINTERS def_bool y -config ARCH_DMA_ADDR_T_64BIT - def_bool y - config GENERIC_LOCKBREAK def_bool y if SMP && PREEMPT @@ -68,6 +65,7 @@ config S390 select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA select ARCH_HAS_KCOV + select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_SET_MEMORY select ARCH_HAS_SG_CHAIN select ARCH_HAS_STRICT_KERNEL_RWX @@ -133,7 +131,6 @@ config S390 select HAVE_CMPXCHG_LOCAL select HAVE_COPY_THREAD_TLS select HAVE_DEBUG_KMEMLEAK - select HAVE_DMA_API_DEBUG select HAVE_DMA_CONTIGUOUS select DMA_DIRECT_OPS select HAVE_DYNAMIC_FTRACE @@ -709,7 +706,11 @@ config QDIO menuconfig PCI bool "PCI support" select PCI_MSI + select IOMMU_HELPER select IOMMU_SUPPORT + select NEED_DMA_MAP_STATE + select NEED_SG_DMA_LENGTH + help Enable PCI support. @@ -733,15 +734,6 @@ config PCI_DOMAINS config HAS_IOMEM def_bool PCI -config IOMMU_HELPER - def_bool PCI - -config NEED_SG_DMA_LENGTH - def_bool PCI - -config NEED_DMA_MAP_STATE - def_bool PCI - config CHSC_SCH def_tristate m prompt "Support for CHSC subchannels" @@ -847,6 +839,10 @@ config CCW source "drivers/Kconfig" +config HAVE_PNETID + tristate + default (SMC || CCWGROUP) + source "fs/Kconfig" source "arch/s390/Kconfig.debug" diff --git a/arch/s390/Makefile b/arch/s390/Makefile index c79936d02f7b..68a690442be0 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile @@ -18,7 +18,7 @@ KBUILD_CFLAGS += -m64 KBUILD_AFLAGS += -m64 UTS_MACHINE := s390x STACK_SIZE := 16384 -CHECKFLAGS += -D__s390__ -D__s390x__ -mbig-endian +CHECKFLAGS += -D__s390__ -D__s390x__ export LD_BFD diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c index cb6e8066b1ad..ee6a9c387c87 100644 --- a/arch/s390/appldata/appldata_base.c +++ b/arch/s390/appldata/appldata_base.c @@ -391,7 +391,7 @@ int appldata_register_ops(struct appldata_ops *ops) if (ops->size > APPLDATA_MAX_REC_SIZE) return -EINVAL; - ops->ctl_table = kzalloc(4 * sizeof(struct ctl_table), GFP_KERNEL); + ops->ctl_table = kcalloc(4, sizeof(struct ctl_table), GFP_KERNEL); if (!ops->ctl_table) return -ENOMEM; diff --git a/arch/s390/crypto/arch_random.c b/arch/s390/crypto/arch_random.c index 8720e9203ecf..dd95cdbd22ce 100644 --- a/arch/s390/crypto/arch_random.c +++ b/arch/s390/crypto/arch_random.c @@ -2,14 +2,37 @@ /* * s390 arch random implementation. * - * Copyright IBM Corp. 2017 - * Author(s): Harald Freudenberger <freude@de.ibm.com> + * Copyright IBM Corp. 2017, 2018 + * Author(s): Harald Freudenberger + * + * The s390_arch_random_generate() function may be called from random.c + * in interrupt context. So this implementation does the best to be very + * fast. There is a buffer of random data which is asynchronously checked + * and filled by a workqueue thread. + * If there are enough bytes in the buffer the s390_arch_random_generate() + * just delivers these bytes. Otherwise false is returned until the + * worker thread refills the buffer. + * The worker fills the rng buffer by pulling fresh entropy from the + * high quality (but slow) true hardware random generator. This entropy + * is then spread over the buffer with an pseudo random generator PRNG. 
+ * As the arch_get_random_seed_long() fetches 8 bytes and the calling + * function add_interrupt_randomness() counts this as 1 bit entropy the + * distribution needs to make sure there is in fact 1 bit entropy contained + * in 8 bytes of the buffer. The current values pull 32 byte entropy + * and scatter this into a 2048 byte buffer. So 8 byte in the buffer + * will contain 1 bit of entropy. + * The worker thread is rescheduled based on the charge level of the + * buffer but at least with 500 ms delay to avoid too much CPU consumption. + * So the max. amount of rng data delivered via arch_get_random_seed is + * limited to 4k bytes per second. */ #include <linux/kernel.h> #include <linux/atomic.h> #include <linux/random.h> +#include <linux/slab.h> #include <linux/static_key.h> +#include <linux/workqueue.h> #include <asm/cpacf.h> DEFINE_STATIC_KEY_FALSE(s390_arch_random_available); @@ -17,11 +40,83 @@ DEFINE_STATIC_KEY_FALSE(s390_arch_random_available); atomic64_t s390_arch_random_counter = ATOMIC64_INIT(0); EXPORT_SYMBOL(s390_arch_random_counter); +#define ARCH_REFILL_TICKS (HZ/2) +#define ARCH_PRNG_SEED_SIZE 32 +#define ARCH_RNG_BUF_SIZE 2048 + +static DEFINE_SPINLOCK(arch_rng_lock); +static u8 *arch_rng_buf; +static unsigned int arch_rng_buf_idx; + +static void arch_rng_refill_buffer(struct work_struct *); +static DECLARE_DELAYED_WORK(arch_rng_work, arch_rng_refill_buffer); + +bool s390_arch_random_generate(u8 *buf, unsigned int nbytes) +{ + /* lock rng buffer */ + if (!spin_trylock(&arch_rng_lock)) + return false; + + /* try to resolve the requested amount of bytes from the buffer */ + arch_rng_buf_idx -= nbytes; + if (arch_rng_buf_idx < ARCH_RNG_BUF_SIZE) { + memcpy(buf, arch_rng_buf + arch_rng_buf_idx, nbytes); + atomic64_add(nbytes, &s390_arch_random_counter); + spin_unlock(&arch_rng_lock); + return true; + } + + /* not enough bytes in rng buffer, refill is done asynchronously */ + spin_unlock(&arch_rng_lock); + + return false; +} +EXPORT_SYMBOL(s390_arch_random_generate); + +static void arch_rng_refill_buffer(struct work_struct *unused) +{ + unsigned int delay = ARCH_REFILL_TICKS; + + spin_lock(&arch_rng_lock); + if (arch_rng_buf_idx > ARCH_RNG_BUF_SIZE) { + /* buffer is exhausted and needs refill */ + u8 seed[ARCH_PRNG_SEED_SIZE]; + u8 prng_wa[240]; + /* fetch ARCH_PRNG_SEED_SIZE bytes of entropy */ + cpacf_trng(NULL, 0, seed, sizeof(seed)); + /* blow this entropy up to ARCH_RNG_BUF_SIZE with PRNG */ + memset(prng_wa, 0, sizeof(prng_wa)); + cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED, + &prng_wa, NULL, 0, seed, sizeof(seed)); + cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN, + &prng_wa, arch_rng_buf, ARCH_RNG_BUF_SIZE, NULL, 0); + arch_rng_buf_idx = ARCH_RNG_BUF_SIZE; + } + delay += (ARCH_REFILL_TICKS * arch_rng_buf_idx) / ARCH_RNG_BUF_SIZE; + spin_unlock(&arch_rng_lock); + + /* kick next check */ + queue_delayed_work(system_long_wq, &arch_rng_work, delay); +} + static int __init s390_arch_random_init(void) { - /* check if subfunction CPACF_PRNO_TRNG is available */ - if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG)) + /* all the needed PRNO subfunctions available ? 
*/ + if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG) && + cpacf_query_func(CPACF_PRNO, CPACF_PRNO_SHA512_DRNG_GEN)) { + + /* alloc arch random working buffer */ + arch_rng_buf = kmalloc(ARCH_RNG_BUF_SIZE, GFP_KERNEL); + if (!arch_rng_buf) + return -ENOMEM; + + /* kick worker queue job to fill the random buffer */ + queue_delayed_work(system_long_wq, + &arch_rng_work, ARCH_REFILL_TICKS); + + /* enable arch random to the outside world */ static_branch_enable(&s390_arch_random_available); + } return 0; } diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c index be8cc53204b5..a2945b289a29 100644 --- a/arch/s390/hypfs/hypfs_diag.c +++ b/arch/s390/hypfs/hypfs_diag.c @@ -239,7 +239,7 @@ static void *page_align_ptr(void *ptr) static void *diag204_alloc_vbuf(int pages) { /* The buffer has to be page aligned! */ - diag204_buf_vmalloc = vmalloc(PAGE_SIZE * (pages + 1)); + diag204_buf_vmalloc = vmalloc(array_size(PAGE_SIZE, (pages + 1))); if (!diag204_buf_vmalloc) return ERR_PTR(-ENOMEM); diag204_buf = page_align_ptr(diag204_buf_vmalloc); diff --git a/arch/s390/hypfs/hypfs_diag0c.c b/arch/s390/hypfs/hypfs_diag0c.c index dce87f1bec94..cebf05150cc1 100644 --- a/arch/s390/hypfs/hypfs_diag0c.c +++ b/arch/s390/hypfs/hypfs_diag0c.c @@ -49,7 +49,8 @@ static void *diag0c_store(unsigned int *count) get_online_cpus(); cpu_count = num_online_cpus(); - cpu_vec = kmalloc(sizeof(*cpu_vec) * num_possible_cpus(), GFP_KERNEL); + cpu_vec = kmalloc_array(num_possible_cpus(), sizeof(*cpu_vec), + GFP_KERNEL); if (!cpu_vec) goto fail_put_online_cpus; /* Note: Diag 0c needs 8 byte alignment and real storage */ diff --git a/arch/s390/hypfs/hypfs_sprp.c b/arch/s390/hypfs/hypfs_sprp.c index ae0ed8dd5f1b..5d85a039391c 100644 --- a/arch/s390/hypfs/hypfs_sprp.c +++ b/arch/s390/hypfs/hypfs_sprp.c @@ -13,7 +13,6 @@ #include <linux/string.h> #include <linux/types.h> #include <linux/uaccess.h> -#include <asm/compat.h> #include <asm/diag.h> #include <asm/sclp.h> #include "hypfs.h" diff --git a/arch/s390/include/asm/archrandom.h b/arch/s390/include/asm/archrandom.h index 09aed1095336..c67b82dfa558 100644 --- a/arch/s390/include/asm/archrandom.h +++ b/arch/s390/include/asm/archrandom.h @@ -15,16 +15,11 @@ #include <linux/static_key.h> #include <linux/atomic.h> -#include <asm/cpacf.h> DECLARE_STATIC_KEY_FALSE(s390_arch_random_available); extern atomic64_t s390_arch_random_counter; -static void s390_arch_random_generate(u8 *buf, unsigned int nbytes) -{ - cpacf_trng(NULL, 0, buf, nbytes); - atomic64_add(nbytes, &s390_arch_random_counter); -} +bool s390_arch_random_generate(u8 *buf, unsigned int nbytes); static inline bool arch_has_random(void) { @@ -51,8 +46,7 @@ static inline bool arch_get_random_int(unsigned int *v) static inline bool arch_get_random_seed_long(unsigned long *v) { if (static_branch_likely(&s390_arch_random_available)) { - s390_arch_random_generate((u8 *)v, sizeof(*v)); - return true; + return s390_arch_random_generate((u8 *)v, sizeof(*v)); } return false; } @@ -60,8 +54,7 @@ static inline bool arch_get_random_seed_long(unsigned long *v) static inline bool arch_get_random_seed_int(unsigned int *v) { if (static_branch_likely(&s390_arch_random_available)) { - s390_arch_random_generate((u8 *)v, sizeof(*v)); - return true; + return s390_arch_random_generate((u8 *)v, sizeof(*v)); } return false; } diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h index 20bce136b2e5..a29dd430fb40 100644 --- a/arch/s390/include/asm/ccwdev.h +++ b/arch/s390/include/asm/ccwdev.h @@ -231,4 +231,5 @@ 
int ccw_device_siosl(struct ccw_device *); extern void ccw_device_get_schid(struct ccw_device *, struct subchannel_id *); struct channel_path_desc_fmt0 *ccw_device_get_chp_desc(struct ccw_device *, int); +u8 *ccw_device_get_util_str(struct ccw_device *cdev, int chp_idx); #endif /* _S390_CCWDEV_H_ */ diff --git a/arch/s390/include/asm/ccwgroup.h b/arch/s390/include/asm/ccwgroup.h index 99aa817dad32..860cab7479c3 100644 --- a/arch/s390/include/asm/ccwgroup.h +++ b/arch/s390/include/asm/ccwgroup.h @@ -73,4 +73,14 @@ extern void ccwgroup_remove_ccwdev(struct ccw_device *cdev); #define to_ccwgroupdev(x) container_of((x), struct ccwgroup_device, dev) #define to_ccwgroupdrv(x) container_of((x), struct ccwgroup_driver, driver) + +#if IS_ENABLED(CONFIG_CCWGROUP) +bool dev_is_ccwgroup(struct device *dev); +#else /* CONFIG_CCWGROUP */ +static inline bool dev_is_ccwgroup(struct device *dev) +{ + return false; +} +#endif /* CONFIG_CCWGROUP */ + #endif diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h index 9830fb6b076e..97db2fba546a 100644 --- a/arch/s390/include/asm/compat.h +++ b/arch/s390/include/asm/compat.h @@ -53,7 +53,6 @@ typedef u32 compat_size_t; typedef s32 compat_ssize_t; -typedef s32 compat_time_t; typedef s32 compat_clock_t; typedef s32 compat_pid_t; typedef u16 __compat_uid_t; @@ -97,16 +96,6 @@ typedef struct { u32 gprs_high[NUM_GPRS]; } s390_compat_regs_high; -struct compat_timespec { - compat_time_t tv_sec; - s32 tv_nsec; -}; - -struct compat_timeval { - compat_time_t tv_sec; - s32 tv_usec; -}; - struct compat_stat { compat_dev_t st_dev; u16 __pad1; @@ -243,10 +232,10 @@ struct compat_ipc64_perm { struct compat_semid64_ds { struct compat_ipc64_perm sem_perm; - compat_time_t sem_otime; - compat_ulong_t __pad1; - compat_time_t sem_ctime; - compat_ulong_t __pad2; + compat_ulong_t sem_otime; + compat_ulong_t sem_otime_high; + compat_ulong_t sem_ctime; + compat_ulong_t sem_ctime_high; compat_ulong_t sem_nsems; compat_ulong_t __unused1; compat_ulong_t __unused2; @@ -254,12 +243,12 @@ struct compat_semid64_ds { struct compat_msqid64_ds { struct compat_ipc64_perm msg_perm; - compat_time_t msg_stime; - compat_ulong_t __pad1; - compat_time_t msg_rtime; - compat_ulong_t __pad2; - compat_time_t msg_ctime; - compat_ulong_t __pad3; + compat_ulong_t msg_stime; + compat_ulong_t msg_stime_high; + compat_ulong_t msg_rtime; + compat_ulong_t msg_rtime_high; + compat_ulong_t msg_ctime; + compat_ulong_t msg_ctime_high; compat_ulong_t msg_cbytes; compat_ulong_t msg_qnum; compat_ulong_t msg_qbytes; @@ -272,12 +261,12 @@ struct compat_msqid64_ds { struct compat_shmid64_ds { struct compat_ipc64_perm shm_perm; compat_size_t shm_segsz; - compat_time_t shm_atime; - compat_ulong_t __pad1; - compat_time_t shm_dtime; - compat_ulong_t __pad2; - compat_time_t shm_ctime; - compat_ulong_t __pad3; + compat_ulong_t shm_atime; + compat_ulong_t shm_atime_high; + compat_ulong_t shm_dtime; + compat_ulong_t shm_dtime_high; + compat_ulong_t shm_ctime; + compat_ulong_t shm_ctime_high; compat_pid_t shm_cpid; compat_pid_t shm_lpid; compat_ulong_t shm_nattch; diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h index f58d17e9dd65..de023a9a88ca 100644 --- a/arch/s390/include/asm/cpu_mf.h +++ b/arch/s390/include/asm/cpu_mf.h @@ -113,7 +113,7 @@ struct hws_basic_entry { struct hws_diag_entry { unsigned int def:16; /* 0-15 Data Entry Format */ - unsigned int R:14; /* 16-19 and 20-30 reserved */ + unsigned int R:15; /* 16-19 and 20-30 reserved */ unsigned int I:1; /* 31 entry valid or 
invalid */ u8 data[]; /* Machine-dependent sample data */ } __packed; @@ -129,7 +129,9 @@ struct hws_trailer_entry { unsigned int f:1; /* 0 - Block Full Indicator */ unsigned int a:1; /* 1 - Alert request control */ unsigned int t:1; /* 2 - Timestamp format */ - unsigned long long:61; /* 3 - 63: Reserved */ + unsigned int :29; /* 3 - 31: Reserved */ + unsigned int bsdes:16; /* 32-47: size of basic SDE */ + unsigned int dsdes:16; /* 48-63: size of diagnostic SDE */ }; unsigned long long flags; /* 0 - 63: All indicators */ }; diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h index 99c93d0346f9..4600453536c2 100644 --- a/arch/s390/include/asm/ctl_reg.h +++ b/arch/s390/include/asm/ctl_reg.h @@ -10,8 +10,20 @@ #include <linux/const.h> +#define CR0_CLOCK_COMPARATOR_SIGN _BITUL(63 - 10) +#define CR0_EMERGENCY_SIGNAL_SUBMASK _BITUL(63 - 49) +#define CR0_EXTERNAL_CALL_SUBMASK _BITUL(63 - 50) +#define CR0_CLOCK_COMPARATOR_SUBMASK _BITUL(63 - 52) +#define CR0_CPU_TIMER_SUBMASK _BITUL(63 - 53) +#define CR0_SERVICE_SIGNAL_SUBMASK _BITUL(63 - 54) +#define CR0_UNUSED_56 _BITUL(63 - 56) +#define CR0_INTERRUPT_KEY_SUBMASK _BITUL(63 - 57) +#define CR0_MEASUREMENT_ALERT_SUBMASK _BITUL(63 - 58) + #define CR2_GUARDED_STORAGE _BITUL(63 - 59) +#define CR14_UNUSED_32 _BITUL(63 - 32) +#define CR14_UNUSED_33 _BITUL(63 - 33) #define CR14_CHANNEL_REPORT_SUBMASK _BITUL(63 - 35) #define CR14_RECOVERY_SUBMASK _BITUL(63 - 36) #define CR14_DEGRADATION_SUBMASK _BITUL(63 - 37) diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h index 1a61b1b997f2..7d22a474a040 100644 --- a/arch/s390/include/asm/elf.h +++ b/arch/s390/include/asm/elf.h @@ -125,8 +125,9 @@ * ELF register definitions.. */ +#include <linux/compat.h> + #include <asm/ptrace.h> -#include <asm/compat.h> #include <asm/syscall.h> #include <asm/user.h> @@ -136,7 +137,6 @@ typedef s390_regs elf_gregset_t; typedef s390_fp_regs compat_elf_fpregset_t; typedef s390_compat_regs compat_elf_gregset_t; -#include <linux/compat.h> #include <linux/sched/mm.h> /* for task_struct */ #include <asm/mmu_context.h> diff --git a/arch/s390/include/asm/hardirq.h b/arch/s390/include/asm/hardirq.h index a296c6acfd07..dfbc3c6c0674 100644 --- a/arch/s390/include/asm/hardirq.h +++ b/arch/s390/include/asm/hardirq.h @@ -14,6 +14,8 @@ #include <asm/lowcore.h> #define local_softirq_pending() (S390_lowcore.softirq_pending) +#define set_softirq_pending(x) (S390_lowcore.softirq_pending = (x)) +#define or_softirq_pending(x) (S390_lowcore.softirq_pending |= (x)) #define __ARCH_IRQ_STAT #define __ARCH_HAS_DO_SOFTIRQ diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 81cdb6b55118..a2188e309bd6 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -812,6 +812,7 @@ struct kvm_arch{ int use_irqchip; int use_cmma; int use_pfmfi; + int use_skf; int user_cpu_state_ctrl; int user_sigp; int user_stsi; diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h index c639c95850e4..f5ff9dbad8ac 100644 --- a/arch/s390/include/asm/mmu.h +++ b/arch/s390/include/asm/mmu.h @@ -21,7 +21,7 @@ typedef struct { /* The mmu context uses extended page tables. */ unsigned int has_pgste:1; /* The mmu context uses storage keys. */ - unsigned int use_skey:1; + unsigned int uses_skeys:1; /* The mmu context uses CMM. 
*/ unsigned int uses_cmm:1; } mm_context_t; diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index 324f6f452982..d16bc79c30bb 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h @@ -30,7 +30,7 @@ static inline int init_new_context(struct task_struct *tsk, test_thread_flag(TIF_PGSTE) || (current->mm && current->mm->context.alloc_pgste); mm->context.has_pgste = 0; - mm->context.use_skey = 0; + mm->context.uses_skeys = 0; mm->context.uses_cmm = 0; #endif switch (mm->context.asce_limit) { diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h index 12fe3591034f..94f8db468c9b 100644 --- a/arch/s390/include/asm/pci.h +++ b/arch/s390/include/asm/pci.h @@ -2,8 +2,6 @@ #ifndef __ASM_S390_PCI_H #define __ASM_S390_PCI_H -/* must be set before including asm-generic/pci.h */ -#define PCI_DMA_BUS_IS_PHYS (0) /* must be set before including pci_clp.h */ #define PCI_BAR_COUNT 6 diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 2d24d33bf188..5ab636089c60 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -171,7 +171,6 @@ static inline int is_module_addr(void *addr) #define _PAGE_WRITE 0x020 /* SW pte write bit */ #define _PAGE_SPECIAL 0x040 /* SW associated with special page */ #define _PAGE_UNUSED 0x080 /* SW bit for pgste usage state */ -#define __HAVE_ARCH_PTE_SPECIAL #ifdef CONFIG_MEM_SOFT_DIRTY #define _PAGE_SOFT_DIRTY 0x002 /* SW pte soft dirty bit */ @@ -507,10 +506,10 @@ static inline int mm_alloc_pgste(struct mm_struct *mm) * faults should no longer be backed by zero pages */ #define mm_forbids_zeropage mm_has_pgste -static inline int mm_use_skey(struct mm_struct *mm) +static inline int mm_uses_skeys(struct mm_struct *mm) { #ifdef CONFIG_PGSTE - if (mm->context.use_skey) + if (mm->context.uses_skeys) return 1; #endif return 0; diff --git a/arch/s390/include/asm/pnet.h b/arch/s390/include/asm/pnet.h new file mode 100644 index 000000000000..6e278584f8f1 --- /dev/null +++ b/arch/s390/include/asm/pnet.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * IBM System z PNET ID Support + * + * Copyright IBM Corp. 2018 + */ + +#ifndef _ASM_S390_PNET_H +#define _ASM_S390_PNET_H + +#include <linux/device.h> +#include <linux/types.h> + +#define PNETIDS_LEN 64 /* Total utility string length in bytes + * to cover up to 4 PNETIDs of 16 bytes + * for up to 4 device ports + */ +#define MAX_PNETID_LEN 16 /* Max.length of a single port PNETID */ +#define MAX_PNETID_PORTS (PNETIDS_LEN / MAX_PNETID_LEN) + /* Max. 
# of ports with a PNETID */ + +int pnet_id_by_dev_port(struct device *dev, unsigned short port, u8 *pnetid); +#endif /* _ASM_S390_PNET_H */ diff --git a/arch/s390/include/uapi/asm/Kbuild b/arch/s390/include/uapi/asm/Kbuild index faef3f7e8353..e364873e0d10 100644 --- a/arch/s390/include/uapi/asm/Kbuild +++ b/arch/s390/include/uapi/asm/Kbuild @@ -9,9 +9,12 @@ generic-y += errno.h generic-y += fcntl.h generic-y += ioctl.h generic-y += mman.h +generic-y += msgbuf.h generic-y += param.h generic-y += poll.h generic-y += resource.h +generic-y += sembuf.h +generic-y += shmbuf.h generic-y += sockios.h generic-y += swab.h generic-y += termbits.h diff --git a/arch/s390/include/uapi/asm/msgbuf.h b/arch/s390/include/uapi/asm/msgbuf.h deleted file mode 100644 index 604f847cd68c..000000000000 --- a/arch/s390/include/uapi/asm/msgbuf.h +++ /dev/null @@ -1,38 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#ifndef _S390_MSGBUF_H -#define _S390_MSGBUF_H - -/* - * The msqid64_ds structure for S/390 architecture. - * Note extra padding because this structure is passed back and forth - * between kernel and user space. - * - * Pad space is left for: - * - 64-bit time_t to solve y2038 problem - * - 2 miscellaneous 32-bit values - */ - -struct msqid64_ds { - struct ipc64_perm msg_perm; - __kernel_time_t msg_stime; /* last msgsnd time */ -#ifndef __s390x__ - unsigned long __unused1; -#endif /* ! __s390x__ */ - __kernel_time_t msg_rtime; /* last msgrcv time */ -#ifndef __s390x__ - unsigned long __unused2; -#endif /* ! __s390x__ */ - __kernel_time_t msg_ctime; /* last change time */ -#ifndef __s390x__ - unsigned long __unused3; -#endif /* ! __s390x__ */ - unsigned long msg_cbytes; /* current number of bytes on queue */ - unsigned long msg_qnum; /* number of messages in queue */ - unsigned long msg_qbytes; /* max number of bytes on queue */ - __kernel_pid_t msg_lspid; /* pid of last msgsnd */ - __kernel_pid_t msg_lrpid; /* last receive pid */ - unsigned long __unused4; - unsigned long __unused5; -}; - -#endif /* _S390_MSGBUF_H */ diff --git a/arch/s390/include/uapi/asm/sembuf.h b/arch/s390/include/uapi/asm/sembuf.h deleted file mode 100644 index 3e917697b668..000000000000 --- a/arch/s390/include/uapi/asm/sembuf.h +++ /dev/null @@ -1,30 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#ifndef _S390_SEMBUF_H -#define _S390_SEMBUF_H - -/* - * The semid64_ds structure for S/390 architecture. - * Note extra padding because this structure is passed back and forth - * between kernel and user space. - * - * Pad space is left for: - * - 64-bit time_t to solve y2038 problem (for !__s390x__) - * - 2 miscellaneous 32-bit values - */ - -struct semid64_ds { - struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ - __kernel_time_t sem_otime; /* last semop time */ -#ifndef __s390x__ - unsigned long __unused1; -#endif /* ! __s390x__ */ - __kernel_time_t sem_ctime; /* last change time */ -#ifndef __s390x__ - unsigned long __unused2; -#endif /* ! __s390x__ */ - unsigned long sem_nsems; /* no. 
of semaphores in array */ - unsigned long __unused3; - unsigned long __unused4; -}; - -#endif /* _S390_SEMBUF_H */ diff --git a/arch/s390/include/uapi/asm/shmbuf.h b/arch/s390/include/uapi/asm/shmbuf.h deleted file mode 100644 index 9cdce8d7ce60..000000000000 --- a/arch/s390/include/uapi/asm/shmbuf.h +++ /dev/null @@ -1,49 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#ifndef _S390_SHMBUF_H -#define _S390_SHMBUF_H - -/* - * The shmid64_ds structure for S/390 architecture. - * Note extra padding because this structure is passed back and forth - * between kernel and user space. - * - * Pad space is left for: - * - 64-bit time_t to solve y2038 problem (for !__s390x__) - * - 2 miscellaneous 32-bit values - */ - -struct shmid64_ds { - struct ipc64_perm shm_perm; /* operation perms */ - size_t shm_segsz; /* size of segment (bytes) */ - __kernel_time_t shm_atime; /* last attach time */ -#ifndef __s390x__ - unsigned long __unused1; -#endif /* ! __s390x__ */ - __kernel_time_t shm_dtime; /* last detach time */ -#ifndef __s390x__ - unsigned long __unused2; -#endif /* ! __s390x__ */ - __kernel_time_t shm_ctime; /* last change time */ -#ifndef __s390x__ - unsigned long __unused3; -#endif /* ! __s390x__ */ - __kernel_pid_t shm_cpid; /* pid of creator */ - __kernel_pid_t shm_lpid; /* pid of last operator */ - unsigned long shm_nattch; /* no. of current attaches */ - unsigned long __unused4; - unsigned long __unused5; -}; - -struct shminfo64 { - unsigned long shmmax; - unsigned long shmmin; - unsigned long shmmni; - unsigned long shmseg; - unsigned long shmall; - unsigned long __unused1; - unsigned long __unused2; - unsigned long __unused3; - unsigned long __unused4; -}; - -#endif /* _S390_SHMBUF_H */ diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index f92dd8ed3884..2fed39b26b42 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -6,22 +6,26 @@ ifdef CONFIG_FUNCTION_TRACER # Do not trace tracer code -CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE) # Do not trace early setup code -CFLAGS_REMOVE_als.o = $(CC_FLAGS_FTRACE) -CFLAGS_REMOVE_early.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_als.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_early.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_early_nobss.o = $(CC_FLAGS_FTRACE) endif -GCOV_PROFILE_als.o := n -GCOV_PROFILE_early.o := n +GCOV_PROFILE_als.o := n +GCOV_PROFILE_early.o := n +GCOV_PROFILE_early_nobss.o := n -KCOV_INSTRUMENT_als.o := n -KCOV_INSTRUMENT_early.o := n +KCOV_INSTRUMENT_als.o := n +KCOV_INSTRUMENT_early.o := n +KCOV_INSTRUMENT_early_nobss.o := n -UBSAN_SANITIZE_als.o := n -UBSAN_SANITIZE_early.o := n +UBSAN_SANITIZE_als.o := n +UBSAN_SANITIZE_early.o := n +UBSAN_SANITIZE_early_nobss.o := n # # Use -march=z900 for als.c to be able to print an error @@ -57,7 +61,7 @@ CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o -obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o als.o +obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o als.o early_nobss.o obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o @@ -94,3 +98,6 @@ obj-$(CONFIG_TRACEPOINTS) += trace.o # vdso obj-y += vdso64/ obj-$(CONFIG_COMPAT) += vdso32/ + +chkbss := head.o head64.o als.o early_nobss.o 
+include $(srctree)/arch/s390/scripts/Makefile.chkbss diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c index 80e974adb9e8..d374f9b218b4 100644 --- a/arch/s390/kernel/debug.c +++ b/arch/s390/kernel/debug.c @@ -194,11 +194,13 @@ static debug_entry_t ***debug_areas_alloc(int pages_per_area, int nr_areas) debug_entry_t ***areas; int i, j; - areas = kmalloc(nr_areas * sizeof(debug_entry_t **), GFP_KERNEL); + areas = kmalloc_array(nr_areas, sizeof(debug_entry_t **), GFP_KERNEL); if (!areas) goto fail_malloc_areas; for (i = 0; i < nr_areas; i++) { - areas[i] = kmalloc(pages_per_area * sizeof(debug_entry_t *), GFP_KERNEL); + areas[i] = kmalloc_array(pages_per_area, + sizeof(debug_entry_t *), + GFP_KERNEL); if (!areas[i]) goto fail_malloc_areas2; for (j = 0; j < pages_per_area; j++) { diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 32daa0f84325..827699eb48fa 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -34,32 +34,6 @@ static void __init setup_boot_command_line(void); /* - * Get the TOD clock running. - */ -static void __init reset_tod_clock(void) -{ - u64 time; - - if (store_tod_clock(&time) == 0) - return; - /* TOD clock not running. Set the clock to Unix Epoch. */ - if (set_tod_clock(TOD_UNIX_EPOCH) != 0 || store_tod_clock(&time) != 0) - disabled_wait(0); - - memset(tod_clock_base, 0, 16); - *(__u64 *) &tod_clock_base[1] = TOD_UNIX_EPOCH; - S390_lowcore.last_update_clock = TOD_UNIX_EPOCH; -} - -/* - * Clear bss memory - */ -static noinline __init void clear_bss_section(void) -{ - memset(__bss_start, 0, __bss_stop - __bss_start); -} - -/* * Initialize storage key for kernel pages */ static noinline __init void init_kernel_storage_key(void) @@ -310,57 +284,6 @@ static int __init cad_setup(char *str) } early_param("cad", cad_setup); -static __init void memmove_early(void *dst, const void *src, size_t n) -{ - unsigned long addr; - long incr; - psw_t old; - - if (!n) - return; - incr = 1; - if (dst > src) { - incr = -incr; - dst += n - 1; - src += n - 1; - } - old = S390_lowcore.program_new_psw; - S390_lowcore.program_new_psw.mask = __extract_psw(); - asm volatile( - " larl %[addr],1f\n" - " stg %[addr],%[psw_pgm_addr]\n" - "0: mvc 0(1,%[dst]),0(%[src])\n" - " agr %[dst],%[incr]\n" - " agr %[src],%[incr]\n" - " brctg %[n],0b\n" - "1:\n" - : [addr] "=&d" (addr), - [psw_pgm_addr] "=Q" (S390_lowcore.program_new_psw.addr), - [dst] "+&a" (dst), [src] "+&a" (src), [n] "+d" (n) - : [incr] "d" (incr) - : "cc", "memory"); - S390_lowcore.program_new_psw = old; -} - -static __init noinline void rescue_initrd(void) -{ -#ifdef CONFIG_BLK_DEV_INITRD - unsigned long min_initrd_addr = (unsigned long) _end + (4UL << 20); - /* - * Just like in case of IPL from VM reader we make sure there is a - * gap of 4MB between end of kernel and start of initrd. - * That way we can also be sure that saving an NSS will succeed, - * which however only requires different segments. 
- */ - if (!INITRD_START || !INITRD_SIZE) - return; - if (INITRD_START >= min_initrd_addr) - return; - memmove_early((void *) min_initrd_addr, (void *) INITRD_START, INITRD_SIZE); - INITRD_START = min_initrd_addr; -#endif -} - /* Set up boot command line */ static void __init append_to_cmdline(size_t (*ipl_data)(char *, size_t)) { @@ -410,9 +333,6 @@ static void __init setup_boot_command_line(void) void __init startup_init(void) { - reset_tod_clock(); - rescue_initrd(); - clear_bss_section(); time_early_init(); init_kernel_storage_key(); lockdep_off(); diff --git a/arch/s390/kernel/early_nobss.c b/arch/s390/kernel/early_nobss.c new file mode 100644 index 000000000000..2d84fc48df3a --- /dev/null +++ b/arch/s390/kernel/early_nobss.c @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright IBM Corp. 2007, 2018 + */ + +/* + * Early setup functions which may not rely on an initialized bss + * section. The last thing that is supposed to happen here is + * initialization of the bss section. + */ + +#include <linux/processor.h> +#include <linux/string.h> +#include <asm/sections.h> +#include <asm/lowcore.h> +#include <asm/setup.h> +#include <asm/timex.h> +#include "entry.h" + +static void __init reset_tod_clock(void) +{ + u64 time; + + if (store_tod_clock(&time) == 0) + return; + /* TOD clock not running. Set the clock to Unix Epoch. */ + if (set_tod_clock(TOD_UNIX_EPOCH) != 0 || store_tod_clock(&time) != 0) + disabled_wait(0); + + memset(tod_clock_base, 0, 16); + *(__u64 *) &tod_clock_base[1] = TOD_UNIX_EPOCH; + S390_lowcore.last_update_clock = TOD_UNIX_EPOCH; +} + +static void __init rescue_initrd(void) +{ + unsigned long min_initrd_addr = (unsigned long) _end + (4UL << 20); + + /* + * Just like in case of IPL from VM reader we make sure there is a + * gap of 4MB between end of kernel and start of initrd. + * That way we can also be sure that saving an NSS will succeed, + * which however only requires different segments. + */ + if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD)) + return; + if (!INITRD_START || !INITRD_SIZE) + return; + if (INITRD_START >= min_initrd_addr) + return; + memmove((void *) min_initrd_addr, (void *) INITRD_START, INITRD_SIZE); + INITRD_START = min_initrd_addr; +} + +static void __init clear_bss_section(void) +{ + memset(__bss_start, 0, __bss_stop - __bss_start); +} + +void __init startup_init_nobss(void) +{ + reset_tod_clock(); + rescue_initrd(); + clear_bss_section(); +} diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h index e87758f8fbdc..961abfac2c5f 100644 --- a/arch/s390/kernel/entry.h +++ b/arch/s390/kernel/entry.h @@ -58,6 +58,7 @@ void do_notify_resume(struct pt_regs *regs); void __init init_IRQ(void); void do_IRQ(struct pt_regs *regs, int irq); void do_restart(void); +void __init startup_init_nobss(void); void __init startup_init(void); void die(struct pt_regs *regs, const char *str); int setup_profiling_timer(unsigned int multiplier); diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index 38a973ccf501..791cb9000e86 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S @@ -40,8 +40,12 @@ ENTRY(startup_continue) stg %r15,__LC_KERNEL_STACK # set end of kernel stack aghi %r15,-160 # -# Save ipl parameters, clear bss memory, initialize storage key for kernel pages, -# and create a kernel NSS if the SAVESYS= parm is defined +# Early setup functions that may not rely on an initialized bss section, +# like moving the initrd. Returns with an initialized bss section. 
+# + brasl %r14,startup_init_nobss +# +# Early machine initialization and detection functions. # brasl %r14,startup_init lpswe .Lentry-.LPG1(13) # jump to _stext in primary-space, diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index 0dc8ac8548ee..d298d3cb46d0 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c @@ -123,8 +123,8 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, /* Allocate one syminfo structure per symbol. */ me->arch.nsyms = symtab->sh_size / sizeof(Elf_Sym); - me->arch.syminfo = vmalloc(me->arch.nsyms * - sizeof(struct mod_arch_syminfo)); + me->arch.syminfo = vmalloc(array_size(sizeof(struct mod_arch_syminfo), + me->arch.nsyms)); if (!me->arch.syminfo) return -ENOMEM; symbols = (void *) hdr + symtab->sh_offset; diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c index 8ad6a7128b3a..18ae7b9c71d6 100644 --- a/arch/s390/kernel/nospec-branch.c +++ b/arch/s390/kernel/nospec-branch.c @@ -36,9 +36,9 @@ early_param("nospec", nospec_setup_early); static int __init nospec_report(void) { if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) - pr_info("Spectre V2 mitigation: execute trampolines.\n"); + pr_info("Spectre V2 mitigation: execute trampolines\n"); if (__test_facility(82, S390_lowcore.alt_stfle_fac_list)) - pr_info("Spectre V2 mitigation: limited branch prediction.\n"); + pr_info("Spectre V2 mitigation: limited branch prediction\n"); return 0; } arch_initcall(nospec_report); diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c index feebb2944882..d63fb3c56b8a 100644 --- a/arch/s390/kernel/perf_cpum_cf_events.c +++ b/arch/s390/kernel/perf_cpum_cf_events.c @@ -527,7 +527,7 @@ static __init struct attribute **merge_attr(struct attribute **a, j++; j++; - new = kmalloc(sizeof(struct attribute *) * j, GFP_KERNEL); + new = kmalloc_array(j, sizeof(struct attribute *), GFP_KERNEL); if (!new) return NULL; j = 0; diff --git a/arch/s390/kernel/sthyi.c b/arch/s390/kernel/sthyi.c index 80b862e9c53c..0859cde36f75 100644 --- a/arch/s390/kernel/sthyi.c +++ b/arch/s390/kernel/sthyi.c @@ -315,7 +315,7 @@ static void fill_diag(struct sthyi_sctns *sctns) if (pages <= 0) return; - diag204_buf = vmalloc(PAGE_SIZE * pages); + diag204_buf = vmalloc(array_size(pages, PAGE_SIZE)); if (!diag204_buf) return; diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c index fc7e04c2195b..54f5496913fa 100644 --- a/arch/s390/kernel/sysinfo.c +++ b/arch/s390/kernel/sysinfo.c @@ -294,21 +294,9 @@ static int sysinfo_show(struct seq_file *m, void *v) return 0; } -static int sysinfo_open(struct inode *inode, struct file *file) -{ - return single_open(file, sysinfo_show, NULL); -} - -static const struct file_operations sysinfo_fops = { - .open = sysinfo_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - static int __init sysinfo_create_proc(void) { - proc_create("sysinfo", 0444, NULL, &sysinfo_fops); + proc_create_single("sysinfo", 0444, NULL, sysinfo_show); return 0; } device_initcall(sysinfo_create_proc); @@ -386,18 +374,6 @@ static const struct seq_operations service_level_seq_ops = { .show = service_level_show }; -static int service_level_open(struct inode *inode, struct file *file) -{ - return seq_open(file, &service_level_seq_ops); -} - -static const struct file_operations service_level_ops = { - .open = service_level_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release -}; - static void service_level_vm_print(struct 
seq_file *m, struct service_level *slr) { @@ -420,7 +396,7 @@ static struct service_level service_level_vm = { static __init int create_proc_service_level(void) { - proc_create("service_levels", 0, NULL, &service_level_ops); + proc_create_seq("service_levels", 0, NULL, &service_level_seq_ops); if (MACHINE_IS_VM) register_service_level(&service_level_vm); return 0; diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index a5297a22bc1e..8003b38c1688 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -44,14 +44,8 @@ int is_valid_bugaddr(unsigned long addr) void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str) { - siginfo_t info; - if (user_mode(regs)) { - info.si_signo = si_signo; - info.si_errno = 0; - info.si_code = si_code; - info.si_addr = get_trap_ip(regs); - force_sig_info(si_signo, &info, current); + force_sig_fault(si_signo, si_code, get_trap_ip(regs), current); report_user_fault(regs, si_signo, 0); } else { const struct exception_table_entry *fixup; @@ -80,18 +74,12 @@ NOKPROBE_SYMBOL(do_trap); void do_per_trap(struct pt_regs *regs) { - siginfo_t info; - if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP) return; if (!current->ptrace) return; - info.si_signo = SIGTRAP; - info.si_errno = 0; - info.si_code = TRAP_HWBKPT; - info.si_addr = - (void __force __user *) current->thread.per_event.address; - force_sig_info(SIGTRAP, &info, current); + force_sig_fault(SIGTRAP, TRAP_HWBKPT, + (void __force __user *) current->thread.per_event.address, current); } NOKPROBE_SYMBOL(do_per_trap); @@ -165,7 +153,6 @@ void translation_exception(struct pt_regs *regs) void illegal_op(struct pt_regs *regs) { - siginfo_t info; __u8 opcode[6]; __u16 __user *location; int is_uprobe_insn = 0; @@ -177,13 +164,9 @@ void illegal_op(struct pt_regs *regs) if (get_user(*((__u16 *) opcode), (__u16 __user *) location)) return; if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) { - if (current->ptrace) { - info.si_signo = SIGTRAP; - info.si_errno = 0; - info.si_code = TRAP_BRKPT; - info.si_addr = location; - force_sig_info(SIGTRAP, &info, current); - } else + if (current->ptrace) + force_sig_fault(SIGTRAP, TRAP_BRKPT, location, current); + else signal = SIGILL; #ifdef CONFIG_UPROBES } else if (*((__u16 *) opcode) == UPROBE_SWBP_INSN) { diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c index f3a1c7c6824e..09abae40f917 100644 --- a/arch/s390/kernel/vdso.c +++ b/arch/s390/kernel/vdso.c @@ -285,7 +285,7 @@ static int __init vdso_init(void) + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1; /* Make sure pages are in the correct state */ - vdso32_pagelist = kzalloc(sizeof(struct page *) * (vdso32_pages + 1), + vdso32_pagelist = kcalloc(vdso32_pages + 1, sizeof(struct page *), GFP_KERNEL); BUG_ON(vdso32_pagelist == NULL); for (i = 0; i < vdso32_pages - 1; i++) { @@ -303,7 +303,7 @@ static int __init vdso_init(void) + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1; /* Make sure pages are in the correct state */ - vdso64_pagelist = kzalloc(sizeof(struct page *) * (vdso64_pages + 1), + vdso64_pagelist = kcalloc(vdso64_pages + 1, sizeof(struct page *), GFP_KERNEL); BUG_ON(vdso64_pagelist == NULL); for (i = 0; i < vdso64_pages - 1; i++) { diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index 08d12cfaf091..f0414f52817b 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -82,10 +82,10 @@ SECTIONS . = ALIGN(PAGE_SIZE); .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { - VMLINUX_SYMBOL(_sinittext) = . 
; + _sinittext = .; INIT_TEXT . = ALIGN(PAGE_SIZE); - VMLINUX_SYMBOL(_einittext) = . ; + _einittext = .; } /* diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c index 8e2b8647ee12..07d30ffcfa41 100644 --- a/arch/s390/kvm/gaccess.c +++ b/arch/s390/kvm/gaccess.c @@ -847,7 +847,7 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data, nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1; pages = pages_array; if (nr_pages > ARRAY_SIZE(pages_array)) - pages = vmalloc(nr_pages * sizeof(unsigned long)); + pages = vmalloc(array_size(nr_pages, sizeof(unsigned long))); if (!pages) return -ENOMEM; need_ipte_lock = psw_bits(*psw).dat && !asce.r; diff --git a/arch/s390/kvm/guestdbg.c b/arch/s390/kvm/guestdbg.c index b5f3e82006d0..394a5f53805b 100644 --- a/arch/s390/kvm/guestdbg.c +++ b/arch/s390/kvm/guestdbg.c @@ -153,7 +153,7 @@ void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu) if (guestdbg_sstep_enabled(vcpu)) { /* disable timer (clock-comparator) interrupts */ - vcpu->arch.sie_block->gcr[0] &= ~0x800ul; + vcpu->arch.sie_block->gcr[0] &= ~CR0_CLOCK_COMPARATOR_SUBMASK; vcpu->arch.sie_block->gcr[9] |= PER_EVENT_IFETCH; vcpu->arch.sie_block->gcr[10] = 0; vcpu->arch.sie_block->gcr[11] = -1UL; diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 37d06e022238..daa09f89ca2d 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -159,7 +159,7 @@ static int psw_interrupts_disabled(struct kvm_vcpu *vcpu) static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu) { if (psw_extint_disabled(vcpu) || - !(vcpu->arch.sie_block->gcr[0] & 0x800ul)) + !(vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SUBMASK)) return 0; if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu)) /* No timer interrupts when single stepping */ @@ -172,7 +172,7 @@ static int ckc_irq_pending(struct kvm_vcpu *vcpu) const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm); const u64 ckc = vcpu->arch.sie_block->ckc; - if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) { + if (vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SIGN) { if ((s64)ckc >= (s64)now) return 0; } else if (ckc >= now) { @@ -184,7 +184,7 @@ static int ckc_irq_pending(struct kvm_vcpu *vcpu) static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu) { return !psw_extint_disabled(vcpu) && - (vcpu->arch.sie_block->gcr[0] & 0x400ul); + (vcpu->arch.sie_block->gcr[0] & CR0_CPU_TIMER_SUBMASK); } static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu) @@ -285,15 +285,15 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu) active_mask &= ~IRQ_PEND_IO_MASK; else active_mask = disable_iscs(vcpu, active_mask); - if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul)) + if (!(vcpu->arch.sie_block->gcr[0] & CR0_EXTERNAL_CALL_SUBMASK)) __clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask); - if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul)) + if (!(vcpu->arch.sie_block->gcr[0] & CR0_EMERGENCY_SIGNAL_SUBMASK)) __clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask); - if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul)) + if (!(vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SUBMASK)) __clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask); - if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul)) + if (!(vcpu->arch.sie_block->gcr[0] & CR0_CPU_TIMER_SUBMASK)) __clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask); - if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul)) + if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK)) __clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask); if (psw_mchk_disabled(vcpu)) active_mask 
&= ~IRQ_PEND_MCHK_MASK; @@ -1042,7 +1042,7 @@ int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop) /* external call pending and deliverable */ if (kvm_s390_ext_call_pending(vcpu) && !psw_extint_disabled(vcpu) && - (vcpu->arch.sie_block->gcr[0] & 0x2000ul)) + (vcpu->arch.sie_block->gcr[0] & CR0_EXTERNAL_CALL_SUBMASK)) return 1; if (!exclude_stop && kvm_s390_is_stop_irq_pending(vcpu)) @@ -1062,7 +1062,7 @@ static u64 __calculate_sltime(struct kvm_vcpu *vcpu) u64 cputm, sltime = 0; if (ckc_interrupts_enabled(vcpu)) { - if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) { + if (vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SIGN) { if ((s64)now < (s64)ckc) sltime = tod_to_ns((s64)ckc - (s64)now); } else if (now < ckc) { diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 64c986243018..3b7a5151b6a5 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -791,11 +791,21 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *att static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu); -static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr) +void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm) { struct kvm_vcpu *vcpu; int i; + kvm_s390_vcpu_block_all(kvm); + + kvm_for_each_vcpu(i, vcpu, kvm) + kvm_s390_vcpu_crypto_setup(vcpu); + + kvm_s390_vcpu_unblock_all(kvm); +} + +static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr) +{ if (!test_kvm_facility(kvm, 76)) return -EINVAL; @@ -832,10 +842,7 @@ static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr) return -ENXIO; } - kvm_for_each_vcpu(i, vcpu, kvm) { - kvm_s390_vcpu_crypto_setup(vcpu); - exit_sie(vcpu); - } + kvm_s390_vcpu_crypto_reset_all(kvm); mutex_unlock(&kvm->lock); return 0; } @@ -1033,8 +1040,8 @@ static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr) return ret; } -static void kvm_s390_get_tod_clock_ext(struct kvm *kvm, - struct kvm_s390_vm_tod_clock *gtod) +static void kvm_s390_get_tod_clock(struct kvm *kvm, + struct kvm_s390_vm_tod_clock *gtod) { struct kvm_s390_tod_clock_ext htod; @@ -1043,10 +1050,12 @@ static void kvm_s390_get_tod_clock_ext(struct kvm *kvm, get_tod_clock_ext((char *)&htod); gtod->tod = htod.tod + kvm->arch.epoch; - gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx; - - if (gtod->tod < htod.tod) - gtod->epoch_idx += 1; + gtod->epoch_idx = 0; + if (test_kvm_facility(kvm, 139)) { + gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx; + if (gtod->tod < htod.tod) + gtod->epoch_idx += 1; + } preempt_enable(); } @@ -1056,12 +1065,7 @@ static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr) struct kvm_s390_vm_tod_clock gtod; memset(>od, 0, sizeof(gtod)); - - if (test_kvm_facility(kvm, 139)) - kvm_s390_get_tod_clock_ext(kvm, >od); - else - gtod.tod = kvm_s390_get_tod_clock_fast(kvm); - + kvm_s390_get_tod_clock(kvm, >od); if (copy_to_user((void __user *)attr->addr, >od, sizeof(gtod))) return -EFAULT; @@ -1493,7 +1497,7 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) return -EINVAL; /* Is this guest using storage keys? 
*/ - if (!mm_use_skey(current->mm)) + if (!mm_uses_skeys(current->mm)) return KVM_S390_GET_SKEYS_NONE; /* Enforce sane limit on memory allocation */ @@ -1725,7 +1729,7 @@ static int kvm_s390_set_cmma_bits(struct kvm *kvm, if (args->count == 0) return 0; - bits = vmalloc(sizeof(*bits) * args->count); + bits = vmalloc(array_size(sizeof(*bits), args->count)); if (!bits) return -ENOMEM; @@ -1982,10 +1986,10 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) rc = -ENOMEM; - kvm->arch.use_esca = 0; /* start with basic SCA */ if (!sclp.has_64bscao) alloc_flags |= GFP_DMA; rwlock_init(&kvm->arch.sca_lock); + /* start with basic SCA */ kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags); if (!kvm->arch.sca) goto out_err; @@ -2036,8 +2040,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) kvm_s390_crypto_init(kvm); mutex_init(&kvm->arch.float_int.ais_lock); - kvm->arch.float_int.simm = 0; - kvm->arch.float_int.nimm = 0; spin_lock_init(&kvm->arch.float_int.lock); for (i = 0; i < FIRQ_LIST_COUNT; i++) INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]); @@ -2063,11 +2065,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) kvm->arch.gmap->pfault_enabled = 0; } - kvm->arch.css_support = 0; - kvm->arch.use_irqchip = 0; kvm->arch.use_pfmfi = sclp.has_pfmfi; - kvm->arch.epoch = 0; - + kvm->arch.use_skf = sclp.has_skey; spin_lock_init(&kvm->arch.start_stop_lock); kvm_s390_vsie_init(kvm); kvm_s390_gisa_init(kvm); @@ -2433,8 +2432,12 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu) vcpu->arch.sie_block->ckc = 0UL; vcpu->arch.sie_block->todpr = 0; memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64)); - vcpu->arch.sie_block->gcr[0] = 0xE0UL; - vcpu->arch.sie_block->gcr[14] = 0xC2000000UL; + vcpu->arch.sie_block->gcr[0] = CR0_UNUSED_56 | + CR0_INTERRUPT_KEY_SUBMASK | + CR0_MEASUREMENT_ALERT_SUBMASK; + vcpu->arch.sie_block->gcr[14] = CR14_UNUSED_32 | + CR14_UNUSED_33 | + CR14_EXTERNAL_DAMAGE_SUBMASK; /* make sure the new fpc will be lazily loaded */ save_fpu_regs(); current->thread.fpu.fpc = 0; @@ -3192,7 +3195,7 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu) return 0; if (kvm_s390_vcpu_has_irq(vcpu, 0)) return 0; - if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul)) + if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK)) return 0; if (!vcpu->arch.gmap->pfault_enabled) return 0; @@ -3990,7 +3993,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, return r; } -int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) +vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) { #ifdef CONFIG_KVM_S390_UCONTROL if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET) diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 1b5621f4fe5b..981e3ba97461 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -410,4 +410,17 @@ static inline int kvm_s390_use_sca_entries(void) } void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu, struct mcck_volatile_info *mcck_info); + +/** + * kvm_s390_vcpu_crypto_reset_all + * + * Reset the crypto attributes for each vcpu. This can be done while the vcpus + * are running as each vcpu will be removed from SIE before resetting the crypt + * attributes and restored to SIE afterward. 
+ * + * Note: The kvm->lock must be held while calling this function + * + * @kvm: the KVM guest + */ +void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm); #endif diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index ebfa0442e569..eb0eb60c7be6 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -26,7 +26,6 @@ #include <asm/gmap.h> #include <asm/io.h> #include <asm/ptrace.h> -#include <asm/compat.h> #include <asm/sclp.h> #include "gaccess.h" #include "kvm-s390.h" @@ -205,24 +204,28 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu) int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu) { - int rc = 0; + int rc; struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block; trace_kvm_s390_skey_related_inst(vcpu); - if (!(sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)) && + /* Already enabled? */ + if (vcpu->kvm->arch.use_skf && + !(sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)) && !kvm_s390_test_cpuflags(vcpu, CPUSTAT_KSS)) - return rc; + return 0; rc = s390_enable_skey(); VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc); - if (!rc) { - if (kvm_s390_test_cpuflags(vcpu, CPUSTAT_KSS)) - kvm_s390_clear_cpuflags(vcpu, CPUSTAT_KSS); - else - sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | - ICTL_RRBE); - } - return rc; + if (rc) + return rc; + + if (kvm_s390_test_cpuflags(vcpu, CPUSTAT_KSS)) + kvm_s390_clear_cpuflags(vcpu, CPUSTAT_KSS); + if (!vcpu->kvm->arch.use_skf) + sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; + else + sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE); + return 0; } static int try_handle_skey(struct kvm_vcpu *vcpu) @@ -232,7 +235,7 @@ static int try_handle_skey(struct kvm_vcpu *vcpu) rc = kvm_s390_skey_check_enable(vcpu); if (rc) return rc; - if (sclp.has_skey) { + if (vcpu->kvm->arch.use_skf) { /* with storage-key facility, SIE interprets it for us */ kvm_s390_retry_instr(vcpu); VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation"); diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index 969882b54266..84c89cb9636f 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c @@ -557,7 +557,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_64BSCAO)) gpa |= (u64) READ_ONCE(scb_o->scaoh) << 32; if (gpa) { - if (!(gpa & ~0x1fffUL)) + if (gpa < 2 * PAGE_SIZE) rc = set_validity_icpt(scb_s, 0x0038U); else if ((gpa & ~0x1fffUL) == kvm_s390_get_prefix(vcpu)) rc = set_validity_icpt(scb_s, 0x0011U); @@ -578,7 +578,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) gpa = READ_ONCE(scb_o->itdba) & ~0xffUL; if (gpa && (scb_s->ecb & ECB_TE)) { - if (!(gpa & ~0x1fffUL)) { + if (gpa < 2 * PAGE_SIZE) { rc = set_validity_icpt(scb_s, 0x0080U); goto unpin; } @@ -594,7 +594,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) gpa = READ_ONCE(scb_o->gvrd) & ~0x1ffUL; if (gpa && (scb_s->eca & ECA_VX) && !(scb_s->ecd & ECD_HOSTREGMGMT)) { - if (!(gpa & ~0x1fffUL)) { + if (gpa < 2 * PAGE_SIZE) { rc = set_validity_icpt(scb_s, 0x1310U); goto unpin; } @@ -613,7 +613,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) gpa = READ_ONCE(scb_o->riccbd) & ~0x3fUL; if (gpa && (scb_s->ecb3 & ECB3_RI)) { - if (!(gpa & ~0x1fffUL)) { + if (gpa < 2 * PAGE_SIZE) { rc = set_validity_icpt(scb_s, 0x0043U); goto unpin; } @@ -632,7 +632,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) gpa = READ_ONCE(scb_o->sdnxo) & ~0xfUL; sdnxc = 
READ_ONCE(scb_o->sdnxo) & 0xfUL; - if (!gpa || !(gpa & ~0x1fffUL)) { + if (!gpa || gpa < 2 * PAGE_SIZE) { rc = set_validity_icpt(scb_s, 0x10b0U); goto unpin; } diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile index 9bfe0802684b..57ab40188d4b 100644 --- a/arch/s390/lib/Makefile +++ b/arch/s390/lib/Makefile @@ -8,3 +8,6 @@ obj-y += mem.o xor.o lib-$(CONFIG_SMP) += spinlock.o lib-$(CONFIG_KPROBES) += probes.o lib-$(CONFIG_UPROBES) += probes.o + +chkbss := mem.o +include $(srctree)/arch/s390/scripts/Makefile.chkbss diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c index 920d40894535..6ad15d3fab81 100644 --- a/arch/s390/mm/extmem.c +++ b/arch/s390/mm/extmem.c @@ -103,7 +103,7 @@ static int scode_set; static int dcss_set_subcodes(void) { - char *name = kmalloc(8 * sizeof(char), GFP_KERNEL | GFP_DMA); + char *name = kmalloc(8, GFP_KERNEL | GFP_DMA); unsigned long rx, ry; int rc; diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 93faeca52284..e074480d3598 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -265,14 +265,10 @@ void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault) */ static noinline void do_sigsegv(struct pt_regs *regs, int si_code) { - struct siginfo si; - report_user_fault(regs, SIGSEGV, 1); - si.si_signo = SIGSEGV; - si.si_errno = 0; - si.si_code = si_code; - si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK); - force_sig_info(SIGSEGV, &si, current); + force_sig_fault(SIGSEGV, si_code, + (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK), + current); } static noinline void do_no_context(struct pt_regs *regs) @@ -316,18 +312,13 @@ static noinline void do_low_address(struct pt_regs *regs) static noinline void do_sigbus(struct pt_regs *regs) { - struct task_struct *tsk = current; - struct siginfo si; - /* * Send a sigbus, regardless of whether we were in kernel * or user mode. 
*/ - si.si_signo = SIGBUS; - si.si_errno = 0; - si.si_code = BUS_ADRERR; - si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK); - force_sig_info(SIGBUS, &si, tsk); + force_sig_fault(SIGBUS, BUS_ADRERR, + (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK), + current); } static noinline int signal_return(struct pt_regs *regs) diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c index 2c55a2b9d6c6..bc56ec8abcf7 100644 --- a/arch/s390/mm/gmap.c +++ b/arch/s390/mm/gmap.c @@ -2184,14 +2184,14 @@ int s390_enable_skey(void) int rc = 0; down_write(&mm->mmap_sem); - if (mm_use_skey(mm)) + if (mm_uses_skeys(mm)) goto out_up; - mm->context.use_skey = 1; + mm->context.uses_skeys = 1; for (vma = mm->mmap; vma; vma = vma->vm_next) { if (ksm_madvise(vma, vma->vm_start, vma->vm_end, MADV_UNMERGEABLE, &vma->vm_flags)) { - mm->context.use_skey = 0; + mm->context.uses_skeys = 0; rc = -ENOMEM; goto out_up; } diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c index 562f72955956..84bd6329a88d 100644 --- a/arch/s390/mm/pgalloc.c +++ b/arch/s390/mm/pgalloc.c @@ -190,14 +190,15 @@ unsigned long *page_table_alloc(struct mm_struct *mm) if (!list_empty(&mm->context.pgtable_list)) { page = list_first_entry(&mm->context.pgtable_list, struct page, lru); - mask = atomic_read(&page->_mapcount); + mask = atomic_read(&page->_refcount) >> 24; mask = (mask | (mask >> 4)) & 3; if (mask != 3) { table = (unsigned long *) page_to_phys(page); bit = mask & 1; /* =1 -> second 2K */ if (bit) table += PTRS_PER_PTE; - atomic_xor_bits(&page->_mapcount, 1U << bit); + atomic_xor_bits(&page->_refcount, + 1U << (bit + 24)); list_del(&page->lru); } } @@ -218,12 +219,12 @@ unsigned long *page_table_alloc(struct mm_struct *mm) table = (unsigned long *) page_to_phys(page); if (mm_alloc_pgste(mm)) { /* Return 4K page table with PGSTEs */ - atomic_set(&page->_mapcount, 3); + atomic_xor_bits(&page->_refcount, 3 << 24); memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE); memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE); } else { /* Return the first 2K fragment of the page */ - atomic_set(&page->_mapcount, 1); + atomic_xor_bits(&page->_refcount, 1 << 24); memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE); spin_lock_bh(&mm->context.lock); list_add(&page->lru, &mm->context.pgtable_list); @@ -242,7 +243,8 @@ void page_table_free(struct mm_struct *mm, unsigned long *table) /* Free 2K page table fragment of a 4K page */ bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)); spin_lock_bh(&mm->context.lock); - mask = atomic_xor_bits(&page->_mapcount, 1U << bit); + mask = atomic_xor_bits(&page->_refcount, 1U << (bit + 24)); + mask >>= 24; if (mask & 3) list_add(&page->lru, &mm->context.pgtable_list); else @@ -253,7 +255,6 @@ void page_table_free(struct mm_struct *mm, unsigned long *table) } pgtable_page_dtor(page); - atomic_set(&page->_mapcount, -1); __free_page(page); } @@ -274,7 +275,8 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table, } bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)); spin_lock_bh(&mm->context.lock); - mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit); + mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24)); + mask >>= 24; if (mask & 3) list_add_tail(&page->lru, &mm->context.pgtable_list); else @@ -296,12 +298,13 @@ static void __tlb_remove_table(void *_table) break; case 1: /* lower 2K of a 4K page table */ case 2: /* higher 2K of a 4K page table */ - if (atomic_xor_bits(&page->_mapcount, mask << 4) != 0) + mask = 
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 4f2b65d01a70..301e466e4263 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -158,7 +158,7 @@ static inline pgste_t pgste_update_all(pte_t pte, pgste_t pgste,
 #ifdef CONFIG_PGSTE
 	unsigned long address, bits, skey;
 
-	if (!mm_use_skey(mm) || pte_val(pte) & _PAGE_INVALID)
+	if (!mm_uses_skeys(mm) || pte_val(pte) & _PAGE_INVALID)
 		return pgste;
 	address = pte_val(pte) & PAGE_MASK;
 	skey = (unsigned long) page_get_storage_key(address);
@@ -180,7 +180,7 @@ static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
 	unsigned long address;
 	unsigned long nkey;
 
-	if (!mm_use_skey(mm) || pte_val(entry) & _PAGE_INVALID)
+	if (!mm_uses_skeys(mm) || pte_val(entry) & _PAGE_INVALID)
 		return;
 	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
 	address = pte_val(entry) & PAGE_MASK;
diff --git a/arch/s390/net/Makefile b/arch/s390/net/Makefile
index e0d5f245e42b..8cab6deb0403 100644
--- a/arch/s390/net/Makefile
+++ b/arch/s390/net/Makefile
@@ -2,4 +2,5 @@
 #
 # Arch-specific network modules
 #
-obj-$(CONFIG_BPF_JIT) += bpf_jit.o bpf_jit_comp.o
+obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o
+obj-$(CONFIG_HAVE_PNETID) += pnet.o
diff --git a/arch/s390/net/bpf_jit.S b/arch/s390/net/bpf_jit.S
deleted file mode 100644
index 9f794869c1b0..000000000000
--- a/arch/s390/net/bpf_jit.S
+++ /dev/null
@@ -1,120 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * BPF Jit compiler for s390, help functions.
- *
- * Copyright IBM Corp. 2012,2015
- *
- * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
- *	      Michael Holzheu <holzheu@linux.vnet.ibm.com>
- */
-
-#include <linux/linkage.h>
-#include <asm/nospec-insn.h>
-#include "bpf_jit.h"
-
-/*
- * Calling convention:
- *	registers %r7-%r10, %r11,%r13, and %r15 are call saved
- *
- * Input (64 bit):
- *	%r3 (%b2) = offset into skb data
- *	%r6 (%b5) = return address
- *	%r7 (%b6) = skb pointer
- *	%r12      = skb data pointer
- *
- * Output:
- *	%r14= %b0 = return value (read skb value)
- *
- * Work registers: %r2,%r4,%r5,%r14
- *
- * skb_copy_bits takes 4 parameters:
- *	%r2 = skb pointer
- *	%r3 = offset into skb data
- *	%r4 = pointer to temp buffer
- *	%r5 = length to copy
- *	Return value in %r2: 0 = ok
- *
- * bpf_internal_load_pointer_neg_helper takes 3 parameters:
- *	%r2 = skb pointer
- *	%r3 = offset into data
- *	%r4 = length to copy
- *	Return value in %r2: Pointer to data
- */
-
-#define SKF_MAX_NEG_OFF	-0x200000	/* SKF_LL_OFF from filter.h */
-
-/*
- * Load SIZE bytes from SKB
- */
-#define sk_load_common(NAME, SIZE, LOAD)				\
-ENTRY(sk_load_##NAME);							\
-	ltgr	%r3,%r3;		/* Is offset negative? */	\
-	jl	sk_load_##NAME##_slow_neg;				\
-ENTRY(sk_load_##NAME##_pos);						\
-	aghi	%r3,SIZE;		/* Offset + SIZE */		\
-	clg	%r3,STK_OFF_HLEN(%r15);	/* Offset + SIZE > hlen? */	\
-	jh	sk_load_##NAME##_slow;					\
-	LOAD	%r14,-SIZE(%r3,%r12);	/* Get data from skb */		\
-	B_EX	OFF_OK,%r6;		/* Return */			\
-									\
-sk_load_##NAME##_slow:;							\
-	lgr	%r2,%r7;		/* Arg1 = skb pointer */	\
-	aghi	%r3,-SIZE;		/* Arg2 = offset */		\
-	la	%r4,STK_OFF_TMP(%r15);	/* Arg3 = temp bufffer */	\
-	lghi	%r5,SIZE;		/* Arg4 = size */		\
-	brasl	%r14,skb_copy_bits;	/* Get data from skb */		\
-	LOAD	%r14,STK_OFF_TMP(%r15);	/* Load from temp bufffer */	\
-	ltgr	%r2,%r2;		/* Set cc to (%r2 != 0) */	\
-	BR_EX	%r6;			/* Return */
-
-sk_load_common(word, 4, llgf)	/* r14 = *(u32 *) (skb->data+offset) */
-sk_load_common(half, 2, llgh)	/* r14 = *(u16 *) (skb->data+offset) */
-
-	GEN_BR_THUNK %r6
-	GEN_B_THUNK OFF_OK,%r6
-
-/*
- * Load 1 byte from SKB (optimized version)
- */
-	/* r14 = *(u8 *) (skb->data+offset) */
-ENTRY(sk_load_byte)
-	ltgr	%r3,%r3			# Is offset negative?
-	jl	sk_load_byte_slow_neg
-ENTRY(sk_load_byte_pos)
-	clg	%r3,STK_OFF_HLEN(%r15)	# Offset >= hlen?
-	jnl	sk_load_byte_slow
-	llgc	%r14,0(%r3,%r12)	# Get byte from skb
-	B_EX	OFF_OK,%r6		# Return OK
-
-sk_load_byte_slow:
-	lgr	%r2,%r7			# Arg1 = skb pointer
-					# Arg2 = offset
-	la	%r4,STK_OFF_TMP(%r15)	# Arg3 = pointer to temp buffer
-	lghi	%r5,1			# Arg4 = size (1 byte)
-	brasl	%r14,skb_copy_bits	# Get data from skb
-	llgc	%r14,STK_OFF_TMP(%r15)	# Load result from temp buffer
-	ltgr	%r2,%r2			# Set cc to (%r2 != 0)
-	BR_EX	%r6			# Return cc
-
-#define sk_negative_common(NAME, SIZE, LOAD)				\
-sk_load_##NAME##_slow_neg:;						\
-	cgfi	%r3,SKF_MAX_NEG_OFF;					\
-	jl	bpf_error;						\
-	lgr	%r2,%r7;		/* Arg1 = skb pointer */	\
-					/* Arg2 = offset */		\
-	lghi	%r4,SIZE;		/* Arg3 = size */		\
-	brasl	%r14,bpf_internal_load_pointer_neg_helper;		\
-	ltgr	%r2,%r2;						\
-	jz	bpf_error;						\
-	LOAD	%r14,0(%r2);		/* Get data from pointer */	\
-	xr	%r3,%r3;		/* Set cc to zero */		\
-	BR_EX	%r6;			/* Return cc */
-
-sk_negative_common(word, 4, llgf)
-sk_negative_common(half, 2, llgh)
-sk_negative_common(byte, 1, llgc)
-
-bpf_error:
-# force a return 0 from jit handler
-	ltgr	%r15,%r15	# Set condition code
-	BR_EX	%r6
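The deleted assembler implemented the classic BPF_LD ABS/IND packet loads for the JIT. As a reading aid only, a rough C rendering of the word-sized fast/slow path it provided (the function name is invented for the sketch; negative "ancillary" offsets, which went through bpf_internal_load_pointer_neg_helper(), are left out):

#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/string.h>

/*
 * Reading aid only: approximate C equivalent of the removed sk_load_word
 * helper. Offsets inside the linear skb area are loaded directly (fast
 * path); everything else falls back to skb_copy_bits(), and a failure
 * corresponds to the JITed jump to bpf_error.
 */
static int sketch_sk_load_word(const struct sk_buff *skb, int offset, u32 *val)
{
	__be32 tmp;

	if (offset >= 0 && offset + sizeof(tmp) <= skb_headlen(skb))
		memcpy(&tmp, skb->data + offset, sizeof(tmp));
	else if (skb_copy_bits(skb, offset, &tmp, sizeof(tmp)) < 0)
		return -EFAULT;

	*val = be32_to_cpu(tmp);	/* LD_ABS loads are network byte order */
	return 0;
}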
diff --git a/arch/s390/net/bpf_jit.h b/arch/s390/net/bpf_jit.h
index 5e1e5133132d..7822ea92e54a 100644
--- a/arch/s390/net/bpf_jit.h
+++ b/arch/s390/net/bpf_jit.h
@@ -16,9 +16,6 @@
 #include <linux/filter.h>
 #include <linux/types.h>
 
-extern u8 sk_load_word_pos[], sk_load_half_pos[], sk_load_byte_pos[];
-extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
-
 #endif /* __ASSEMBLY__ */
 
 /*
@@ -36,15 +33,6 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
  *	      |		      |     |
  *	      | BPF stack     |     |
  *	      |		      |     |
- *	      +---------------+     |
- *	      | 8 byte skbp   |     |
- * R15+176 -> +---------------+     |
- *	      | 8 byte hlen   |     |
- * R15+168 -> +---------------+     |
- *	      | 4 byte align  |     |
- *	      +---------------+     |
- *	      | 4 byte temp   |     |
- *	      | for bpf_jit.S |     |
 * R15+160 -> +---------------+     |
 *	      | new backchain |     |
 * R15+152 -> +---------------+     |
@@ -57,17 +45,11 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
 * The stack size used by the BPF program ("BPF stack" above) is passed
 * via "aux->stack_depth".
 */
-#define STK_SPACE_ADD	(8 + 8 + 4 + 4 + 160)
+#define STK_SPACE_ADD	(160)
 #define STK_160_UNUSED	(160 - 12 * 8)
 #define STK_OFF		(STK_SPACE_ADD - STK_160_UNUSED)
-#define STK_OFF_TMP	160	/* Offset of tmp buffer on stack */
-#define STK_OFF_HLEN	168	/* Offset of SKB header length on stack */
-#define STK_OFF_SKBP	176	/* Offset of SKB pointer on stack */
 
 #define STK_OFF_R6	(160 - 11 * 8)	/* Offset of r6 on stack */
 #define STK_OFF_TCCNT	(160 - 12 * 8)	/* Offset of tail_call_cnt on stack */
 
-/* Offset to skip condition code check */
-#define OFF_OK		4
-
 #endif /* __ARCH_S390_NET_BPF_JIT_H */
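The arithmetic behind the smaller reserved area, as a compile-time check (illustrative only; the OLD_/NEW_ names are invented): dropping the skb bookkeeping (8-byte skb pointer, 8-byte header length, 4-byte temp buffer plus 4 bytes of alignment) shrinks STK_OFF from 120 to 96 bytes per JITed program.

/* Illustrative sketch, values copied from the defines above. */
#define OLD_STK_SPACE_ADD	(8 + 8 + 4 + 4 + 160)	/* 184 */
#define NEW_STK_SPACE_ADD	(160)
#define STK_160_UNUSED		(160 - 12 * 8)		/* 64 */

_Static_assert(OLD_STK_SPACE_ADD - STK_160_UNUSED == 120, "old STK_OFF");
_Static_assert(NEW_STK_SPACE_ADD - STK_160_UNUSED == 96, "new STK_OFF");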
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index dd2bcf0e7d00..d2db8acb1a55 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -51,23 +51,21 @@ struct bpf_jit {
 
 #define BPF_SIZE_MAX	0xffff	/* Max size for program (16 bit branches) */
 
-#define SEEN_SKB	1	/* skb access */
-#define SEEN_MEM	2	/* use mem[] for temporary storage */
-#define SEEN_RET0	4	/* ret0_ip points to a valid return 0 */
-#define SEEN_LITERAL	8	/* code uses literals */
-#define SEEN_FUNC	16	/* calls C functions */
-#define SEEN_TAIL_CALL	32	/* code uses tail calls */
-#define SEEN_REG_AX	64	/* code uses constant blinding */
-#define SEEN_STACK	(SEEN_FUNC | SEEN_MEM | SEEN_SKB)
+#define SEEN_MEM	(1 << 0)	/* use mem[] for temporary storage */
+#define SEEN_RET0	(1 << 1)	/* ret0_ip points to a valid return 0 */
+#define SEEN_LITERAL	(1 << 2)	/* code uses literals */
+#define SEEN_FUNC	(1 << 3)	/* calls C functions */
+#define SEEN_TAIL_CALL	(1 << 4)	/* code uses tail calls */
+#define SEEN_REG_AX	(1 << 5)	/* code uses constant blinding */
+#define SEEN_STACK	(SEEN_FUNC | SEEN_MEM)
 
 /*
  * s390 registers
  */
 #define REG_W0		(MAX_BPF_JIT_REG + 0)	/* Work register 1 (even) */
 #define REG_W1		(MAX_BPF_JIT_REG + 1)	/* Work register 2 (odd) */
-#define REG_SKB_DATA	(MAX_BPF_JIT_REG + 2)	/* SKB data register */
-#define REG_L		(MAX_BPF_JIT_REG + 3)	/* Literal pool register */
-#define REG_15		(MAX_BPF_JIT_REG + 4)	/* Register 15 */
+#define REG_L		(MAX_BPF_JIT_REG + 2)	/* Literal pool register */
+#define REG_15		(MAX_BPF_JIT_REG + 3)	/* Register 15 */
 #define REG_0		REG_W0			/* Register 0 */
 #define REG_1		REG_W1			/* Register 1 */
 #define REG_2		BPF_REG_1		/* Register 2 */
@@ -92,10 +90,8 @@ static const int reg2hex[] = {
 	[BPF_REG_9]	= 10,
 	/* BPF stack pointer */
 	[BPF_REG_FP]	= 13,
-	/* Register for blinding (shared with REG_SKB_DATA) */
+	/* Register for blinding */
 	[BPF_REG_AX]	= 12,
-	/* SKB data pointer */
-	[REG_SKB_DATA]	= 12,
 	/* Work registers for s390x backend */
 	[REG_W0]	= 0,
 	[REG_W1]	= 1,
@@ -402,27 +398,6 @@ static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth)
 }
 
 /*
- * For SKB access %b1 contains the SKB pointer. For "bpf_jit.S"
- * we store the SKB header length on the stack and the SKB data
- * pointer in REG_SKB_DATA if BPF_REG_AX is not used.
- */
-static void emit_load_skb_data_hlen(struct bpf_jit *jit)
-{
-	/* Header length: llgf %w1,<len>(%b1) */
-	EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_1,
-		      offsetof(struct sk_buff, len));
-	/* s %w1,<data_len>(%b1) */
-	EMIT4_DISP(0x5b000000, REG_W1, BPF_REG_1,
-		   offsetof(struct sk_buff, data_len));
-	/* stg %w1,ST_OFF_HLEN(%r0,%r15) */
-	EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15, STK_OFF_HLEN);
-	if (!(jit->seen & SEEN_REG_AX))
-		/* lg %skb_data,data_off(%b1) */
-		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
-			      BPF_REG_1, offsetof(struct sk_buff, data));
-}
-
-/*
  * Emit function prologue
  *
  * Save registers and create stack frame if necessary.
@@ -462,12 +437,6 @@ static void bpf_jit_prologue(struct bpf_jit *jit, u32 stack_depth)
 			EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
				      REG_15, 152);
 	}
-	if (jit->seen & SEEN_SKB) {
-		emit_load_skb_data_hlen(jit);
-		/* stg %b1,ST_OFF_SKBP(%r0,%r15) */
-		EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_1, REG_0, REG_15,
-			      STK_OFF_SKBP);
-	}
 }
 
 /*
@@ -537,12 +506,12 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
 {
 	struct bpf_insn *insn = &fp->insnsi[i];
 	int jmp_off, last, insn_count = 1;
-	unsigned int func_addr, mask;
 	u32 dst_reg = insn->dst_reg;
 	u32 src_reg = insn->src_reg;
 	u32 *addrs = jit->addrs;
 	s32 imm = insn->imm;
 	s16 off = insn->off;
+	unsigned int mask;
 
 	if (dst_reg == BPF_REG_AX || src_reg == BPF_REG_AX)
 		jit->seen |= SEEN_REG_AX;
@@ -1029,13 +998,6 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
 		}
 		/* lgr %b0,%r2: load return value into %b0 */
 		EMIT4(0xb9040000, BPF_REG_0, REG_2);
-		if ((jit->seen & SEEN_SKB) &&
-		    bpf_helper_changes_pkt_data((void *)func)) {
-			/* lg %b1,ST_OFF_SKBP(%r15) */
-			EMIT6_DISP_LH(0xe3000000, 0x0004, BPF_REG_1, REG_0,
-				      REG_15, STK_OFF_SKBP);
-			emit_load_skb_data_hlen(jit);
-		}
 		break;
 	}
 	case BPF_JMP | BPF_TAIL_CALL:
@@ -1235,73 +1197,6 @@ branch_oc:
 		jmp_off = addrs[i + off + 1] - (addrs[i + 1] - 4);
 		EMIT4_PCREL(0xa7040000 | mask << 8, jmp_off);
 		break;
-	/*
-	 * BPF_LD
-	 */
-	case BPF_LD | BPF_ABS | BPF_B: /* b0 = *(u8 *) (skb->data+imm) */
-	case BPF_LD | BPF_IND | BPF_B: /* b0 = *(u8 *) (skb->data+imm+src) */
-		if ((BPF_MODE(insn->code) == BPF_ABS) && (imm >= 0))
-			func_addr = __pa(sk_load_byte_pos);
-		else
-			func_addr = __pa(sk_load_byte);
-		goto call_fn;
-	case BPF_LD | BPF_ABS | BPF_H: /* b0 = *(u16 *) (skb->data+imm) */
-	case BPF_LD | BPF_IND | BPF_H: /* b0 = *(u16 *) (skb->data+imm+src) */
-		if ((BPF_MODE(insn->code) == BPF_ABS) && (imm >= 0))
-			func_addr = __pa(sk_load_half_pos);
-		else
-			func_addr = __pa(sk_load_half);
-		goto call_fn;
-	case BPF_LD | BPF_ABS | BPF_W: /* b0 = *(u32 *) (skb->data+imm) */
-	case BPF_LD | BPF_IND | BPF_W: /* b0 = *(u32 *) (skb->data+imm+src) */
-		if ((BPF_MODE(insn->code) == BPF_ABS) && (imm >= 0))
-			func_addr = __pa(sk_load_word_pos);
-		else
-			func_addr = __pa(sk_load_word);
-		goto call_fn;
-call_fn:
-		jit->seen |= SEEN_SKB | SEEN_RET0 | SEEN_FUNC;
-		REG_SET_SEEN(REG_14); /* Return address of possible func call */
-
-		/*
-		 * Implicit input:
-		 *  BPF_REG_6	 (R7) : skb pointer
-		 *  REG_SKB_DATA (R12): skb data pointer (if no BPF_REG_AX)
-		 *
-		 * Calculated input:
-		 *  BPF_REG_2	 (R3) : offset of byte(s) to fetch in skb
-		 *  BPF_REG_5	 (R6) : return address
-		 *
-		 * Output:
-		 *  BPF_REG_0	 (R14): data read from skb
-		 *
-		 * Scratch registers (BPF_REG_1-5)
-		 */
-
-		/* Call function: llilf %w1,func_addr  */
-		EMIT6_IMM(0xc00f0000, REG_W1, func_addr);
-
-		/* Offset: lgfi %b2,imm */
-		EMIT6_IMM(0xc0010000, BPF_REG_2, imm);
-		if (BPF_MODE(insn->code) == BPF_IND)
-			/* agfr %b2,%src (%src is s32 here) */
-			EMIT4(0xb9180000, BPF_REG_2, src_reg);
-
-		/* Reload REG_SKB_DATA if BPF_REG_AX is used */
-		if (jit->seen & SEEN_REG_AX)
-			/* lg %skb_data,data_off(%b6) */
-			EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
-				      BPF_REG_6, offsetof(struct sk_buff, data));
-		/* basr %b5,%w1 (%b5 is call saved) */
-		EMIT2(0x0d00, BPF_REG_5, REG_W1);
-
-		/*
-		 * Note: For fast access we jump directly after the
-		 * jnz instruction from bpf_jit.S
-		 */
-		/* jnz <ret0> */
-		EMIT4_PCREL(0xa7740000, jit->ret0_ip - jit->prg);
-		break;
 	default: /* too complex, give up */
 		pr_err("Unknown opcode %02x\n", insn->code);
 		return -1;
diff --git a/arch/s390/net/pnet.c b/arch/s390/net/pnet.c
new file mode 100644
index 000000000000..e22f1b10a6c7
--- /dev/null
+++ b/arch/s390/net/pnet.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  IBM System z PNET ID Support
+ *
+ *    Copyright IBM Corp. 2018
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <asm/ccwgroup.h>
+#include <asm/ccwdev.h>
+#include <asm/pnet.h>
+
+/*
+ * Get the PNETIDs from a device.
+ * s390 hardware supports the definition of a so-called Physical Network
+ * Identifier (short PNETID) per network device port. These PNETIDs can be
+ * used to identify network devices that are attached to the same physical
+ * network (broadcast domain).
+ *
+ * The device can be
+ * - a ccwgroup device with all bundled subchannels having the same PNETID
+ * - a PCI attached network device
+ *
+ * Returns:
+ * 0:		PNETIDs extracted from device.
+ * -ENOMEM:	No memory to extract utility string.
+ * -EOPNOTSUPP: Device type without utility string support
+ */
+static int pnet_ids_by_device(struct device *dev, u8 *pnetids)
+{
+	memset(pnetids, 0, PNETIDS_LEN);
+	if (dev_is_ccwgroup(dev)) {
+		struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+		u8 *util_str;
+
+		util_str = ccw_device_get_util_str(gdev->cdev[0], 0);
+		if (!util_str)
+			return -ENOMEM;
+		memcpy(pnetids, util_str, PNETIDS_LEN);
+		kfree(util_str);
+		return 0;
+	}
+	if (dev_is_pci(dev)) {
+		struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
+
+		memcpy(pnetids, zdev->util_str, sizeof(zdev->util_str));
+		return 0;
+	}
+	return -EOPNOTSUPP;
+}
+
+/*
+ * Extract the pnetid for a device port.
+ *
+ * Return 0 if a pnetid is found and -ENOENT otherwise.
+ */
+int pnet_id_by_dev_port(struct device *dev, unsigned short port, u8 *pnetid)
+{
+	u8 pnetids[MAX_PNETID_PORTS][MAX_PNETID_LEN];
+	static const u8 zero[MAX_PNETID_LEN] = { 0 };
+	int rc = 0;
+
+	if (!dev || port >= MAX_PNETID_PORTS)
+		return -ENOENT;
+
+	if (!pnet_ids_by_device(dev, (u8 *)pnetids) &&
+	    memcmp(pnetids[port], zero, MAX_PNETID_LEN))
+		memcpy(pnetid, pnetids[port], MAX_PNETID_LEN);
+	else
+		rc = -ENOENT;
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(pnet_id_by_dev_port);
+
+MODULE_DESCRIPTION("pnetid determination from utility strings");
+MODULE_LICENSE("GPL");
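A hypothetical caller, not part of this patch, showing how the new interface might be used: fetch the PNETID of two device ports and compare them to decide whether they sit on the same physical network (the helper name below is invented; MAX_PNETID_LEN and the pnet_id_by_dev_port() prototype are assumed to come from asm/pnet.h as used by the new file above).

#include <linux/device.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/pnet.h>

/* Hypothetical caller: do two ports share a physical network? */
static bool sketch_same_pnet(struct device *a, unsigned short port_a,
			     struct device *b, unsigned short port_b)
{
	u8 id_a[MAX_PNETID_LEN];
	u8 id_b[MAX_PNETID_LEN];

	if (pnet_id_by_dev_port(a, port_a, id_a) ||
	    pnet_id_by_dev_port(b, port_b, id_b))
		return false;	/* at least one port has no PNETID */

	return !memcmp(id_a, id_b, MAX_PNETID_LEN);
}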
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index 93cd0f1ca12b..19b2d2a9b43d 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -19,7 +19,6 @@
 #include <linux/uaccess.h>
 #include <asm/pci_debug.h>
 #include <asm/pci_clp.h>
-#include <asm/compat.h>
 #include <asm/clp.h>
 #include <uapi/asm/clp.h>
 
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 2d15d84c20ed..d387a0fbdd7e 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -668,15 +668,6 @@ void zpci_dma_exit(void)
 	kmem_cache_destroy(dma_region_table_cache);
 }
 
-#define PREALLOC_DMA_DEBUG_ENTRIES	(1 << 16)
-
-static int __init dma_debug_do_init(void)
-{
-	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
-	return 0;
-}
-fs_initcall(dma_debug_do_init);
-
 const struct dma_map_ops s390_pci_dma_ops = {
 	.alloc		= s390_dma_alloc,
 	.free		= s390_dma_free,
@@ -685,8 +676,6 @@ const struct dma_map_ops s390_pci_dma_ops = {
 	.map_page	= s390_dma_map_pages,
 	.unmap_page	= s390_dma_unmap_pages,
 	.mapping_error	= s390_mapping_error,
-	/* if we support direct DMA this must be conditional */
-	.is_phys	= 0,
 	/* dma_supported is unconditionally true without a callback */
 };
 EXPORT_SYMBOL_GPL(s390_pci_dma_ops);
diff --git a/arch/s390/scripts/Makefile.chkbss b/arch/s390/scripts/Makefile.chkbss
new file mode 100644
index 000000000000..d92f2d94a5d9
--- /dev/null
+++ b/arch/s390/scripts/Makefile.chkbss
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+
+quiet_cmd_chkbss = CHKBSS  $<
+define cmd_chkbss
+	if ! $(OBJDUMP) -j .bss -w -h $< | awk 'END { if ($$3) exit 1 }'; then \
+		echo "error: $< .bss section is not empty" >&2; exit 1; \
+	fi; \
+	touch $@;
+endef
+
+$(obj)/built-in.a: $(patsubst %, $(obj)/%.chkbss, $(chkbss))
+
+%.o.chkbss: %.o
+	$(call cmd,chkbss)