Diffstat (limited to 'arch/s390')
-rw-r--r-- | arch/s390/boot/startup.c              |  38
-rw-r--r-- | arch/s390/boot/vmem.c                 |  12
-rw-r--r-- | arch/s390/boot/vmlinux.lds.S          |   1
-rw-r--r-- | arch/s390/configs/debug_defconfig     |  48
-rw-r--r-- | arch/s390/configs/defconfig           |  45
-rw-r--r-- | arch/s390/configs/zfcpdump_defconfig  |   5
-rw-r--r-- | arch/s390/include/asm/entry-common.h  |   2
-rw-r--r-- | arch/s390/include/asm/kvm_host.h      |   1
-rw-r--r-- | arch/s390/include/asm/unistd.h        |   1
-rw-r--r-- | arch/s390/kernel/crash_dump.c         |  54
-rw-r--r-- | arch/s390/kernel/syscall.c            |  27
-rw-r--r-- | arch/s390/kernel/syscalls/syscall.tbl |   2
-rw-r--r-- | arch/s390/kvm/kvm-s390.c              |   1
-rw-r--r-- | arch/s390/kvm/kvm-s390.h              |  15
-rw-r--r-- | arch/s390/kvm/priv.c                  |  32
-rw-r--r-- | arch/s390/mm/pgalloc.c                |   4
-rw-r--r-- | arch/s390/net/bpf_jit_comp.c          | 489
-rw-r--r-- | arch/s390/pci/pci_irq.c               |   2
18 files changed, 580 insertions, 199 deletions
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index 182aac6a0f77..5a36d5538dae 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -170,11 +170,14 @@ static void kaslr_adjust_got(unsigned long offset)
 	u64 *entry;
 
 	/*
-	 * Even without -fPIE, Clang still uses a global offset table for some
-	 * reason. Adjust the GOT entries.
+	 * Adjust GOT entries, except for ones for undefined weak symbols
+	 * that resolved to zero. This also skips the first three reserved
+	 * entries on s390x that are zero.
 	 */
-	for (entry = (u64 *)vmlinux.got_start; entry < (u64 *)vmlinux.got_end; entry++)
-		*entry += offset - __START_KERNEL;
+	for (entry = (u64 *)vmlinux.got_start; entry < (u64 *)vmlinux.got_end; entry++) {
+		if (*entry)
+			*entry += offset - __START_KERNEL;
+	}
 }
 
 /*
@@ -384,7 +387,7 @@ static void fixup_vmlinux_info(void)
 void startup_kernel(void)
 {
 	unsigned long kernel_size = vmlinux.image_size + vmlinux.bss_size;
-	unsigned long nokaslr_offset_phys = mem_safe_offset();
+	unsigned long nokaslr_offset_phys, kaslr_large_page_offset;
 	unsigned long amode31_lma = 0;
 	unsigned long max_physmem_end;
 	unsigned long asce_limit;
@@ -393,6 +396,12 @@ void startup_kernel(void)
 
 	fixup_vmlinux_info();
 	setup_lpp();
+
+	/*
+	 * Non-randomized kernel physical start address must be _SEGMENT_SIZE
+	 * aligned (see below).
+	 */
+	nokaslr_offset_phys = ALIGN(mem_safe_offset(), _SEGMENT_SIZE);
 	safe_addr = PAGE_ALIGN(nokaslr_offset_phys + kernel_size);
 
 	/*
@@ -425,10 +434,25 @@ void startup_kernel(void)
 	save_ipl_cert_comp_list();
 	rescue_initrd(safe_addr, ident_map_size);
 
-	if (kaslr_enabled())
-		__kaslr_offset_phys = randomize_within_range(kernel_size, THREAD_SIZE, 0, ident_map_size);
+	/*
+	 * __kaslr_offset_phys must be _SEGMENT_SIZE aligned, so the lower
+	 * 20 bits (the offset within a large page) are zero. Copy the last
+	 * 20 bits of __kaslr_offset, which is THREAD_SIZE aligned, to
+	 * __kaslr_offset_phys.
+	 *
+	 * With this the last 20 bits of __kaslr_offset_phys and __kaslr_offset
+	 * are identical, which is required to allow for large mappings of the
+	 * kernel image.
+	 */
+	kaslr_large_page_offset = __kaslr_offset & ~_SEGMENT_MASK;
+	if (kaslr_enabled()) {
+		unsigned long end = ident_map_size - kaslr_large_page_offset;
+
+		__kaslr_offset_phys = randomize_within_range(kernel_size, _SEGMENT_SIZE, 0, end);
+	}
 	if (!__kaslr_offset_phys)
 		__kaslr_offset_phys = nokaslr_offset_phys;
+	__kaslr_offset_phys |= kaslr_large_page_offset;
 	kaslr_adjust_vmlinux_info(__kaslr_offset_phys);
 	physmem_reserve(RR_VMLINUX, __kaslr_offset_phys, kernel_size);
 	deploy_kernel((void *)__kaslr_offset_phys);
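The comment in the hunk above is the key to the new KASLR scheme: the virtual offset is THREAD_SIZE aligned, the physical base is randomized with _SEGMENT_SIZE (1 MB) alignment, and the low 20 bits of the virtual offset are then copied into the physical one. A standalone sketch of that arithmetic (illustrative only, not kernel code; constants mirror the hunk):

#include <assert.h>
#include <stdint.h>

#define SEGMENT_SIZE	(1ULL << 20)		/* _SEGMENT_SIZE */
#define SEGMENT_MASK	(~(SEGMENT_SIZE - 1))	/* _SEGMENT_MASK */

/* random_base must be segment (1 MB) aligned, as produced by
 * randomize_within_range(..., _SEGMENT_SIZE, ...) above. */
static uint64_t stitch_phys_base(uint64_t kaslr_offset_virt, uint64_t random_base)
{
	uint64_t large_page_offset = kaslr_offset_virt & ~SEGMENT_MASK;

	return random_base | large_page_offset;
}

int main(void)
{
	uint64_t virt = 0x3c0812000ULL;	/* THREAD_SIZE-aligned example */
	uint64_t phys = stitch_phys_base(virt, 0x5600000ULL);

	/* virtual and physical base now agree modulo 1 MB */
	assert((phys & ~SEGMENT_MASK) == (virt & ~SEGMENT_MASK));
	return 0;
}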
diff --git a/arch/s390/boot/vmem.c b/arch/s390/boot/vmem.c
index 96d48b7112d4..40cfce2687c4 100644
--- a/arch/s390/boot/vmem.c
+++ b/arch/s390/boot/vmem.c
@@ -261,21 +261,27 @@ static unsigned long _pa(unsigned long addr, unsigned long size, enum populate_m
 
 static bool large_allowed(enum populate_mode mode)
 {
-	return (mode == POPULATE_DIRECT) || (mode == POPULATE_IDENTITY);
+	return (mode == POPULATE_DIRECT) || (mode == POPULATE_IDENTITY) || (mode == POPULATE_KERNEL);
 }
 
 static bool can_large_pud(pud_t *pu_dir, unsigned long addr, unsigned long end,
			  enum populate_mode mode)
 {
+	unsigned long size = end - addr;
+
 	return machine.has_edat2 && large_allowed(mode) &&
-	       IS_ALIGNED(addr, PUD_SIZE) && (end - addr) >= PUD_SIZE;
+	       IS_ALIGNED(addr, PUD_SIZE) && (size >= PUD_SIZE) &&
+	       IS_ALIGNED(_pa(addr, size, mode), PUD_SIZE);
 }
 
 static bool can_large_pmd(pmd_t *pm_dir, unsigned long addr, unsigned long end,
			  enum populate_mode mode)
 {
+	unsigned long size = end - addr;
+
 	return machine.has_edat1 && large_allowed(mode) &&
-	       IS_ALIGNED(addr, PMD_SIZE) && (end - addr) >= PMD_SIZE;
+	       IS_ALIGNED(addr, PMD_SIZE) && (size >= PMD_SIZE) &&
+	       IS_ALIGNED(_pa(addr, size, mode), PMD_SIZE);
 }
 
 static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long end,
diff --git a/arch/s390/boot/vmlinux.lds.S b/arch/s390/boot/vmlinux.lds.S
index 1fe5a1d3ff60..a750711d44c8 100644
--- a/arch/s390/boot/vmlinux.lds.S
+++ b/arch/s390/boot/vmlinux.lds.S
@@ -109,6 +109,7 @@ SECTIONS
 #ifdef CONFIG_KERNEL_UNCOMPRESSED
 	. = ALIGN(PAGE_SIZE);
 	. += AMODE31_SIZE;		/* .amode31 section */
+	. = ALIGN(1 << 20);		/* _SEGMENT_SIZE */
 #else
 	. = ALIGN(8);
 #endif
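The vmem.c hunk above is the consumer of that invariant: a large PUD/PMD mapping is only allowed when the physical address is as aligned as the virtual one, which is exactly what the new IS_ALIGNED(_pa(...), ...) checks enforce. A hypothetical standalone version of the strengthened predicate (names are illustrative):

#include <stdbool.h>
#include <stdint.h>

#define PMD_SIZE	(1ULL << 20)	/* 1 MB segment on s390 */
#define ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

static bool can_use_large_pmd(uint64_t vaddr, uint64_t vend, uint64_t paddr)
{
	uint64_t size = vend - vaddr;

	/* range must be big enough, and both addresses PMD-aligned */
	return ALIGNED(vaddr, PMD_SIZE) && size >= PMD_SIZE &&
	       ALIGNED(paddr, PMD_SIZE);
}

The matching vmlinux.lds.S change pads the uncompressed image to 1 << 20 bytes so the same invariant holds for CONFIG_KERNEL_UNCOMPRESSED builds.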
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 145342e46ea8..f3602414a961 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -43,7 +43,6 @@ CONFIG_PROFILING=y
 CONFIG_KEXEC=y
 CONFIG_KEXEC_FILE=y
 CONFIG_KEXEC_SIG=y
-CONFIG_CRASH_DUMP=y
 CONFIG_LIVEPATCH=y
 CONFIG_MARCH_Z13=y
 CONFIG_NR_CPUS=512
@@ -51,6 +50,7 @@ CONFIG_NUMA=y
 CONFIG_HZ_100=y
 CONFIG_CERT_STORE=y
 CONFIG_EXPOLINE=y
+# CONFIG_EXPOLINE_EXTERN is not set
 CONFIG_EXPOLINE_AUTO=y
 CONFIG_CHSC_SCH=y
 CONFIG_VFIO_CCW=m
@@ -76,6 +76,7 @@ CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODULE_UNLOAD_TAINT_TRACKING=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_MODULE_SIG_SHA256=y
 CONFIG_BLK_DEV_THROTTLING=y
 CONFIG_BLK_WBT=y
 CONFIG_BLK_CGROUP_IOLATENCY=y
@@ -100,7 +101,6 @@ CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTREMOVE=y
 CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
-CONFIG_CMA_DEBUG=y
 CONFIG_CMA_DEBUGFS=y
 CONFIG_CMA_SYSFS=y
 CONFIG_CMA_AREAS=7
@@ -119,6 +119,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_XFRM_USER=m
 CONFIG_NET_KEY=m
 CONFIG_SMC_DIAG=m
+CONFIG_SMC_LO=y
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
 CONFIG_IP_ADVANCED_ROUTER=y
@@ -133,7 +134,6 @@ CONFIG_IP_MROUTE=y
 CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
 CONFIG_IP_PIMSM_V1=y
 CONFIG_IP_PIMSM_V2=y
-CONFIG_SYN_COOKIES=y
 CONFIG_NET_IPVTI=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
@@ -167,6 +167,7 @@ CONFIG_BRIDGE_NETFILTER=m
 CONFIG_NETFILTER_NETLINK_HOOK=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_ZONES=y
 CONFIG_NF_CONNTRACK_PROCFS=y
 CONFIG_NF_CONNTRACK_EVENTS=y
 CONFIG_NF_CONNTRACK_TIMEOUT=y
@@ -183,17 +184,39 @@ CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_CT_NETLINK=m
 CONFIG_NF_CT_NETLINK_TIMEOUT=m
+CONFIG_NF_CT_NETLINK_HELPER=m
+CONFIG_NETFILTER_NETLINK_GLUE_CT=y
 CONFIG_NF_TABLES=m
 CONFIG_NF_TABLES_INET=y
+CONFIG_NF_TABLES_NETDEV=y
+CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
+CONFIG_NFT_FLOW_OFFLOAD=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
 CONFIG_NFT_NAT=m
+CONFIG_NFT_TUNNEL=m
+CONFIG_NFT_QUEUE=m
+CONFIG_NFT_QUOTA=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
-CONFIG_NETFILTER_XTABLES_COMPAT=y
+CONFIG_NFT_XFRM=m
+CONFIG_NFT_SOCKET=m
+CONFIG_NFT_OSF=m
+CONFIG_NFT_TPROXY=m
+CONFIG_NFT_SYNPROXY=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
+CONFIG_NFT_FIB_NETDEV=m
+CONFIG_NFT_REJECT_NETDEV=m
+CONFIG_NF_FLOW_TABLE_INET=m
+CONFIG_NF_FLOW_TABLE=m
+CONFIG_NF_FLOW_TABLE_PROCFS=y
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_AUDIT=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -206,8 +229,10 @@ CONFIG_NETFILTER_XT_TARGET_HMARK=m
 CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
 CONFIG_NETFILTER_XT_TARGET_LOG=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NETMAP=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_REDIRECT=m
 CONFIG_NETFILTER_XT_TARGET_TEE=m
 CONFIG_NETFILTER_XT_TARGET_TPROXY=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
@@ -216,6 +241,7 @@ CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
 CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
 CONFIG_NETFILTER_XT_MATCH_BPF=m
+CONFIG_NETFILTER_XT_MATCH_CGROUP=m
 CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
 CONFIG_NETFILTER_XT_MATCH_COMMENT=m
 CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
@@ -230,6 +256,7 @@ CONFIG_NETFILTER_XT_MATCH_DSCP=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
 CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
 CONFIG_NETFILTER_XT_MATCH_IPVS=m
 CONFIG_NETFILTER_XT_MATCH_LENGTH=m
@@ -247,6 +274,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
 CONFIG_NETFILTER_XT_MATCH_RATEEST=m
 CONFIG_NETFILTER_XT_MATCH_REALM=m
 CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
 CONFIG_NETFILTER_XT_MATCH_STATE=m
 CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
 CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -302,7 +330,6 @@ CONFIG_IP_NF_TARGET_ECN=m
 CONFIG_IP_NF_TARGET_TTL=m
 CONFIG_IP_NF_RAW=m
 CONFIG_IP_NF_SECURITY=m
-CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NFT_FIB_IPV6=m
@@ -373,7 +400,6 @@ CONFIG_NET_ACT_POLICE=m
 CONFIG_NET_ACT_GACT=m
 CONFIG_GACT_PROB=y
 CONFIG_NET_ACT_MIRRED=m
-CONFIG_NET_ACT_IPT=m
 CONFIG_NET_ACT_NAT=m
 CONFIG_NET_ACT_PEDIT=m
 CONFIG_NET_ACT_SIMP=m
@@ -462,6 +488,7 @@ CONFIG_DM_VERITY=m
 CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y
 CONFIG_DM_SWITCH=m
 CONFIG_DM_INTEGRITY=m
+CONFIG_DM_VDO=m
 CONFIG_NETDEVICES=y
 CONFIG_BONDING=m
 CONFIG_DUMMY=m
@@ -574,18 +601,16 @@ CONFIG_WATCHDOG=y
 CONFIG_WATCHDOG_NOWAYOUT=y
 CONFIG_SOFT_WATCHDOG=m
 CONFIG_DIAG288_WATCHDOG=m
-# CONFIG_DRM_DEBUG_MODESET_LOCK is not set
+CONFIG_DRM=m
+CONFIG_DRM_VIRTIO_GPU=m
 CONFIG_FB=y
 # CONFIG_FB_DEVICE is not set
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
 # CONFIG_HID_SUPPORT is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_INFINIBAND=m
 CONFIG_INFINIBAND_USER_ACCESS=m
 CONFIG_MLX4_INFINIBAND=m
 CONFIG_MLX5_INFINIBAND=m
-CONFIG_SYNC_FILE=y
 CONFIG_VFIO=m
 CONFIG_VFIO_PCI=m
 CONFIG_MLX5_VFIO_PCI=m
@@ -645,7 +670,6 @@ CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_EXFAT_FS=m
 CONFIG_NTFS_FS=m
-CONFIG_NTFS_RW=y
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_TMPFS_POSIX_ACL=y
@@ -663,6 +687,7 @@ CONFIG_SQUASHFS_XZ=y
 CONFIG_SQUASHFS_ZSTD=y
 CONFIG_ROMFS_FS=m
 CONFIG_NFS_FS=m
+CONFIG_NFS_V2=m
 CONFIG_NFS_V3_ACL=y
 CONFIG_NFS_V4=m
 CONFIG_NFS_SWAP=y
@@ -879,6 +904,5 @@ CONFIG_RBTREE_TEST=y
 CONFIG_INTERVAL_TREE_TEST=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
-CONFIG_STRING_SELFTEST=y
 CONFIG_TEST_BITOPS=m
 CONFIG_TEST_BPF=m
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index dc237896f99d..d0d8925fdf09 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -41,7 +41,6 @@ CONFIG_PROFILING=y
 CONFIG_KEXEC=y
 CONFIG_KEXEC_FILE=y
 CONFIG_KEXEC_SIG=y
-CONFIG_CRASH_DUMP=y
 CONFIG_LIVEPATCH=y
 CONFIG_MARCH_Z13=y
 CONFIG_NR_CPUS=512
@@ -49,6 +48,7 @@ CONFIG_NUMA=y
 CONFIG_HZ_100=y
 CONFIG_CERT_STORE=y
 CONFIG_EXPOLINE=y
+# CONFIG_EXPOLINE_EXTERN is not set
 CONFIG_EXPOLINE_AUTO=y
 CONFIG_CHSC_SCH=y
 CONFIG_VFIO_CCW=m
@@ -71,6 +71,7 @@ CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODULE_UNLOAD_TAINT_TRACKING=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_MODULE_SIG_SHA256=y
 CONFIG_BLK_DEV_THROTTLING=y
 CONFIG_BLK_WBT=y
 CONFIG_BLK_CGROUP_IOLATENCY=y
@@ -110,6 +111,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_XFRM_USER=m
 CONFIG_NET_KEY=m
 CONFIG_SMC_DIAG=m
+CONFIG_SMC_LO=y
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
 CONFIG_IP_ADVANCED_ROUTER=y
@@ -124,7 +126,6 @@ CONFIG_IP_MROUTE=y
 CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
 CONFIG_IP_PIMSM_V1=y
 CONFIG_IP_PIMSM_V2=y
-CONFIG_SYN_COOKIES=y
 CONFIG_NET_IPVTI=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
@@ -158,6 +159,7 @@ CONFIG_BRIDGE_NETFILTER=m
 CONFIG_NETFILTER_NETLINK_HOOK=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_ZONES=y
 CONFIG_NF_CONNTRACK_PROCFS=y
 CONFIG_NF_CONNTRACK_EVENTS=y
 CONFIG_NF_CONNTRACK_TIMEOUT=y
@@ -174,17 +176,39 @@ CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_CT_NETLINK=m
 CONFIG_NF_CT_NETLINK_TIMEOUT=m
+CONFIG_NF_CT_NETLINK_HELPER=m
+CONFIG_NETFILTER_NETLINK_GLUE_CT=y
 CONFIG_NF_TABLES=m
 CONFIG_NF_TABLES_INET=y
+CONFIG_NF_TABLES_NETDEV=y
+CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
+CONFIG_NFT_FLOW_OFFLOAD=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
 CONFIG_NFT_NAT=m
+CONFIG_NFT_TUNNEL=m
+CONFIG_NFT_QUEUE=m
+CONFIG_NFT_QUOTA=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
-CONFIG_NETFILTER_XTABLES_COMPAT=y
+CONFIG_NFT_XFRM=m
+CONFIG_NFT_SOCKET=m
+CONFIG_NFT_OSF=m
+CONFIG_NFT_TPROXY=m
+CONFIG_NFT_SYNPROXY=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
+CONFIG_NFT_FIB_NETDEV=m
+CONFIG_NFT_REJECT_NETDEV=m
+CONFIG_NF_FLOW_TABLE_INET=m
+CONFIG_NF_FLOW_TABLE=m
+CONFIG_NF_FLOW_TABLE_PROCFS=y
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_AUDIT=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -197,8 +221,10 @@ CONFIG_NETFILTER_XT_TARGET_HMARK=m
 CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
 CONFIG_NETFILTER_XT_TARGET_LOG=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NETMAP=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_REDIRECT=m
 CONFIG_NETFILTER_XT_TARGET_TEE=m
 CONFIG_NETFILTER_XT_TARGET_TPROXY=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
@@ -207,6 +233,7 @@ CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
 CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
 CONFIG_NETFILTER_XT_MATCH_BPF=m
+CONFIG_NETFILTER_XT_MATCH_CGROUP=m
 CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
 CONFIG_NETFILTER_XT_MATCH_COMMENT=m
 CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
@@ -221,6 +248,7 @@ CONFIG_NETFILTER_XT_MATCH_DSCP=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
 CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
 CONFIG_NETFILTER_XT_MATCH_IPVS=m
 CONFIG_NETFILTER_XT_MATCH_LENGTH=m
@@ -238,6 +266,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
 CONFIG_NETFILTER_XT_MATCH_RATEEST=m
 CONFIG_NETFILTER_XT_MATCH_REALM=m
 CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
 CONFIG_NETFILTER_XT_MATCH_STATE=m
 CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
 CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -293,7 +322,6 @@ CONFIG_IP_NF_TARGET_ECN=m
 CONFIG_IP_NF_TARGET_TTL=m
 CONFIG_IP_NF_RAW=m
 CONFIG_IP_NF_SECURITY=m
-CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NFT_FIB_IPV6=m
@@ -363,7 +391,6 @@ CONFIG_NET_ACT_POLICE=m
 CONFIG_NET_ACT_GACT=m
 CONFIG_GACT_PROB=y
 CONFIG_NET_ACT_MIRRED=m
-CONFIG_NET_ACT_IPT=m
 CONFIG_NET_ACT_NAT=m
 CONFIG_NET_ACT_PEDIT=m
 CONFIG_NET_ACT_SIMP=m
@@ -452,6 +479,7 @@ CONFIG_DM_VERITY=m
 CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y
 CONFIG_DM_SWITCH=m
 CONFIG_DM_INTEGRITY=m
+CONFIG_DM_VDO=m
 CONFIG_NETDEVICES=y
 CONFIG_BONDING=m
 CONFIG_DUMMY=m
@@ -564,17 +592,16 @@ CONFIG_WATCHDOG_CORE=y
 CONFIG_WATCHDOG_NOWAYOUT=y
 CONFIG_SOFT_WATCHDOG=m
 CONFIG_DIAG288_WATCHDOG=m
+CONFIG_DRM=m
+CONFIG_DRM_VIRTIO_GPU=m
 CONFIG_FB=y
 # CONFIG_FB_DEVICE is not set
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
 # CONFIG_HID_SUPPORT is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_INFINIBAND=m
 CONFIG_INFINIBAND_USER_ACCESS=m
 CONFIG_MLX4_INFINIBAND=m
 CONFIG_MLX5_INFINIBAND=m
-CONFIG_SYNC_FILE=y
 CONFIG_VFIO=m
 CONFIG_VFIO_PCI=m
 CONFIG_MLX5_VFIO_PCI=m
@@ -630,7 +657,6 @@ CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_EXFAT_FS=m
 CONFIG_NTFS_FS=m
-CONFIG_NTFS_RW=y
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_TMPFS_POSIX_ACL=y
@@ -649,6 +675,7 @@ CONFIG_SQUASHFS_XZ=y
 CONFIG_SQUASHFS_ZSTD=y
 CONFIG_ROMFS_FS=m
 CONFIG_NFS_FS=m
+CONFIG_NFS_V2=m
 CONFIG_NFS_V3_ACL=y
 CONFIG_NFS_V4=m
 CONFIG_NFS_SWAP=y
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index c51f3ec4eb28..8c2b61363bab 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -9,25 +9,22 @@ CONFIG_BPF_SYSCALL=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_KEXEC=y
-CONFIG_CRASH_DUMP=y
 CONFIG_MARCH_Z13=y
 CONFIG_NR_CPUS=2
 CONFIG_HZ_100=y
 # CONFIG_CHSC_SCH is not set
 # CONFIG_SCM_BUS is not set
+# CONFIG_AP is not set
 # CONFIG_PFAULT is not set
 # CONFIG_S390_HYPFS is not set
 # CONFIG_VIRTUALIZATION is not set
 # CONFIG_S390_GUEST is not set
 # CONFIG_SECCOMP is not set
-# CONFIG_GCC_PLUGINS is not set
 # CONFIG_BLOCK_LEGACY_AUTOLOAD is not set
 CONFIG_PARTITION_ADVANCED=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 # CONFIG_SWAP is not set
 # CONFIG_COMPAT_BRK is not set
-# CONFIG_COMPACTION is not set
-# CONFIG_MIGRATION is not set
 CONFIG_NET=y
 # CONFIG_IUCV is not set
 # CONFIG_PCPU_DEV_REFCNT is not set
diff --git a/arch/s390/include/asm/entry-common.h b/arch/s390/include/asm/entry-common.h
index 7f5004065e8a..35555c944630 100644
--- a/arch/s390/include/asm/entry-common.h
+++ b/arch/s390/include/asm/entry-common.h
@@ -54,7 +54,7 @@ static __always_inline void arch_exit_to_user_mode(void)
 static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
						  unsigned long ti_work)
 {
-	choose_random_kstack_offset(get_tod_clock_fast() & 0xff);
+	choose_random_kstack_offset(get_tod_clock_fast());
 }
 
 #define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 95990461888f..9281063636a7 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -427,6 +427,7 @@ struct kvm_vcpu_stat {
 	u64 instruction_io_other;
 	u64 instruction_lpsw;
 	u64 instruction_lpswe;
+	u64 instruction_lpswey;
 	u64 instruction_pfmf;
 	u64 instruction_ptff;
 	u64 instruction_sck;
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index 4260bc5ce7f8..70fc671397da 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -35,6 +35,5 @@
 #define __ARCH_WANT_SYS_FORK
 #define __ARCH_WANT_SYS_VFORK
 #define __ARCH_WANT_SYS_CLONE
-#define __ARCH_WANT_SYS_CLONE3
 
 #endif /* _ASM_S390_UNISTD_H_ */
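On the entry-common.h change above: choose_random_kstack_offset() is only an entropy feed, and the generic layer masks the accumulated value down to the usable bit range when the offset is actually applied, so pre-masking the TOD clock with 0xff at the call site merely discarded entropy. A simplified model of that split (assumed shape of the generic plumbing, not the actual kernel macros):

#include <stdint.h>

static uint32_t kstack_offset;	/* per-CPU in the real kernel */

/* syscall exit: mix in all incoming bits, no masking here */
static void choose_offset(uint32_t rand)
{
	kstack_offset = (kstack_offset >> 5 | kstack_offset << 27) ^ rand;
}

/* syscall entry: the mask happens at use, e.g. ~10 bits, 8-byte aligned */
static uint32_t apply_offset(void)
{
	return kstack_offset & 0x3ff & ~0x7u;
}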
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index 9863ebe75019..edae13416196 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -451,7 +451,7 @@ static void *nt_final(void *ptr)
 /*
  * Initialize ELF header (new kernel)
  */
-static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt)
+static void *ehdr_init(Elf64_Ehdr *ehdr, int phdr_count)
 {
 	memset(ehdr, 0, sizeof(*ehdr));
 	memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
@@ -465,11 +465,8 @@ static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt)
 	ehdr->e_phoff = sizeof(Elf64_Ehdr);
 	ehdr->e_ehsize = sizeof(Elf64_Ehdr);
 	ehdr->e_phentsize = sizeof(Elf64_Phdr);
-	/*
-	 * Number of memory chunk PT_LOAD program headers plus one kernel
-	 * image PT_LOAD program header plus one PT_NOTE program header.
-	 */
-	ehdr->e_phnum = mem_chunk_cnt + 1 + 1;
+	/* Number of PT_LOAD program headers plus PT_NOTE program header */
+	ehdr->e_phnum = phdr_count + 1;
 	return ehdr + 1;
 }
 
@@ -503,12 +500,14 @@ static int get_mem_chunk_cnt(void)
 /*
  * Initialize ELF loads (new kernel)
  */
-static void loads_init(Elf64_Phdr *phdr)
+static void loads_init(Elf64_Phdr *phdr, bool os_info_has_vm)
 {
-	unsigned long old_identity_base = os_info_old_value(OS_INFO_IDENTITY_BASE);
+	unsigned long old_identity_base = 0;
 	phys_addr_t start, end;
 	u64 idx;
 
+	if (os_info_has_vm)
+		old_identity_base = os_info_old_value(OS_INFO_IDENTITY_BASE);
 	for_each_physmem_range(idx, &oldmem_type, &start, &end) {
 		phdr->p_type = PT_LOAD;
 		phdr->p_vaddr = old_identity_base + start;
@@ -522,6 +521,11 @@ static void loads_init(Elf64_Phdr *phdr)
 	}
 }
 
+static bool os_info_has_vm(void)
+{
+	return os_info_old_value(OS_INFO_KASLR_OFFSET);
+}
+
 /*
  * Prepare PT_LOAD type program header for kernel image region
  */
@@ -566,7 +570,7 @@ static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
 	return ptr;
 }
 
-static size_t get_elfcorehdr_size(int mem_chunk_cnt)
+static size_t get_elfcorehdr_size(int phdr_count)
 {
 	size_t size;
 
@@ -581,10 +585,8 @@ static size_t get_elfcorehdr_size(int mem_chunk_cnt)
 	size += nt_vmcoreinfo_size();
 	/* nt_final */
 	size += sizeof(Elf64_Nhdr);
-	/* PT_LOAD type program header for kernel text region */
-	size += sizeof(Elf64_Phdr);
 	/* PT_LOADS */
-	size += mem_chunk_cnt * sizeof(Elf64_Phdr);
+	size += phdr_count * sizeof(Elf64_Phdr);
 
 	return size;
 }
@@ -595,8 +597,8 @@ static size_t get_elfcorehdr_size(int mem_chunk_cnt)
 int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
 {
 	Elf64_Phdr *phdr_notes, *phdr_loads, *phdr_text;
+	int mem_chunk_cnt, phdr_text_cnt;
 	size_t alloc_size;
-	int mem_chunk_cnt;
 	void *ptr, *hdr;
 	u64 hdr_off;
 
@@ -615,12 +617,14 @@
 	}
 
 	mem_chunk_cnt = get_mem_chunk_cnt();
+	phdr_text_cnt = os_info_has_vm() ? 1 : 0;
 
-	alloc_size = get_elfcorehdr_size(mem_chunk_cnt);
+	alloc_size = get_elfcorehdr_size(mem_chunk_cnt + phdr_text_cnt);
 
 	hdr = kzalloc(alloc_size, GFP_KERNEL);
 
-	/* Without elfcorehdr /proc/vmcore cannot be created. Thus creating
+	/*
+	 * Without elfcorehdr /proc/vmcore cannot be created. Thus creating
	 * a dump with this crash kernel will fail. Panic now to allow other
	 * dump mechanisms to take over.
	 */
@@ -628,21 +632,23 @@
		panic("s390 kdump allocating elfcorehdr failed");
 
 	/* Init elf header */
-	ptr = ehdr_init(hdr, mem_chunk_cnt);
+	phdr_notes = ehdr_init(hdr, mem_chunk_cnt + phdr_text_cnt);
 	/* Init program headers */
-	phdr_notes = ptr;
-	ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr));
-	phdr_text = ptr;
-	ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr));
-	phdr_loads = ptr;
-	ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr) * mem_chunk_cnt);
+	if (phdr_text_cnt) {
+		phdr_text = phdr_notes + 1;
+		phdr_loads = phdr_text + 1;
+	} else {
+		phdr_loads = phdr_notes + 1;
+	}
+	ptr = PTR_ADD(phdr_loads, sizeof(Elf64_Phdr) * mem_chunk_cnt);
 	/* Init notes */
 	hdr_off = PTR_DIFF(ptr, hdr);
 	ptr = notes_init(phdr_notes, ptr, ((unsigned long) hdr) + hdr_off);
 	/* Init kernel text program header */
-	text_init(phdr_text);
+	if (phdr_text_cnt)
+		text_init(phdr_text);
 	/* Init loads */
-	loads_init(phdr_loads);
+	loads_init(phdr_loads, phdr_text_cnt);
 	/* Finalize program headers */
 	hdr_off = PTR_DIFF(ptr, hdr);
 	*addr = (unsigned long long) hdr;
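The crash_dump.c rework above makes the kernel-image PT_LOAD entry optional: when the old kernel provided no virtual-memory info (no KASLR offset in os_info), the text header is simply not emitted and all sizes and counts are derived from one phdr_count. A compact sketch of the resulting elfcorehdr layout (illustrative only; plain ELF types, no kernel helpers):

#include <elf.h>
#include <stddef.h>

/* Layout: Ehdr | PT_NOTE phdr | [kernel text PT_LOAD] | memory PT_LOADs */
static void layout_phdrs(void *hdr, int mem_chunk_cnt, int have_vm,
			 Elf64_Phdr **notes, Elf64_Phdr **text,
			 Elf64_Phdr **loads)
{
	Elf64_Ehdr *ehdr = hdr;

	/* e_phnum = all PT_LOADs plus one PT_NOTE */
	ehdr->e_phnum = mem_chunk_cnt + have_vm + 1;
	*notes = (Elf64_Phdr *)(ehdr + 1);
	*text = have_vm ? *notes + 1 : NULL;
	*loads = have_vm ? *text + 1 : *notes + 1;
}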
diff --git a/arch/s390/kernel/syscall.c b/arch/s390/kernel/syscall.c
index dc2355c623d6..50cbcbbaa03d 100644
--- a/arch/s390/kernel/syscall.c
+++ b/arch/s390/kernel/syscall.c
@@ -38,33 +38,6 @@
 
 #include "entry.h"
 
-/*
- * Perform the mmap() system call. Linux for S/390 isn't able to handle more
- * than 5 system call parameters, so this system call uses a memory block
- * for parameter passing.
- */
-
-struct s390_mmap_arg_struct {
-	unsigned long addr;
-	unsigned long len;
-	unsigned long prot;
-	unsigned long flags;
-	unsigned long fd;
-	unsigned long offset;
-};
-
-SYSCALL_DEFINE1(mmap2, struct s390_mmap_arg_struct __user *, arg)
-{
-	struct s390_mmap_arg_struct a;
-	int error = -EFAULT;
-
-	if (copy_from_user(&a, arg, sizeof(a)))
-		goto out;
-	error = ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
-out:
-	return error;
-}
-
 #ifdef CONFIG_SYSVIPC
 /*
  * sys_ipc() is the de-multiplexer for the SysV IPC calls.
diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl
index bd0fee24ad10..01071182763e 100644
--- a/arch/s390/kernel/syscalls/syscall.tbl
+++ b/arch/s390/kernel/syscalls/syscall.tbl
@@ -418,7 +418,7 @@
 412	32	utimensat_time64	-	sys_utimensat
 413	32	pselect6_time64		-	compat_sys_pselect6_time64
 414	32	ppoll_time64		-	compat_sys_ppoll_time64
-416	32	io_pgetevents_time64	-	sys_io_pgetevents
+416	32	io_pgetevents_time64	-	compat_sys_io_pgetevents_time64
 417	32	recvmmsg_time64		-	compat_sys_recvmmsg_time64
 418	32	mq_timedsend_time64	-	sys_mq_timedsend
 419	32	mq_timedreceive_time64	-	sys_mq_timedreceive
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 82e9631cd9ef..54b5b2565df8 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -132,6 +132,7 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
 	STATS_DESC_COUNTER(VCPU, instruction_io_other),
 	STATS_DESC_COUNTER(VCPU, instruction_lpsw),
 	STATS_DESC_COUNTER(VCPU, instruction_lpswe),
+	STATS_DESC_COUNTER(VCPU, instruction_lpswey),
 	STATS_DESC_COUNTER(VCPU, instruction_pfmf),
 	STATS_DESC_COUNTER(VCPU, instruction_ptff),
 	STATS_DESC_COUNTER(VCPU, instruction_sck),
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 111eb5c74784..bf8534218af3 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -138,6 +138,21 @@ static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, u8 *ar)
 	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
 }
 
+static inline u64 kvm_s390_get_base_disp_siy(struct kvm_vcpu *vcpu, u8 *ar)
+{
+	u32 base1 = vcpu->arch.sie_block->ipb >> 28;
+	s64 disp1;
+
+	/* The displacement is a 20bit _SIGNED_ value */
+	disp1 = sign_extend64(((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
+			      ((vcpu->arch.sie_block->ipb & 0xff00) << 4), 19);
+
+	if (ar)
+		*ar = base1;
+
+	return (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1;
+}
+
 static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
					      u64 *address1, u64 *address2,
					      u8 *ar_b1, u8 *ar_b2)
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 1be19cc9d73c..1a49b89706f8 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -797,6 +797,36 @@ static int handle_lpswe(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+static int handle_lpswey(struct kvm_vcpu *vcpu)
+{
+	psw_t new_psw;
+	u64 addr;
+	int rc;
+	u8 ar;
+
+	vcpu->stat.instruction_lpswey++;
+
+	if (!test_kvm_facility(vcpu->kvm, 193))
+		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
+
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
+	addr = kvm_s390_get_base_disp_siy(vcpu, &ar);
+	if (addr & 7)
+		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
+	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
+	if (rc)
+		return kvm_s390_inject_prog_cond(vcpu, rc);
+
+	vcpu->arch.sie_block->gpsw = new_psw;
+	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
+		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
+	return 0;
+}
+
 static int handle_stidp(struct kvm_vcpu *vcpu)
 {
 	u64 stidp_data = vcpu->kvm->arch.model.cpuid;
@@ -1462,6 +1492,8 @@ int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
 	case 0x61:
 	case 0x62:
 		return handle_ri(vcpu);
+	case 0x71:
+		return handle_lpswey(vcpu);
 	default:
 		return -EOPNOTSUPP;
 	}
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index abb629d7e131..7e3e767ab87d 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -55,6 +55,8 @@ unsigned long *crst_table_alloc(struct mm_struct *mm)
 
 void crst_table_free(struct mm_struct *mm, unsigned long *table)
 {
+	if (!table)
+		return;
 	pagetable_free(virt_to_ptdesc(table));
 }
 
@@ -262,6 +264,8 @@ static unsigned long *base_crst_alloc(unsigned long val)
 
 static void base_crst_free(unsigned long *table)
 {
+	if (!table)
+		return;
 	pagetable_free(virt_to_ptdesc(table));
 }
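kvm_s390_get_base_disp_siy() above decodes the RSY instruction format used by LPSWEY: the 20-bit displacement is signed and split in the SIE ipb word into a low 12-bit DL2 field and a high 8-bit DH2 field. A standalone demo of the same decode (hypothetical helper, not kernel code):

#include <stdint.h>
#include <stdio.h>

static int64_t decode_disp20(uint32_t ipb)
{
	uint32_t dl2 = (ipb & 0x0fff0000) >> 16;	/* disp bits 0..11 */
	uint32_t dh2 = (ipb & 0x0000ff00) << 4;		/* disp bits 12..19 */

	/* sign-extend the 20-bit value from bit 19 */
	return ((int64_t)(dh2 + dl2) << 44) >> 44;
}

int main(void)
{
	/* DL2 = 0xfff, DH2 = 0xff -> displacement -1 */
	uint32_t ipb = (0xfffu << 16) | (0xffu << 8);

	printf("%lld\n", (long long)decode_disp20(ipb));	/* prints -1 */
	return 0;
}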
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 4be8f5cadd02..9d440a0b729e 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -31,11 +31,12 @@
 #include <asm/nospec-branch.h>
 #include <asm/set_memory.h>
 #include <asm/text-patching.h>
+#include <asm/unwind.h>
 #include "bpf_jit.h"
 
 struct bpf_jit {
 	u32 seen;		/* Flags to remember seen eBPF instructions */
-	u32 seen_reg[16];	/* Array to remember which registers are used */
+	u16 seen_regs;		/* Mask to remember which registers are used */
 	u32 *addrs;		/* Array with relative instruction addresses */
 	u8 *prg_buf;		/* Start of program */
 	int size;		/* Size of program and literal pool */
@@ -53,6 +54,8 @@ struct bpf_jit {
 	int excnt;		/* Number of exception table entries */
 	int prologue_plt_ret;	/* Return address for prologue hotpatch PLT */
 	int prologue_plt;	/* Start of prologue hotpatch PLT */
+	int kern_arena;		/* Pool offset of kernel arena address */
+	u64 user_arena;		/* User arena address */
 };
 
 #define SEEN_MEM	BIT(0)		/* use mem[] for temporary storage */
@@ -60,6 +63,8 @@ struct bpf_jit {
 #define SEEN_FUNC	BIT(2)		/* calls C functions */
 
 #define SEEN_STACK	(SEEN_FUNC | SEEN_MEM)
 
+#define NVREGS		0xffc0		/* %r6-%r15 */
+
 /*
  * s390 registers
  */
@@ -118,8 +123,8 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
 {
 	u32 r1 = reg2hex[b1];
 
-	if (r1 >= 6 && r1 <= 15 && !jit->seen_reg[r1])
-		jit->seen_reg[r1] = 1;
+	if (r1 >= 6 && r1 <= 15)
+		jit->seen_regs |= (1 << r1);
 }
 
 #define REG_SET_SEEN(b1)				\
@@ -127,8 +132,6 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
 	reg_set_seen(jit, b1);				\
 })
 
-#define REG_SEEN(b1) jit->seen_reg[reg2hex[(b1)]]
-
 /*
  * EMIT macros for code generation
 */
@@ -436,12 +439,12 @@ static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re, u32 stack_depth)
 /*
  * Return first seen register (from start)
  */
-static int get_start(struct bpf_jit *jit, int start)
+static int get_start(u16 seen_regs, int start)
 {
 	int i;
 
 	for (i = start; i <= 15; i++) {
-		if (jit->seen_reg[i])
+		if (seen_regs & (1 << i))
 			return i;
 	}
 	return 0;
 }
@@ -450,15 +453,15 @@
 
 /*
  * Return last seen register (from start) (gap >= 2)
 */
-static int get_end(struct bpf_jit *jit, int start)
+static int get_end(u16 seen_regs, int start)
 {
 	int i;
 
 	for (i = start; i < 15; i++) {
-		if (!jit->seen_reg[i] && !jit->seen_reg[i + 1])
+		if (!(seen_regs & (3 << i)))
 			return i - 1;
 	}
-	return jit->seen_reg[15] ? 15 : 14;
+	return (seen_regs & (1 << 15)) ? 15 : 14;
 }
 
 #define REGS_SAVE	1
@@ -467,8 +470,10 @@
  * Save and restore clobbered registers (6-15) on stack.
  * We save/restore registers in chunks with gap >= 2 registers.
 */
-static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth)
+static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth,
+			      u16 extra_regs)
 {
+	u16 seen_regs = jit->seen_regs | extra_regs;
 	const int last = 15, save_restore_size = 6;
 	int re = 6, rs;
 
@@ -482,10 +487,10 @@
 	}
 
 	do {
-		rs = get_start(jit, re);
+		rs = get_start(seen_regs, re);
 		if (!rs)
 			break;
-		re = get_end(jit, rs + 1);
+		re = get_end(seen_regs, rs + 1);
 		if (op == REGS_SAVE)
 			save_regs(jit, rs, re);
 		else
@@ -570,8 +575,21 @@ static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp,
 	}
 	/* Tail calls have to skip above initialization */
 	jit->tail_call_start = jit->prg;
-	/* Save registers */
-	save_restore_regs(jit, REGS_SAVE, stack_depth);
+	if (fp->aux->exception_cb) {
+		/*
+		 * Switch stack, the new address is in the 2nd parameter.
+		 *
+		 * Arrange the restoration of %r6-%r15 in the epilogue.
+		 * Do not restore them now, the prog does not need them.
+		 */
+		/* lgr %r15,%r3 */
+		EMIT4(0xb9040000, REG_15, REG_3);
+		jit->seen_regs |= NVREGS;
+	} else {
+		/* Save registers */
+		save_restore_regs(jit, REGS_SAVE, stack_depth,
+				  fp->aux->exception_boundary ? NVREGS : 0);
+	}
 	/* Setup literal pool */
 	if (is_first_pass(jit) || (jit->seen & SEEN_LITERAL)) {
 		if (!is_first_pass(jit) &&
@@ -647,7 +665,7 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
 	/* Load exit code: lgr %r2,%b0 */
 	EMIT4(0xb9040000, REG_2, BPF_REG_0);
 	/* Restore registers */
-	save_restore_regs(jit, REGS_RESTORE, stack_depth);
+	save_restore_regs(jit, REGS_RESTORE, stack_depth, 0);
 	if (nospec_uses_trampoline()) {
 		jit->r14_thunk_ip = jit->prg;
 		/* Generate __s390_indirect_jump_r14 thunk */
@@ -667,50 +685,111 @@
 	jit->prg += sizeof(struct bpf_plt);
 }
 
-static int get_probe_mem_regno(const u8 *insn)
-{
-	/*
-	 * insn must point to llgc, llgh, llgf, lg, lgb, lgh or lgf, which have
-	 * destination register at the same position.
-	 */
-	if (insn[0] != 0xe3) /* common prefix */
-		return -1;
-	if (insn[5] != 0x90 && /* llgc */
-	    insn[5] != 0x91 && /* llgh */
-	    insn[5] != 0x16 && /* llgf */
-	    insn[5] != 0x04 && /* lg */
-	    insn[5] != 0x77 && /* lgb */
-	    insn[5] != 0x15 && /* lgh */
-	    insn[5] != 0x14) /* lgf */
-		return -1;
-	return insn[1] >> 4;
-}
-
 bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
 {
 	regs->psw.addr = extable_fixup(x);
-	regs->gprs[x->data] = 0;
+	if (x->data != -1)
+		regs->gprs[x->data] = 0;
 	return true;
 }
 
-static int bpf_jit_probe_mem(struct bpf_jit *jit, struct bpf_prog *fp,
-			     int probe_prg, int nop_prg)
+/*
+ * A single BPF probe instruction
+ */
+struct bpf_jit_probe {
+	int prg;	/* JITed instruction offset */
+	int nop_prg;	/* JITed nop offset */
+	int reg;	/* Register to clear on exception */
+	int arena_reg;	/* Register to use for arena addressing */
+};
+
+static void bpf_jit_probe_init(struct bpf_jit_probe *probe)
+{
+	probe->prg = -1;
+	probe->nop_prg = -1;
+	probe->reg = -1;
+	probe->arena_reg = REG_0;
+}
+
+/*
+ * Handlers of certain exceptions leave psw.addr pointing to the instruction
+ * directly after the failing one. Therefore, create two exception table
+ * entries and also add a nop in case two probing instructions come directly
+ * after each other.
+ */
+static void bpf_jit_probe_emit_nop(struct bpf_jit *jit,
+				   struct bpf_jit_probe *probe)
+{
+	if (probe->prg == -1 || probe->nop_prg != -1)
+		/* The probe is not armed or nop is already emitted. */
+		return;
+
+	probe->nop_prg = jit->prg;
+	/* bcr 0,%0 */
+	_EMIT2(0x0700);
+}
+
+static void bpf_jit_probe_load_pre(struct bpf_jit *jit, struct bpf_insn *insn,
+				   struct bpf_jit_probe *probe)
+{
+	if (BPF_MODE(insn->code) != BPF_PROBE_MEM &&
+	    BPF_MODE(insn->code) != BPF_PROBE_MEMSX &&
+	    BPF_MODE(insn->code) != BPF_PROBE_MEM32)
+		return;
+
+	if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) {
+		/* lgrl %r1,kern_arena */
+		EMIT6_PCREL_RILB(0xc4080000, REG_W1, jit->kern_arena);
+		probe->arena_reg = REG_W1;
+	}
+	probe->prg = jit->prg;
+	probe->reg = reg2hex[insn->dst_reg];
+}
+
+static void bpf_jit_probe_store_pre(struct bpf_jit *jit, struct bpf_insn *insn,
+				    struct bpf_jit_probe *probe)
+{
+	if (BPF_MODE(insn->code) != BPF_PROBE_MEM32)
+		return;
+
+	/* lgrl %r1,kern_arena */
+	EMIT6_PCREL_RILB(0xc4080000, REG_W1, jit->kern_arena);
+	probe->arena_reg = REG_W1;
+	probe->prg = jit->prg;
+}
+
+static void bpf_jit_probe_atomic_pre(struct bpf_jit *jit,
+				     struct bpf_insn *insn,
+				     struct bpf_jit_probe *probe)
+{
+	if (BPF_MODE(insn->code) != BPF_PROBE_ATOMIC)
+		return;
+
+	/* lgrl %r1,kern_arena */
+	EMIT6_PCREL_RILB(0xc4080000, REG_W1, jit->kern_arena);
+	/* agr %r1,%dst */
+	EMIT4(0xb9080000, REG_W1, insn->dst_reg);
+	probe->arena_reg = REG_W1;
+	probe->prg = jit->prg;
+}
+
+static int bpf_jit_probe_post(struct bpf_jit *jit, struct bpf_prog *fp,
+			      struct bpf_jit_probe *probe)
 {
 	struct exception_table_entry *ex;
-	int reg, prg;
+	int i, prg;
 	s64 delta;
 	u8 *insn;
-	int i;
 
+	if (probe->prg == -1)
+		/* The probe is not armed. */
+		return 0;
+	bpf_jit_probe_emit_nop(jit, probe);
 	if (!fp->aux->extable)
 		/* Do nothing during early JIT passes. */
 		return 0;
-	insn = jit->prg_buf + probe_prg;
-	reg = get_probe_mem_regno(insn);
-	if (WARN_ON_ONCE(reg < 0))
-		/* JIT bug - unexpected probe instruction. */
-		return -1;
-	if (WARN_ON_ONCE(probe_prg + insn_length(*insn) != nop_prg))
+	insn = jit->prg_buf + probe->prg;
+	if (WARN_ON_ONCE(probe->prg + insn_length(*insn) != probe->nop_prg))
		/* JIT bug - gap between probe and nop instructions. */
		return -1;
	for (i = 0; i < 2; i++) {
@@ -719,23 +798,24 @@
			return -1;
		ex = &fp->aux->extable[jit->excnt];
		/* Add extable entries for probe and nop instructions. */
-		prg = i == 0 ? probe_prg : nop_prg;
+		prg = i == 0 ? probe->prg : probe->nop_prg;
		delta = jit->prg_buf + prg - (u8 *)&ex->insn;
		if (WARN_ON_ONCE(delta < INT_MIN || delta > INT_MAX))
			/* JIT bug - code and extable must be close. */
			return -1;
		ex->insn = delta;
		/*
-		 * Always land on the nop. Note that extable infrastructure
-		 * ignores fixup field, it is handled by ex_handler_bpf().
+		 * Land on the current instruction. Note that the extable
+		 * infrastructure ignores the fixup field; it is handled by
+		 * ex_handler_bpf().
		 */
-		delta = jit->prg_buf + nop_prg - (u8 *)&ex->fixup;
+		delta = jit->prg_buf + jit->prg - (u8 *)&ex->fixup;
		if (WARN_ON_ONCE(delta < INT_MIN || delta > INT_MAX))
			/* JIT bug - landing pad and extable must be close. */
			return -1;
		ex->fixup = delta;
		ex->type = EX_TYPE_BPF;
-		ex->data = reg;
+		ex->data = probe->reg;
		jit->excnt++;
	}
	return 0;
@@ -782,19 +862,15 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
 	s32 branch_oc_off = insn->off;
 	u32 dst_reg = insn->dst_reg;
 	u32 src_reg = insn->src_reg;
+	struct bpf_jit_probe probe;
 	int last, insn_count = 1;
 	u32 *addrs = jit->addrs;
 	s32 imm = insn->imm;
 	s16 off = insn->off;
-	int probe_prg = -1;
 	unsigned int mask;
-	int nop_prg;
 	int err;
 
-	if (BPF_CLASS(insn->code) == BPF_LDX &&
-	    (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
-	     BPF_MODE(insn->code) == BPF_PROBE_MEMSX))
-		probe_prg = jit->prg;
+	bpf_jit_probe_init(&probe);
 
 	switch (insn->code) {
 	/*
@@ -823,6 +899,22 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
 		}
 		break;
 	case BPF_ALU64 | BPF_MOV | BPF_X:
+		if (insn_is_cast_user(insn)) {
+			int patch_brc;
+
+			/* ltgr %dst,%src */
+			EMIT4(0xb9020000, dst_reg, src_reg);
+			/* brc 8,0f */
+			patch_brc = jit->prg;
+			EMIT4_PCREL_RIC(0xa7040000, 8, 0);
+			/* iihf %dst,user_arena>>32 */
+			EMIT6_IMM(0xc0080000, dst_reg, jit->user_arena >> 32);
+			/* 0: */
+			if (jit->prg_buf)
+				*(u16 *)(jit->prg_buf + patch_brc + 2) =
+					(jit->prg - patch_brc) >> 1;
+			break;
+		}
 		switch (insn->off) {
 		case 0: /* DST = SRC */
 			/* lgr %dst,%src */
@@ -1366,51 +1458,99 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
 	 * BPF_ST(X)
 	 */
 	case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src_reg */
-		/* stcy %src,off(%dst) */
-		EMIT6_DISP_LH(0xe3000000, 0x0072, src_reg, dst_reg, REG_0, off);
+	case BPF_STX | BPF_PROBE_MEM32 | BPF_B:
+		bpf_jit_probe_store_pre(jit, insn, &probe);
+		/* stcy %src,off(%dst,%arena) */
+		EMIT6_DISP_LH(0xe3000000, 0x0072, src_reg, dst_reg,
+			      probe.arena_reg, off);
+		err = bpf_jit_probe_post(jit, fp, &probe);
+		if (err < 0)
+			return err;
 		jit->seen |= SEEN_MEM;
 		break;
 	case BPF_STX | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = src */
-		/* sthy %src,off(%dst) */
-		EMIT6_DISP_LH(0xe3000000, 0x0070, src_reg, dst_reg, REG_0, off);
+	case BPF_STX | BPF_PROBE_MEM32 | BPF_H:
+		bpf_jit_probe_store_pre(jit, insn, &probe);
+		/* sthy %src,off(%dst,%arena) */
+		EMIT6_DISP_LH(0xe3000000, 0x0070, src_reg, dst_reg,
+			      probe.arena_reg, off);
+		err = bpf_jit_probe_post(jit, fp, &probe);
+		if (err < 0)
+			return err;
 		jit->seen |= SEEN_MEM;
 		break;
 	case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
-		/* sty %src,off(%dst) */
-		EMIT6_DISP_LH(0xe3000000, 0x0050, src_reg, dst_reg, REG_0, off);
+	case BPF_STX | BPF_PROBE_MEM32 | BPF_W:
+		bpf_jit_probe_store_pre(jit, insn, &probe);
+		/* sty %src,off(%dst,%arena) */
+		EMIT6_DISP_LH(0xe3000000, 0x0050, src_reg, dst_reg,
+			      probe.arena_reg, off);
+		err = bpf_jit_probe_post(jit, fp, &probe);
+		if (err < 0)
+			return err;
 		jit->seen |= SEEN_MEM;
 		break;
 	case BPF_STX | BPF_MEM | BPF_DW: /* (u64 *)(dst + off) = src */
-		/* stg %src,off(%dst) */
-		EMIT6_DISP_LH(0xe3000000, 0x0024, src_reg, dst_reg, REG_0, off);
+	case BPF_STX | BPF_PROBE_MEM32 | BPF_DW:
+		bpf_jit_probe_store_pre(jit, insn, &probe);
+		/* stg %src,off(%dst,%arena) */
+		EMIT6_DISP_LH(0xe3000000, 0x0024, src_reg, dst_reg,
+			      probe.arena_reg, off);
+		err = bpf_jit_probe_post(jit, fp, &probe);
+		if (err < 0)
+			return err;
 		jit->seen |= SEEN_MEM;
 		break;
 	case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
+	case BPF_ST | BPF_PROBE_MEM32 | BPF_B:
 		/* lhi %w0,imm */
 		EMIT4_IMM(0xa7080000, REG_W0, (u8) imm);
-		/* stcy %w0,off(dst) */
-		EMIT6_DISP_LH(0xe3000000, 0x0072, REG_W0, dst_reg, REG_0, off);
+		bpf_jit_probe_store_pre(jit, insn, &probe);
+		/* stcy %w0,off(%dst,%arena) */
+		EMIT6_DISP_LH(0xe3000000, 0x0072, REG_W0, dst_reg,
+			      probe.arena_reg, off);
+		err = bpf_jit_probe_post(jit, fp, &probe);
+		if (err < 0)
+			return err;
 		jit->seen |= SEEN_MEM;
 		break;
 	case BPF_ST | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = imm */
+	case BPF_ST | BPF_PROBE_MEM32 | BPF_H:
 		/* lhi %w0,imm */
 		EMIT4_IMM(0xa7080000, REG_W0, (u16) imm);
-		/* sthy %w0,off(dst) */
-		EMIT6_DISP_LH(0xe3000000, 0x0070, REG_W0, dst_reg, REG_0, off);
+		bpf_jit_probe_store_pre(jit, insn, &probe);
+		/* sthy %w0,off(%dst,%arena) */
+		EMIT6_DISP_LH(0xe3000000, 0x0070, REG_W0, dst_reg,
+			      probe.arena_reg, off);
+		err = bpf_jit_probe_post(jit, fp, &probe);
+		if (err < 0)
+			return err;
 		jit->seen |= SEEN_MEM;
 		break;
 	case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
+	case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
 		/* llilf %w0,imm */
 		EMIT6_IMM(0xc00f0000, REG_W0, (u32) imm);
-		/* sty %w0,off(%dst) */
-		EMIT6_DISP_LH(0xe3000000, 0x0050, REG_W0, dst_reg, REG_0, off);
+		bpf_jit_probe_store_pre(jit, insn, &probe);
+		/* sty %w0,off(%dst,%arena) */
+		EMIT6_DISP_LH(0xe3000000, 0x0050, REG_W0, dst_reg,
+			      probe.arena_reg, off);
+		err = bpf_jit_probe_post(jit, fp, &probe);
+		if (err < 0)
+			return err;
 		jit->seen |= SEEN_MEM;
 		break;
 	case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
+	case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
 		/* lgfi %w0,imm */
 		EMIT6_IMM(0xc0010000, REG_W0, imm);
-		/* stg %w0,off(%dst) */
-		EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W0, dst_reg, REG_0, off);
+		bpf_jit_probe_store_pre(jit, insn, &probe);
+		/* stg %w0,off(%dst,%arena) */
+		EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W0, dst_reg,
+			      probe.arena_reg, off);
+		err = bpf_jit_probe_post(jit, fp, &probe);
+		if (err < 0)
+			return err;
 		jit->seen |= SEEN_MEM;
 		break;
 	/*
@@ -1418,15 +1558,30 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
 	 */
 	case BPF_STX | BPF_ATOMIC | BPF_DW:
 	case BPF_STX | BPF_ATOMIC | BPF_W:
+	case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW:
+	case BPF_STX | BPF_PROBE_ATOMIC | BPF_W:
 	{
 		bool is32 = BPF_SIZE(insn->code) == BPF_W;
 
+		/*
+		 * Unlike loads and stores, atomics have only a base register,
+		 * but no index register. For the non-arena case, simply use
+		 * %dst as a base. For the arena case, use the work register
+		 * %r1: first, load the arena base into it, and then add %dst
+		 * to it.
+		 */
+		probe.arena_reg = dst_reg;
+
 		switch (insn->imm) {
-/* {op32|op64} {%w0|%src},%src,off(%dst) */
 #define EMIT_ATOMIC(op32, op64) do {					\
+		bpf_jit_probe_atomic_pre(jit, insn, &probe);		\
+		/* {op32|op64} {%w0|%src},%src,off(%arena) */		\
		EMIT6_DISP_LH(0xeb000000, is32 ? (op32) : (op64),	\
			      (insn->imm & BPF_FETCH) ? src_reg : REG_W0, \
-			      src_reg, dst_reg, off);			\
+			      src_reg, probe.arena_reg, off);		\
+		err = bpf_jit_probe_post(jit, fp, &probe);		\
+		if (err < 0)						\
+			return err;					\
		if (insn->imm & BPF_FETCH) {				\
			/* bcr 14,0 - see atomic_fetch_{add,and,or,xor}() */ \
			_EMIT2(0x07e0);					\
@@ -1455,25 +1610,50 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
 			EMIT_ATOMIC(0x00f7, 0x00e7);
 			break;
 #undef EMIT_ATOMIC
-		case BPF_XCHG:
-			/* {ly|lg} %w0,off(%dst) */
+		case BPF_XCHG: {
+			struct bpf_jit_probe load_probe = probe;
+			int loop_start;
+
+			bpf_jit_probe_atomic_pre(jit, insn, &load_probe);
+			/* {ly|lg} %w0,off(%arena) */
 			EMIT6_DISP_LH(0xe3000000,
 				      is32 ? 0x0058 : 0x0004, REG_W0, REG_0,
-				      dst_reg, off);
-			/* 0: {csy|csg} %w0,%src,off(%dst) */
+				      load_probe.arena_reg, off);
+			bpf_jit_probe_emit_nop(jit, &load_probe);
+			/* Reuse {ly|lg}'s arena_reg for {csy|csg}. */
+			if (load_probe.prg != -1) {
+				probe.prg = jit->prg;
+				probe.arena_reg = load_probe.arena_reg;
+			}
+			loop_start = jit->prg;
+			/* 0: {csy|csg} %w0,%src,off(%arena) */
 			EMIT6_DISP_LH(0xeb000000, is32 ? 0x0014 : 0x0030,
-				      REG_W0, src_reg, dst_reg, off);
+				      REG_W0, src_reg, probe.arena_reg, off);
+			bpf_jit_probe_emit_nop(jit, &probe);
 			/* brc 4,0b */
-			EMIT4_PCREL_RIC(0xa7040000, 4, jit->prg - 6);
+			EMIT4_PCREL_RIC(0xa7040000, 4, loop_start);
 			/* {llgfr|lgr} %src,%w0 */
 			EMIT4(is32 ? 0xb9160000 : 0xb9040000, src_reg, REG_W0);
+			/* Both probes should land here on exception. */
+			err = bpf_jit_probe_post(jit, fp, &load_probe);
+			if (err < 0)
+				return err;
+			err = bpf_jit_probe_post(jit, fp, &probe);
+			if (err < 0)
+				return err;
 			if (is32 && insn_is_zext(&insn[1]))
 				insn_count = 2;
 			break;
+		}
 		case BPF_CMPXCHG:
-			/* 0: {csy|csg} %b0,%src,off(%dst) */
+			bpf_jit_probe_atomic_pre(jit, insn, &probe);
+			/* 0: {csy|csg} %b0,%src,off(%arena) */
 			EMIT6_DISP_LH(0xeb000000, is32 ? 0x0014 : 0x0030,
-				      BPF_REG_0, src_reg, dst_reg, off);
+				      BPF_REG_0, src_reg,
+				      probe.arena_reg, off);
+			err = bpf_jit_probe_post(jit, fp, &probe);
+			if (err < 0)
+				return err;
 			break;
 		default:
 			pr_err("Unknown atomic operation %02x\n", insn->imm);
@@ -1488,51 +1668,87 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
 	 */
 	case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */
 	case BPF_LDX | BPF_PROBE_MEM | BPF_B:
-		/* llgc %dst,0(off,%src) */
-		EMIT6_DISP_LH(0xe3000000, 0x0090, dst_reg, src_reg, REG_0, off);
+	case BPF_LDX | BPF_PROBE_MEM32 | BPF_B:
+		bpf_jit_probe_load_pre(jit, insn, &probe);
+		/* llgc %dst,off(%src,%arena) */
+		EMIT6_DISP_LH(0xe3000000, 0x0090, dst_reg, src_reg,
+			      probe.arena_reg, off);
+		err = bpf_jit_probe_post(jit, fp, &probe);
+		if (err < 0)
+			return err;
 		jit->seen |= SEEN_MEM;
 		if (insn_is_zext(&insn[1]))
 			insn_count = 2;
 		break;
 	case BPF_LDX | BPF_MEMSX | BPF_B: /* dst = *(s8 *)(ul) (src + off) */
 	case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
-		/* lgb %dst,0(off,%src) */
+		bpf_jit_probe_load_pre(jit, insn, &probe);
+		/* lgb %dst,off(%src) */
 		EMIT6_DISP_LH(0xe3000000, 0x0077, dst_reg, src_reg, REG_0, off);
+		err = bpf_jit_probe_post(jit, fp, &probe);
+		if (err < 0)
+			return err;
 		jit->seen |= SEEN_MEM;
 		break;
 	case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */
 	case BPF_LDX | BPF_PROBE_MEM | BPF_H:
-		/* llgh %dst,0(off,%src) */
-		EMIT6_DISP_LH(0xe3000000, 0x0091, dst_reg, src_reg, REG_0, off);
+	case BPF_LDX | BPF_PROBE_MEM32 | BPF_H:
+		bpf_jit_probe_load_pre(jit, insn, &probe);
+		/* llgh %dst,off(%src,%arena) */
+		EMIT6_DISP_LH(0xe3000000, 0x0091, dst_reg, src_reg,
+			      probe.arena_reg, off);
+		err = bpf_jit_probe_post(jit, fp, &probe);
+		if (err < 0)
+			return err;
 		jit->seen |= SEEN_MEM;
 		if (insn_is_zext(&insn[1]))
 			insn_count = 2;
 		break;
 	case BPF_LDX | BPF_MEMSX | BPF_H: /* dst = *(s16 *)(ul) (src + off) */
 	case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
-		/* lgh %dst,0(off,%src) */
+		bpf_jit_probe_load_pre(jit, insn, &probe);
+		/* lgh %dst,off(%src) */
 		EMIT6_DISP_LH(0xe3000000, 0x0015, dst_reg, src_reg, REG_0, off);
+		err = bpf_jit_probe_post(jit, fp, &probe);
+		if (err < 0)
+			return err;
 		jit->seen |= SEEN_MEM;
 		break;
 	case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */
 	case BPF_LDX | BPF_PROBE_MEM | BPF_W:
+	case BPF_LDX | BPF_PROBE_MEM32 | BPF_W:
+		bpf_jit_probe_load_pre(jit, insn, &probe);
 		/* llgf %dst,off(%src) */
 		jit->seen |= SEEN_MEM;
-		EMIT6_DISP_LH(0xe3000000, 0x0016, dst_reg, src_reg, REG_0, off);
+		EMIT6_DISP_LH(0xe3000000, 0x0016, dst_reg, src_reg,
+			      probe.arena_reg, off);
+		err = bpf_jit_probe_post(jit, fp, &probe);
+		if (err < 0)
+			return err;
 		if (insn_is_zext(&insn[1]))
 			insn_count = 2;
 		break;
 	case BPF_LDX | BPF_MEMSX | BPF_W: /* dst = *(s32 *)(ul) (src + off) */
 	case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
+		bpf_jit_probe_load_pre(jit, insn, &probe);
 		/* lgf %dst,off(%src) */
 		jit->seen |= SEEN_MEM;
 		EMIT6_DISP_LH(0xe3000000, 0x0014, dst_reg, src_reg, REG_0, off);
+		err = bpf_jit_probe_post(jit, fp, &probe);
+		if (err < 0)
+			return err;
 		break;
 	case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */
 	case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
-		/* lg %dst,0(off,%src) */
+	case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW:
+		bpf_jit_probe_load_pre(jit, insn, &probe);
+		/* lg %dst,off(%src,%arena) */
 		jit->seen |= SEEN_MEM;
-		EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, src_reg, REG_0, off);
+		EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, src_reg,
+			      probe.arena_reg, off);
+		err = bpf_jit_probe_post(jit, fp, &probe);
+		if (err < 0)
+			return err;
 		break;
 	/*
 	 * BPF_JMP / CALL
 	 */
@@ -1647,7 +1863,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
 		/*
 		 * Restore registers before calling function
 		 */
-		save_restore_regs(jit, REGS_RESTORE, stack_depth);
+		save_restore_regs(jit, REGS_RESTORE, stack_depth, 0);
 
 		/*
 		 * goto *(prog->bpf_func + tail_call_start);
@@ -1897,22 +2113,6 @@ branch_oc:
 		return -1;
 	}
 
-	if (probe_prg != -1) {
-		/*
-		 * Handlers of certain exceptions leave psw.addr pointing to
-		 * the instruction directly after the failing one. Therefore,
-		 * create two exception table entries and also add a nop in
-		 * case two probing instructions come directly after each
-		 * other.
-		 */
-		nop_prg = jit->prg;
-		/* bcr 0,%0 */
-		_EMIT2(0x0700);
-		err = bpf_jit_probe_mem(jit, fp, probe_prg, nop_prg);
-		if (err < 0)
-			return err;
-	}
-
 	return insn_count;
 }
@@ -1958,12 +2158,18 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp,
			bool extra_pass, u32 stack_depth)
 {
 	int i, insn_count, lit32_size, lit64_size;
+	u64 kern_arena;
 
 	jit->lit32 = jit->lit32_start;
 	jit->lit64 = jit->lit64_start;
 	jit->prg = 0;
 	jit->excnt = 0;
 
+	kern_arena = bpf_arena_get_kern_vm_start(fp->aux->arena);
+	if (kern_arena)
+		jit->kern_arena = _EMIT_CONST_U64(kern_arena);
+	jit->user_arena = bpf_arena_get_user_vm_start(fp->aux->arena);
+
 	bpf_jit_prologue(jit, fp, stack_depth);
 	if (bpf_set_addr(jit, 0) < 0)
 		return -1;
@@ -2011,9 +2217,25 @@ static struct bpf_binary_header *bpf_jit_alloc(struct bpf_jit *jit,
					       struct bpf_prog *fp)
 {
 	struct bpf_binary_header *header;
+	struct bpf_insn *insn;
 	u32 extable_size;
 	u32 code_size;
+	int i;
+
+	for (i = 0; i < fp->len; i++) {
+		insn = &fp->insnsi[i];
+		if (BPF_CLASS(insn->code) == BPF_STX &&
+		    BPF_MODE(insn->code) == BPF_PROBE_ATOMIC &&
+		    (BPF_SIZE(insn->code) == BPF_DW ||
+		     BPF_SIZE(insn->code) == BPF_W) &&
+		    insn->imm == BPF_XCHG)
+			/*
+			 * bpf_jit_insn() emits a load and a compare-and-swap,
+			 * both of which need to be probed.
+			 */
+			fp->aux->num_exentries += 1;
+	}
 
 	/* We need two entries per insn. */
 	fp->aux->num_exentries *= 2;
@@ -2689,3 +2911,52 @@ bool bpf_jit_supports_subprog_tailcalls(void)
 {
 	return true;
 }
+
+bool bpf_jit_supports_arena(void)
+{
+	return true;
+}
+
+bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
+{
+	/*
+	 * Currently the verifier uses this function only to check which
+	 * atomic stores to arena are supported, and they all are.
+	 */
+	return true;
+}
+
+bool bpf_jit_supports_exceptions(void)
+{
+	/*
+	 * Exceptions require unwinding support, which is always available,
+	 * because the kernel is always built with backchain.
+	 */
+	return true;
+}
+
+void arch_bpf_stack_walk(bool (*consume_fn)(void *, u64, u64, u64),
+			 void *cookie)
+{
+	unsigned long addr, prev_addr = 0;
+	struct unwind_state state;
+
+	unwind_for_each_frame(&state, NULL, NULL, 0) {
+		addr = unwind_get_return_address(&state);
+		if (!addr)
+			break;
+		/*
+		 * addr is a return address and state.sp is the value of %r15
+		 * at this address. exception_cb needs %r15 at entry to the
+		 * function containing addr, so take the next state.sp.
+		 *
+		 * There is no bp, and the exception_cb prog does not need one
+		 * to perform a quasi-longjmp. The common code requires a
+		 * non-zero bp, so pass sp there as well.
+		 */
+		if (prev_addr && !consume_fn(cookie, prev_addr, state.sp,
					     state.sp))
			break;
		prev_addr = addr;
	}
}
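One detail worth calling out from the hunks above: the insn_is_cast_user() lowering emits ltgr/brc/iihf, so a NULL arena pointer stays NULL, while any other pointer keeps its low 32 bits and gets the upper half of the user arena base spliced in. A C model of that transformation (illustrative only, not the JIT itself):

#include <stdint.h>

static uint64_t arena_cast_user(uint64_t ptr, uint64_t user_arena)
{
	if (!ptr)		/* ltgr + brc 8: skip the splice if zero */
		return 0;
	/* iihf: replace the upper 32 bits, keep the lower 32 */
	return (ptr & 0xffffffffULL) | (user_arena & ~0xffffffffULL);
}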
diff --git a/arch/s390/pci/pci_irq.c b/arch/s390/pci/pci_irq.c
index ff8f24854c64..0ef83b6ac0db 100644
--- a/arch/s390/pci/pci_irq.c
+++ b/arch/s390/pci/pci_irq.c
@@ -410,7 +410,7 @@ static void __init cpu_enable_directed_irq(void *unused)
 	union zpci_sic_iib iib = {{0}};
 	union zpci_sic_iib ziib = {{0}};
 
-	iib.cdiib.dibv_addr = (u64) zpci_ibv[smp_processor_id()]->vector;
+	iib.cdiib.dibv_addr = virt_to_phys(zpci_ibv[smp_processor_id()]->vector);
 
 	zpci_set_irq_ctrl(SIC_IRQ_MODE_SET_CPU, 0, &iib);
 	zpci_set_irq_ctrl(SIC_IRQ_MODE_D_SINGLE, PCI_ISC, &ziib);