author    David S. Miller <davem@davemloft.net>  2017-02-08 00:29:30 +0300
committer David S. Miller <davem@davemloft.net>  2017-02-08 00:29:30 +0300
commit    3efa70d78f218e4c9276b0bac0545e5184c1c47b
tree      f4abe2f05e173023d2a262afd4aebb1e89fe6985
parent    76e0e70e6452b971a69cc9794ff4a6715c11f7f2
parent    926af6273fc683cd98cd0ce7bf0d04a02eed6742
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
The conflict was an interaction between a bug fix in the
netvsc driver in 'net' and an optimization of the RX path
in 'net-next'.
Signed-off-by: David S. Miller <davem@davemloft.net>
176 files changed, 1221 insertions, 1049 deletions
diff --git a/Documentation/media/uapi/cec/cec-func-close.rst b/Documentation/media/uapi/cec/cec-func-close.rst index 8267c31b317d..895d9c2d1c04 100644 --- a/Documentation/media/uapi/cec/cec-func-close.rst +++ b/Documentation/media/uapi/cec/cec-func-close.rst @@ -33,11 +33,6 @@ Arguments Description =========== -.. note:: - - This documents the proposed CEC API. This API is not yet finalized - and is currently only available as a staging kernel module. - Closes the cec device. Resources associated with the file descriptor are freed. The device configuration remain unchanged. diff --git a/Documentation/media/uapi/cec/cec-func-ioctl.rst b/Documentation/media/uapi/cec/cec-func-ioctl.rst index 9e8dbb118d6a..7dcfd178fb24 100644 --- a/Documentation/media/uapi/cec/cec-func-ioctl.rst +++ b/Documentation/media/uapi/cec/cec-func-ioctl.rst @@ -39,11 +39,6 @@ Arguments Description =========== -.. note:: - - This documents the proposed CEC API. This API is not yet finalized - and is currently only available as a staging kernel module. - The :c:func:`ioctl()` function manipulates cec device parameters. The argument ``fd`` must be an open file descriptor. diff --git a/Documentation/media/uapi/cec/cec-func-open.rst b/Documentation/media/uapi/cec/cec-func-open.rst index af3f5b5c24c6..0304388cd159 100644 --- a/Documentation/media/uapi/cec/cec-func-open.rst +++ b/Documentation/media/uapi/cec/cec-func-open.rst @@ -46,11 +46,6 @@ Arguments Description =========== -.. note:: - - This documents the proposed CEC API. This API is not yet finalized - and is currently only available as a staging kernel module. - To open a cec device applications call :c:func:`open()` with the desired device name. The function has no side effects; the device configuration remain unchanged. diff --git a/Documentation/media/uapi/cec/cec-func-poll.rst b/Documentation/media/uapi/cec/cec-func-poll.rst index cfb73e6027a5..6a863cfda6e0 100644 --- a/Documentation/media/uapi/cec/cec-func-poll.rst +++ b/Documentation/media/uapi/cec/cec-func-poll.rst @@ -39,11 +39,6 @@ Arguments Description =========== -.. note:: - - This documents the proposed CEC API. This API is not yet finalized - and is currently only available as a staging kernel module. - With the :c:func:`poll()` function applications can wait for CEC events. diff --git a/Documentation/media/uapi/cec/cec-intro.rst b/Documentation/media/uapi/cec/cec-intro.rst index 4a19ea5323a9..07ee2b8f89d6 100644 --- a/Documentation/media/uapi/cec/cec-intro.rst +++ b/Documentation/media/uapi/cec/cec-intro.rst @@ -3,11 +3,6 @@ Introduction ============ -.. note:: - - This documents the proposed CEC API. This API is not yet finalized - and is currently only available as a staging kernel module. - HDMI connectors provide a single pin for use by the Consumer Electronics Control protocol. This protocol allows different devices connected by an HDMI cable to communicate. The protocol for CEC version 1.4 is defined @@ -31,3 +26,15 @@ control just the CEC pin. Drivers that support CEC will create a CEC device node (/dev/cecX) to give userspace access to the CEC adapter. The :ref:`CEC_ADAP_G_CAPS` ioctl will tell userspace what it is allowed to do. + +In order to check the support and test it, it is suggested to download +the `v4l-utils <https://git.linuxtv.org/v4l-utils.git/>`_ package. It +provides three tools to handle CEC: + +- cec-ctl: the Swiss army knife of CEC. Allows you to configure, transmit + and monitor CEC messages. 
+ +- cec-compliance: does a CEC compliance test of a remote CEC device to + determine how compliant the CEC implementation is. + +- cec-follower: emulates a CEC follower. diff --git a/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst b/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst index 2b0ddb14b280..a0e961f11017 100644 --- a/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst +++ b/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst @@ -29,11 +29,6 @@ Arguments Description =========== -.. note:: - - This documents the proposed CEC API. This API is not yet finalized - and is currently only available as a staging kernel module. - All cec devices must support :ref:`ioctl CEC_ADAP_G_CAPS <CEC_ADAP_G_CAPS>`. To query device information, applications call the ioctl with a pointer to a struct :c:type:`cec_caps`. The driver fills the structure and diff --git a/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst b/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst index b878637e91b3..09f09bbe28d4 100644 --- a/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst +++ b/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst @@ -35,11 +35,6 @@ Arguments Description =========== -.. note:: - - This documents the proposed CEC API. This API is not yet finalized - and is currently only available as a staging kernel module. - To query the current CEC logical addresses, applications call :ref:`ioctl CEC_ADAP_G_LOG_ADDRS <CEC_ADAP_G_LOG_ADDRS>` with a pointer to a struct :c:type:`cec_log_addrs` where the driver stores the logical addresses. diff --git a/Documentation/media/uapi/cec/cec-ioc-adap-g-phys-addr.rst b/Documentation/media/uapi/cec/cec-ioc-adap-g-phys-addr.rst index 3357deb43c85..a3cdc75cec3e 100644 --- a/Documentation/media/uapi/cec/cec-ioc-adap-g-phys-addr.rst +++ b/Documentation/media/uapi/cec/cec-ioc-adap-g-phys-addr.rst @@ -35,11 +35,6 @@ Arguments Description =========== -.. note:: - - This documents the proposed CEC API. This API is not yet finalized - and is currently only available as a staging kernel module. - To query the current physical address applications call :ref:`ioctl CEC_ADAP_G_PHYS_ADDR <CEC_ADAP_G_PHYS_ADDR>` with a pointer to a __u16 where the driver stores the physical address. diff --git a/Documentation/media/uapi/cec/cec-ioc-dqevent.rst b/Documentation/media/uapi/cec/cec-ioc-dqevent.rst index e256c6605de7..6e589a1fae17 100644 --- a/Documentation/media/uapi/cec/cec-ioc-dqevent.rst +++ b/Documentation/media/uapi/cec/cec-ioc-dqevent.rst @@ -30,11 +30,6 @@ Arguments Description =========== -.. note:: - - This documents the proposed CEC API. This API is not yet finalized - and is currently only available as a staging kernel module. - CEC devices can send asynchronous events. These can be retrieved by calling :c:func:`CEC_DQEVENT`. If the file descriptor is in non-blocking mode and no event is pending, then it will return -1 and diff --git a/Documentation/media/uapi/cec/cec-ioc-g-mode.rst b/Documentation/media/uapi/cec/cec-ioc-g-mode.rst index 4f5818b9d277..e4ded9df0a84 100644 --- a/Documentation/media/uapi/cec/cec-ioc-g-mode.rst +++ b/Documentation/media/uapi/cec/cec-ioc-g-mode.rst @@ -31,11 +31,6 @@ Arguments Description =========== -.. note:: - - This documents the proposed CEC API. This API is not yet finalized - and is currently only available as a staging kernel module. 
- By default any filehandle can use :ref:`CEC_TRANSMIT`, but in order to prevent applications from stepping on each others toes it must be possible to obtain exclusive access to the CEC adapter. This ioctl sets the diff --git a/Documentation/media/uapi/cec/cec-ioc-receive.rst b/Documentation/media/uapi/cec/cec-ioc-receive.rst index bdf015b1d1dc..dc2adb391c0a 100644 --- a/Documentation/media/uapi/cec/cec-ioc-receive.rst +++ b/Documentation/media/uapi/cec/cec-ioc-receive.rst @@ -34,11 +34,6 @@ Arguments Description =========== -.. note:: - - This documents the proposed CEC API. This API is not yet finalized - and is currently only available as a staging kernel module. - To receive a CEC message the application has to fill in the ``timeout`` field of struct :c:type:`cec_msg` and pass it to :ref:`ioctl CEC_RECEIVE <CEC_RECEIVE>`. diff --git a/MAINTAINERS b/MAINTAINERS index a9368bba9b37..93a41ef97f18 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -13101,7 +13101,7 @@ F: drivers/input/serio/userio.c F: include/uapi/linux/userio.h VIRTIO CONSOLE DRIVER -M: Amit Shah <amit.shah@redhat.com> +M: Amit Shah <amit@kernel.org> L: virtualization@lists.linux-foundation.org S: Maintained F: drivers/char/virtio_console.c @@ -1,7 +1,7 @@ VERSION = 4 PATCHLEVEL = 10 SUBLEVEL = 0 -EXTRAVERSION = -rc6 +EXTRAVERSION = -rc7 NAME = Fearless Coyote # *DOCUMENTATION* @@ -797,7 +797,7 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types) KBUILD_ARFLAGS := $(call ar-option,D) # check for 'asm goto' -ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y) +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y) KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO endif diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c index 91ebe382147f..5f69c3bd59bb 100644 --- a/arch/arc/kernel/unaligned.c +++ b/arch/arc/kernel/unaligned.c @@ -243,7 +243,7 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs, /* clear any remanants of delay slot */ if (delay_mode(regs)) { - regs->ret = regs->bta ~1U; + regs->ret = regs->bta & ~1U; regs->status32 &= ~STATUS_DE_MASK; } else { regs->ret += state.instr_len; diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index a8ee573fe610..281f4f1fcd1f 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -164,7 +164,6 @@ config PPC select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE select HAVE_ARCH_HARDENED_USERCOPY select HAVE_KERNEL_GZIP - select HAVE_CC_STACKPROTECTOR config GENERIC_CSUM def_bool CPU_LITTLE_ENDIAN @@ -484,6 +483,7 @@ config RELOCATABLE bool "Build a relocatable kernel" depends on (PPC64 && !COMPILE_TEST) || (FLATMEM && (44x || FSL_BOOKE)) select NONSTATIC_KERNEL + select MODULE_REL_CRCS if MODVERSIONS help This builds a kernel image that is capable of running at the location the kernel is loaded at. 
For ppc32, there is no any diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h index b312b152461b..6e834caa3720 100644 --- a/arch/powerpc/include/asm/cpu_has_feature.h +++ b/arch/powerpc/include/asm/cpu_has_feature.h @@ -23,7 +23,9 @@ static __always_inline bool cpu_has_feature(unsigned long feature) { int i; +#ifndef __clang__ /* clang can't cope with this */ BUILD_BUG_ON(!__builtin_constant_p(feature)); +#endif #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG if (!static_key_initialized) { diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index a34c764ca8dd..233a7e8cc8e3 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -160,7 +160,9 @@ static __always_inline bool mmu_has_feature(unsigned long feature) { int i; +#ifndef __clang__ /* clang can't cope with this */ BUILD_BUG_ON(!__builtin_constant_p(feature)); +#endif #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG if (!static_key_initialized) { diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h index cc12c61ef315..53885512b8d3 100644 --- a/arch/powerpc/include/asm/module.h +++ b/arch/powerpc/include/asm/module.h @@ -90,9 +90,5 @@ static inline int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sec } #endif -#if defined(CONFIG_MODVERSIONS) && defined(CONFIG_PPC64) -#define ARCH_RELOCATES_KCRCTAB -#define reloc_start PHYSICAL_START -#endif #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_MODULE_H */ diff --git a/arch/powerpc/include/asm/stackprotector.h b/arch/powerpc/include/asm/stackprotector.h deleted file mode 100644 index 6720190eabec..000000000000 --- a/arch/powerpc/include/asm/stackprotector.h +++ /dev/null @@ -1,40 +0,0 @@ -/* - * GCC stack protector support. - * - * Stack protector works by putting predefined pattern at the start of - * the stack frame and verifying that it hasn't been overwritten when - * returning from the function. The pattern is called stack canary - * and gcc expects it to be defined by a global variable called - * "__stack_chk_guard" on PPC. This unfortunately means that on SMP - * we cannot have a different canary value per task. - */ - -#ifndef _ASM_STACKPROTECTOR_H -#define _ASM_STACKPROTECTOR_H - -#include <linux/random.h> -#include <linux/version.h> -#include <asm/reg.h> - -extern unsigned long __stack_chk_guard; - -/* - * Initialize the stackprotector canary value. - * - * NOTE: this must only be called from functions that never return, - * and it must always be inlined. - */ -static __always_inline void boot_init_stack_canary(void) -{ - unsigned long canary; - - /* Try to get a semi random initial value. */ - get_random_bytes(&canary, sizeof(canary)); - canary ^= mftb(); - canary ^= LINUX_VERSION_CODE; - - current->stack_canary = canary; - __stack_chk_guard = current->stack_canary; -} - -#endif /* _ASM_STACKPROTECTOR_H */ diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 23f8082d7bfa..f4c2b52e58b3 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -19,10 +19,6 @@ CFLAGS_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) -# -fstack-protector triggers protection checks in this code, -# but it is being used too early to link to meaningful stack_chk logic. 
-CFLAGS_prom_init.o += $(call cc-option, -fno-stack-protector) - ifdef CONFIG_FUNCTION_TRACER # Do not trace early boot code CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE) diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 0601e6a7297c..195a9fc8f81c 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -91,9 +91,6 @@ int main(void) DEFINE(TI_livepatch_sp, offsetof(struct thread_info, livepatch_sp)); #endif -#ifdef CONFIG_CC_STACKPROTECTOR - DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary)); -#endif DEFINE(KSP, offsetof(struct thread_struct, ksp)); DEFINE(PT_REGS, offsetof(struct thread_struct, regs)); #ifdef CONFIG_BOOKE diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index d88573bdd090..b94887165a10 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -545,7 +545,7 @@ static void *eeh_pe_detach_dev(void *data, void *userdata) static void *__eeh_clear_pe_frozen_state(void *data, void *flag) { struct eeh_pe *pe = (struct eeh_pe *)data; - bool *clear_sw_state = flag; + bool clear_sw_state = *(bool *)flag; int i, rc = 1; for (i = 0; rc && i < 3; i++) diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 5742dbdbee46..3841d749a430 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -674,11 +674,7 @@ BEGIN_FTR_SECTION mtspr SPRN_SPEFSCR,r0 /* restore SPEFSCR reg */ END_FTR_SECTION_IFSET(CPU_FTR_SPE) #endif /* CONFIG_SPE */ -#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) - lwz r0,TSK_STACK_CANARY(r2) - lis r4,__stack_chk_guard@ha - stw r0,__stack_chk_guard@l(r4) -#endif + lwz r0,_CCR(r1) mtcrf 0xFF,r0 /* r3-r12 are destroyed -- Cort */ diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index bb1807184bad..0b0f89685b67 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -286,14 +286,6 @@ static void dedotify_versions(struct modversion_info *vers, for (end = (void *)vers + size; vers < end; vers++) if (vers->name[0] == '.') { memmove(vers->name, vers->name+1, strlen(vers->name)); -#ifdef ARCH_RELOCATES_KCRCTAB - /* The TOC symbol has no CRC computed. To avoid CRC - * check failing, we must force it to the expected - * value (see CRC check in module.c). - */ - if (!strcmp(vers->name, "TOC.")) - vers->crc = -(unsigned long)reloc_start; -#endif } } diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 04885cec24df..5dd056df0baa 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -64,12 +64,6 @@ #include <linux/kprobes.h> #include <linux/kdebug.h> -#ifdef CONFIG_CC_STACKPROTECTOR -#include <linux/stackprotector.h> -unsigned long __stack_chk_guard __read_mostly; -EXPORT_SYMBOL(__stack_chk_guard); -#endif - /* Transactional Memory debug */ #ifdef TM_DEBUG_SW #define TM_DEBUG(x...) 
printk(KERN_INFO x) diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index ec47a939cbdd..ac83eb04a8b8 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -2834,6 +2834,9 @@ static void __init prom_find_boot_cpu(void) cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu); + if (!PHANDLE_VALID(cpu_pkg)) + return; + prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval)); prom.cpu = be32_to_cpu(rval); diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c index cfa53ccc8baf..34f1a0dbc898 100644 --- a/arch/powerpc/mm/pgtable-radix.c +++ b/arch/powerpc/mm/pgtable-radix.c @@ -65,7 +65,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa, if (!pmdp) return -ENOMEM; if (map_page_size == PMD_SIZE) { - ptep = (pte_t *)pudp; + ptep = pmdp_ptep(pmdp); goto set_the_pte; } ptep = pte_alloc_kernel(pmdp, ea); @@ -90,7 +90,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa, } pmdp = pmd_offset(pudp, ea); if (map_page_size == PMD_SIZE) { - ptep = (pte_t *)pudp; + ptep = pmdp_ptep(pmdp); goto set_the_pte; } if (!pmd_present(*pmdp)) { diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index 6ef688a1ef3e..7ff1b0c86a8e 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -1085,9 +1085,9 @@ static void aesni_free_simds(void) aesni_simd_skciphers[i]; i++) simd_skcipher_free(aesni_simd_skciphers[i]); - for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2) && - aesni_simd_skciphers2[i].simd; i++) - simd_skcipher_free(aesni_simd_skciphers2[i].simd); + for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++) + if (aesni_simd_skciphers2[i].simd) + simd_skcipher_free(aesni_simd_skciphers2[i].simd); } static int __init aesni_init(void) @@ -1168,7 +1168,7 @@ static int __init aesni_init(void) simd = simd_skcipher_create_compat(algname, drvname, basename); err = PTR_ERR(simd); if (IS_ERR(simd)) - goto unregister_simds; + continue; aesni_simd_skciphers2[i].simd = simd; } diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c index 17c3564d087a..22ef4f72cf32 100644 --- a/arch/x86/events/intel/rapl.c +++ b/arch/x86/events/intel/rapl.c @@ -161,7 +161,13 @@ static u64 rapl_timer_ms; static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu) { - return rapl_pmus->pmus[topology_logical_package_id(cpu)]; + unsigned int pkgid = topology_logical_package_id(cpu); + + /* + * The unsigned check also catches the '-1' return value for non + * existent mappings in the topology map. + */ + return pkgid < rapl_pmus->maxpkg ? rapl_pmus->pmus[pkgid] : NULL; } static inline u64 rapl_read_counter(struct perf_event *event) @@ -402,6 +408,8 @@ static int rapl_pmu_event_init(struct perf_event *event) /* must be done before validate_group */ pmu = cpu_to_rapl_pmu(event->cpu); + if (!pmu) + return -EINVAL; event->cpu = pmu->cpu; event->pmu_private = pmu; event->hw.event_base = msr; @@ -585,6 +593,20 @@ static int rapl_cpu_online(unsigned int cpu) struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu); int target; + if (!pmu) { + pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu)); + if (!pmu) + return -ENOMEM; + + raw_spin_lock_init(&pmu->lock); + INIT_LIST_HEAD(&pmu->active_list); + pmu->pmu = &rapl_pmus->pmu; + pmu->timer_interval = ms_to_ktime(rapl_timer_ms); + rapl_hrtimer_init(pmu); + + rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu; + } + /* * Check if there is an online cpu in the package which collects rapl * events already. 
@@ -598,27 +620,6 @@ static int rapl_cpu_online(unsigned int cpu) return 0; } -static int rapl_cpu_prepare(unsigned int cpu) -{ - struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu); - - if (pmu) - return 0; - - pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu)); - if (!pmu) - return -ENOMEM; - - raw_spin_lock_init(&pmu->lock); - INIT_LIST_HEAD(&pmu->active_list); - pmu->pmu = &rapl_pmus->pmu; - pmu->timer_interval = ms_to_ktime(rapl_timer_ms); - pmu->cpu = -1; - rapl_hrtimer_init(pmu); - rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu; - return 0; -} - static int rapl_check_hw_unit(bool apply_quirk) { u64 msr_rapl_power_unit_bits; @@ -803,29 +804,21 @@ static int __init rapl_pmu_init(void) /* * Install callbacks. Core will call them for each online cpu. */ - - ret = cpuhp_setup_state(CPUHP_PERF_X86_RAPL_PREP, "perf/x86/rapl:prepare", - rapl_cpu_prepare, NULL); - if (ret) - goto out; - ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE, "perf/x86/rapl:online", rapl_cpu_online, rapl_cpu_offline); if (ret) - goto out1; + goto out; ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1); if (ret) - goto out2; + goto out1; rapl_advertise(); return 0; -out2: - cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE); out1: - cpuhp_remove_state(CPUHP_PERF_X86_RAPL_PREP); + cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE); out: pr_warn("Initialization failed (%d), disabled\n", ret); cleanup_rapl_pmus(); @@ -836,7 +829,6 @@ module_init(rapl_pmu_init); static void __exit intel_rapl_exit(void) { cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE); - cpuhp_remove_state_nocalls(CPUHP_PERF_X86_RAPL_PREP); perf_pmu_unregister(&rapl_pmus->pmu); cleanup_rapl_pmus(); } diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index 8c4ccdc3a3f3..1ab45976474d 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c @@ -100,7 +100,13 @@ ssize_t uncore_event_show(struct kobject *kobj, struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu) { - return pmu->boxes[topology_logical_package_id(cpu)]; + unsigned int pkgid = topology_logical_package_id(cpu); + + /* + * The unsigned check also catches the '-1' return value for non + * existent mappings in the topology map. + */ + return pkgid < max_packages ? 
pmu->boxes[pkgid] : NULL; } u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event) @@ -764,30 +770,6 @@ static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu) pmu->registered = false; } -static void __uncore_exit_boxes(struct intel_uncore_type *type, int cpu) -{ - struct intel_uncore_pmu *pmu = type->pmus; - struct intel_uncore_box *box; - int i, pkg; - - if (pmu) { - pkg = topology_physical_package_id(cpu); - for (i = 0; i < type->num_boxes; i++, pmu++) { - box = pmu->boxes[pkg]; - if (box) - uncore_box_exit(box); - } - } -} - -static void uncore_exit_boxes(void *dummy) -{ - struct intel_uncore_type **types; - - for (types = uncore_msr_uncores; *types; types++) - __uncore_exit_boxes(*types++, smp_processor_id()); -} - static void uncore_free_boxes(struct intel_uncore_pmu *pmu) { int pkg; @@ -1058,86 +1040,6 @@ static void uncore_pci_exit(void) } } -static int uncore_cpu_dying(unsigned int cpu) -{ - struct intel_uncore_type *type, **types = uncore_msr_uncores; - struct intel_uncore_pmu *pmu; - struct intel_uncore_box *box; - int i, pkg; - - pkg = topology_logical_package_id(cpu); - for (; *types; types++) { - type = *types; - pmu = type->pmus; - for (i = 0; i < type->num_boxes; i++, pmu++) { - box = pmu->boxes[pkg]; - if (box && atomic_dec_return(&box->refcnt) == 0) - uncore_box_exit(box); - } - } - return 0; -} - -static int first_init; - -static int uncore_cpu_starting(unsigned int cpu) -{ - struct intel_uncore_type *type, **types = uncore_msr_uncores; - struct intel_uncore_pmu *pmu; - struct intel_uncore_box *box; - int i, pkg, ncpus = 1; - - if (first_init) { - /* - * On init we get the number of online cpus in the package - * and set refcount for all of them. - */ - ncpus = cpumask_weight(topology_core_cpumask(cpu)); - } - - pkg = topology_logical_package_id(cpu); - for (; *types; types++) { - type = *types; - pmu = type->pmus; - for (i = 0; i < type->num_boxes; i++, pmu++) { - box = pmu->boxes[pkg]; - if (!box) - continue; - /* The first cpu on a package activates the box */ - if (atomic_add_return(ncpus, &box->refcnt) == ncpus) - uncore_box_init(box); - } - } - - return 0; -} - -static int uncore_cpu_prepare(unsigned int cpu) -{ - struct intel_uncore_type *type, **types = uncore_msr_uncores; - struct intel_uncore_pmu *pmu; - struct intel_uncore_box *box; - int i, pkg; - - pkg = topology_logical_package_id(cpu); - for (; *types; types++) { - type = *types; - pmu = type->pmus; - for (i = 0; i < type->num_boxes; i++, pmu++) { - if (pmu->boxes[pkg]) - continue; - /* First cpu of a package allocates the box */ - box = uncore_alloc_box(type, cpu_to_node(cpu)); - if (!box) - return -ENOMEM; - box->pmu = pmu; - box->pkgid = pkg; - pmu->boxes[pkg] = box; - } - } - return 0; -} - static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu, int new_cpu) { @@ -1177,12 +1079,14 @@ static void uncore_change_context(struct intel_uncore_type **uncores, static int uncore_event_cpu_offline(unsigned int cpu) { - int target; + struct intel_uncore_type *type, **types = uncore_msr_uncores; + struct intel_uncore_pmu *pmu; + struct intel_uncore_box *box; + int i, pkg, target; /* Check if exiting cpu is used for collecting uncore events */ if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask)) - return 0; - + goto unref; /* Find a new cpu to collect uncore events */ target = cpumask_any_but(topology_core_cpumask(cpu), cpu); @@ -1194,12 +1098,82 @@ static int uncore_event_cpu_offline(unsigned int cpu) uncore_change_context(uncore_msr_uncores, cpu, 
target); uncore_change_context(uncore_pci_uncores, cpu, target); + +unref: + /* Clear the references */ + pkg = topology_logical_package_id(cpu); + for (; *types; types++) { + type = *types; + pmu = type->pmus; + for (i = 0; i < type->num_boxes; i++, pmu++) { + box = pmu->boxes[pkg]; + if (box && atomic_dec_return(&box->refcnt) == 0) + uncore_box_exit(box); + } + } return 0; } +static int allocate_boxes(struct intel_uncore_type **types, + unsigned int pkg, unsigned int cpu) +{ + struct intel_uncore_box *box, *tmp; + struct intel_uncore_type *type; + struct intel_uncore_pmu *pmu; + LIST_HEAD(allocated); + int i; + + /* Try to allocate all required boxes */ + for (; *types; types++) { + type = *types; + pmu = type->pmus; + for (i = 0; i < type->num_boxes; i++, pmu++) { + if (pmu->boxes[pkg]) + continue; + box = uncore_alloc_box(type, cpu_to_node(cpu)); + if (!box) + goto cleanup; + box->pmu = pmu; + box->pkgid = pkg; + list_add(&box->active_list, &allocated); + } + } + /* Install them in the pmus */ + list_for_each_entry_safe(box, tmp, &allocated, active_list) { + list_del_init(&box->active_list); + box->pmu->boxes[pkg] = box; + } + return 0; + +cleanup: + list_for_each_entry_safe(box, tmp, &allocated, active_list) { + list_del_init(&box->active_list); + kfree(box); + } + return -ENOMEM; +} + static int uncore_event_cpu_online(unsigned int cpu) { - int target; + struct intel_uncore_type *type, **types = uncore_msr_uncores; + struct intel_uncore_pmu *pmu; + struct intel_uncore_box *box; + int i, ret, pkg, target; + + pkg = topology_logical_package_id(cpu); + ret = allocate_boxes(types, pkg, cpu); + if (ret) + return ret; + + for (; *types; types++) { + type = *types; + pmu = type->pmus; + for (i = 0; i < type->num_boxes; i++, pmu++) { + box = pmu->boxes[pkg]; + if (!box && atomic_inc_return(&box->refcnt) == 1) + uncore_box_init(box); + } + } /* * Check if there is an online cpu in the package @@ -1389,38 +1363,16 @@ static int __init intel_uncore_init(void) if (cret && pret) return -ENODEV; - /* - * Install callbacks. Core will call them for each online cpu. - * - * The first online cpu of each package allocates and takes - * the refcounts for all other online cpus in that package. - * If msrs are not enabled no allocation is required and - * uncore_cpu_prepare() is not called for each online cpu. 
- */ - if (!cret) { - ret = cpuhp_setup_state(CPUHP_PERF_X86_UNCORE_PREP, - "perf/x86/intel/uncore:prepare", - uncore_cpu_prepare, NULL); - if (ret) - goto err; - } else { - cpuhp_setup_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP, - "perf/x86/intel/uncore:prepare", - uncore_cpu_prepare, NULL); - } - first_init = 1; - cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING, - "perf/x86/uncore:starting", - uncore_cpu_starting, uncore_cpu_dying); - first_init = 0; - cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE, - "perf/x86/uncore:online", - uncore_event_cpu_online, uncore_event_cpu_offline); + /* Install hotplug callbacks to setup the targets for each package */ + ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE, + "perf/x86/intel/uncore:online", + uncore_event_cpu_online, + uncore_event_cpu_offline); + if (ret) + goto err; return 0; err: - /* Undo box->init_box() */ - on_each_cpu_mask(&uncore_cpu_mask, uncore_exit_boxes, NULL, 1); uncore_types_exit(uncore_msr_uncores); uncore_pci_exit(); return ret; @@ -1429,9 +1381,7 @@ module_init(intel_uncore_init); static void __exit intel_uncore_exit(void) { - cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_ONLINE); - cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_STARTING); - cpuhp_remove_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP); + cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE); uncore_types_exit(uncore_msr_uncores); uncore_pci_exit(); } diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h index 38711df3bcb5..2266f864b747 100644 --- a/arch/x86/include/asm/microcode.h +++ b/arch/x86/include/asm/microcode.h @@ -140,6 +140,7 @@ extern void __init load_ucode_bsp(void); extern void load_ucode_ap(void); void reload_early_microcode(void); extern bool get_builtin_firmware(struct cpio_data *cd, const char *name); +extern bool initrd_gone; #else static inline int __init microcode_init(void) { return 0; }; static inline void __init load_ucode_bsp(void) { } diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 1e35dd06b090..52f352b063fd 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -2117,6 +2117,7 @@ static inline void __init check_timer(void) if (idx != -1 && irq_trigger(idx)) unmask_ioapic_irq(irq_get_chip_data(0)); } + irq_domain_deactivate_irq(irq_data); irq_domain_activate_irq(irq_data); if (timer_irq_works()) { if (disable_timer_pin_1 > 0) @@ -2138,6 +2139,7 @@ static inline void __init check_timer(void) * legacy devices should be connected to IO APIC #0 */ replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2); + irq_domain_deactivate_irq(irq_data); irq_domain_activate_irq(irq_data); legacy_pic->unmask(0); if (timer_irq_works()) { diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 00ef43233e03..537c6647d84c 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -1373,20 +1373,15 @@ static unsigned long mce_adjust_timer_default(unsigned long interval) static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default; -static void __restart_timer(struct timer_list *t, unsigned long interval) +static void __start_timer(struct timer_list *t, unsigned long interval) { unsigned long when = jiffies + interval; unsigned long flags; local_irq_save(flags); - if (timer_pending(t)) { - if (time_before(when, t->expires)) - mod_timer(t, when); - } else { - t->expires = round_jiffies(when); - add_timer_on(t, smp_processor_id()); - } + if (!timer_pending(t) || 
time_before(when, t->expires)) + mod_timer(t, round_jiffies(when)); local_irq_restore(flags); } @@ -1421,7 +1416,7 @@ static void mce_timer_fn(unsigned long data) done: __this_cpu_write(mce_next_interval, iv); - __restart_timer(t, iv); + __start_timer(t, iv); } /* @@ -1432,7 +1427,7 @@ void mce_timer_kick(unsigned long interval) struct timer_list *t = this_cpu_ptr(&mce_timer); unsigned long iv = __this_cpu_read(mce_next_interval); - __restart_timer(t, interval); + __start_timer(t, interval); if (interval < iv) __this_cpu_write(mce_next_interval, interval); @@ -1779,17 +1774,15 @@ static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c) } } -static void mce_start_timer(unsigned int cpu, struct timer_list *t) +static void mce_start_timer(struct timer_list *t) { unsigned long iv = check_interval * HZ; if (mca_cfg.ignore_ce || !iv) return; - per_cpu(mce_next_interval, cpu) = iv; - - t->expires = round_jiffies(jiffies + iv); - add_timer_on(t, cpu); + this_cpu_write(mce_next_interval, iv); + __start_timer(t, iv); } static void __mcheck_cpu_setup_timer(void) @@ -1806,7 +1799,7 @@ static void __mcheck_cpu_init_timer(void) unsigned int cpu = smp_processor_id(); setup_pinned_timer(t, mce_timer_fn, cpu); - mce_start_timer(cpu, t); + mce_start_timer(t); } /* Handle unconfigured int18 (should never happen) */ @@ -2566,7 +2559,7 @@ static int mce_cpu_dead(unsigned int cpu) static int mce_cpu_online(unsigned int cpu) { - struct timer_list *t = &per_cpu(mce_timer, cpu); + struct timer_list *t = this_cpu_ptr(&mce_timer); int ret; mce_device_create(cpu); @@ -2577,13 +2570,13 @@ static int mce_cpu_online(unsigned int cpu) return ret; } mce_reenable_cpu(); - mce_start_timer(cpu, t); + mce_start_timer(t); return 0; } static int mce_cpu_pre_down(unsigned int cpu) { - struct timer_list *t = &per_cpu(mce_timer, cpu); + struct timer_list *t = this_cpu_ptr(&mce_timer); mce_disable_cpu(); del_timer_sync(t); diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 6a31e2691f3a..079e81733a58 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -384,8 +384,9 @@ void load_ucode_amd_ap(unsigned int family) reget: if (!get_builtin_microcode(&cp, family)) { #ifdef CONFIG_BLK_DEV_INITRD - cp = find_cpio_data(ucode_path, (void *)initrd_start, - initrd_end - initrd_start, NULL); + if (!initrd_gone) + cp = find_cpio_data(ucode_path, (void *)initrd_start, + initrd_end - initrd_start, NULL); #endif if (!(cp.data && cp.size)) { /* diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 2af69d27da62..73102d932760 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -46,6 +46,8 @@ static struct microcode_ops *microcode_ops; static bool dis_ucode_ldr = true; +bool initrd_gone; + LIST_HEAD(microcode_cache); /* @@ -190,21 +192,24 @@ void load_ucode_ap(void) static int __init save_microcode_in_initrd(void) { struct cpuinfo_x86 *c = &boot_cpu_data; + int ret = -EINVAL; switch (c->x86_vendor) { case X86_VENDOR_INTEL: if (c->x86 >= 6) - return save_microcode_in_initrd_intel(); + ret = save_microcode_in_initrd_intel(); break; case X86_VENDOR_AMD: if (c->x86 >= 0x10) - return save_microcode_in_initrd_amd(c->x86); + ret = save_microcode_in_initrd_amd(c->x86); break; default: break; } - return -EINVAL; + initrd_gone = true; + + return ret; } struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa) @@ -247,9 +252,16 @@ struct cpio_data find_microcode_in_initrd(const 
char *path, bool use_pa) * has the virtual address of the beginning of the initrd. It also * possibly relocates the ramdisk. In either case, initrd_start contains * the updated address so use that instead. + * + * initrd_gone is for the hotplug case where we've thrown out initrd + * already. */ - if (!use_pa && initrd_start) - start = initrd_start; + if (!use_pa) { + if (initrd_gone) + return (struct cpio_data){ NULL, 0, "" }; + if (initrd_start) + start = initrd_start; + } return find_cpio_data(path, (void *)start, size, NULL); #else /* !CONFIG_BLK_DEV_INITRD */ diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 3f329b74e040..8325d8a09ab0 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -41,7 +41,7 @@ static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin"; -/* Current microcode patch used in early patching */ +/* Current microcode patch used in early patching on the APs. */ struct microcode_intel *intel_ucode_patch; static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1, @@ -607,12 +607,6 @@ int __init save_microcode_in_initrd_intel(void) struct ucode_cpu_info uci; struct cpio_data cp; - /* - * AP loading didn't find any microcode patch, no need to save anything. - */ - if (!intel_ucode_patch || IS_ERR(intel_ucode_patch)) - return 0; - if (!load_builtin_intel_microcode(&cp)) cp = find_microcode_in_initrd(ucode_path, false); @@ -628,7 +622,6 @@ int __init save_microcode_in_initrd_intel(void) return 0; } - /* * @res_patch, output: a pointer to the patch we found. */ diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c index e4e97a5355ce..de7234401275 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c @@ -9,6 +9,7 @@ #include <asm/fpu/regset.h> #include <asm/fpu/signal.h> #include <asm/fpu/types.h> +#include <asm/fpu/xstate.h> #include <asm/traps.h> #include <linux/hardirq.h> @@ -183,7 +184,8 @@ void fpstate_init(union fpregs_state *state) * it will #GP. Make sure it is replaced after the memset(). 
*/ if (static_cpu_has(X86_FEATURE_XSAVES)) - state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT; + state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT | + xfeatures_mask; if (static_cpu_has(X86_FEATURE_FXSR)) fpstate_init_fxstate(&state->fxsave); diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 85e87b46c318..dc6ba5bda9fc 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -352,6 +352,7 @@ static int hpet_resume(struct clock_event_device *evt, int timer) } else { struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt); + irq_domain_deactivate_irq(irq_get_irq_data(hdev->irq)); irq_domain_activate_irq(irq_get_irq_data(hdev->irq)); disable_irq(hdev->irq); irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu)); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index d153be8929a6..e52c9088660f 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3182,6 +3182,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu) memcpy(dest, xsave, XSAVE_HDR_OFFSET); /* Set XSTATE_BV */ + xstate_bv &= vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE; *(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv; /* diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 319148bd4b05..2f25a363068c 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -269,6 +269,22 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) efi_scratch.use_pgd = true; /* + * Certain firmware versions are way too sentimential and still believe + * they are exclusive and unquestionable owners of the first physical page, + * even though they explicitly mark it as EFI_CONVENTIONAL_MEMORY + * (but then write-access it later during SetVirtualAddressMap()). + * + * Create a 1:1 mapping for this page, to avoid triple faults during early + * boot with such firmware. We are free to hand this page to the BIOS, + * as trim_bios_range() will reserve the first page and isolate it away + * from memory allocators anyway. + */ + if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, _PAGE_RW)) { + pr_err("Failed to create 1:1 mapping for the first page!\n"); + return 1; + } + + /* * When making calls to the firmware everything needs to be 1:1 * mapped and addressable with 32-bit pointers. 
Map the kernel * text and allocate a new stack because we can't rely on the diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c index 848e8568fb3c..8fd4be610607 100644 --- a/arch/xtensa/kernel/setup.c +++ b/arch/xtensa/kernel/setup.c @@ -419,7 +419,7 @@ subsys_initcall(topology_init); void cpu_reset(void) { -#if XCHAL_HAVE_PTP_MMU +#if XCHAL_HAVE_PTP_MMU && IS_ENABLED(CONFIG_MMU) local_irq_disable(); /* * We have full MMU: all autoload ways, ways 7, 8 and 9 of DTLB must diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index f849311e9fd4..533265f110e0 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -661,9 +661,9 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags) unlock: list_for_each_entry_safe(rsgl, tmp, &ctx->list, list) { af_alg_free_sg(&rsgl->sgl); + list_del(&rsgl->list); if (rsgl != &ctx->first_rsgl) sock_kfree_s(sk, rsgl, sizeof(*rsgl)); - list_del(&rsgl->list); } INIT_LIST_HEAD(&ctx->list); aead_wmem_wakeup(sk); diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index 2f82b8eba360..7361d00818e2 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -2704,6 +2704,7 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc) struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); struct device *dev = acpi_desc->dev; struct acpi_nfit_flush_work flush; + int rc; /* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */ device_lock(dev); @@ -2716,7 +2717,10 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc) INIT_WORK_ONSTACK(&flush.work, flush_probe); COMPLETION_INITIALIZER_ONSTACK(flush.cmp); queue_work(nfit_wq, &flush.work); - return wait_for_completion_interruptible(&flush.cmp); + + rc = wait_for_completion_interruptible(&flush.cmp); + cancel_work_sync(&flush.work); + return rc; } static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index 4497d263209f..ac350c518e0c 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -558,9 +558,6 @@ static void fw_load_abort(struct firmware_priv *fw_priv) struct firmware_buf *buf = fw_priv->buf; __fw_load_abort(buf); - - /* avoid user action after loading abort */ - fw_priv->buf = NULL; } static LIST_HEAD(pending_fw_head); @@ -713,7 +710,7 @@ static ssize_t firmware_loading_store(struct device *dev, mutex_lock(&fw_lock); fw_buf = fw_priv->buf; - if (!fw_buf) + if (fw_state_is_aborted(&fw_buf->fw_st)) goto out; switch (loading) { diff --git a/drivers/base/memory.c b/drivers/base/memory.c index dacb6a8418aa..fa26ffd25fa6 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -389,33 +389,33 @@ static ssize_t show_valid_zones(struct device *dev, { struct memory_block *mem = to_memory_block(dev); unsigned long start_pfn, end_pfn; + unsigned long valid_start, valid_end, valid_pages; unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; - struct page *first_page; struct zone *zone; int zone_shift = 0; start_pfn = section_nr_to_pfn(mem->start_section_nr); end_pfn = start_pfn + nr_pages; - first_page = pfn_to_page(start_pfn); /* The block contains more than one zone can not be offlined. 
*/ - if (!test_pages_in_a_zone(start_pfn, end_pfn)) + if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end)) return sprintf(buf, "none\n"); - zone = page_zone(first_page); + zone = page_zone(pfn_to_page(valid_start)); + valid_pages = valid_end - valid_start; /* MMOP_ONLINE_KEEP */ sprintf(buf, "%s", zone->name); /* MMOP_ONLINE_KERNEL */ - zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL, &zone_shift); + zone_can_shift(valid_start, valid_pages, ZONE_NORMAL, &zone_shift); if (zone_shift) { strcat(buf, " "); strcat(buf, (zone + zone_shift)->name); } /* MMOP_ONLINE_MOVABLE */ - zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE, &zone_shift); + zone_can_shift(valid_start, valid_pages, ZONE_MOVABLE, &zone_shift); if (zone_shift) { strcat(buf, " "); strcat(buf, (zone + zone_shift)->name); diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 872eac4cb1df..a14fac6a01d3 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -966,13 +966,13 @@ int __pm_runtime_idle(struct device *dev, int rpmflags) unsigned long flags; int retval; - might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); - if (rpmflags & RPM_GET_PUT) { if (!atomic_dec_and_test(&dev->power.usage_count)) return 0; } + might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); + spin_lock_irqsave(&dev->power.lock, flags); retval = rpm_idle(dev, rpmflags); spin_unlock_irqrestore(&dev->power.lock, flags); @@ -998,13 +998,13 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags) unsigned long flags; int retval; - might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); - if (rpmflags & RPM_GET_PUT) { if (!atomic_dec_and_test(&dev->power.usage_count)) return 0; } + might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); + spin_lock_irqsave(&dev->power.lock, flags); retval = rpm_suspend(dev, rpmflags); spin_unlock_irqrestore(&dev->power.lock, flags); @@ -1029,7 +1029,8 @@ int __pm_runtime_resume(struct device *dev, int rpmflags) unsigned long flags; int retval; - might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); + might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe && + dev->power.runtime_status != RPM_ACTIVE); if (rpmflags & RPM_GET_PUT) atomic_inc(&dev->power.usage_count); diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c index 4fda623e55bb..c94360671f41 100644 --- a/drivers/cpufreq/brcmstb-avs-cpufreq.c +++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c @@ -784,8 +784,19 @@ static int brcm_avs_target_index(struct cpufreq_policy *policy, static int brcm_avs_suspend(struct cpufreq_policy *policy) { struct private_data *priv = policy->driver_data; + int ret; + + ret = brcm_avs_get_pmap(priv, &priv->pmap); + if (ret) + return ret; - return brcm_avs_get_pmap(priv, &priv->pmap); + /* + * We can't use the P-state returned by brcm_avs_get_pmap(), since + * that's the initial P-state from when the P-map was downloaded to the + * AVS co-processor, not necessarily the P-state we are running at now. + * So, we get the current P-state explicitly. 
+ */ + return brcm_avs_get_pstate(priv, &priv->pmap.state); } static int brcm_avs_resume(struct cpufreq_policy *policy) @@ -954,9 +965,9 @@ static ssize_t show_brcm_avs_pmap(struct cpufreq_policy *policy, char *buf) brcm_avs_parse_p1(pmap.p1, &mdiv_p0, &pdiv, &ndiv); brcm_avs_parse_p2(pmap.p2, &mdiv_p1, &mdiv_p2, &mdiv_p3, &mdiv_p4); - return sprintf(buf, "0x%08x 0x%08x %u %u %u %u %u %u %u\n", + return sprintf(buf, "0x%08x 0x%08x %u %u %u %u %u %u %u %u %u\n", pmap.p1, pmap.p2, ndiv, pdiv, mdiv_p0, mdiv_p1, mdiv_p2, - mdiv_p3, mdiv_p4); + mdiv_p3, mdiv_p4, pmap.mode, pmap.state); } static ssize_t show_brcm_avs_voltage(struct cpufreq_policy *policy, char *buf) diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index a54d65aa776d..50bd6d987fc3 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -1235,6 +1235,25 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata) cpudata->epp_default = intel_pstate_get_epp(cpudata, 0); } +#define MSR_IA32_POWER_CTL_BIT_EE 19 + +/* Disable energy efficiency optimization */ +static void intel_pstate_disable_ee(int cpu) +{ + u64 power_ctl; + int ret; + + ret = rdmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, &power_ctl); + if (ret) + return; + + if (!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE))) { + pr_info("Disabling energy efficiency optimization\n"); + power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE); + wrmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, power_ctl); + } +} + static int atom_get_min_pstate(void) { u64 value; @@ -1845,6 +1864,11 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = { {} }; +static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = { + ICPU(INTEL_FAM6_KABYLAKE_DESKTOP, core_params), + {} +}; + static int intel_pstate_init_cpu(unsigned int cpunum) { struct cpudata *cpu; @@ -1875,6 +1899,12 @@ static int intel_pstate_init_cpu(unsigned int cpunum) cpu->cpu = cpunum; if (hwp_active) { + const struct x86_cpu_id *id; + + id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids); + if (id) + intel_pstate_disable_ee(cpunum); + intel_pstate_hwp_enable(cpu); pid_params.sample_rate_ms = 50; pid_params.sample_rate_ns = 50 * NSEC_PER_MSEC; diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index e2ce8190ecc9..612898b4aaad 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -959,7 +959,7 @@ static irqreturn_t ccp5_irq_handler(int irq, void *data) static void ccp5_config(struct ccp_device *ccp) { /* Public side */ - iowrite32(0x00001249, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET); + iowrite32(0x0, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET); } static void ccp5other_config(struct ccp_device *ccp) diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 830f35e6005f..649e5610a5ce 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -238,6 +238,7 @@ struct ccp_dma_chan { struct ccp_device *ccp; spinlock_t lock; + struct list_head created; struct list_head pending; struct list_head active; struct list_head complete; diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c index 6553912804f7..e5d9278f4019 100644 --- a/drivers/crypto/ccp/ccp-dmaengine.c +++ b/drivers/crypto/ccp/ccp-dmaengine.c @@ -63,6 +63,7 @@ static void ccp_free_chan_resources(struct dma_chan *dma_chan) ccp_free_desc_resources(chan->ccp, &chan->complete); ccp_free_desc_resources(chan->ccp, &chan->active); ccp_free_desc_resources(chan->ccp, &chan->pending); + 
ccp_free_desc_resources(chan->ccp, &chan->created); spin_unlock_irqrestore(&chan->lock, flags); } @@ -273,6 +274,7 @@ static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc) spin_lock_irqsave(&chan->lock, flags); cookie = dma_cookie_assign(tx_desc); + list_del(&desc->entry); list_add_tail(&desc->entry, &chan->pending); spin_unlock_irqrestore(&chan->lock, flags); @@ -426,7 +428,7 @@ static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan, spin_lock_irqsave(&chan->lock, sflags); - list_add_tail(&desc->entry, &chan->pending); + list_add_tail(&desc->entry, &chan->created); spin_unlock_irqrestore(&chan->lock, sflags); @@ -610,6 +612,7 @@ static int ccp_terminate_all(struct dma_chan *dma_chan) /*TODO: Purge the complete list? */ ccp_free_desc_resources(chan->ccp, &chan->active); ccp_free_desc_resources(chan->ccp, &chan->pending); + ccp_free_desc_resources(chan->ccp, &chan->created); spin_unlock_irqrestore(&chan->lock, flags); @@ -679,6 +682,7 @@ int ccp_dmaengine_register(struct ccp_device *ccp) chan->ccp = ccp; spin_lock_init(&chan->lock); + INIT_LIST_HEAD(&chan->created); INIT_LIST_HEAD(&chan->pending); INIT_LIST_HEAD(&chan->active); INIT_LIST_HEAD(&chan->complete); diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index 2ed1e24b44a8..b4b78b37f8a6 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c @@ -158,7 +158,7 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input, case CRYPTO_ALG_TYPE_AEAD: ctx_req.req.aead_req = (struct aead_request *)req; ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req); - dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.aead_req->dst, + dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.ctx.reqctx->dst, ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE); if (ctx_req.ctx.reqctx->skb) { kfree_skb(ctx_req.ctx.reqctx->skb); @@ -1362,8 +1362,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, struct chcr_wr *chcr_req; struct cpl_rx_phys_dsgl *phys_cpl; struct phys_sge_parm sg_param; - struct scatterlist *src, *dst; - struct scatterlist src_sg[2], dst_sg[2]; + struct scatterlist *src; unsigned int frags = 0, transhdr_len; unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0; unsigned int kctx_len = 0; @@ -1383,19 +1382,21 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0) goto err; - src = scatterwalk_ffwd(src_sg, req->src, req->assoclen); - dst = src; + src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen); + reqctx->dst = src; + if (req->src != req->dst) { err = chcr_copy_assoc(req, aeadctx); if (err) return ERR_PTR(err); - dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen); + reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst, + req->assoclen); } if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) { null = 1; assoclen = 0; } - reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen + + reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen + (op_type ? -authsize : authsize)); if (reqctx->dst_nents <= 0) { pr_err("AUTHENC:Invalid Destination sg entries\n"); @@ -1460,7 +1461,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, sg_param.obsize = req->cryptlen + (op_type ? 
-authsize : authsize); sg_param.qid = qid; sg_param.align = 0; - if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst, + if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst, &sg_param)) goto dstmap_fail; @@ -1711,8 +1712,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, struct chcr_wr *chcr_req; struct cpl_rx_phys_dsgl *phys_cpl; struct phys_sge_parm sg_param; - struct scatterlist *src, *dst; - struct scatterlist src_sg[2], dst_sg[2]; + struct scatterlist *src; unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE; unsigned int dst_size = 0, kctx_len; unsigned int sub_type; @@ -1728,17 +1728,19 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0) goto err; sub_type = get_aead_subtype(tfm); - src = scatterwalk_ffwd(src_sg, req->src, req->assoclen); - dst = src; + src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen); + reqctx->dst = src; + if (req->src != req->dst) { err = chcr_copy_assoc(req, aeadctx); if (err) { pr_err("AAD copy to destination buffer fails\n"); return ERR_PTR(err); } - dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen); + reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst, + req->assoclen); } - reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen + + reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen + (op_type ? -authsize : authsize)); if (reqctx->dst_nents <= 0) { pr_err("CCM:Invalid Destination sg entries\n"); @@ -1777,7 +1779,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize); sg_param.qid = qid; sg_param.align = 0; - if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst, + if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst, &sg_param)) goto dstmap_fail; @@ -1809,8 +1811,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, struct chcr_wr *chcr_req; struct cpl_rx_phys_dsgl *phys_cpl; struct phys_sge_parm sg_param; - struct scatterlist *src, *dst; - struct scatterlist src_sg[2], dst_sg[2]; + struct scatterlist *src; unsigned int frags = 0, transhdr_len; unsigned int ivsize = AES_BLOCK_SIZE; unsigned int dst_size = 0, kctx_len; @@ -1832,13 +1833,14 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0) goto err; - src = scatterwalk_ffwd(src_sg, req->src, req->assoclen); - dst = src; + src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen); + reqctx->dst = src; if (req->src != req->dst) { err = chcr_copy_assoc(req, aeadctx); if (err) return ERR_PTR(err); - dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen); + reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst, + req->assoclen); } if (!req->cryptlen) @@ -1848,7 +1850,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, crypt_len = AES_BLOCK_SIZE; else crypt_len = req->cryptlen; - reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen + + reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen + (op_type ? -authsize : authsize)); if (reqctx->dst_nents <= 0) { pr_err("GCM:Invalid Destination sg entries\n"); @@ -1923,7 +1925,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, sg_param.obsize = req->cryptlen + (op_type ? 
-authsize : authsize); sg_param.qid = qid; sg_param.align = 0; - if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst, + if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst, &sg_param)) goto dstmap_fail; @@ -1937,7 +1939,8 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, write_sg_to_skb(skb, &frags, src, req->cryptlen); } else { aes_gcm_empty_pld_pad(req->dst, authsize - 1); - write_sg_to_skb(skb, &frags, dst, crypt_len); + write_sg_to_skb(skb, &frags, reqctx->dst, crypt_len); + } create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1, @@ -2189,8 +2192,8 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key, unsigned int ck_size; int ret = 0, key_ctx_size = 0; - if (get_aead_subtype(aead) == - CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) { + if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 && + keylen > 3) { keylen -= 4; /* nonce/salt is present in the last 4 bytes */ memcpy(aeadctx->salt, key + keylen, 4); } diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c index 918da8e6e2d8..1c65f07e1cc9 100644 --- a/drivers/crypto/chelsio/chcr_core.c +++ b/drivers/crypto/chelsio/chcr_core.c @@ -52,6 +52,7 @@ static struct cxgb4_uld_info chcr_uld_info = { int assign_chcr_device(struct chcr_dev **dev) { struct uld_ctx *u_ctx; + int ret = -ENXIO; /* * Which device to use if multiple devices are available TODO @@ -59,15 +60,14 @@ int assign_chcr_device(struct chcr_dev **dev) * must go to the same device to maintain the ordering. */ mutex_lock(&dev_mutex); /* TODO ? */ - u_ctx = list_first_entry(&uld_ctx_list, struct uld_ctx, entry); - if (!u_ctx) { - mutex_unlock(&dev_mutex); - return -ENXIO; + list_for_each_entry(u_ctx, &uld_ctx_list, entry) + if (u_ctx && u_ctx->dev) { + *dev = u_ctx->dev; + ret = 0; + break; } - - *dev = u_ctx->dev; mutex_unlock(&dev_mutex); - return 0; + return ret; } static int chcr_dev_add(struct uld_ctx *u_ctx) @@ -202,10 +202,8 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state) static int __init chcr_crypto_init(void) { - if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info)) { + if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info)) pr_err("ULD register fail: No chcr crypto support in cxgb4"); - return -1; - } return 0; } diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h index d5af7d64a763..7ec0a8f12475 100644 --- a/drivers/crypto/chelsio/chcr_crypto.h +++ b/drivers/crypto/chelsio/chcr_crypto.h @@ -158,6 +158,9 @@ struct ablk_ctx { }; struct chcr_aead_reqctx { struct sk_buff *skb; + struct scatterlist *dst; + struct scatterlist srcffwd[2]; + struct scatterlist dstffwd[2]; short int dst_nents; u16 verify; u8 iv[CHCR_MAX_CRYPTO_IV_LEN]; diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c index bc5cbc193aae..5b2d78a5b5aa 100644 --- a/drivers/crypto/qat/qat_c62x/adf_drv.c +++ b/drivers/crypto/qat/qat_c62x/adf_drv.c @@ -233,7 +233,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) &hw_data->accel_capabilities_mask); /* Find and map all the device's BARS */ - i = 0; + i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 
1 : 0; bar_mask = pci_select_bars(pdev, IORESOURCE_MEM); for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask, ADF_PCI_MAX_BARS * 2) { diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h index e8822536530b..33f0a6251e38 100644 --- a/drivers/crypto/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h @@ -69,6 +69,7 @@ #define ADF_ERRSOU5 (0x3A000 + 0xD8) #define ADF_DEVICE_FUSECTL_OFFSET 0x40 #define ADF_DEVICE_LEGFUSE_OFFSET 0x4C +#define ADF_DEVICE_FUSECTL_MASK 0x80000000 #define ADF_PCI_MAX_BARS 3 #define ADF_DEVICE_NAME_LENGTH 32 #define ADF_ETR_MAX_RINGS_PER_BANK 16 diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c index 1e480f140663..8c4fd255a601 100644 --- a/drivers/crypto/qat/qat_common/qat_hal.c +++ b/drivers/crypto/qat/qat_common/qat_hal.c @@ -456,7 +456,7 @@ static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle) unsigned int csr_val; int times = 30; - if (handle->pci_dev->device == ADF_C3XXX_PCI_DEVICE_ID) + if (handle->pci_dev->device != ADF_DH895XCC_PCI_DEVICE_ID) return 0; csr_val = ADF_CSR_RD(csr_addr, 0); @@ -716,7 +716,7 @@ int qat_hal_init(struct adf_accel_dev *accel_dev) (void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v + LOCAL_TO_XFER_REG_OFFSET); handle->pci_dev = pci_info->pci_dev; - if (handle->pci_dev->device != ADF_C3XXX_PCI_DEVICE_ID) { + if (handle->pci_dev->device == ADF_DH895XCC_PCI_DEVICE_ID) { sram_bar = &pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)]; handle->hal_sram_addr_v = sram_bar->virt_addr; diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c index 921dfa047202..260c4b4b492e 100644 --- a/drivers/firmware/efi/libstub/fdt.c +++ b/drivers/firmware/efi/libstub/fdt.c @@ -187,6 +187,7 @@ static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map) struct exit_boot_struct { efi_memory_desc_t *runtime_map; int *runtime_entry_count; + void *new_fdt_addr; }; static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg, @@ -202,7 +203,7 @@ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg, efi_get_virtmap(*map->map, *map->map_size, *map->desc_size, p->runtime_map, p->runtime_entry_count); - return EFI_SUCCESS; + return update_fdt_memmap(p->new_fdt_addr, map); } /* @@ -300,22 +301,13 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, priv.runtime_map = runtime_map; priv.runtime_entry_count = &runtime_entry_count; + priv.new_fdt_addr = (void *)*new_fdt_addr; status = efi_exit_boot_services(sys_table, handle, &map, &priv, exit_boot_func); if (status == EFI_SUCCESS) { efi_set_virtual_address_map_t *svam; - status = update_fdt_memmap((void *)*new_fdt_addr, &map); - if (status != EFI_SUCCESS) { - /* - * The kernel won't get far without the memory map, but - * may still be able to print something meaningful so - * return success here. 
- */ - return EFI_SUCCESS; - } - /* Install the new virtual address map */ svam = sys_table->runtime->set_virtual_address_map; status = svam(runtime_entry_count * desc_size, desc_size, diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index e2b0b1646f99..0635829b18cf 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -254,6 +254,9 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev) } WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0); + if (adev->mode_info.num_crtc) + amdgpu_display_set_vga_render_state(adev, false); + gmc_v6_0_mc_stop(adev, &save); if (gmc_v6_0_wait_for_idle((void *)adev)) { @@ -283,7 +286,6 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev) dev_warn(adev->dev, "Wait for MC idle timedout !\n"); } gmc_v6_0_mc_resume(adev, &save); - amdgpu_display_set_vga_render_state(adev, false); } static int gmc_v6_0_mc_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 50f5cf7b69d1..fdfb1ec17e66 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -2032,13 +2032,16 @@ static void complete_crtc_signaling(struct drm_device *dev, } for_each_crtc_in_state(state, crtc, crtc_state, i) { + struct drm_pending_vblank_event *event = crtc_state->event; /* - * TEST_ONLY and PAGE_FLIP_EVENT are mutually - * exclusive, if they weren't, this code should be - * called on success for TEST_ONLY too. + * Free the allocated event. drm_atomic_helper_setup_commit + * can allocate an event too, so only free it if it's ours + * to prevent a double free in drm_atomic_state_clear. */ - if (crtc_state->event) - drm_event_cancel_free(dev, &crtc_state->event->base); + if (event && (event->base.fence || event->base.file_priv)) { + drm_event_cancel_free(dev, &event->base); + crtc_state->event = NULL; + } } if (!fence_state) diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 34f757bcabae..4594477dee00 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -1666,9 +1666,6 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev, funcs = plane->helper_private; - if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc)) - continue; - if (funcs->prepare_fb) { ret = funcs->prepare_fb(plane, plane_state); if (ret) @@ -1685,9 +1682,6 @@ fail: if (j >= i) continue; - if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc)) - continue; - funcs = plane->helper_private; if (funcs->cleanup_fb) @@ -1954,9 +1948,6 @@ void drm_atomic_helper_cleanup_planes(struct drm_device *dev, for_each_plane_in_state(old_state, plane, plane_state, i) { const struct drm_plane_helper_funcs *funcs; - if (!drm_atomic_helper_framebuffer_changed(dev, old_state, plane_state->crtc)) - continue; - funcs = plane->helper_private; if (funcs->cleanup_fb) diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 5a4526289392..7a7019ac9388 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -225,6 +225,7 @@ int drm_connector_init(struct drm_device *dev, INIT_LIST_HEAD(&connector->probed_modes); INIT_LIST_HEAD(&connector->modes); + mutex_init(&connector->mutex); connector->edid_blob_ptr = NULL; connector->status = connector_status_unknown; @@ -359,6 +360,8 @@ void drm_connector_cleanup(struct drm_connector *connector) connector->funcs->atomic_destroy_state(connector, connector->state); + 
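The complete_crtc_signaling() hunk above turns an unconditional drm_event_cancel_free() into a conditional one: the event is only freed here when this code path allocated it (signalled by base.fence or base.file_priv being set), and the pointer is cleared so drm_atomic_state_clear() cannot free it a second time. A reduced sketch of that ownership check, with placeholder types for everything except the idea:

struct demo_event {
	void *fence;                    /* set only when this path created the event */
	void *file_priv;
};

struct demo_crtc_state {
	struct demo_event *event;
};

/* demo_cancel_free() stands in for drm_event_cancel_free(); assumed helper. */
void demo_cancel_free(struct demo_event *event);

static void demo_complete_signaling(struct demo_crtc_state *state)
{
	struct demo_event *event = state->event;

	/* Free the event only if this path allocated it, and clear the
	 * pointer so a later state teardown cannot free it again. */
	if (event && (event->fence || event->file_priv)) {
		demo_cancel_free(event);
		state->event = NULL;
	}
}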
mutex_destroy(&connector->mutex); + memset(connector, 0, sizeof(*connector)); } EXPORT_SYMBOL(drm_connector_cleanup); @@ -374,14 +377,18 @@ EXPORT_SYMBOL(drm_connector_cleanup); */ int drm_connector_register(struct drm_connector *connector) { - int ret; + int ret = 0; - if (connector->registered) + if (!connector->dev->registered) return 0; + mutex_lock(&connector->mutex); + if (connector->registered) + goto unlock; + ret = drm_sysfs_connector_add(connector); if (ret) - return ret; + goto unlock; ret = drm_debugfs_connector_add(connector); if (ret) { @@ -397,12 +404,14 @@ int drm_connector_register(struct drm_connector *connector) drm_mode_object_register(connector->dev, &connector->base); connector->registered = true; - return 0; + goto unlock; err_debugfs: drm_debugfs_connector_remove(connector); err_sysfs: drm_sysfs_connector_remove(connector); +unlock: + mutex_unlock(&connector->mutex); return ret; } EXPORT_SYMBOL(drm_connector_register); @@ -415,8 +424,11 @@ EXPORT_SYMBOL(drm_connector_register); */ void drm_connector_unregister(struct drm_connector *connector) { - if (!connector->registered) + mutex_lock(&connector->mutex); + if (!connector->registered) { + mutex_unlock(&connector->mutex); return; + } if (connector->funcs->early_unregister) connector->funcs->early_unregister(connector); @@ -425,6 +437,7 @@ void drm_connector_unregister(struct drm_connector *connector) drm_debugfs_connector_remove(connector); connector->registered = false; + mutex_unlock(&connector->mutex); } EXPORT_SYMBOL(drm_connector_unregister); diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index a525751b4559..6594b4088f11 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -745,6 +745,8 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags) if (ret) goto err_minors; + dev->registered = true; + if (dev->driver->load) { ret = dev->driver->load(dev, flags); if (ret) @@ -785,6 +787,8 @@ void drm_dev_unregister(struct drm_device *dev) drm_lastclose(dev); + dev->registered = false; + if (drm_core_check_feature(dev, DRIVER_MODESET)) drm_modeset_unregister_all(dev); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 69bc3b0c4390..8493e19b563a 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1012,6 +1012,8 @@ struct intel_fbc { struct work_struct underrun_work; struct intel_fbc_state_cache { + struct i915_vma *vma; + struct { unsigned int mode_flags; uint32_t hsw_bdw_pixel_rate; @@ -1025,15 +1027,14 @@ struct intel_fbc { } plane; struct { - u64 ilk_ggtt_offset; uint32_t pixel_format; unsigned int stride; - int fence_reg; - unsigned int tiling_mode; } fb; } state_cache; struct intel_fbc_reg_params { + struct i915_vma *vma; + struct { enum pipe pipe; enum plane plane; @@ -1041,10 +1042,8 @@ struct intel_fbc { } crtc; struct { - u64 ggtt_offset; uint32_t pixel_format; unsigned int stride; - int fence_reg; } fb; int cfb_size; @@ -3168,13 +3167,6 @@ i915_gem_object_to_ggtt(struct drm_i915_gem_object *obj, return i915_gem_obj_to_vma(obj, &to_i915(obj->base.dev)->ggtt.base, view); } -static inline unsigned long -i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o, - const struct i915_ggtt_view *view) -{ - return i915_ggtt_offset(i915_gem_object_to_ggtt(o, view)); -} - /* i915_gem_fence_reg.c */ int __must_check i915_vma_get_fence(struct i915_vma *vma); int __must_check i915_vma_put_fence(struct i915_vma *vma); diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c 
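drm_connector_register() and drm_connector_unregister() above become safe against repeated and concurrent calls: registration is skipped until the device itself is registered, and a per-connector mutex plus the registered flag make both paths idempotent. A stripped-down sketch of that pattern (the names are placeholders, not the DRM API):

#include <linux/mutex.h>
#include <linux/types.h>

struct demo_connector {
	struct mutex mutex;
	bool registered;
};

static int demo_connector_register(struct demo_connector *c)
{
	int ret = 0;

	mutex_lock(&c->mutex);
	if (c->registered)
		goto unlock;            /* a second call is a harmless no-op */

	/* sysfs/debugfs/user-visible setup would happen here; on failure
	 * jump to unlock with ret set and registered still false. */
	c->registered = true;
unlock:
	mutex_unlock(&c->mutex);
	return ret;
}

static void demo_connector_unregister(struct demo_connector *c)
{
	mutex_lock(&c->mutex);
	if (c->registered) {
		/* teardown mirrors the setup above */
		c->registered = false;
	}
	mutex_unlock(&c->mutex);
}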
b/drivers/gpu/drm/i915/intel_atomic_plane.c index dbe9fb41ae53..8d3e515f27ba 100644 --- a/drivers/gpu/drm/i915/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/intel_atomic_plane.c @@ -85,6 +85,8 @@ intel_plane_duplicate_state(struct drm_plane *plane) __drm_atomic_helper_plane_duplicate_state(plane, state); + intel_state->vma = NULL; + return state; } @@ -100,6 +102,24 @@ void intel_plane_destroy_state(struct drm_plane *plane, struct drm_plane_state *state) { + struct i915_vma *vma; + + vma = fetch_and_zero(&to_intel_plane_state(state)->vma); + + /* + * FIXME: Normally intel_cleanup_plane_fb handles destruction of vma. + * We currently don't clear all planes during driver unload, so we have + * to be able to unpin vma here for now. + * + * Normally this can only happen during unload when kmscon is disabled + * and userspace doesn't attempt to set a framebuffer at all. + */ + if (vma) { + mutex_lock(&plane->dev->struct_mutex); + intel_unpin_fb_vma(vma); + mutex_unlock(&plane->dev->struct_mutex); + } + drm_atomic_helper_plane_destroy_state(plane, state); } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index f0b9aa7a0483..f1e4a21d4664 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2235,27 +2235,22 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation) i915_vma_pin_fence(vma); } + i915_vma_get(vma); err: intel_runtime_pm_put(dev_priv); return vma; } -void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation) +void intel_unpin_fb_vma(struct i915_vma *vma) { - struct drm_i915_gem_object *obj = intel_fb_obj(fb); - struct i915_ggtt_view view; - struct i915_vma *vma; - - WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex)); - - intel_fill_fb_ggtt_view(&view, fb, rotation); - vma = i915_gem_object_to_ggtt(obj, &view); + lockdep_assert_held(&vma->vm->dev->struct_mutex); if (WARN_ON_ONCE(!vma)) return; i915_vma_unpin_fence(vma); i915_gem_object_unpin_from_display_plane(vma); + i915_vma_put(vma); } static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane, @@ -2750,7 +2745,6 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, struct drm_device *dev = intel_crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *c; - struct intel_crtc *i; struct drm_i915_gem_object *obj; struct drm_plane *primary = intel_crtc->base.primary; struct drm_plane_state *plane_state = primary->state; @@ -2775,20 +2769,20 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, * an fb with another CRTC instead */ for_each_crtc(dev, c) { - i = to_intel_crtc(c); + struct intel_plane_state *state; if (c == &intel_crtc->base) continue; - if (!i->active) + if (!to_intel_crtc(c)->active) continue; - fb = c->primary->fb; - if (!fb) + state = to_intel_plane_state(c->primary->state); + if (!state->vma) continue; - obj = intel_fb_obj(fb); - if (i915_gem_object_ggtt_offset(obj, NULL) == plane_config->base) { + if (intel_plane_ggtt_offset(state) == plane_config->base) { + fb = c->primary->fb; drm_framebuffer_reference(fb); goto valid_fb; } @@ -2809,6 +2803,19 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, return; valid_fb: + mutex_lock(&dev->struct_mutex); + intel_state->vma = + intel_pin_and_fence_fb_obj(fb, primary->state->rotation); + mutex_unlock(&dev->struct_mutex); + if (IS_ERR(intel_state->vma)) { + DRM_ERROR("failed to pin boot fb on pipe %d: %li\n", + intel_crtc->pipe, PTR_ERR(intel_state->vma)); + + intel_state->vma = NULL; 
+ drm_framebuffer_unreference(fb); + return; + } + plane_state->src_x = 0; plane_state->src_y = 0; plane_state->src_w = fb->width << 16; @@ -3104,13 +3111,13 @@ static void i9xx_update_primary_plane(struct drm_plane *primary, I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); if (INTEL_GEN(dev_priv) >= 4) { I915_WRITE(DSPSURF(plane), - intel_fb_gtt_offset(fb, rotation) + + intel_plane_ggtt_offset(plane_state) + intel_crtc->dspaddr_offset); I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); I915_WRITE(DSPLINOFF(plane), linear_offset); } else { I915_WRITE(DSPADDR(plane), - intel_fb_gtt_offset(fb, rotation) + + intel_plane_ggtt_offset(plane_state) + intel_crtc->dspaddr_offset); } POSTING_READ(reg); @@ -3207,7 +3214,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary, I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); I915_WRITE(DSPSURF(plane), - intel_fb_gtt_offset(fb, rotation) + + intel_plane_ggtt_offset(plane_state) + intel_crtc->dspaddr_offset); if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { I915_WRITE(DSPOFFSET(plane), (y << 16) | x); @@ -3230,23 +3237,6 @@ u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv, } } -u32 intel_fb_gtt_offset(struct drm_framebuffer *fb, - unsigned int rotation) -{ - struct drm_i915_gem_object *obj = intel_fb_obj(fb); - struct i915_ggtt_view view; - struct i915_vma *vma; - - intel_fill_fb_ggtt_view(&view, fb, rotation); - - vma = i915_gem_object_to_ggtt(obj, &view); - if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n", - view.type)) - return -1; - - return i915_ggtt_offset(vma); -} - static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) { struct drm_device *dev = intel_crtc->base.dev; @@ -3441,7 +3431,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane, } I915_WRITE(PLANE_SURF(pipe, 0), - intel_fb_gtt_offset(fb, rotation) + surf_addr); + intel_plane_ggtt_offset(plane_state) + surf_addr); POSTING_READ(PLANE_SURF(pipe, 0)); } @@ -11536,7 +11526,7 @@ static void intel_unpin_work_fn(struct work_struct *__work) flush_work(&work->mmio_work); mutex_lock(&dev->struct_mutex); - intel_unpin_fb_obj(work->old_fb, primary->state->rotation); + intel_unpin_fb_vma(work->old_vma); i915_gem_object_put(work->pending_flip_obj); mutex_unlock(&dev->struct_mutex); @@ -12246,8 +12236,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, goto cleanup_pending; } - work->gtt_offset = intel_fb_gtt_offset(fb, primary->state->rotation); - work->gtt_offset += intel_crtc->dspaddr_offset; + work->old_vma = to_intel_plane_state(primary->state)->vma; + to_intel_plane_state(primary->state)->vma = vma; + + work->gtt_offset = i915_ggtt_offset(vma) + intel_crtc->dspaddr_offset; work->rotation = crtc->primary->state->rotation; /* @@ -12301,7 +12293,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, cleanup_request: i915_add_request_no_flush(request); cleanup_unpin: - intel_unpin_fb_obj(fb, crtc->primary->state->rotation); + to_intel_plane_state(primary->state)->vma = work->old_vma; + intel_unpin_fb_vma(vma); cleanup_pending: atomic_dec(&intel_crtc->unpin_work_count); unlock: @@ -14794,6 +14787,8 @@ intel_prepare_plane_fb(struct drm_plane *plane, DRM_DEBUG_KMS("failed to pin object\n"); return PTR_ERR(vma); } + + to_intel_plane_state(new_state)->vma = vma; } return 0; @@ -14812,19 +14807,12 @@ void intel_cleanup_plane_fb(struct drm_plane *plane, struct drm_plane_state *old_state) { - struct drm_i915_private *dev_priv = to_i915(plane->dev); - struct intel_plane_state *old_intel_state; - struct 
drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb); - struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb); - - old_intel_state = to_intel_plane_state(old_state); - - if (!obj && !old_obj) - return; + struct i915_vma *vma; - if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR || - !INTEL_INFO(dev_priv)->cursor_needs_physical)) - intel_unpin_fb_obj(old_state->fb, old_state->rotation); + /* Should only be called after a successful intel_prepare_plane_fb()! */ + vma = fetch_and_zero(&to_intel_plane_state(old_state)->vma); + if (vma) + intel_unpin_fb_vma(vma); } int @@ -15166,7 +15154,7 @@ intel_update_cursor_plane(struct drm_plane *plane, if (!obj) addr = 0; else if (!INTEL_INFO(dev_priv)->cursor_needs_physical) - addr = i915_gem_object_ggtt_offset(obj, NULL); + addr = intel_plane_ggtt_offset(state); else addr = obj->phys_handle->busaddr; @@ -17066,41 +17054,12 @@ void intel_display_resume(struct drm_device *dev) void intel_modeset_gem_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_crtc *c; - struct drm_i915_gem_object *obj; intel_init_gt_powersave(dev_priv); intel_modeset_init_hw(dev); intel_setup_overlay(dev_priv); - - /* - * Make sure any fbs we allocated at startup are properly - * pinned & fenced. When we do the allocation it's too early - * for this. - */ - for_each_crtc(dev, c) { - struct i915_vma *vma; - - obj = intel_fb_obj(c->primary->fb); - if (obj == NULL) - continue; - - mutex_lock(&dev->struct_mutex); - vma = intel_pin_and_fence_fb_obj(c->primary->fb, - c->primary->state->rotation); - mutex_unlock(&dev->struct_mutex); - if (IS_ERR(vma)) { - DRM_ERROR("failed to pin boot fb on pipe %d\n", - to_intel_crtc(c)->pipe); - drm_framebuffer_unreference(c->primary->fb); - c->primary->fb = NULL; - c->primary->crtc = c->primary->state->crtc = NULL; - update_state_fb(c->primary); - c->state->plane_mask &= ~(1 << drm_plane_index(c->primary)); - } - } } int intel_connector_register(struct drm_connector *connector) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index cd72ae171eeb..03a2112004f9 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -377,6 +377,7 @@ struct intel_atomic_state { struct intel_plane_state { struct drm_plane_state base; struct drm_rect clip; + struct i915_vma *vma; struct { u32 offset; @@ -1046,6 +1047,7 @@ struct intel_flip_work { struct work_struct mmio_work; struct drm_crtc *crtc; + struct i915_vma *old_vma; struct drm_framebuffer *old_fb; struct drm_i915_gem_object *pending_flip_obj; struct drm_pending_vblank_event *event; @@ -1273,7 +1275,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector, struct drm_modeset_acquire_ctx *ctx); struct i915_vma * intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation); -void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation); +void intel_unpin_fb_vma(struct i915_vma *vma); struct drm_framebuffer * __intel_framebuffer_create(struct drm_device *dev, struct drm_mode_fb_cmd2 *mode_cmd, @@ -1362,7 +1364,10 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode, int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state); int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state); -u32 intel_fb_gtt_offset(struct drm_framebuffer *fb, unsigned int rotation); +static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state) +{ + return i915_ggtt_offset(state->vma); +} u32 
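The i915 hunks above move framebuffer pin tracking into the plane state: intel_prepare_plane_fb() stores the pinned VMA in the state, every consumer reads the GGTT offset from it, and intel_cleanup_plane_fb() just takes the pointer back and unpins it. The core idiom is "take the pointer, clear it, then release", which the driver spells fetch_and_zero(); an open-coded sketch with placeholder types:

struct demo_vma;

/* demo_unpin() stands in for intel_unpin_fb_vma(); assumed helper. */
void demo_unpin(struct demo_vma *vma);

struct demo_plane_state {
	struct demo_vma *vma;           /* owned pin, set by prepare_fb */
};

static void demo_cleanup_plane_fb(struct demo_plane_state *old_state)
{
	struct demo_vma *vma = old_state->vma;

	old_state->vma = NULL;          /* the state gives up ownership first */
	if (vma)
		demo_unpin(vma);        /* then the pin is dropped exactly once */
}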
skl_plane_ctl_format(uint32_t pixel_format); u32 skl_plane_ctl_tiling(uint64_t fb_modifier); diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index 62f215b12eb5..f3a1d6a5cabe 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c @@ -173,7 +173,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv) if (IS_I945GM(dev_priv)) fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; - fbc_ctl |= params->fb.fence_reg; + fbc_ctl |= params->vma->fence->id; I915_WRITE(FBC_CONTROL, fbc_ctl); } @@ -193,8 +193,8 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv) else dpfc_ctl |= DPFC_CTL_LIMIT_1X; - if (params->fb.fence_reg != I915_FENCE_REG_NONE) { - dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg; + if (params->vma->fence) { + dpfc_ctl |= DPFC_CTL_FENCE_EN | params->vma->fence->id; I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset); } else { I915_WRITE(DPFC_FENCE_YOFF, 0); @@ -251,13 +251,14 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv) break; } - if (params->fb.fence_reg != I915_FENCE_REG_NONE) { + if (params->vma->fence) { dpfc_ctl |= DPFC_CTL_FENCE_EN; if (IS_GEN5(dev_priv)) - dpfc_ctl |= params->fb.fence_reg; + dpfc_ctl |= params->vma->fence->id; if (IS_GEN6(dev_priv)) { I915_WRITE(SNB_DPFC_CTL_SA, - SNB_CPU_FENCE_ENABLE | params->fb.fence_reg); + SNB_CPU_FENCE_ENABLE | + params->vma->fence->id); I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset); } @@ -269,7 +270,8 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv) } I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset); - I915_WRITE(ILK_FBC_RT_BASE, params->fb.ggtt_offset | ILK_FBC_RT_VALID); + I915_WRITE(ILK_FBC_RT_BASE, + i915_ggtt_offset(params->vma) | ILK_FBC_RT_VALID); /* enable it... */ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); @@ -319,10 +321,11 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv) break; } - if (params->fb.fence_reg != I915_FENCE_REG_NONE) { + if (params->vma->fence) { dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN; I915_WRITE(SNB_DPFC_CTL_SA, - SNB_CPU_FENCE_ENABLE | params->fb.fence_reg); + SNB_CPU_FENCE_ENABLE | + params->vma->fence->id); I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset); } else { I915_WRITE(SNB_DPFC_CTL_SA,0); @@ -727,14 +730,6 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc) return effective_w <= max_w && effective_h <= max_h; } -/* XXX replace me when we have VMA tracking for intel_plane_state */ -static int get_fence_id(struct drm_framebuffer *fb) -{ - struct i915_vma *vma = i915_gem_object_to_ggtt(intel_fb_obj(fb), NULL); - - return vma && vma->fence ? 
vma->fence->id : I915_FENCE_REG_NONE; -} - static void intel_fbc_update_state_cache(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) @@ -743,7 +738,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc, struct intel_fbc *fbc = &dev_priv->fbc; struct intel_fbc_state_cache *cache = &fbc->state_cache; struct drm_framebuffer *fb = plane_state->base.fb; - struct drm_i915_gem_object *obj; + + cache->vma = NULL; cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags; if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) @@ -758,16 +754,10 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc, if (!cache->plane.visible) return; - obj = intel_fb_obj(fb); - - /* FIXME: We lack the proper locking here, so only run this on the - * platforms that need. */ - if (IS_GEN(dev_priv, 5, 6)) - cache->fb.ilk_ggtt_offset = i915_gem_object_ggtt_offset(obj, NULL); cache->fb.pixel_format = fb->pixel_format; cache->fb.stride = fb->pitches[0]; - cache->fb.fence_reg = get_fence_id(fb); - cache->fb.tiling_mode = i915_gem_object_get_tiling(obj); + + cache->vma = plane_state->vma; } static bool intel_fbc_can_activate(struct intel_crtc *crtc) @@ -784,7 +774,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc) return false; } - if (!cache->plane.visible) { + if (!cache->vma) { fbc->no_fbc_reason = "primary plane not visible"; return false; } @@ -807,8 +797,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc) * so have no fence associated with it) due to aperture constaints * at the time of pinning. */ - if (cache->fb.tiling_mode != I915_TILING_X || - cache->fb.fence_reg == I915_FENCE_REG_NONE) { + if (!cache->vma->fence) { fbc->no_fbc_reason = "framebuffer not tiled or fenced"; return false; } @@ -888,17 +877,16 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc, * zero. 
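With the FBC state cache holding the plane's VMA directly, the activation checks above collapse into pointer tests: a NULL cached vma means the primary plane is not visible, and a vma without a fence cannot be compressed. A small sketch of those two gates, using stand-in structures:

#include <linux/types.h>

struct demo_fence;

struct demo_vma {
	struct demo_fence *fence;       /* NULL when the mapping is unfenced */
};

struct demo_fbc_cache {
	struct demo_vma *vma;           /* NULL when the plane is not visible */
};

static bool demo_fbc_can_activate(const struct demo_fbc_cache *cache)
{
	if (!cache->vma)
		return false;           /* primary plane not visible */
	if (!cache->vma->fence)
		return false;           /* FBC wants a fenced (X-tiled) framebuffer */
	return true;
}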
*/ memset(params, 0, sizeof(*params)); + params->vma = cache->vma; + params->crtc.pipe = crtc->pipe; params->crtc.plane = crtc->plane; params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc); params->fb.pixel_format = cache->fb.pixel_format; params->fb.stride = cache->fb.stride; - params->fb.fence_reg = cache->fb.fence_reg; params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache); - - params->fb.ggtt_offset = cache->fb.ilk_ggtt_offset; } static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1, diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 8cf2d80f2254..f4a8c4fc57c4 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -284,7 +284,7 @@ static int intelfb_create(struct drm_fb_helper *helper, out_destroy_fbi: drm_fb_helper_release_fbi(helper); out_unpin: - intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0); + intel_unpin_fb_vma(vma); out_unlock: mutex_unlock(&dev->struct_mutex); return ret; @@ -549,7 +549,7 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev) if (ifbdev->fb) { mutex_lock(&ifbdev->helper.dev->struct_mutex); - intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0); + intel_unpin_fb_vma(ifbdev->vma); mutex_unlock(&ifbdev->helper.dev->struct_mutex); drm_framebuffer_remove(&ifbdev->fb->base); diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 8f131a08d440..242a73e66d82 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -273,7 +273,7 @@ skl_update_plane(struct drm_plane *drm_plane, I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl); I915_WRITE(PLANE_SURF(pipe, plane), - intel_fb_gtt_offset(fb, rotation) + surf_addr); + intel_plane_ggtt_offset(plane_state) + surf_addr); POSTING_READ(PLANE_SURF(pipe, plane)); } @@ -458,7 +458,7 @@ vlv_update_plane(struct drm_plane *dplane, I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w); I915_WRITE(SPCNTR(pipe, plane), sprctl); I915_WRITE(SPSURF(pipe, plane), - intel_fb_gtt_offset(fb, rotation) + sprsurf_offset); + intel_plane_ggtt_offset(plane_state) + sprsurf_offset); POSTING_READ(SPSURF(pipe, plane)); } @@ -594,7 +594,7 @@ ivb_update_plane(struct drm_plane *plane, I915_WRITE(SPRSCALE(pipe), sprscale); I915_WRITE(SPRCTL(pipe), sprctl); I915_WRITE(SPRSURF(pipe), - intel_fb_gtt_offset(fb, rotation) + sprsurf_offset); + intel_plane_ggtt_offset(plane_state) + sprsurf_offset); POSTING_READ(SPRSURF(pipe)); } @@ -721,7 +721,7 @@ ilk_update_plane(struct drm_plane *plane, I915_WRITE(DVSSCALE(pipe), dvsscale); I915_WRITE(DVSCNTR(pipe), dvscntr); I915_WRITE(DVSSURF(pipe), - intel_fb_gtt_offset(fb, rotation) + dvssurf_offset); + intel_plane_ggtt_offset(plane_state) + dvssurf_offset); POSTING_READ(DVSSURF(pipe)); } diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c index 74856a8b8f35..e64f52464ecf 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/hw.c +++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c @@ -222,6 +222,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype) uint32_t mpllP; pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP); + mpllP = (mpllP >> 8) & 0xf; if (!mpllP) mpllP = 4; @@ -232,7 +233,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype) uint32_t clock; pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock); - return clock; + return clock / 1000; } ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals); diff --git 
a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h index ccdce1b4eec4..d5e58a38f160 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.h +++ b/drivers/gpu/drm/nouveau/nouveau_fence.h @@ -99,6 +99,7 @@ struct nv84_fence_priv { struct nouveau_bo *bo; struct nouveau_bo *bo_gart; u32 *suspend; + struct mutex mutex; }; int nv84_fence_context_new(struct nouveau_channel *); diff --git a/drivers/gpu/drm/nouveau/nouveau_led.h b/drivers/gpu/drm/nouveau/nouveau_led.h index 187ecdb82002..21a5775028cc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_led.h +++ b/drivers/gpu/drm/nouveau/nouveau_led.h @@ -42,7 +42,7 @@ nouveau_led(struct drm_device *dev) } /* nouveau_led.c */ -#if IS_ENABLED(CONFIG_LEDS_CLASS) +#if IS_REACHABLE(CONFIG_LEDS_CLASS) int nouveau_led_init(struct drm_device *dev); void nouveau_led_suspend(struct drm_device *dev); void nouveau_led_resume(struct drm_device *dev); diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c index 08f9c6fa0f7f..1fba38622744 100644 --- a/drivers/gpu/drm/nouveau/nouveau_usif.c +++ b/drivers/gpu/drm/nouveau/nouveau_usif.c @@ -313,7 +313,8 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc) if (!(ret = nvif_unpack(-ENOSYS, &data, &size, argv->v0, 0, 0, true))) { /* block access to objects not created via this interface */ owner = argv->v0.owner; - if (argv->v0.object == 0ULL) + if (argv->v0.object == 0ULL && + argv->v0.type != NVIF_IOCTL_V0_DEL) argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */ else argv->v0.owner = NVDRM_OBJECT_USIF; diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 2c2c64507661..32097fd615fd 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -4052,6 +4052,11 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) } } + for_each_crtc_in_state(state, crtc, crtc_state, i) { + if (crtc->state->event) + drm_crtc_vblank_get(crtc); + } + /* Update plane(s). 
*/ for_each_plane_in_state(state, plane, plane_state, i) { struct nv50_wndw_atom *asyw = nv50_wndw_atom(plane->state); @@ -4101,6 +4106,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) drm_crtc_send_vblank_event(crtc, crtc->state->event); spin_unlock_irqrestore(&crtc->dev->event_lock, flags); crtc->state->event = NULL; + drm_crtc_vblank_put(crtc); } } diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c index 52b87ae83e7b..f0b322bec7df 100644 --- a/drivers/gpu/drm/nouveau/nv84_fence.c +++ b/drivers/gpu/drm/nouveau/nv84_fence.c @@ -107,8 +107,10 @@ nv84_fence_context_del(struct nouveau_channel *chan) struct nv84_fence_chan *fctx = chan->fence; nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence); + mutex_lock(&priv->mutex); nouveau_bo_vma_del(priv->bo, &fctx->vma_gart); nouveau_bo_vma_del(priv->bo, &fctx->vma); + mutex_unlock(&priv->mutex); nouveau_fence_context_del(&fctx->base); chan->fence = NULL; nouveau_fence_context_free(&fctx->base); @@ -134,11 +136,13 @@ nv84_fence_context_new(struct nouveau_channel *chan) fctx->base.sync32 = nv84_fence_sync32; fctx->base.sequence = nv84_fence_read(chan); + mutex_lock(&priv->mutex); ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma); if (ret == 0) { ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm, &fctx->vma_gart); } + mutex_unlock(&priv->mutex); if (ret) nv84_fence_context_del(chan); @@ -212,6 +216,8 @@ nv84_fence_create(struct nouveau_drm *drm) priv->base.context_base = dma_fence_context_alloc(priv->base.contexts); priv->base.uevent = true; + mutex_init(&priv->mutex); + /* Use VRAM if there is any ; otherwise fallback to system memory */ domain = drm->device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM : /* diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c index 6f0436df0219..f8f2f16c22a2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c @@ -59,7 +59,7 @@ gt215_hda_eld(NV50_DISP_MTHD_V1) ); } for (i = 0; i < size; i++) - nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[0]); + nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[i]); for (; i < 0x60; i++) nvkm_wr32(device, 0x61c440 + soff, (i << 8)); nvkm_mask(device, 0x61c448 + soff, 0x80000003, 0x80000003); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c index 567466f93cd5..0db8efbf1c2e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c @@ -433,8 +433,6 @@ nv50_disp_dptmds_war(struct nvkm_device *device) case 0x94: case 0x96: case 0x98: - case 0xaa: - case 0xac: return true; default: break; diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index e0c143b865f3..30bd4a6a9d46 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -97,9 +97,10 @@ * 2.46.0 - Add PFP_SYNC_ME support on evergreen * 2.47.0 - Add UVD_NO_OP register support * 2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI + * 2.49.0 - DRM_RADEON_GEM_INFO ioctl returns correct vram_size/visible values */ #define KMS_DRIVER_MAJOR 2 -#define KMS_DRIVER_MINOR 48 +#define KMS_DRIVER_MINOR 49 #define KMS_DRIVER_PATCHLEVEL 0 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); int radeon_driver_unload_kms(struct drm_device *dev); diff --git a/drivers/gpu/drm/radeon/radeon_gem.c 
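The gt215 HDA hunk above is a one-character indexing fix: the ELD upload loop wrote data[0] for every slot instead of data[i], so only the first byte of the ELD ever reached the codec. A reduced sketch of the corrected loop, with a placeholder register writer instead of nvkm_wr32() and an illustrative register offset:

#include <linux/types.h>

/* demo_wr32() stands in for nvkm_wr32(); the (index << 8 | byte)
 * encoding mirrors the driver, the offset is illustrative only. */
void demo_wr32(u32 reg, u32 val);

static void demo_write_eld(const u8 *data, int size)
{
	int i;

	for (i = 0; i < size; i++)
		demo_wr32(0x61c440, (i << 8) | data[i]); /* each byte, not data[0] */
	for (; i < 0x60; i++)
		demo_wr32(0x61c440, i << 8);             /* zero-pad the remainder */
}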
b/drivers/gpu/drm/radeon/radeon_gem.c index 0bcffd8a7bd3..96683f5b2b1b 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -220,8 +220,8 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data, man = &rdev->mman.bdev.man[TTM_PL_VRAM]; - args->vram_size = rdev->mc.real_vram_size; - args->vram_visible = (u64)man->size << PAGE_SHIFT; + args->vram_size = (u64)man->size << PAGE_SHIFT; + args->vram_visible = rdev->mc.visible_vram_size; args->vram_visible -= rdev->vram_pin_size; args->gart_size = rdev->mc.gtt_size; args->gart_size -= rdev->gart_pin_size; diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c index cd49cb17eb7f..308dbda700eb 100644 --- a/drivers/hv/ring_buffer.c +++ b/drivers/hv/ring_buffer.c @@ -383,6 +383,7 @@ int hv_ringbuffer_read(struct vmbus_channel *channel, return ret; } + init_cached_read_index(channel); next_read_location = hv_get_next_read_location(inring_info); next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc, sizeof(desc), diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c index 2bbf0c521beb..7d61b566e148 100644 --- a/drivers/iio/adc/palmas_gpadc.c +++ b/drivers/iio/adc/palmas_gpadc.c @@ -775,7 +775,7 @@ static int palmas_adc_wakeup_reset(struct palmas_gpadc *adc) static int palmas_gpadc_suspend(struct device *dev) { - struct iio_dev *indio_dev = dev_to_iio_dev(dev); + struct iio_dev *indio_dev = dev_get_drvdata(dev); struct palmas_gpadc *adc = iio_priv(indio_dev); int wakeup = adc->wakeup1_enable || adc->wakeup2_enable; int ret; @@ -798,7 +798,7 @@ static int palmas_gpadc_suspend(struct device *dev) static int palmas_gpadc_resume(struct device *dev) { - struct iio_dev *indio_dev = dev_to_iio_dev(dev); + struct iio_dev *indio_dev = dev_get_drvdata(dev); struct palmas_gpadc *adc = iio_priv(indio_dev); int wakeup = adc->wakeup1_enable || adc->wakeup2_enable; int ret; diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c index 9a081465c42f..6bb23a49e81e 100644 --- a/drivers/iio/health/afe4403.c +++ b/drivers/iio/health/afe4403.c @@ -422,7 +422,7 @@ MODULE_DEVICE_TABLE(of, afe4403_of_match); static int __maybe_unused afe4403_suspend(struct device *dev) { - struct iio_dev *indio_dev = dev_to_iio_dev(dev); + struct iio_dev *indio_dev = spi_get_drvdata(to_spi_device(dev)); struct afe4403_data *afe = iio_priv(indio_dev); int ret; @@ -443,7 +443,7 @@ static int __maybe_unused afe4403_suspend(struct device *dev) static int __maybe_unused afe4403_resume(struct device *dev) { - struct iio_dev *indio_dev = dev_to_iio_dev(dev); + struct iio_dev *indio_dev = spi_get_drvdata(to_spi_device(dev)); struct afe4403_data *afe = iio_priv(indio_dev); int ret; diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c index 45266404f7e3..964f5231a831 100644 --- a/drivers/iio/health/afe4404.c +++ b/drivers/iio/health/afe4404.c @@ -428,7 +428,7 @@ MODULE_DEVICE_TABLE(of, afe4404_of_match); static int __maybe_unused afe4404_suspend(struct device *dev) { - struct iio_dev *indio_dev = dev_to_iio_dev(dev); + struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev)); struct afe4404_data *afe = iio_priv(indio_dev); int ret; @@ -449,7 +449,7 @@ static int __maybe_unused afe4404_suspend(struct device *dev) static int __maybe_unused afe4404_resume(struct device *dev) { - struct iio_dev *indio_dev = dev_to_iio_dev(dev); + struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev)); struct afe4404_data *afe = iio_priv(indio_dev); int ret; diff --git 
a/drivers/iio/health/max30100.c b/drivers/iio/health/max30100.c index 90ab8a2d2846..183c14329d6e 100644 --- a/drivers/iio/health/max30100.c +++ b/drivers/iio/health/max30100.c @@ -238,7 +238,7 @@ static irqreturn_t max30100_interrupt_handler(int irq, void *private) mutex_lock(&data->lock); - while (cnt || (cnt = max30100_fifo_count(data) > 0)) { + while (cnt || (cnt = max30100_fifo_count(data)) > 0) { ret = max30100_read_measurement(data); if (ret) break; diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c index 9c47bc98f3ac..2a22ad920333 100644 --- a/drivers/iio/humidity/dht11.c +++ b/drivers/iio/humidity/dht11.c @@ -71,7 +71,8 @@ * a) select an implementation using busy loop polling on those systems * b) use the checksum to do some probabilistic decoding */ -#define DHT11_START_TRANSMISSION 18 /* ms */ +#define DHT11_START_TRANSMISSION_MIN 18000 /* us */ +#define DHT11_START_TRANSMISSION_MAX 20000 /* us */ #define DHT11_MIN_TIMERES 34000 /* ns */ #define DHT11_THRESHOLD 49000 /* ns */ #define DHT11_AMBIG_LOW 23000 /* ns */ @@ -228,7 +229,8 @@ static int dht11_read_raw(struct iio_dev *iio_dev, ret = gpio_direction_output(dht11->gpio, 0); if (ret) goto err; - msleep(DHT11_START_TRANSMISSION); + usleep_range(DHT11_START_TRANSMISSION_MIN, + DHT11_START_TRANSMISSION_MAX); ret = gpio_direction_input(dht11->gpio); if (ret) goto err; diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 7c6c57216bf2..8a9f742d8ed7 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1534,18 +1534,18 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string return PTR_ERR(key); } - rcu_read_lock(); + down_read(&key->sem); ukp = user_key_payload(key); if (!ukp) { - rcu_read_unlock(); + up_read(&key->sem); key_put(key); kzfree(new_key_string); return -EKEYREVOKED; } if (cc->key_size != ukp->datalen) { - rcu_read_unlock(); + up_read(&key->sem); key_put(key); kzfree(new_key_string); return -EINVAL; @@ -1553,7 +1553,7 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string memcpy(cc->key, ukp->data, cc->key_size); - rcu_read_unlock(); + up_read(&key->sem); key_put(key); /* clear the flag since following operations may invalidate previously valid key */ diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 6400cffb986d..3570bcb7a4a4 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -427,7 +427,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes) unsigned long flags; struct priority_group *pg; struct pgpath *pgpath; - bool bypassed = true; + unsigned bypassed = 1; if (!atomic_read(&m->nr_valid_paths)) { clear_bit(MPATHF_QUEUE_IO, &m->flags); @@ -466,7 +466,7 @@ check_current_pg: */ do { list_for_each_entry(pg, &m->priority_groups, list) { - if (pg->bypassed == bypassed) + if (pg->bypassed == !!bypassed) continue; pgpath = choose_path_in_pg(m, pg, nr_bytes); if (!IS_ERR_OR_NULL(pgpath)) { diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 9d7275fb541a..6e702fc69a83 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -779,6 +779,10 @@ static void dm_old_request_fn(struct request_queue *q) int srcu_idx; struct dm_table *map = dm_get_live_table(md, &srcu_idx); + if (unlikely(!map)) { + dm_put_live_table(md, srcu_idx); + return; + } ti = dm_table_find_target(map, pos); dm_put_live_table(md, srcu_idx); } diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c index ebb5e391b800..87a6b65ed3af 100644 --- a/drivers/media/cec/cec-adap.c +++ 
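The max30100 change above is an operator-precedence fix: in "cnt = max30100_fifo_count(data) > 0" the comparison binds tighter than the assignment, so cnt received the 0/1 result of the comparison rather than the FIFO depth and the loop's bookkeeping never reflected how many samples were actually queued. A sketch of the corrected drain loop, with placeholder helpers in place of the driver functions:

/* demo_fifo_count() stands in for max30100_fifo_count(); assumed to
 * return the number of queued samples or a negative errno.
 * demo_read_sample() consumes one sample. */
int demo_fifo_count(void *data);
int demo_read_sample(void *data);

static int demo_drain_fifo(void *data)
{
	int cnt = 0, ret = 0;

	/* '(cnt = f(data)) > 0' assigns first, then compares.  Without the
	 * inner parentheses cnt would hold only the comparison result. */
	while (cnt || (cnt = demo_fifo_count(data)) > 0) {
		ret = demo_read_sample(data);
		if (ret)
			break;
		cnt--;
	}
	return ret;
}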
b/drivers/media/cec/cec-adap.c @@ -1206,7 +1206,7 @@ static int cec_config_thread_func(void *arg) las->log_addr[i] = CEC_LOG_ADDR_INVALID; if (last_la == CEC_LOG_ADDR_INVALID || last_la == CEC_LOG_ADDR_UNREGISTERED || - !(last_la & type2mask[type])) + !((1 << last_la) & type2mask[type])) last_la = la_list[0]; err = cec_config_log_addr(adap, i, last_la); diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 23909804ffb8..0def99590d16 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -2733,7 +2733,8 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id) if (intmask & SDHCI_INT_RETUNE) mmc_retune_needed(host->mmc); - if (intmask & SDHCI_INT_CARD_INT) { + if ((intmask & SDHCI_INT_CARD_INT) && + (host->ier & SDHCI_INT_CARD_INT)) { sdhci_enable_sdio_irq_nolock(host, false); host->thread_isr |= SDHCI_INT_CARD_INT; result = IRQ_WAKE_THREAD; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h index 87226685f742..8fa18fc17cd2 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h @@ -1014,9 +1014,7 @@ static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value) { - u8 __iomem *reg_addr = ACCESS_ONCE(base); - - writel(value, reg_addr + reg); + writel(value, base + reg); } #define dsaf_write_dev(a, reg, value) \ @@ -1024,9 +1022,7 @@ static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value) static inline u32 dsaf_read_reg(u8 __iomem *base, u32 reg) { - u8 __iomem *reg_addr = ACCESS_ONCE(base); - - return readl(reg_addr + reg); + return readl(base + reg); } static inline void dsaf_write_syscon(struct regmap *base, u32 reg, u32 value) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index ca730d4abbb4..c4d714fcc7da 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -1113,7 +1113,7 @@ static int mlx4_en_set_ringparam(struct net_device *dev, memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile)); new_prof.tx_ring_size = tx_size; new_prof.rx_ring_size = rx_size; - err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof); + err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true); if (err) goto out; @@ -1788,7 +1788,7 @@ static int mlx4_en_set_channels(struct net_device *dev, new_prof.tx_ring_num[TX_XDP] = xdp_count; new_prof.rx_ring_num = channel->rx_count; - err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof); + err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true); if (err) goto out; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 60a021c34881..748e9f65c386 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2048,6 +2048,8 @@ static void mlx4_en_free_resources(struct mlx4_en_priv *priv) if (priv->tx_cq[t] && priv->tx_cq[t][i]) mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]); } + kfree(priv->tx_ring[t]); + kfree(priv->tx_cq[t]); } for (i = 0; i < priv->rx_ring_num; i++) { @@ -2190,9 +2192,11 @@ static void mlx4_en_update_priv(struct mlx4_en_priv *dst, int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv, struct mlx4_en_priv *tmp, - struct mlx4_en_port_profile *prof) + struct mlx4_en_port_profile *prof, + bool carry_xdp_prog) { - int t; + struct bpf_prog *xdp_prog; + int i, t; mlx4_en_copy_priv(tmp, priv, prof); @@ -2206,6 
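The cec-adap change above fixes a value-versus-mask mix-up: last_la is a logical address number, while type2mask[] entries are bitmasks of acceptable addresses, so the address has to be converted to a bit before the membership test. In miniature, with CEC logical addresses assumed to fit in 16 bits:

#include <linux/types.h>

/* True when the previously claimed logical address is still acceptable
 * for this device type; 'mask' carries one bit per logical address. */
static bool demo_la_still_valid(unsigned int last_la, u16 mask)
{
	return ((1 << last_la) & mask) != 0;    /* not: last_la & mask */
}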
+2210,23 @@ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv, } return -ENOMEM; } + + /* All rx_rings has the same xdp_prog. Pick the first one. */ + xdp_prog = rcu_dereference_protected( + priv->rx_ring[0]->xdp_prog, + lockdep_is_held(&priv->mdev->state_lock)); + + if (xdp_prog && carry_xdp_prog) { + xdp_prog = bpf_prog_add(xdp_prog, tmp->rx_ring_num); + if (IS_ERR(xdp_prog)) { + mlx4_en_free_resources(tmp); + return PTR_ERR(xdp_prog); + } + for (i = 0; i < tmp->rx_ring_num; i++) + rcu_assign_pointer(tmp->rx_ring[i]->xdp_prog, + xdp_prog); + } + return 0; } @@ -2220,7 +2241,6 @@ void mlx4_en_destroy_netdev(struct net_device *dev) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; - int t; en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port); @@ -2254,11 +2274,6 @@ void mlx4_en_destroy_netdev(struct net_device *dev) mlx4_en_free_resources(priv); mutex_unlock(&mdev->state_lock); - for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { - kfree(priv->tx_ring[t]); - kfree(priv->tx_cq[t]); - } - free_netdev(dev); } @@ -2761,7 +2776,7 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog) en_warn(priv, "Reducing the number of TX rings, to not exceed the max total rings number.\n"); } - err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof); + err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, false); if (err) { if (prog) bpf_prog_sub(prog, priv->rx_ring_num - 1); @@ -3505,7 +3520,7 @@ int mlx4_en_reset_config(struct net_device *dev, memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile)); memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config)); - err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof); + err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true); if (err) goto out; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index f15ddba3659a..d85e6446f9d9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -515,8 +515,11 @@ void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv) return; for (ring = 0; ring < priv->rx_ring_num; ring++) { - if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) + if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) { + local_bh_disable(); napi_reschedule(&priv->rx_cq[ring]->napi); + local_bh_enable(); + } } } diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index ba1c6cd0cc79..cec59bc264c9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -679,7 +679,8 @@ void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev, int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv, struct mlx4_en_priv *tmp, - struct mlx4_en_port_profile *prof); + struct mlx4_en_port_profile *prof, + bool carry_xdp_prog); void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv, struct mlx4_en_priv *tmp); diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 5cfdb1a1b4c1..fd6ebbefd919 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -1254,6 +1254,9 @@ void netvsc_channel_cb(void *context) netvsc_channel_idle(net_device, q_idx)) return; + /* commit_rd_index() -> hv_signal_on_read() needs this. 
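The mlx4 en_rx.c hunk above wraps the NAPI kick in local_bh_disable()/local_bh_enable(): scheduling NAPI raises NET_RX_SOFTIRQ, and when that is done from process context the softirq only gets to run promptly if bottom halves are disabled and then re-enabled around the call. A minimal sketch of that helper shape (the driver uses napi_reschedule(); plain napi_schedule() is shown here):

#include <linux/bottom_half.h>
#include <linux/netdevice.h>

/* Kick a NAPI instance from process context, e.g. a recovery worker. */
static void demo_kick_napi(struct napi_struct *napi)
{
	local_bh_disable();             /* lets the raised softirq actually run */
	napi_schedule(napi);
	local_bh_enable();
}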
*/ + init_cached_read_index(channel); + while ((desc = get_next_pkt_raw(channel)) != NULL) { netvsc_process_raw_pkt(device, channel, net_device, ndev, desc->trans_id, desc); diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 402618565838..c27011bbe30c 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -681,7 +681,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, size_t linear; if (q->flags & IFF_VNET_HDR) { - vnet_hdr_len = q->vnet_hdr_sz; + vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz); err = -EINVAL; if (len < vnet_hdr_len) @@ -820,7 +820,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q, if (q->flags & IFF_VNET_HDR) { struct virtio_net_hdr vnet_hdr; - vnet_hdr_len = q->vnet_hdr_sz; + vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz); if (iov_iter_count(iter) < vnet_hdr_len) return -EINVAL; diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 92b08383cafa..0d8f4d3847f6 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -920,6 +920,11 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, return -EIO; } + if (!try_module_get(d->driver->owner)) { + dev_err(&dev->dev, "failed to get the device driver module\n"); + return -EIO; + } + get_device(d); /* Assume that if there is no driver, that it doesn't @@ -977,6 +982,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, error: phy_detach(phydev); put_device(d); + module_put(d->driver->owner); if (ndev_owner != bus->owner) module_put(bus->owner); return err; @@ -1059,6 +1065,7 @@ void phy_detach(struct phy_device *phydev) bus = phydev->mdio.bus; put_device(&phydev->mdio.dev); + module_put(phydev->mdio.dev.driver->owner); if (ndev_owner != bus->owner) module_put(bus->owner); } diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 8a7d6b905362..30863e378925 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1207,9 +1207,11 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, } if (tun->flags & IFF_VNET_HDR) { - if (len < tun->vnet_hdr_sz) + int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); + + if (len < vnet_hdr_sz) return -EINVAL; - len -= tun->vnet_hdr_sz; + len -= vnet_hdr_sz; if (!copy_from_iter_full(&gso, sizeof(gso), from)) return -EFAULT; @@ -1220,7 +1222,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, if (tun16_to_cpu(tun, gso.hdr_len) > len) return -EINVAL; - iov_iter_advance(from, tun->vnet_hdr_sz - sizeof(gso)); + iov_iter_advance(from, vnet_hdr_sz - sizeof(gso)); } if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) { @@ -1371,7 +1373,7 @@ static ssize_t tun_put_user(struct tun_struct *tun, vlan_hlen = VLAN_HLEN; if (tun->flags & IFF_VNET_HDR) - vnet_hdr_sz = tun->vnet_hdr_sz; + vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); total = skb->len + vlan_hlen + vnet_hdr_sz; diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c index 3daa41bdd4ea..0acc9b640419 100644 --- a/drivers/net/usb/catc.c +++ b/drivers/net/usb/catc.c @@ -776,7 +776,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id struct net_device *netdev; struct catc *catc; u8 broadcast[ETH_ALEN]; - int i, pktsz; + int pktsz, ret; if (usb_set_interface(usbdev, intf->altsetting->desc.bInterfaceNumber, 1)) { @@ -811,12 +811,8 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id if ((!catc->ctrl_urb) || (!catc->tx_urb) || (!catc->rx_urb) || (!catc->irq_urb)) { dev_err(&intf->dev, "No free urbs 
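The macvtap and tun hunks above apply the same hardening: vnet_hdr_sz can be changed concurrently through an ioctl, so each request loads it once with READ_ONCE() and performs the length check, the subtraction and the iterator advance against that single snapshot instead of re-reading a field that may have changed in between. Shape of the pattern, with a placeholder structure:

#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/types.h>

struct demo_queue {
	int vnet_hdr_sz;                /* writable at any time via ioctl */
};

static int demo_consume_header(struct demo_queue *q, ssize_t len)
{
	int vnet_hdr_sz = READ_ONCE(q->vnet_hdr_sz);    /* one snapshot */

	if (len < vnet_hdr_sz)
		return -EINVAL;
	/* every later use works on the local vnet_hdr_sz, never on the field */
	return vnet_hdr_sz;
}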
available.\n"); - usb_free_urb(catc->ctrl_urb); - usb_free_urb(catc->tx_urb); - usb_free_urb(catc->rx_urb); - usb_free_urb(catc->irq_urb); - free_netdev(netdev); - return -ENOMEM; + ret = -ENOMEM; + goto fail_free; } /* The F5U011 has the same vendor/product as the netmate but a device version of 0x130 */ @@ -844,15 +840,24 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id catc->irq_buf, 2, catc_irq_done, catc, 1); if (!catc->is_f5u011) { + u32 *buf; + int i; + dev_dbg(dev, "Checking memory size\n"); - i = 0x12345678; - catc_write_mem(catc, 0x7a80, &i, 4); - i = 0x87654321; - catc_write_mem(catc, 0xfa80, &i, 4); - catc_read_mem(catc, 0x7a80, &i, 4); + buf = kmalloc(4, GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto fail_free; + } + + *buf = 0x12345678; + catc_write_mem(catc, 0x7a80, buf, 4); + *buf = 0x87654321; + catc_write_mem(catc, 0xfa80, buf, 4); + catc_read_mem(catc, 0x7a80, buf, 4); - switch (i) { + switch (*buf) { case 0x12345678: catc_set_reg(catc, TxBufCount, 8); catc_set_reg(catc, RxBufCount, 32); @@ -867,6 +872,8 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id dev_dbg(dev, "32k Memory\n"); break; } + + kfree(buf); dev_dbg(dev, "Getting MAC from SEEROM.\n"); @@ -913,16 +920,21 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id usb_set_intfdata(intf, catc); SET_NETDEV_DEV(netdev, &intf->dev); - if (register_netdev(netdev) != 0) { - usb_set_intfdata(intf, NULL); - usb_free_urb(catc->ctrl_urb); - usb_free_urb(catc->tx_urb); - usb_free_urb(catc->rx_urb); - usb_free_urb(catc->irq_urb); - free_netdev(netdev); - return -EIO; - } + ret = register_netdev(netdev); + if (ret) + goto fail_clear_intfdata; + return 0; + +fail_clear_intfdata: + usb_set_intfdata(intf, NULL); +fail_free: + usb_free_urb(catc->ctrl_urb); + usb_free_urb(catc->tx_urb); + usb_free_urb(catc->rx_urb); + usb_free_urb(catc->irq_urb); + free_netdev(netdev); + return ret; } static void catc_disconnect(struct usb_interface *intf) diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index 24e803fe9a53..36674484c6fb 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c @@ -126,40 +126,61 @@ static void async_ctrl_callback(struct urb *urb) static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data) { + u8 *buf; int ret; + buf = kmalloc(size, GFP_NOIO); + if (!buf) + return -ENOMEM; + ret = usb_control_msg(pegasus->usb, usb_rcvctrlpipe(pegasus->usb, 0), PEGASUS_REQ_GET_REGS, PEGASUS_REQT_READ, 0, - indx, data, size, 1000); + indx, buf, size, 1000); if (ret < 0) netif_dbg(pegasus, drv, pegasus->net, "%s returned %d\n", __func__, ret); + else if (ret <= size) + memcpy(data, buf, ret); + kfree(buf); return ret; } -static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data) +static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size, + const void *data) { + u8 *buf; int ret; + buf = kmemdup(data, size, GFP_NOIO); + if (!buf) + return -ENOMEM; + ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0), PEGASUS_REQ_SET_REGS, PEGASUS_REQT_WRITE, 0, - indx, data, size, 100); + indx, buf, size, 100); if (ret < 0) netif_dbg(pegasus, drv, pegasus->net, "%s returned %d\n", __func__, ret); + kfree(buf); return ret; } static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data) { + u8 *buf; int ret; + buf = kmemdup(&data, 1, GFP_NOIO); + if (!buf) + return -ENOMEM; + ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0), 
PEGASUS_REQ_SET_REG, PEGASUS_REQT_WRITE, data, - indx, &data, 1, 1000); + indx, buf, 1, 1000); if (ret < 0) netif_dbg(pegasus, drv, pegasus->net, "%s returned %d\n", __func__, ret); + kfree(buf); return ret; } diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index 95b7bd0d7abc..c81c79110cef 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c @@ -155,16 +155,36 @@ static const char driver_name [] = "rtl8150"; */ static int get_registers(rtl8150_t * dev, u16 indx, u16 size, void *data) { - return usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), - RTL8150_REQ_GET_REGS, RTL8150_REQT_READ, - indx, 0, data, size, 500); + void *buf; + int ret; + + buf = kmalloc(size, GFP_NOIO); + if (!buf) + return -ENOMEM; + + ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), + RTL8150_REQ_GET_REGS, RTL8150_REQT_READ, + indx, 0, buf, size, 500); + if (ret > 0 && ret <= size) + memcpy(data, buf, ret); + kfree(buf); + return ret; } -static int set_registers(rtl8150_t * dev, u16 indx, u16 size, void *data) +static int set_registers(rtl8150_t * dev, u16 indx, u16 size, const void *data) { - return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), - RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE, - indx, 0, data, size, 500); + void *buf; + int ret; + + buf = kmemdup(data, size, GFP_NOIO); + if (!buf) + return -ENOMEM; + + ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), + RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE, + indx, 0, buf, size, 500); + kfree(buf); + return ret; } static void async_set_reg_cb(struct urb *urb) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c index efedd918d10a..bcbb0c60f1f1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c @@ -92,7 +92,7 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - char *fw_name = "rtlwifi/rtl8192cfwU.bin"; + char *fw_name; rtl8192ce_bt_reg_init(hw); @@ -161,8 +161,13 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw) } /* request fw */ - if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version)) + if (IS_VENDOR_UMC_A_CUT(rtlhal->version) && + !IS_92C_SERIAL(rtlhal->version)) + fw_name = "rtlwifi/rtl8192cfwU.bin"; + else if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version)) fw_name = "rtlwifi/rtl8192cfwU_B.bin"; + else + fw_name = "rtlwifi/rtl8192cfw.bin"; rtlpriv->max_fw_size = 0x4000; pr_info("Using firmware %s\n", fw_name); diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index a518cb1b59d4..ce3e8dfa10ad 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -52,17 +52,17 @@ static void namespace_blk_release(struct device *dev) kfree(nsblk); } -static struct device_type namespace_io_device_type = { +static const struct device_type namespace_io_device_type = { .name = "nd_namespace_io", .release = namespace_io_release, }; -static struct device_type namespace_pmem_device_type = { +static const struct device_type namespace_pmem_device_type = { .name = "nd_namespace_pmem", .release = namespace_pmem_release, }; -static struct device_type namespace_blk_device_type = { +static const struct device_type namespace_blk_device_type = { .name = "nd_namespace_blk", .release = namespace_blk_release, }; @@ -962,8 +962,8 @@ static ssize_t __size_store(struct device *dev, unsigned 
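catc, pegasus and rtl8150 above all get the same treatment: usb_control_msg() needs a DMA-capable buffer, so register accesses now bounce through a freshly kmalloc'd buffer instead of handing over caller memory (which may live on the stack or in a structure that is not DMA-safe), and reads copy back only the bytes that actually arrived. A generic sketch of the read side; the request and request-type values are placeholders, not any real device protocol:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/usb.h>

static int demo_get_registers(struct usb_device *udev, u16 indx,
			      u16 size, void *data)
{
	void *buf;
	int ret;

	buf = kmalloc(size, GFP_NOIO);          /* DMA-able bounce buffer */
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			      0x10,             /* demo request number */
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      indx, 0, buf, size, 500);
	if (ret > 0 && ret <= size)
		memcpy(data, buf, ret);         /* copy back only what arrived */

	kfree(buf);
	return ret;
}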
long long val) struct nvdimm_drvdata *ndd; struct nd_label_id label_id; u32 flags = 0, remainder; + int rc, i, id = -1; u8 *uuid = NULL; - int rc, i; if (dev->driver || ndns->claim) return -EBUSY; @@ -972,11 +972,13 @@ static ssize_t __size_store(struct device *dev, unsigned long long val) struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); uuid = nspm->uuid; + id = nspm->id; } else if (is_namespace_blk(dev)) { struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); uuid = nsblk->uuid; flags = NSLABEL_FLAG_LOCAL; + id = nsblk->id; } /* @@ -1039,10 +1041,11 @@ static ssize_t __size_store(struct device *dev, unsigned long long val) /* * Try to delete the namespace if we deleted all of its - * allocation, this is not the seed device for the region, and - * it is not actively claimed by a btt instance. + * allocation, this is not the seed or 0th device for the + * region, and it is not actively claimed by a btt, pfn, or dax + * instance. */ - if (val == 0 && nd_region->ns_seed != dev && !ndns->claim) + if (val == 0 && id != 0 && nd_region->ns_seed != dev && !ndns->claim) nd_device_unregister(dev, ND_ASYNC); return rc; diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c index a2ac9e641aa9..6c033c9a2f06 100644 --- a/drivers/nvdimm/pfn_devs.c +++ b/drivers/nvdimm/pfn_devs.c @@ -627,15 +627,12 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn) size = resource_size(&nsio->res); npfns = (size - start_pad - end_trunc - SZ_8K) / SZ_4K; if (nd_pfn->mode == PFN_MODE_PMEM) { - unsigned long memmap_size; - /* * vmemmap_populate_hugepages() allocates the memmap array in * HPAGE_SIZE chunks. */ - memmap_size = ALIGN(64 * npfns, HPAGE_SIZE); - offset = ALIGN(start + SZ_8K + memmap_size + dax_label_reserve, - nd_pfn->align) - start; + offset = ALIGN(start + SZ_8K + 64 * npfns + dax_label_reserve, + max(nd_pfn->align, HPAGE_SIZE)) - start; } else if (nd_pfn->mode == PFN_MODE_RAM) offset = ALIGN(start + SZ_8K + dax_label_reserve, nd_pfn->align) - start; diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 17ac1dce3286..3dd8bcbb3011 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -532,25 +532,32 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev) link = kzalloc(sizeof(*link), GFP_KERNEL); if (!link) return NULL; + INIT_LIST_HEAD(&link->sibling); INIT_LIST_HEAD(&link->children); INIT_LIST_HEAD(&link->link); link->pdev = pdev; - if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) { + + /* + * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe + * hierarchies. 
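The nd_pfn_init() hunk above folds the memmap reservation into one round-up whose alignment is the larger of the namespace alignment and HPAGE_SIZE, which is what vmemmap_populate_hugepages() needs. ALIGN() itself is plain power-of-two round-up; a small standalone check of that arithmetic, with illustrative values only:

```c
#include <stdio.h>

/* Power-of-two round-up, same definition the kernel's ALIGN() uses. */
#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((unsigned long long)(a) - 1))
#define MAX(a, b)       ((a) > (b) ? (a) : (b))

int main(void)
{
        unsigned long long start = 0x100000ULL;           /* illustrative base */
        unsigned long long reserved = 8192 + 64 * 1000;   /* SZ_8K + 64 * npfns */
        unsigned long long ns_align = 4096;               /* namespace alignment */
        unsigned long long hpage = 2 * 1024 * 1024;       /* 2 MiB huge page */

        /* Rounding to max(ns_align, hpage) keeps the offset usable by code
         * that works in HPAGE_SIZE chunks. */
        unsigned long long offset =
                ALIGN_UP(start + reserved, MAX(ns_align, hpage)) - start;

        printf("offset = 0x%llx\n", offset);    /* 0x100000 with these values */
        return 0;
}
```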
+ */ + if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT || + pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE) { + link->root = link; + } else { struct pcie_link_state *parent; + parent = pdev->bus->parent->self->link_state; if (!parent) { kfree(link); return NULL; } + link->parent = parent; + link->root = link->parent->root; list_add(&link->link, &parent->children); } - /* Setup a pointer to the root port link */ - if (!link->parent) - link->root = link; - else - link->root = link->parent->root; list_add(&link->sibling, &link_list); pdev->link_state = link; diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c index e6a512ebeae2..a3ade9e4ef47 100644 --- a/drivers/regulator/axp20x-regulator.c +++ b/drivers/regulator/axp20x-regulator.c @@ -272,7 +272,7 @@ static const struct regulator_desc axp806_regulators[] = { 64, AXP806_DCDCD_V_CTRL, 0x3f, AXP806_PWR_OUT_CTRL1, BIT(3)), AXP_DESC(AXP806, DCDCE, "dcdce", "vine", 1100, 3400, 100, - AXP806_DCDCB_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(4)), + AXP806_DCDCE_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(4)), AXP_DESC(AXP806, ALDO1, "aldo1", "aldoin", 700, 3300, 100, AXP806_ALDO1_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(5)), AXP_DESC(AXP806, ALDO2, "aldo2", "aldoin", 700, 3400, 100, diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c index a43b0e8a438d..988a7472c2ab 100644 --- a/drivers/regulator/fixed.c +++ b/drivers/regulator/fixed.c @@ -30,9 +30,6 @@ #include <linux/of_gpio.h> #include <linux/regulator/of_regulator.h> #include <linux/regulator/machine.h> -#include <linux/acpi.h> -#include <linux/property.h> -#include <linux/gpio/consumer.h> struct fixed_voltage_data { struct regulator_desc desc; @@ -97,44 +94,6 @@ of_get_fixed_voltage_config(struct device *dev, return config; } -/** - * acpi_get_fixed_voltage_config - extract fixed_voltage_config structure info - * @dev: device requesting for fixed_voltage_config - * @desc: regulator description - * - * Populates fixed_voltage_config structure by extracting data through ACPI - * interface, returns a pointer to the populated structure of NULL if memory - * alloc fails. 
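The twl6030 change narrows one branch of a first-match if/else ladder: the old bound (> 1350000) made that branch cover the entire range of the branch below it, so a request between 1.35 V and 1.5 V could never reach the lower selector. A reduced model of the ladder, with the selector codes taken from the diff but otherwise illustrative:

```c
#include <stdio.h>

/* First matching range wins, so the ranges must not overlap. */
static int map_voltage(int min_uV)
{
        if (min_uV > 1800000 && min_uV <= 1900000)
                return 61;
        else if (min_uV > 1500000 && min_uV <= 1800000) /* was "> 1350000" */
                return 60;
        else if (min_uV > 1350000 && min_uV <= 1500000)
                return 59;
        return -1;                                      /* out of range */
}

int main(void)
{
        /* 1.4 V now falls through to selector 59 instead of 60. */
        printf("1400000 -> %d\n", map_voltage(1400000));
        printf("1600000 -> %d\n", map_voltage(1600000));
        return 0;
}
```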
- */ -static struct fixed_voltage_config * -acpi_get_fixed_voltage_config(struct device *dev, - const struct regulator_desc *desc) -{ - struct fixed_voltage_config *config; - const char *supply_name; - struct gpio_desc *gpiod; - int ret; - - config = devm_kzalloc(dev, sizeof(*config), GFP_KERNEL); - if (!config) - return ERR_PTR(-ENOMEM); - - ret = device_property_read_string(dev, "supply-name", &supply_name); - if (!ret) - config->supply_name = supply_name; - - gpiod = gpiod_get(dev, "gpio", GPIOD_ASIS); - if (IS_ERR(gpiod)) - return ERR_PTR(-ENODEV); - - config->gpio = desc_to_gpio(gpiod); - config->enable_high = device_property_read_bool(dev, - "enable-active-high"); - gpiod_put(gpiod); - - return config; -} - static struct regulator_ops fixed_voltage_ops = { }; @@ -155,11 +114,6 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev) &drvdata->desc); if (IS_ERR(config)) return PTR_ERR(config); - } else if (ACPI_HANDLE(&pdev->dev)) { - config = acpi_get_fixed_voltage_config(&pdev->dev, - &drvdata->desc); - if (IS_ERR(config)) - return PTR_ERR(config); } else { config = dev_get_platdata(&pdev->dev); } diff --git a/drivers/regulator/twl6030-regulator.c b/drivers/regulator/twl6030-regulator.c index 4864b9d742c0..716191046a70 100644 --- a/drivers/regulator/twl6030-regulator.c +++ b/drivers/regulator/twl6030-regulator.c @@ -452,7 +452,7 @@ static int twl6030smps_map_voltage(struct regulator_dev *rdev, int min_uV, vsel = 62; else if ((min_uV > 1800000) && (min_uV <= 1900000)) vsel = 61; - else if ((min_uV > 1350000) && (min_uV <= 1800000)) + else if ((min_uV > 1500000) && (min_uV <= 1800000)) vsel = 60; else if ((min_uV > 1350000) && (min_uV <= 1500000)) vsel = 59; diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index ec91bd07f00a..c680d7641311 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -534,7 +534,9 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi, { struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc); + unsigned long flags; int req_size; + int ret; BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize); @@ -562,8 +564,15 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi, req_size = sizeof(cmd->req.cmd); } - if (virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)) != 0) + ret = virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)); + if (ret == -EIO) { + cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET; + spin_lock_irqsave(&req_vq->vq_lock, flags); + virtscsi_complete_cmd(vscsi, cmd); + spin_unlock_irqrestore(&req_vq->vq_lock, flags); + } else if (ret != 0) { return SCSI_MLQUEUE_HOST_BUSY; + } return 0; } diff --git a/drivers/staging/greybus/timesync_platform.c b/drivers/staging/greybus/timesync_platform.c index 113f3d6c4b3a..27f75b17679b 100644 --- a/drivers/staging/greybus/timesync_platform.c +++ b/drivers/staging/greybus/timesync_platform.c @@ -45,12 +45,18 @@ u32 gb_timesync_platform_get_clock_rate(void) int gb_timesync_platform_lock_bus(struct gb_timesync_svc *pdata) { + if (!arche_platform_change_state_cb) + return 0; + return arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_TIME_SYNC, pdata); } void gb_timesync_platform_unlock_bus(void) { + if (!arche_platform_change_state_cb) + return; + arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_ACTIVE, NULL); } diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index d2e50a27140c..24f9f98968a5 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c 
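The greybus timesync fix above checks that arche_platform_change_state_cb has actually been registered before calling through it. The general shape of guarding an optional hook pointer, with hypothetical names:

```c
#include <stdio.h>

/* Optional hook a platform layer may or may not register. */
static int (*change_state_cb)(int state, void *data);

static int lock_bus(void *data)
{
        if (!change_state_cb)
                return 0;       /* nothing registered: treat as success */
        return change_state_cb(1 /* STATE_TIME_SYNC */, data);
}

static void unlock_bus(void)
{
        if (!change_state_cb)
                return;
        change_state_cb(0 /* STATE_ACTIVE */, NULL);
}

int main(void)
{
        /* Safe even though no callback was ever registered. */
        printf("lock_bus() = %d\n", lock_bus(NULL));
        unlock_bus();
        return 0;
}
```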
@@ -37,6 +37,10 @@ static const struct usb_device_id usb_quirk_list[] = { /* CBM - Flash disk */ { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME }, + /* WORLDE easy key (easykey.25) MIDI controller */ + { USB_DEVICE(0x0218, 0x0401), .driver_info = + USB_QUIRK_CONFIG_INTF_STRINGS }, + /* HP 5300/5370C scanner */ { USB_DEVICE(0x03f0, 0x0701), .driver_info = USB_QUIRK_STRING_FETCH_255 }, diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 5490fc51638e..fd80c1b9c823 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -2269,6 +2269,8 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type, if (len < sizeof(*d) || h->interface >= ffs->interfaces_count) return -EINVAL; length = le32_to_cpu(d->dwSize); + if (len < length) + return -EINVAL; type = le32_to_cpu(d->dwPropertyDataType); if (type < USB_EXT_PROP_UNICODE || type > USB_EXT_PROP_UNICODE_MULTI) { @@ -2277,6 +2279,11 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type, return -EINVAL; } pnl = le16_to_cpu(d->wPropertyNameLength); + if (length < 14 + pnl) { + pr_vdebug("invalid os descriptor length: %d pnl:%d (descriptor %d)\n", + length, pnl, type); + return -EINVAL; + } pdl = le32_to_cpu(*(u32 *)((u8 *)data + 10 + pnl)); if (length != 14 + pnl + pdl) { pr_vdebug("invalid os descriptor length: %d pnl:%d pdl:%d (descriptor %d)\n", @@ -2363,6 +2370,9 @@ static int __ffs_data_got_descs(struct ffs_data *ffs, } } if (flags & (1 << i)) { + if (len < 4) { + goto error; + } os_descs_count = get_unaligned_le32(data); data += 4; len -= 4; @@ -2435,7 +2445,8 @@ static int __ffs_data_got_strings(struct ffs_data *ffs, ENTER(); - if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC || + if (unlikely(len < 16 || + get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC || get_unaligned_le32(data + 4) != len)) goto error; str_count = get_unaligned_le32(data + 8); diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index fca288bbc800..772f15821242 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -594,11 +594,11 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, | MUSB_PORT_STAT_RESUME; musb->rh_timer = jiffies + msecs_to_jiffies(USB_RESUME_TIMEOUT); - musb->need_finish_resume = 1; - musb->xceiv->otg->state = OTG_STATE_A_HOST; musb->is_active = 1; musb_host_resume_root_hub(musb); + schedule_delayed_work(&musb->finish_resume_work, + msecs_to_jiffies(USB_RESUME_TIMEOUT)); break; case OTG_STATE_B_WAIT_ACON: musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL; @@ -1925,6 +1925,14 @@ static void musb_pm_runtime_check_session(struct musb *musb) static void musb_irq_work(struct work_struct *data) { struct musb *musb = container_of(data, struct musb, irq_work.work); + int error; + + error = pm_runtime_get_sync(musb->controller); + if (error < 0) { + dev_err(musb->controller, "Could not enable: %i\n", error); + + return; + } musb_pm_runtime_check_session(musb); @@ -1932,6 +1940,9 @@ static void musb_irq_work(struct work_struct *data) musb->xceiv_old_state = musb->xceiv->otg->state; sysfs_notify(&musb->controller->kobj, NULL, "mode"); } + + pm_runtime_mark_last_busy(musb->controller); + pm_runtime_put_autosuspend(musb->controller); } static void musb_recover_from_babble(struct musb *musb) @@ -2710,11 +2721,6 @@ static int musb_resume(struct device *dev) mask = MUSB_DEVCTL_BDEVICE | MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV; if ((devctl & mask) != (musb->context.devctl & mask)) 
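The f_fs.c hunks above add the validation that was missing before each length field is trusted: the declared descriptor size must fit in the bytes remaining, and the property-name length must fit inside the declared size. A self-contained sketch of that style of check on a toy record layout (4-byte total length, 2-byte name length, then payload), not the actual OS descriptor format:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy record: [u32 length][u16 name_len][name bytes][payload...]
 * Every declared size is checked against what is actually available. */
static int parse_record(const uint8_t *data, size_t len)
{
        uint32_t length;
        uint16_t name_len;

        if (len < 6)
                return -1;                      /* header does not even fit */
        memcpy(&length, data, 4);
        if (length > len)
                return -1;                      /* claims more than we have */
        memcpy(&name_len, data + 4, 2);
        if ((uint32_t)6 + name_len > length)
                return -1;                      /* name overruns the record */

        printf("record ok: length=%u name_len=%u\n", length, name_len);
        return (int)length;                     /* bytes consumed */
}

int main(void)
{
        uint8_t buf[16] = { 0 };
        uint32_t length = 10;
        uint16_t name_len = 3;

        memcpy(buf, &length, 4);
        memcpy(buf + 4, &name_len, 2);
        parse_record(buf, sizeof(buf));

        /* A lying name_len is rejected instead of being read out of bounds. */
        name_len = 200;
        memcpy(buf + 4, &name_len, 2);
        if (parse_record(buf, sizeof(buf)) < 0)
                printf("bad record rejected\n");
        return 0;
}
```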
musb->port1_status = 0; - if (musb->need_finish_resume) { - musb->need_finish_resume = 0; - schedule_delayed_work(&musb->finish_resume_work, - msecs_to_jiffies(USB_RESUME_TIMEOUT)); - } /* * The USB HUB code expects the device to be in RPM_ACTIVE once it came @@ -2766,12 +2772,6 @@ static int musb_runtime_resume(struct device *dev) musb_restore_context(musb); - if (musb->need_finish_resume) { - musb->need_finish_resume = 0; - schedule_delayed_work(&musb->finish_resume_work, - msecs_to_jiffies(USB_RESUME_TIMEOUT)); - } - spin_lock_irqsave(&musb->lock, flags); error = musb_run_resume_work(musb); if (error) diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h index ade902ea1221..ce5a18c98c6d 100644 --- a/drivers/usb/musb/musb_core.h +++ b/drivers/usb/musb/musb_core.h @@ -410,7 +410,6 @@ struct musb { /* is_suspended means USB B_PERIPHERAL suspend */ unsigned is_suspended:1; - unsigned need_finish_resume :1; /* may_wakeup means remote wakeup is enabled */ unsigned may_wakeup:1; diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 7ce31a4c7e7f..42cc72e54c05 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -2007,6 +2007,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */ { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 46fca6b75846..1db4b61bdf7b 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -49,6 +49,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) }, { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) }, { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) }, + { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) }, { USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) }, { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) }, { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID_UCSGT) }, diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index e3b7af8adfb7..09d9be88209e 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h @@ -27,6 +27,7 @@ #define ATEN_VENDOR_ID 0x0557 #define ATEN_VENDOR_ID2 0x0547 #define ATEN_PRODUCT_ID 0x2008 +#define ATEN_PRODUCT_ID2 0x2118 #define IODATA_VENDOR_ID 0x04bb #define IODATA_PRODUCT_ID 0x0a03 diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index 1bc6089b9008..696458db7e3c 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -124,6 +124,7 @@ static const struct usb_device_id id_table[] = { {USB_DEVICE(0x1410, 0xa021)}, /* Novatel Gobi 3000 Composite */ {USB_DEVICE(0x413c, 0x8193)}, /* Dell Gobi 3000 QDL */ {USB_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */ + {USB_DEVICE(0x413c, 0x81a6)}, /* Dell DW5570 QDL (MC8805) */ {USB_DEVICE(0x1199, 0x68a4)}, /* Sierra Wireless QDL */ {USB_DEVICE(0x1199, 0x68a5)}, /* Sierra Wireless Modem */ {USB_DEVICE(0x1199, 0x68a8)}, /* Sierra Wireless QDL */ diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c index 128d10282d16..7690e5bf3cf1 100644 --- 
a/drivers/vfio/vfio_iommu_spapr_tce.c +++ b/drivers/vfio/vfio_iommu_spapr_tce.c @@ -1123,12 +1123,11 @@ static long tce_iommu_ioctl(void *iommu_data, mutex_lock(&container->lock); ret = tce_iommu_create_default_window(container); - if (ret) - return ret; - - ret = tce_iommu_create_window(container, create.page_shift, - create.window_size, create.levels, - &create.start_addr); + if (!ret) + ret = tce_iommu_create_window(container, + create.page_shift, + create.window_size, create.levels, + &create.start_addr); mutex_unlock(&container->lock); diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 9f118388a5b7..4269e621e254 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -130,14 +130,14 @@ static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx, static void vhost_init_is_le(struct vhost_virtqueue *vq) { - if (vhost_has_feature(vq, VIRTIO_F_VERSION_1)) - vq->is_le = true; + vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) + || virtio_legacy_is_little_endian(); } #endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */ static void vhost_reset_is_le(struct vhost_virtqueue *vq) { - vq->is_le = virtio_legacy_is_little_endian(); + vhost_init_is_le(vq); } struct vhost_flush_struct { @@ -1714,10 +1714,8 @@ int vhost_vq_init_access(struct vhost_virtqueue *vq) int r; bool is_le = vq->is_le; - if (!vq->private_data) { - vhost_reset_is_le(vq); + if (!vq->private_data) return 0; - } vhost_init_is_le(vq); diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 7e38ed79c3fc..409aeaa49246 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -159,13 +159,6 @@ static bool vring_use_dma_api(struct virtio_device *vdev) if (xen_domain()) return true; - /* - * On ARM-based machines, the DMA ops will do the right thing, - * so always use them with legacy devices. 
- */ - if (IS_ENABLED(CONFIG_ARM) || IS_ENABLED(CONFIG_ARM64)) - return !virtio_has_feature(vdev, VIRTIO_F_VERSION_1); - return false; } @@ -1031,6 +1031,11 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data, struct blk_dax_ctl dax = { 0 }; ssize_t map_len; + if (fatal_signal_pending(current)) { + ret = -EINTR; + break; + } + dax.sector = dax_iomap_sector(iomap, pos); dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK; map_len = dax_map_atomic(iomap->bdev, &dax); diff --git a/fs/iomap.c b/fs/iomap.c index 354a123f170e..a51cb4c07d4d 100644 --- a/fs/iomap.c +++ b/fs/iomap.c @@ -114,6 +114,9 @@ iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags, BUG_ON(pos + len > iomap->offset + iomap->length); + if (fatal_signal_pending(current)) + return -EINTR; + page = grab_cache_page_write_begin(inode->i_mapping, index, flags); if (!page) return -ENOMEM; diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c index 596205d939a1..1fc07a9c70e9 100644 --- a/fs/nfsd/nfs4layouts.c +++ b/fs/nfsd/nfs4layouts.c @@ -223,10 +223,11 @@ nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate, struct nfs4_layout_stateid *ls; struct nfs4_stid *stp; - stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache); + stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache, + nfsd4_free_layout_stateid); if (!stp) return NULL; - stp->sc_free = nfsd4_free_layout_stateid; + get_nfs4_file(fp); stp->sc_file = fp; diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 4b4beaaa4eaa..a0dee8ae9f97 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -633,8 +633,8 @@ out: return co; } -struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, - struct kmem_cache *slab) +struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab, + void (*sc_free)(struct nfs4_stid *)) { struct nfs4_stid *stid; int new_id; @@ -650,6 +650,8 @@ struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, idr_preload_end(); if (new_id < 0) goto out_free; + + stid->sc_free = sc_free; stid->sc_client = cl; stid->sc_stateid.si_opaque.so_id = new_id; stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid; @@ -675,15 +677,12 @@ out_free: static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp) { struct nfs4_stid *stid; - struct nfs4_ol_stateid *stp; - stid = nfs4_alloc_stid(clp, stateid_slab); + stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid); if (!stid) return NULL; - stp = openlockstateid(stid); - stp->st_stid.sc_free = nfs4_free_ol_stateid; - return stp; + return openlockstateid(stid); } static void nfs4_free_deleg(struct nfs4_stid *stid) @@ -781,11 +780,10 @@ alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh, goto out_dec; if (delegation_blocked(¤t_fh->fh_handle)) goto out_dec; - dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab)); + dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg)); if (dp == NULL) goto out_dec; - dp->dl_stid.sc_free = nfs4_free_deleg; /* * delegation seqid's are never incremented. 
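The nfsd changes above make nfs4_alloc_stid() take the per-type free routine as an argument, so a stateid is never reachable with sc_free unset; previously each caller patched the field in after allocation, leaving a window in which a release could hit an unset destructor. The shape of that API change reduced to plain C, with illustrative names:

```c
#include <stdio.h>
#include <stdlib.h>

struct stid {
        int id;
        void (*sc_free)(struct stid *); /* set at allocation, never NULL */
};

static void free_open_stid(struct stid *s)
{
        printf("freeing open stateid %d\n", s->id);
        free(s);
}

/* The destructor is a constructor argument, not a field patched up later. */
static struct stid *alloc_stid(int id, void (*sc_free)(struct stid *))
{
        struct stid *s = malloc(sizeof(*s));

        if (!s)
                return NULL;
        s->id = id;
        s->sc_free = sc_free;
        return s;
}

static void put_stid(struct stid *s)
{
        /* Safe at any point after alloc_stid() returned. */
        s->sc_free(s);
}

int main(void)
{
        struct stid *s = alloc_stid(42, free_open_stid);

        if (s)
                put_stid(s);
        return 0;
}
```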
The 4.1 special * meaning of seqid 0 isn't meaningful, really, but let's avoid @@ -5580,7 +5578,6 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo, stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner); get_nfs4_file(fp); stp->st_stid.sc_file = fp; - stp->st_stid.sc_free = nfs4_free_lock_stateid; stp->st_access_bmap = 0; stp->st_deny_bmap = open_stp->st_deny_bmap; stp->st_openstp = open_stp; @@ -5623,7 +5620,7 @@ find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi, lst = find_lock_stateid(lo, fi); if (lst == NULL) { spin_unlock(&clp->cl_lock); - ns = nfs4_alloc_stid(clp, stateid_slab); + ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid); if (ns == NULL) return NULL; diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h index c9399366f9df..4516e8b7d776 100644 --- a/fs/nfsd/state.h +++ b/fs/nfsd/state.h @@ -603,8 +603,8 @@ extern __be32 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp, __be32 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid, unsigned char typemask, struct nfs4_stid **s, struct nfsd_net *nn); -struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, - struct kmem_cache *slab); +struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab, + void (*sc_free)(struct nfs4_stid *)); void nfs4_unhash_stid(struct nfs4_stid *s); void nfs4_put_stid(struct nfs4_stid *s); void nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid); diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 26c6fdb4bf67..ca13236dbb1f 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -332,37 +332,6 @@ nfsd_sanitize_attrs(struct inode *inode, struct iattr *iap) } } -static __be32 -nfsd_get_write_access(struct svc_rqst *rqstp, struct svc_fh *fhp, - struct iattr *iap) -{ - struct inode *inode = d_inode(fhp->fh_dentry); - int host_err; - - if (iap->ia_size < inode->i_size) { - __be32 err; - - err = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry, - NFSD_MAY_TRUNC | NFSD_MAY_OWNER_OVERRIDE); - if (err) - return err; - } - - host_err = get_write_access(inode); - if (host_err) - goto out_nfserrno; - - host_err = locks_verify_truncate(inode, NULL, iap->ia_size); - if (host_err) - goto out_put_write_access; - return 0; - -out_put_write_access: - put_write_access(inode); -out_nfserrno: - return nfserrno(host_err); -} - /* * Set various file attributes. After this call fhp needs an fh_put. */ @@ -377,7 +346,6 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap, __be32 err; int host_err; bool get_write_count; - int size_change = 0; if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_SIZE)) accmode |= NFSD_MAY_WRITE|NFSD_MAY_OWNER_OVERRIDE; @@ -390,11 +358,11 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap, /* Get inode */ err = fh_verify(rqstp, fhp, ftype, accmode); if (err) - goto out; + return err; if (get_write_count) { host_err = fh_want_write(fhp); if (host_err) - return nfserrno(host_err); + goto out_host_err; } dentry = fhp->fh_dentry; @@ -405,50 +373,59 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap, iap->ia_valid &= ~ATTR_MODE; if (!iap->ia_valid) - goto out; + return 0; nfsd_sanitize_attrs(inode, iap); + if (check_guard && guardtime != inode->i_ctime.tv_sec) + return nfserr_notsync; + /* * The size case is special, it changes the file in addition to the - * attributes. + * attributes, and file systems don't expect it to be mixed with + * "random" attribute changes. 
We thus split out the size change + * into a separate call for vfs_truncate, and do the rest as a + * a separate setattr call. */ if (iap->ia_valid & ATTR_SIZE) { - err = nfsd_get_write_access(rqstp, fhp, iap); - if (err) - goto out; - size_change = 1; + struct path path = { + .mnt = fhp->fh_export->ex_path.mnt, + .dentry = dentry, + }; + bool implicit_mtime = false; /* - * RFC5661, Section 18.30.4: - * Changing the size of a file with SETATTR indirectly - * changes the time_modify and change attributes. - * - * (and similar for the older RFCs) + * vfs_truncate implicity updates the mtime IFF the file size + * actually changes. Avoid the additional seattr call below if + * the only other attribute that the client sends is the mtime. */ - if (iap->ia_size != i_size_read(inode)) - iap->ia_valid |= ATTR_MTIME; - } + if (iap->ia_size != i_size_read(inode) && + ((iap->ia_valid & ~(ATTR_SIZE | ATTR_MTIME)) == 0)) + implicit_mtime = true; - iap->ia_valid |= ATTR_CTIME; + host_err = vfs_truncate(&path, iap->ia_size); + if (host_err) + goto out_host_err; - if (check_guard && guardtime != inode->i_ctime.tv_sec) { - err = nfserr_notsync; - goto out_put_write_access; + iap->ia_valid &= ~ATTR_SIZE; + if (implicit_mtime) + iap->ia_valid &= ~ATTR_MTIME; + if (!iap->ia_valid) + goto done; } + iap->ia_valid |= ATTR_CTIME; + fh_lock(fhp); host_err = notify_change(dentry, iap, NULL); fh_unlock(fhp); - err = nfserrno(host_err); + if (host_err) + goto out_host_err; -out_put_write_access: - if (size_change) - put_write_access(inode); - if (!err) - err = nfserrno(commit_metadata(fhp)); -out: - return err; +done: + host_err = commit_metadata(fhp); +out_host_err: + return nfserrno(host_err); } #if defined(CONFIG_NFSD_V4) diff --git a/fs/proc/page.c b/fs/proc/page.c index a2066e6dee90..2726536489b1 100644 --- a/fs/proc/page.c +++ b/fs/proc/page.c @@ -173,7 +173,8 @@ u64 stable_page_flags(struct page *page) u |= kpf_copy_bit(k, KPF_ACTIVE, PG_active); u |= kpf_copy_bit(k, KPF_RECLAIM, PG_reclaim); - u |= kpf_copy_bit(k, KPF_SWAPCACHE, PG_swapcache); + if (PageSwapCache(page)) + u |= 1 << KPF_SWAPCACHE; u |= kpf_copy_bit(k, KPF_SWAPBACKED, PG_swapbacked); u |= kpf_copy_bit(k, KPF_UNEVICTABLE, PG_unevictable); diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h index 63554e9f6e0c..719db1968d81 100644 --- a/include/asm-generic/export.h +++ b/include/asm-generic/export.h @@ -9,18 +9,15 @@ #ifndef KSYM_ALIGN #define KSYM_ALIGN 8 #endif -#ifndef KCRC_ALIGN -#define KCRC_ALIGN 8 -#endif #else #define __put .long #ifndef KSYM_ALIGN #define KSYM_ALIGN 4 #endif +#endif #ifndef KCRC_ALIGN #define KCRC_ALIGN 4 #endif -#endif #ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX #define KSYM(name) _##name @@ -52,7 +49,11 @@ KSYM(__kstrtab_\name): .section ___kcrctab\sec+\name,"a" .balign KCRC_ALIGN KSYM(__kcrctab_\name): - __put KSYM(__crc_\name) +#if defined(CONFIG_MODULE_REL_CRCS) + .long KSYM(__crc_\name) - . +#else + .long KSYM(__crc_\name) +#endif .weak KSYM(__crc_\name) .previous #endif diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 192016e2b518..9c4ee144b5f6 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -517,6 +517,7 @@ struct drm_device { struct drm_minor *control; /**< Control node */ struct drm_minor *primary; /**< Primary node */ struct drm_minor *render; /**< Render node */ + bool registered; /* currently active master for this device. 
Protected by master_mutex */ struct drm_master *master; diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index a9b95246e26e..045a97cbeba2 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -381,6 +381,8 @@ struct drm_connector_funcs { * core drm connector interfaces. Everything added from this callback * should be unregistered in the early_unregister callback. * + * This is called while holding drm_connector->mutex. + * * Returns: * * 0 on success, or a negative error code on failure. @@ -395,6 +397,8 @@ struct drm_connector_funcs { * late_register(). It is called from drm_connector_unregister(), * early in the driver unload sequence to disable userspace access * before data structures are torndown. + * + * This is called while holding drm_connector->mutex. */ void (*early_unregister)(struct drm_connector *connector); @@ -559,7 +563,6 @@ struct drm_cmdline_mode { * @interlace_allowed: can this connector handle interlaced modes? * @doublescan_allowed: can this connector handle doublescan? * @stereo_allowed: can this connector handle stereo modes? - * @registered: is this connector exposed (registered) with userspace? * @modes: modes available on this connector (from fill_modes() + user) * @status: one of the drm_connector_status enums (connected, not, or unknown) * @probed_modes: list of modes derived directly from the display @@ -608,6 +611,13 @@ struct drm_connector { char *name; /** + * @mutex: Lock for general connector state, but currently only protects + * @registered. Most of the connector state is still protected by the + * mutex in &drm_mode_config. + */ + struct mutex mutex; + + /** * @index: Compacted connector index, which matches the position inside * the mode_config.list for drivers not supporting hot-add/removing. Can * be used as an array index. It is invariant over the lifetime of the @@ -620,6 +630,10 @@ struct drm_connector { bool interlace_allowed; bool doublescan_allowed; bool stereo_allowed; + /** + * @registered: Is this connector exposed (registered) with userspace? + * Protected by @mutex. 
+ */ bool registered; struct list_head modes; /* list of modes on this connector */ diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index d936a0021839..921acaaa1601 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -8,9 +8,7 @@ enum cpuhp_state { CPUHP_CREATE_THREADS, CPUHP_PERF_PREPARE, CPUHP_PERF_X86_PREPARE, - CPUHP_PERF_X86_UNCORE_PREP, CPUHP_PERF_X86_AMD_UNCORE_PREP, - CPUHP_PERF_X86_RAPL_PREP, CPUHP_PERF_BFIN, CPUHP_PERF_POWER, CPUHP_PERF_SUPERH, @@ -86,7 +84,6 @@ enum cpuhp_state { CPUHP_AP_IRQ_ARMADA_XP_STARTING, CPUHP_AP_IRQ_BCM2836_STARTING, CPUHP_AP_ARM_MVEBU_COHERENCY, - CPUHP_AP_PERF_X86_UNCORE_STARTING, CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING, CPUHP_AP_PERF_X86_STARTING, CPUHP_AP_PERF_X86_AMD_IBS_STARTING, diff --git a/include/linux/export.h b/include/linux/export.h index 2a0f61fbc731..1a1dfdb2a5c6 100644 --- a/include/linux/export.h +++ b/include/linux/export.h @@ -43,12 +43,19 @@ extern struct module __this_module; #ifdef CONFIG_MODVERSIONS /* Mark the CRC weak since genksyms apparently decides not to * generate a checksums for some symbols */ +#if defined(CONFIG_MODULE_REL_CRCS) #define __CRC_SYMBOL(sym, sec) \ - extern __visible void *__crc_##sym __attribute__((weak)); \ - static const unsigned long __kcrctab_##sym \ - __used \ - __attribute__((section("___kcrctab" sec "+" #sym), used)) \ - = (unsigned long) &__crc_##sym; + asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \ + " .weak " VMLINUX_SYMBOL_STR(__crc_##sym) " \n" \ + " .long " VMLINUX_SYMBOL_STR(__crc_##sym) " - . \n" \ + " .previous \n"); +#else +#define __CRC_SYMBOL(sym, sec) \ + asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \ + " .weak " VMLINUX_SYMBOL_STR(__crc_##sym) " \n" \ + " .long " VMLINUX_SYMBOL_STR(__crc_##sym) " \n" \ + " .previous \n"); +#endif #else #define __CRC_SYMBOL(sym, sec) #endif diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 42fe43fb0c80..183efde54269 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -128,6 +128,7 @@ struct hv_ring_buffer_info { u32 ring_data_startoffset; u32 priv_write_index; u32 priv_read_index; + u32 cached_read_index; }; /* @@ -180,6 +181,19 @@ static inline u32 hv_get_bytes_to_write(struct hv_ring_buffer_info *rbi) return write; } +static inline u32 hv_get_cached_bytes_to_write( + const struct hv_ring_buffer_info *rbi) +{ + u32 read_loc, write_loc, dsize, write; + + dsize = rbi->ring_datasize; + read_loc = rbi->cached_read_index; + write_loc = rbi->ring_buffer->write_index; + + write = write_loc >= read_loc ? dsize - (write_loc - read_loc) : + read_loc - write_loc; + return write; +} /* * VMBUS version is 32 bit entity broken up into * two 16 bit quantities: major_number. minor_number. 
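hv_get_cached_bytes_to_write() above computes the writable space of the VMBus ring from a cached read index, so (together with the hv_signal_on_read() change further on) the host is signalled only when the current read pass is what pushed free space past pending_sz. The free-space arithmetic itself is the usual circular-buffer formula; a standalone check with made-up sizes:

```c
#include <stdint.h>
#include <stdio.h>

/* Bytes that can still be written into a ring of size dsize when the
 * producer is at write_loc and the consumer at read_loc. */
static uint32_t bytes_to_write(uint32_t read_loc, uint32_t write_loc,
                               uint32_t dsize)
{
        return write_loc >= read_loc ? dsize - (write_loc - read_loc)
                                     : read_loc - write_loc;
}

int main(void)
{
        uint32_t dsize = 4096;

        /* Writer ahead of reader: free space wraps around the end. */
        printf("%u\n", bytes_to_write(100, 1000, dsize));      /* 3196 */
        /* Writer wrapped, now behind the reader. */
        printf("%u\n", bytes_to_write(3000, 500, dsize));      /* 2500 */
        return 0;
}
```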
@@ -1488,7 +1502,7 @@ hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info) static inline void hv_signal_on_read(struct vmbus_channel *channel) { - u32 cur_write_sz; + u32 cur_write_sz, cached_write_sz; u32 pending_sz; struct hv_ring_buffer_info *rbi = &channel->inbound; @@ -1512,12 +1526,24 @@ static inline void hv_signal_on_read(struct vmbus_channel *channel) cur_write_sz = hv_get_bytes_to_write(rbi); - if (cur_write_sz >= pending_sz) + if (cur_write_sz < pending_sz) + return; + + cached_write_sz = hv_get_cached_bytes_to_write(rbi); + if (cached_write_sz < pending_sz) vmbus_setevent(channel); return; } +static inline void +init_cached_read_index(struct vmbus_channel *channel) +{ + struct hv_ring_buffer_info *rbi = &channel->inbound; + + rbi->cached_read_index = rbi->ring_buffer->read_index; +} + /* * An API to support in-place processing of incoming VMBUS packets. */ @@ -1569,6 +1595,8 @@ static inline void put_pkt_raw(struct vmbus_channel *channel, * This call commits the read index and potentially signals the host. * Here is the pattern for using the "in-place" consumption APIs: * + * init_cached_read_index(); + * * while (get_next_pkt_raw() { * process the packet "in-place"; * put_pkt_raw(); diff --git a/include/linux/irq.h b/include/linux/irq.h index e79875574b39..39e3254e5769 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -184,6 +184,7 @@ struct irq_data { * * IRQD_TRIGGER_MASK - Mask for the trigger type bits * IRQD_SETAFFINITY_PENDING - Affinity setting is pending + * IRQD_ACTIVATED - Interrupt has already been activated * IRQD_NO_BALANCING - Balancing disabled for this IRQ * IRQD_PER_CPU - Interrupt is per cpu * IRQD_AFFINITY_SET - Interrupt affinity was set @@ -202,6 +203,7 @@ struct irq_data { enum { IRQD_TRIGGER_MASK = 0xf, IRQD_SETAFFINITY_PENDING = (1 << 8), + IRQD_ACTIVATED = (1 << 9), IRQD_NO_BALANCING = (1 << 10), IRQD_PER_CPU = (1 << 11), IRQD_AFFINITY_SET = (1 << 12), @@ -312,6 +314,21 @@ static inline bool irqd_affinity_is_managed(struct irq_data *d) return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED; } +static inline bool irqd_is_activated(struct irq_data *d) +{ + return __irqd_to_state(d) & IRQD_ACTIVATED; +} + +static inline void irqd_set_activated(struct irq_data *d) +{ + __irqd_to_state(d) |= IRQD_ACTIVATED; +} + +static inline void irqd_clr_activated(struct irq_data *d) +{ + __irqd_to_state(d) &= ~IRQD_ACTIVATED; +} + #undef __irqd_to_state static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) diff --git a/include/linux/log2.h b/include/linux/log2.h index fd7ff3d91e6a..ef3d4f67118c 100644 --- a/include/linux/log2.h +++ b/include/linux/log2.h @@ -203,6 +203,17 @@ unsigned long __rounddown_pow_of_two(unsigned long n) * ... and so on. */ -#define order_base_2(n) ilog2(roundup_pow_of_two(n)) +static inline __attribute_const__ +int __order_base_2(unsigned long n) +{ + return n > 1 ? ilog2(n - 1) + 1 : 0; +} +#define order_base_2(n) \ +( \ + __builtin_constant_p(n) ? ( \ + ((n) == 0 || (n) == 1) ? 
0 : \ + ilog2((n) - 1) + 1) : \ + __order_base_2(n) \ +) #endif /* _LINUX_LOG2_H */ diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index c1784c0b4f35..134a2f69c21a 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -85,7 +85,8 @@ extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages); extern int add_one_highpage(struct page *page, int pfn, int bad_ppro); /* VM interface that may be used by firmware interface */ extern int online_pages(unsigned long, unsigned long, int); -extern int test_pages_in_a_zone(unsigned long, unsigned long); +extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn, + unsigned long *valid_start, unsigned long *valid_end); extern void __offline_isolated_pages(unsigned long, unsigned long); typedef void (*online_page_callback_t)(struct page *page); diff --git a/include/linux/module.h b/include/linux/module.h index 7c84273d60b9..cc7cba219b20 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -346,7 +346,7 @@ struct module { /* Exported symbols */ const struct kernel_symbol *syms; - const unsigned long *crcs; + const s32 *crcs; unsigned int num_syms; /* Kernel parameters. */ @@ -359,18 +359,18 @@ struct module { /* GPL-only exported symbols. */ unsigned int num_gpl_syms; const struct kernel_symbol *gpl_syms; - const unsigned long *gpl_crcs; + const s32 *gpl_crcs; #ifdef CONFIG_UNUSED_SYMBOLS /* unused exported symbols. */ const struct kernel_symbol *unused_syms; - const unsigned long *unused_crcs; + const s32 *unused_crcs; unsigned int num_unused_syms; /* GPL-only, unused exported symbols. */ unsigned int num_unused_gpl_syms; const struct kernel_symbol *unused_gpl_syms; - const unsigned long *unused_gpl_crcs; + const s32 *unused_gpl_crcs; #endif #ifdef CONFIG_MODULE_SIG @@ -382,7 +382,7 @@ struct module { /* symbols that will be GPL-only in the near future. 
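The log2.h change above reimplements order_base_2(n) as "n > 1 ? ilog2(n - 1) + 1 : 0", the smallest order k with 2^k >= n (0 and 1 both map to order 0), plus a __builtin_constant_p branch so constant arguments still fold at compile time. A quick userspace check of the formula, using GCC/Clang's __builtin_clzl in place of the kernel's ilog2():

```c
#include <stdio.h>

/* ilog2(x) for x > 0: index of the highest set bit. */
static int ilog2_ul(unsigned long x)
{
        return (int)(sizeof(unsigned long) * 8 - 1) - __builtin_clzl(x);
}

/* Smallest k with (1UL << k) >= n; 0 and 1 both give 0. */
static int order_base_2(unsigned long n)
{
        return n > 1 ? ilog2_ul(n - 1) + 1 : 0;
}

int main(void)
{
        unsigned long tests[] = { 0, 1, 2, 3, 4, 5, 8, 9, 1024, 1025 };

        for (unsigned int i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
                printf("order_base_2(%lu) = %d\n", tests[i],
                       order_base_2(tests[i]));
        /* Expected: 0 0 1 2 2 3 3 4 10 11 */
        return 0;
}
```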
*/ const struct kernel_symbol *gpl_future_syms; - const unsigned long *gpl_future_crcs; + const s32 *gpl_future_crcs; unsigned int num_gpl_future_syms; /* Exception table */ @@ -523,7 +523,7 @@ struct module *find_module(const char *name); struct symsearch { const struct kernel_symbol *start, *stop; - const unsigned long *crcs; + const s32 *crcs; enum { NOT_GPL_ONLY, GPL_ONLY, @@ -539,7 +539,7 @@ struct symsearch { */ const struct kernel_symbol *find_symbol(const char *name, struct module **owner, - const unsigned long **crc, + const s32 **crc, bool gplok, bool warn); diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h index 3ebb168b9afc..a34b141f125f 100644 --- a/include/net/cipso_ipv4.h +++ b/include/net/cipso_ipv4.h @@ -309,6 +309,10 @@ static inline int cipso_v4_validate(const struct sk_buff *skb, } for (opt_iter = 6; opt_iter < opt_len;) { + if (opt_iter + 1 == opt_len) { + err_offset = opt_iter; + goto out; + } tag_len = opt[opt_iter + 1]; if ((tag_len == 0) || (tag_len > (opt_len - opt_iter))) { err_offset = opt_iter + 1; diff --git a/include/net/sock.h b/include/net/sock.h index 6f83e78eaa5a..9ccefa5c5487 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -2035,7 +2035,9 @@ void sk_reset_timer(struct sock *sk, struct timer_list *timer, void sk_stop_timer(struct sock *sk, struct timer_list *timer); int __sk_queue_drop_skb(struct sock *sk, struct sk_buff *skb, - unsigned int flags); + unsigned int flags, + void (*destructor)(struct sock *sk, + struct sk_buff *skb)); int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); diff --git a/include/uapi/linux/seg6.h b/include/uapi/linux/seg6.h index 33496595064c..61df8d392f41 100644 --- a/include/uapi/linux/seg6.h +++ b/include/uapi/linux/seg6.h @@ -25,14 +25,12 @@ struct ipv6_sr_hdr { __u8 type; __u8 segments_left; __u8 first_segment; - __u8 flag_1; - __u8 flag_2; - __u8 reserved; + __u8 flags; + __u16 reserved; struct in6_addr segments[0]; }; -#define SR6_FLAG1_CLEANUP (1 << 7) #define SR6_FLAG1_PROTECTED (1 << 6) #define SR6_FLAG1_OAM (1 << 5) #define SR6_FLAG1_ALERT (1 << 4) @@ -44,8 +42,7 @@ struct ipv6_sr_hdr { #define SR6_TLV_PADDING 4 #define SR6_TLV_HMAC 5 -#define sr_has_cleanup(srh) ((srh)->flag_1 & SR6_FLAG1_CLEANUP) -#define sr_has_hmac(srh) ((srh)->flag_1 & SR6_FLAG1_HMAC) +#define sr_has_hmac(srh) ((srh)->flags & SR6_FLAG1_HMAC) struct sr6_tlv { __u8 type; diff --git a/init/Kconfig b/init/Kconfig index e1a937348a3e..4dd8bd232a1d 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1987,6 +1987,10 @@ config MODVERSIONS make them incompatible with the kernel you are running. If unsure, say N. +config MODULE_REL_CRCS + bool + depends on MODVERSIONS + config MODULE_SRCVERSION_ALL bool "Source checksum for all modules" help diff --git a/kernel/events/core.c b/kernel/events/core.c index 110b38a58493..e5aaa806702d 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -1469,7 +1469,6 @@ ctx_group_list(struct perf_event *event, struct perf_event_context *ctx) static void list_add_event(struct perf_event *event, struct perf_event_context *ctx) { - lockdep_assert_held(&ctx->lock); WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); @@ -1624,6 +1623,8 @@ static void perf_group_attach(struct perf_event *event) { struct perf_event *group_leader = event->group_leader, *pos; + lockdep_assert_held(&event->ctx->lock); + /* * We can have double attach due to group movement in perf_event_open. 
*/ @@ -1697,6 +1698,8 @@ static void perf_group_detach(struct perf_event *event) struct perf_event *sibling, *tmp; struct list_head *list = NULL; + lockdep_assert_held(&event->ctx->lock); + /* * We can have double detach due to exit/hot-unplug + close. */ @@ -1895,9 +1898,29 @@ __perf_remove_from_context(struct perf_event *event, */ static void perf_remove_from_context(struct perf_event *event, unsigned long flags) { - lockdep_assert_held(&event->ctx->mutex); + struct perf_event_context *ctx = event->ctx; + + lockdep_assert_held(&ctx->mutex); event_function_call(event, __perf_remove_from_context, (void *)flags); + + /* + * The above event_function_call() can NO-OP when it hits + * TASK_TOMBSTONE. In that case we must already have been detached + * from the context (by perf_event_exit_event()) but the grouping + * might still be in-tact. + */ + WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); + if ((flags & DETACH_GROUP) && + (event->attach_state & PERF_ATTACH_GROUP)) { + /* + * Since in that case we cannot possibly be scheduled, simply + * detach now. + */ + raw_spin_lock_irq(&ctx->lock); + perf_group_detach(event); + raw_spin_unlock_irq(&ctx->lock); + } } /* @@ -6609,6 +6632,27 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) char *buf = NULL; char *name; + if (vma->vm_flags & VM_READ) + prot |= PROT_READ; + if (vma->vm_flags & VM_WRITE) + prot |= PROT_WRITE; + if (vma->vm_flags & VM_EXEC) + prot |= PROT_EXEC; + + if (vma->vm_flags & VM_MAYSHARE) + flags = MAP_SHARED; + else + flags = MAP_PRIVATE; + + if (vma->vm_flags & VM_DENYWRITE) + flags |= MAP_DENYWRITE; + if (vma->vm_flags & VM_MAYEXEC) + flags |= MAP_EXECUTABLE; + if (vma->vm_flags & VM_LOCKED) + flags |= MAP_LOCKED; + if (vma->vm_flags & VM_HUGETLB) + flags |= MAP_HUGETLB; + if (file) { struct inode *inode; dev_t dev; @@ -6635,27 +6679,6 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) maj = MAJOR(dev); min = MINOR(dev); - if (vma->vm_flags & VM_READ) - prot |= PROT_READ; - if (vma->vm_flags & VM_WRITE) - prot |= PROT_WRITE; - if (vma->vm_flags & VM_EXEC) - prot |= PROT_EXEC; - - if (vma->vm_flags & VM_MAYSHARE) - flags = MAP_SHARED; - else - flags = MAP_PRIVATE; - - if (vma->vm_flags & VM_DENYWRITE) - flags |= MAP_DENYWRITE; - if (vma->vm_flags & VM_MAYEXEC) - flags |= MAP_EXECUTABLE; - if (vma->vm_flags & VM_LOCKED) - flags |= MAP_LOCKED; - if (vma->vm_flags & VM_HUGETLB) - flags |= MAP_HUGETLB; - goto got_name; } else { if (vma->vm_ops && vma->vm_ops->name) { diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index 8c0a0ae43521..b59e6768c5e9 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c @@ -1346,6 +1346,30 @@ void irq_domain_free_irqs_parent(struct irq_domain *domain, } EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent); +static void __irq_domain_activate_irq(struct irq_data *irq_data) +{ + if (irq_data && irq_data->domain) { + struct irq_domain *domain = irq_data->domain; + + if (irq_data->parent_data) + __irq_domain_activate_irq(irq_data->parent_data); + if (domain->ops->activate) + domain->ops->activate(domain, irq_data); + } +} + +static void __irq_domain_deactivate_irq(struct irq_data *irq_data) +{ + if (irq_data && irq_data->domain) { + struct irq_domain *domain = irq_data->domain; + + if (domain->ops->deactivate) + domain->ops->deactivate(domain, irq_data); + if (irq_data->parent_data) + __irq_domain_deactivate_irq(irq_data->parent_data); + } +} + /** * irq_domain_activate_irq - Call domain_ops->activate recursively to activate * interrupt 
@@ -1356,13 +1380,9 @@ EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent); */ void irq_domain_activate_irq(struct irq_data *irq_data) { - if (irq_data && irq_data->domain) { - struct irq_domain *domain = irq_data->domain; - - if (irq_data->parent_data) - irq_domain_activate_irq(irq_data->parent_data); - if (domain->ops->activate) - domain->ops->activate(domain, irq_data); + if (!irqd_is_activated(irq_data)) { + __irq_domain_activate_irq(irq_data); + irqd_set_activated(irq_data); } } @@ -1376,13 +1396,9 @@ void irq_domain_activate_irq(struct irq_data *irq_data) */ void irq_domain_deactivate_irq(struct irq_data *irq_data) { - if (irq_data && irq_data->domain) { - struct irq_domain *domain = irq_data->domain; - - if (domain->ops->deactivate) - domain->ops->deactivate(domain, irq_data); - if (irq_data->parent_data) - irq_domain_deactivate_irq(irq_data->parent_data); + if (irqd_is_activated(irq_data)) { + __irq_domain_deactivate_irq(irq_data); + irqd_clr_activated(irq_data); } } diff --git a/kernel/module.c b/kernel/module.c index 38d4270925d4..3d8f126208e3 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -389,16 +389,16 @@ extern const struct kernel_symbol __start___ksymtab_gpl[]; extern const struct kernel_symbol __stop___ksymtab_gpl[]; extern const struct kernel_symbol __start___ksymtab_gpl_future[]; extern const struct kernel_symbol __stop___ksymtab_gpl_future[]; -extern const unsigned long __start___kcrctab[]; -extern const unsigned long __start___kcrctab_gpl[]; -extern const unsigned long __start___kcrctab_gpl_future[]; +extern const s32 __start___kcrctab[]; +extern const s32 __start___kcrctab_gpl[]; +extern const s32 __start___kcrctab_gpl_future[]; #ifdef CONFIG_UNUSED_SYMBOLS extern const struct kernel_symbol __start___ksymtab_unused[]; extern const struct kernel_symbol __stop___ksymtab_unused[]; extern const struct kernel_symbol __start___ksymtab_unused_gpl[]; extern const struct kernel_symbol __stop___ksymtab_unused_gpl[]; -extern const unsigned long __start___kcrctab_unused[]; -extern const unsigned long __start___kcrctab_unused_gpl[]; +extern const s32 __start___kcrctab_unused[]; +extern const s32 __start___kcrctab_unused_gpl[]; #endif #ifndef CONFIG_MODVERSIONS @@ -497,7 +497,7 @@ struct find_symbol_arg { /* Output */ struct module *owner; - const unsigned long *crc; + const s32 *crc; const struct kernel_symbol *sym; }; @@ -563,7 +563,7 @@ static bool find_symbol_in_section(const struct symsearch *syms, * (optional) module which owns it. Needs preempt disabled or module_mutex. */ const struct kernel_symbol *find_symbol(const char *name, struct module **owner, - const unsigned long **crc, + const s32 **crc, bool gplok, bool warn) { @@ -1249,23 +1249,17 @@ static int try_to_force_load(struct module *mod, const char *reason) } #ifdef CONFIG_MODVERSIONS -/* If the arch applies (non-zero) relocations to kernel kcrctab, unapply it. 
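irq_domain_activate_irq() and irq_domain_deactivate_irq() above now guard the recursive parent walk with an activated flag, so repeated calls become no-ops and each transition walks the hierarchy exactly once. A compact model of the pattern with a hand-rolled parent chain rather than struct irq_data:

```c
#include <stdbool.h>
#include <stdio.h>

struct node {
        const char *name;
        struct node *parent;
        bool activated;
};

/* Walk towards the root first, then handle this level (parents first). */
static void do_activate(struct node *n)
{
        if (!n)
                return;
        do_activate(n->parent);
        printf("activate %s\n", n->name);
}

/* Deactivate this level first, then the parents (reverse order). */
static void do_deactivate(struct node *n)
{
        if (!n)
                return;
        printf("deactivate %s\n", n->name);
        do_deactivate(n->parent);
}

static void activate(struct node *n)
{
        if (!n->activated) {            /* guard: a second call is a no-op */
                do_activate(n);
                n->activated = true;
        }
}

static void deactivate(struct node *n)
{
        if (n->activated) {
                do_deactivate(n);
                n->activated = false;
        }
}

int main(void)
{
        struct node root = { "root", NULL, false };
        struct node leaf = { "leaf", &root, false };

        activate(&leaf);
        activate(&leaf);                /* prints nothing the second time */
        deactivate(&leaf);
        return 0;
}
```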
*/ -static unsigned long maybe_relocated(unsigned long crc, - const struct module *crc_owner) + +static u32 resolve_rel_crc(const s32 *crc) { -#ifdef ARCH_RELOCATES_KCRCTAB - if (crc_owner == NULL) - return crc - (unsigned long)reloc_start; -#endif - return crc; + return *(u32 *)((void *)crc + *crc); } static int check_version(Elf_Shdr *sechdrs, unsigned int versindex, const char *symname, struct module *mod, - const unsigned long *crc, - const struct module *crc_owner) + const s32 *crc) { unsigned int i, num_versions; struct modversion_info *versions; @@ -1283,13 +1277,19 @@ static int check_version(Elf_Shdr *sechdrs, / sizeof(struct modversion_info); for (i = 0; i < num_versions; i++) { + u32 crcval; + if (strcmp(versions[i].name, symname) != 0) continue; - if (versions[i].crc == maybe_relocated(*crc, crc_owner)) + if (IS_ENABLED(CONFIG_MODULE_REL_CRCS)) + crcval = resolve_rel_crc(crc); + else + crcval = *crc; + if (versions[i].crc == crcval) return 1; - pr_debug("Found checksum %lX vs module %lX\n", - maybe_relocated(*crc, crc_owner), versions[i].crc); + pr_debug("Found checksum %X vs module %lX\n", + crcval, versions[i].crc); goto bad_version; } @@ -1307,7 +1307,7 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs, unsigned int versindex, struct module *mod) { - const unsigned long *crc; + const s32 *crc; /* * Since this should be found in kernel (which can't be removed), no @@ -1321,8 +1321,7 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs, } preempt_enable(); return check_version(sechdrs, versindex, - VMLINUX_SYMBOL_STR(module_layout), mod, crc, - NULL); + VMLINUX_SYMBOL_STR(module_layout), mod, crc); } /* First part is kernel version, which we ignore if module has crcs. */ @@ -1340,8 +1339,7 @@ static inline int check_version(Elf_Shdr *sechdrs, unsigned int versindex, const char *symname, struct module *mod, - const unsigned long *crc, - const struct module *crc_owner) + const s32 *crc) { return 1; } @@ -1368,7 +1366,7 @@ static const struct kernel_symbol *resolve_symbol(struct module *mod, { struct module *owner; const struct kernel_symbol *sym; - const unsigned long *crc; + const s32 *crc; int err; /* @@ -1383,8 +1381,7 @@ static const struct kernel_symbol *resolve_symbol(struct module *mod, if (!sym) goto unlock; - if (!check_version(info->sechdrs, info->index.vers, name, mod, crc, - owner)) { + if (!check_version(info->sechdrs, info->index.vers, name, mod, crc)) { sym = ERR_PTR(-EINVAL); goto getname; } diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index a133ecd741e4..7ad9e53ad174 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1372,7 +1372,7 @@ kprobe_trace_selftest_target(int a1, int a2, int a3, int a4, int a5, int a6) return a1 + a2 + a3 + a4 + a5 + a6; } -static struct __init trace_event_file * +static __init struct trace_event_file * find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr) { struct trace_event_file *file; diff --git a/mm/filemap.c b/mm/filemap.c index b772a33ef640..3f9afded581b 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1791,6 +1791,11 @@ static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos, cond_resched(); find_page: + if (fatal_signal_pending(current)) { + error = -EINTR; + goto out; + } + page = find_get_page(mapping, index); if (!page) { page_cache_sync_readahead(mapping, diff --git a/mm/kasan/report.c b/mm/kasan/report.c index b82b3e215157..f479365530b6 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c @@ -13,6 +13,7 @@ * */ 
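With CONFIG_MODULE_REL_CRCS the kcrctab entry above no longer holds the CRC (or an absolute address) but a signed 32-bit offset from the entry to the location holding the CRC, which resolve_rel_crc() follows with *(u32 *)((void *)crc + *crc). The same self-relative reference in ordinary C, with made-up data, just to show the address arithmetic:

```c
#include <stdint.h>
#include <stdio.h>

/* A table entry stores the signed distance from itself to its target,
 * which keeps the table position-independent (no absolute relocations). */
struct blob {
        int32_t rel_entry;      /* offset from &rel_entry to &crc_value */
        uint32_t crc_value;
};

static uint32_t resolve_rel(const int32_t *entry)
{
        return *(const uint32_t *)((const char *)entry + *entry);
}

int main(void)
{
        struct blob b;

        b.crc_value = 0xdeadbeefu;
        b.rel_entry = (int32_t)((char *)&b.crc_value - (char *)&b.rel_entry);

        printf("resolved crc = 0x%x\n", (unsigned int)resolve_rel(&b.rel_entry));
        return 0;
}
```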
+#include <linux/ftrace.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/printk.h> @@ -300,6 +301,8 @@ void kasan_report(unsigned long addr, size_t size, if (likely(!kasan_report_enabled())) return; + disable_trace_on_warning(); + info.access_addr = (void *)addr; info.access_size = size; info.is_write = is_write; diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index ca2723d47338..b8c11e063ff0 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1483,17 +1483,20 @@ bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages) } /* - * Confirm all pages in a range [start, end) is belongs to the same zone. + * Confirm all pages in a range [start, end) belong to the same zone. + * When true, return its valid [start, end). */ -int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn) +int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn, + unsigned long *valid_start, unsigned long *valid_end) { unsigned long pfn, sec_end_pfn; + unsigned long start, end; struct zone *zone = NULL; struct page *page; int i; - for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn); + for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1); pfn < end_pfn; - pfn = sec_end_pfn + 1, sec_end_pfn += PAGES_PER_SECTION) { + pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) { /* Make sure the memory section is present first */ if (!present_section_nr(pfn_to_section_nr(pfn))) continue; @@ -1509,10 +1512,20 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn) page = pfn_to_page(pfn + i); if (zone && page_zone(page) != zone) return 0; + if (!zone) + start = pfn + i; zone = page_zone(page); + end = pfn + MAX_ORDER_NR_PAGES; } } - return 1; + + if (zone) { + *valid_start = start; + *valid_end = end; + return 1; + } else { + return 0; + } } /* @@ -1839,6 +1852,7 @@ static int __ref __offline_pages(unsigned long start_pfn, long offlined_pages; int ret, drain, retry_max, node; unsigned long flags; + unsigned long valid_start, valid_end; struct zone *zone; struct memory_notify arg; @@ -1849,10 +1863,10 @@ static int __ref __offline_pages(unsigned long start_pfn, return -EINVAL; /* This makes hotplug much easier...and readable. we assume this for now. 
.*/ - if (!test_pages_in_a_zone(start_pfn, end_pfn)) + if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end)) return -EINVAL; - zone = page_zone(pfn_to_page(start_pfn)); + zone = page_zone(pfn_to_page(valid_start)); node = zone_to_nid(zone); nr_pages = end_pfn - start_pfn; diff --git a/mm/shmem.c b/mm/shmem.c index bb53285a1d99..3a7587a0314d 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -415,6 +415,7 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, struct shrink_control *sc, unsigned long nr_to_split) { LIST_HEAD(list), *pos, *next; + LIST_HEAD(to_remove); struct inode *inode; struct shmem_inode_info *info; struct page *page; @@ -441,9 +442,8 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, /* Check if there's anything to gain */ if (round_up(inode->i_size, PAGE_SIZE) == round_up(inode->i_size, HPAGE_PMD_SIZE)) { - list_del_init(&info->shrinklist); + list_move(&info->shrinklist, &to_remove); removed++; - iput(inode); goto next; } @@ -454,6 +454,13 @@ next: } spin_unlock(&sbinfo->shrinklist_lock); + list_for_each_safe(pos, next, &to_remove) { + info = list_entry(pos, struct shmem_inode_info, shrinklist); + inode = &info->vfs_inode; + list_del_init(&info->shrinklist); + iput(inode); + } + list_for_each_safe(pos, next, &list) { int ret; diff --git a/mm/zswap.c b/mm/zswap.c index 067a0d62f318..cabf09e0128b 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -78,7 +78,13 @@ static u64 zswap_duplicate_entry; /* Enable/disable zswap (disabled by default) */ static bool zswap_enabled; -module_param_named(enabled, zswap_enabled, bool, 0644); +static int zswap_enabled_param_set(const char *, + const struct kernel_param *); +static struct kernel_param_ops zswap_enabled_param_ops = { + .set = zswap_enabled_param_set, + .get = param_get_bool, +}; +module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644); /* Crypto compressor to use */ #define ZSWAP_COMPRESSOR_DEFAULT "lzo" @@ -176,6 +182,9 @@ static atomic_t zswap_pools_count = ATOMIC_INIT(0); /* used by param callback function */ static bool zswap_init_started; +/* fatal error during init */ +static bool zswap_init_failed; + /********************************* * helpers and fwd declarations **********************************/ @@ -624,6 +633,11 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp, char *s = strstrip((char *)val); int ret; + if (zswap_init_failed) { + pr_err("can't set param, initialization failed\n"); + return -ENODEV; + } + /* no change required */ if (!strcmp(s, *(char **)kp->arg)) return 0; @@ -703,6 +717,17 @@ static int zswap_zpool_param_set(const char *val, return __zswap_param_set(val, kp, NULL, zswap_compressor); } +static int zswap_enabled_param_set(const char *val, + const struct kernel_param *kp) +{ + if (zswap_init_failed) { + pr_err("can't enable, initialization failed\n"); + return -ENODEV; + } + + return param_set_bool(val, kp); +} + /********************************* * writeback code **********************************/ @@ -1201,6 +1226,9 @@ hp_fail: dstmem_fail: zswap_entry_cache_destroy(); cache_fail: + /* if built-in, we aren't unloaded on failure; don't allow use */ + zswap_init_failed = true; + zswap_enabled = false; return -ENOMEM; } /* must be late so crypto has time to come up */ diff --git a/net/core/datagram.c b/net/core/datagram.c index 662bea587165..ea633342ab0d 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -332,7 +332,9 @@ void __skb_free_datagram_locked(struct sock *sk, struct sk_buff 
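The shmem shrinker fix above stops calling iput() while shrinklist_lock is held: entries are moved onto a local to_remove list under the lock, and the cleanup that may sleep runs only after the lock is dropped. A sketch of that pattern with pthreads and a toy singly linked list (not the kernel's list_head API):

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
        int id;
        struct item *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct item *shrinklist;

/* Might sleep or take other locks, so it must not run under list_lock. */
static void expensive_release(struct item *it)
{
        printf("releasing %d\n", it->id);
        free(it);
}

static void shrink(void)
{
        struct item *to_remove = NULL, *it, *next;

        pthread_mutex_lock(&list_lock);
        /* Under the lock: only unlink, never release. */
        while ((it = shrinklist)) {
                shrinklist = it->next;
                it->next = to_remove;
                to_remove = it;
        }
        pthread_mutex_unlock(&list_lock);

        /* Outside the lock: do the heavy cleanup. */
        for (it = to_remove; it; it = next) {
                next = it->next;
                expensive_release(it);
        }
}

int main(void)
{
        for (int i = 0; i < 3; i++) {
                struct item *it = malloc(sizeof(*it));

                if (!it)
                        break;
                it->id = i;
                it->next = shrinklist;
                shrinklist = it;
        }
        shrink();
        return 0;
}
```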
*skb, int len) EXPORT_SYMBOL(__skb_free_datagram_locked); int __sk_queue_drop_skb(struct sock *sk, struct sk_buff *skb, - unsigned int flags) + unsigned int flags, + void (*destructor)(struct sock *sk, + struct sk_buff *skb)) { int err = 0; @@ -342,6 +344,8 @@ int __sk_queue_drop_skb(struct sock *sk, struct sk_buff *skb, if (skb == skb_peek(&sk->sk_receive_queue)) { __skb_unlink(skb, &sk->sk_receive_queue); atomic_dec(&skb->users); + if (destructor) + destructor(sk, skb); err = 0; } spin_unlock_bh(&sk->sk_receive_queue.lock); @@ -375,7 +379,7 @@ EXPORT_SYMBOL(__sk_queue_drop_skb); int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags) { - int err = __sk_queue_drop_skb(sk, skb, flags); + int err = __sk_queue_drop_skb(sk, skb, flags, NULL); kfree_skb(skb); sk_mem_reclaim_partial(sk); diff --git a/net/core/dev.c b/net/core/dev.c index 3e1a60102e64..0921609dfa81 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1695,24 +1695,19 @@ EXPORT_SYMBOL_GPL(net_dec_egress_queue); static struct static_key netstamp_needed __read_mostly; #ifdef HAVE_JUMP_LABEL -/* We are not allowed to call static_key_slow_dec() from irq context - * If net_disable_timestamp() is called from irq context, defer the - * static_key_slow_dec() calls. - */ static atomic_t netstamp_needed_deferred; -#endif - -void net_enable_timestamp(void) +static void netstamp_clear(struct work_struct *work) { -#ifdef HAVE_JUMP_LABEL int deferred = atomic_xchg(&netstamp_needed_deferred, 0); - if (deferred) { - while (--deferred) - static_key_slow_dec(&netstamp_needed); - return; - } + while (deferred--) + static_key_slow_dec(&netstamp_needed); +} +static DECLARE_WORK(netstamp_work, netstamp_clear); #endif + +void net_enable_timestamp(void) +{ static_key_slow_inc(&netstamp_needed); } EXPORT_SYMBOL(net_enable_timestamp); @@ -1720,12 +1715,12 @@ EXPORT_SYMBOL(net_enable_timestamp); void net_disable_timestamp(void) { #ifdef HAVE_JUMP_LABEL - if (in_interrupt()) { - atomic_inc(&netstamp_needed_deferred); - return; - } -#endif + /* net_disable_timestamp() can be called from non process context */ + atomic_inc(&netstamp_needed_deferred); + schedule_work(&netstamp_work); +#else static_key_slow_dec(&netstamp_needed); +#endif } EXPORT_SYMBOL(net_disable_timestamp); diff --git a/net/core/ethtool.c b/net/core/ethtool.c index d5f412b3093d..be7bab1adcde 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -1404,9 +1404,12 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr) if (regs.len > reglen) regs.len = reglen; - regbuf = vzalloc(reglen); - if (reglen && !regbuf) - return -ENOMEM; + regbuf = NULL; + if (reglen) { + regbuf = vzalloc(reglen); + if (!regbuf) + return -ENOMEM; + } ops->get_regs(dev, ®s, regbuf); diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index 72d6f056d863..ae206163c273 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c @@ -1587,6 +1587,10 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option) goto validate_return_locked; } + if (opt_iter + 1 == opt_len) { + err_offset = opt_iter; + goto validate_return_locked; + } tag_len = tag[1]; if (tag_len > (opt_len - opt_iter)) { err_offset = opt_iter + 1; diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 8a4409dd390a..ce1386a67e24 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -1243,7 +1243,14 @@ void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb) pktinfo->ipi_ifindex = 0; pktinfo->ipi_spec_dst.s_addr = 0; } - skb_dst_drop(skb); + /* We 
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 8a4409dd390a..ce1386a67e24 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -1243,7 +1243,14 @@ void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
         pktinfo->ipi_ifindex = 0;
         pktinfo->ipi_spec_dst.s_addr = 0;
     }
-    skb_dst_drop(skb);
+    /* We need to keep the dst for __ip_options_echo()
+     * We could restrict the test to opt.ts_needtime || opt.srr,
+     * but the following is good enough as IP options are not often used.
+     */
+    if (unlikely(IPCB(skb)->opt.optlen))
+        skb_dst_force(skb);
+    else
+        skb_dst_drop(skb);
 }
 int ip_setsockopt(struct sock *sk, int level,
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index b751abc56935..d44a6989e76d 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -773,6 +773,12 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
                 ret = -EAGAIN;
                 break;
             }
+            /* if __tcp_splice_read() got nothing while we have
+             * an skb in receive queue, we do not want to loop.
+             * This might happen with URG data.
+             */
+            if (!skb_queue_empty(&sk->sk_receive_queue))
+                break;
             sk_wait_data(sk, &timeo, NULL);
             if (signal_pending(current)) {
                 ret = sock_intr_errno(timeo);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 4a1ba04565d1..ea6e4cff9faf 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1490,7 +1490,7 @@ try_again:
     return err;
 csum_copy_err:
-    if (!__sk_queue_drop_skb(sk, skb, flags)) {
+    if (!__sk_queue_drop_skb(sk, skb, flags, udp_skb_destructor)) {
         UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
         UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
     }
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 156ed578d3c0..a69ae7d4e6f8 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3393,9 +3393,15 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
         }
         if (idev) {
-            if (idev->if_flags & IF_READY)
-                /* device is already configured. */
+            if (idev->if_flags & IF_READY) {
+                /* device is already configured -
+                 * but resend MLD reports, we might
+                 * have roamed and need to update
+                 * multicast snooping switches
+                 */
+                ipv6_mc_up(idev);
                 break;
+            }
             idev->if_flags |= IF_READY;
         }
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index e4198502fd98..275cac628a95 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -327,7 +327,6 @@ static int ipv6_srh_rcv(struct sk_buff *skb)
     struct ipv6_sr_hdr *hdr;
     struct inet6_dev *idev;
     struct in6_addr *addr;
-    bool cleanup = false;
     int accept_seg6;
     hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb);
@@ -351,11 +350,7 @@ static int ipv6_srh_rcv(struct sk_buff *skb)
 #endif
 looped_back:
-    if (hdr->segments_left > 0) {
-        if (hdr->nexthdr != NEXTHDR_IPV6 && hdr->segments_left == 1 &&
-            sr_has_cleanup(hdr))
-            cleanup = true;
-    } else {
+    if (hdr->segments_left == 0) {
         if (hdr->nexthdr == NEXTHDR_IPV6) {
             int offset = (hdr->hdrlen + 1) << 3;
@@ -418,21 +413,6 @@ looped_back:
     ipv6_hdr(skb)->daddr = *addr;
-    if (cleanup) {
-        int srhlen = (hdr->hdrlen + 1) << 3;
-        int nh = hdr->nexthdr;
-
-        skb_pull_rcsum(skb, sizeof(struct ipv6hdr) + srhlen);
-        memmove(skb_network_header(skb) + srhlen,
-            skb_network_header(skb),
-            (unsigned char *)hdr - skb_network_header(skb));
-        skb->network_header += srhlen;
-        ipv6_hdr(skb)->nexthdr = nh;
-        ipv6_hdr(skb)->payload_len = htons(skb->len -
-                           sizeof(struct ipv6hdr));
-        skb_push_rcsum(skb, sizeof(struct ipv6hdr));
-    }
-
     skb_dst_drop(skb);
     ip6_route_input(skb);
@@ -453,13 +433,8 @@ looped_back:
         }
         ipv6_hdr(skb)->hop_limit--;
-        /* be sure that srh is still present before reinjecting */
-        if (!cleanup) {
-            skb_pull(skb, sizeof(struct ipv6hdr));
-            goto looped_back;
-        }
-        skb_set_transport_header(skb, sizeof(struct ipv6hdr));
-        IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
+        skb_pull(skb, sizeof(struct ipv6hdr));
+        goto looped_back;
     }
     dst_input(skb);
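The net/core/datagram.c change earlier in this diff gives __sk_queue_drop_skb() a destructor callback that runs under sk_receive_queue.lock right after the skb is unlinked, and the UDP hunk above (plus the IPv6 one further down) passes udp_skb_destructor so receive-memory accounting is unwound atomically with the drop. Below is a rough sketch of how a caller plugs into that helper; my_proto_destructor() and my_proto_drop_bad_skb() are hypothetical, and the header choices are indicative only.

#include <linux/skbuff.h>
#include <net/sock.h>

/* Hypothetical rollback of the bytes this skb charged on receive; called
 * with sk->sk_receive_queue.lock held, after the skb has been unlinked.
 */
static void my_proto_destructor(struct sock *sk, struct sk_buff *skb)
{
    atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
}

static int my_proto_drop_bad_skb(struct sock *sk, struct sk_buff *skb,
                 unsigned int flags)
{
    /* unlink and accounting rollback happen under one lock acquisition */
    return __sk_queue_drop_skb(sk, skb, flags, my_proto_destructor);
}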
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 51b9835b3176..6fcb7cb49bb2 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -367,35 +367,37 @@ static void ip6gre_tunnel_uninit(struct net_device *dev)
 static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-        u8 type, u8 code, int offset, __be32 info)
+               u8 type, u8 code, int offset, __be32 info)
 {
-    const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data;
-    __be16 *p = (__be16 *)(skb->data + offset);
-    int grehlen = offset + 4;
+    const struct gre_base_hdr *greh;
+    const struct ipv6hdr *ipv6h;
+    int grehlen = sizeof(*greh);
     struct ip6_tnl *t;
+    int key_off = 0;
     __be16 flags;
+    __be32 key;
-    flags = p[0];
-    if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
-        if (flags&(GRE_VERSION|GRE_ROUTING))
-            return;
-        if (flags&GRE_KEY) {
-            grehlen += 4;
-            if (flags&GRE_CSUM)
-                grehlen += 4;
-        }
+    if (!pskb_may_pull(skb, offset + grehlen))
+        return;
+    greh = (const struct gre_base_hdr *)(skb->data + offset);
+    flags = greh->flags;
+    if (flags & (GRE_VERSION | GRE_ROUTING))
+        return;
+    if (flags & GRE_CSUM)
+        grehlen += 4;
+    if (flags & GRE_KEY) {
+        key_off = grehlen + offset;
+        grehlen += 4;
     }
-    /* If only 8 bytes returned, keyed message will be dropped here */
-    if (!pskb_may_pull(skb, grehlen))
+    if (!pskb_may_pull(skb, offset + grehlen))
         return;
     ipv6h = (const struct ipv6hdr *)skb->data;
-    p = (__be16 *)(skb->data + offset);
+    greh = (const struct gre_base_hdr *)(skb->data + offset);
+    key = key_off ? *(__be32 *)(skb->data + key_off) : 0;
     t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
-                 flags & GRE_KEY ?
-                 *(((__be32 *)p) + (grehlen / 4) - 1) : 0,
-                 p[1]);
+                 key, greh->protocol);
     if (!t)
         return;
diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c
index b274f1d95e03..f950cb53d5e3 100644
--- a/net/ipv6/seg6_hmac.c
+++ b/net/ipv6/seg6_hmac.c
@@ -174,7 +174,7 @@ int seg6_hmac_compute(struct seg6_hmac_info *hinfo, struct ipv6_sr_hdr *hdr,
      * hash function (RadioGatun) with up to 1216 bits
      */
-    /* saddr(16) + first_seg(1) + cleanup(1) + keyid(4) + seglist(16n) */
+    /* saddr(16) + first_seg(1) + flags(1) + keyid(4) + seglist(16n) */
     plen = 16 + 1 + 1 + 4 + (hdr->first_segment + 1) * 16;
     /* this limit allows for 14 segments */
@@ -186,7 +186,7 @@ int seg6_hmac_compute(struct seg6_hmac_info *hinfo, struct ipv6_sr_hdr *hdr,
      *
      * 1. Source IPv6 address (128 bits)
      * 2. first_segment value (8 bits)
-     * 3. cleanup flag (8 bits: highest bit is cleanup value, others are 0)
+     * 3. Flags (8 bits)
      * 4. HMAC Key ID (32 bits)
      * 5. All segments in the segments list (n * 128 bits)
      */
@@ -202,8 +202,8 @@ int seg6_hmac_compute(struct seg6_hmac_info *hinfo, struct ipv6_sr_hdr *hdr,
     /* first_segment value */
     *off++ = hdr->first_segment;
-    /* cleanup flag */
-    *off++ = !!(sr_has_cleanup(hdr)) << 7;
+    /* flags */
+    *off++ = hdr->flags;
     /* HMAC Key ID */
     memcpy(off, &hmackeyid, 4);
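The ip6gre_err() rewrite above stops assuming the ICMPv6 payload carries a full GRE header at the given offset: it pulls just the base header first, bails out on GRE_VERSION/GRE_ROUTING, grows the expected length for the CSUM and KEY options, pulls again, and only then reads the key and protocol. The sketch below restates that validate-then-read pattern in isolation; gre_parse_key_for_err() is a hypothetical helper, not kernel API.

#include <linux/skbuff.h>
#include <net/gre.h>

/* Returns 0 and fills *key on success, -1 if the payload is truncated or
 * carries GRE flags this path does not handle.
 */
static int gre_parse_key_for_err(struct sk_buff *skb, int offset, __be32 *key)
{
    const struct gre_base_hdr *greh;
    int grehlen = sizeof(*greh);
    int key_off = 0;

    if (!pskb_may_pull(skb, offset + grehlen))    /* base header present? */
        return -1;
    greh = (const struct gre_base_hdr *)(skb->data + offset);

    if (greh->flags & (GRE_VERSION | GRE_ROUTING))
        return -1;
    if (greh->flags & GRE_CSUM)
        grehlen += 4;
    if (greh->flags & GRE_KEY) {
        key_off = offset + grehlen;
        grehlen += 4;
    }

    if (!pskb_may_pull(skb, offset + grehlen))    /* optional words present? */
        return -1;

    /* re-read via skb->data: pskb_may_pull() may have moved the header */
    *key = key_off ? *(__be32 *)(skb->data + key_off) : 0;
    return 0;
}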
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 6b9fc63fd4d2..b5d27212db2f 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -996,6 +996,16 @@ drop:
     return 0; /* don't send reset */
 }
+static void tcp_v6_restore_cb(struct sk_buff *skb)
+{
+    /* We need to move header back to the beginning if xfrm6_policy_check()
+     * and tcp_v6_fill_cb() are going to be called again.
+     * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
+     */
+    memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
+        sizeof(struct inet6_skb_parm));
+}
+
 static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
                      struct request_sock *req,
                      struct dst_entry *dst,
@@ -1184,8 +1194,10 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
                                  sk_gfp_mask(sk, GFP_ATOMIC));
             consume_skb(ireq->pktopts);
             ireq->pktopts = NULL;
-            if (newnp->pktoptions)
+            if (newnp->pktoptions) {
+                tcp_v6_restore_cb(newnp->pktoptions);
                 skb_set_owner_r(newnp->pktoptions, newsk);
+            }
         }
     }
@@ -1200,16 +1212,6 @@ out:
     return NULL;
 }
-static void tcp_v6_restore_cb(struct sk_buff *skb)
-{
-    /* We need to move header back to the beginning if xfrm6_policy_check()
-     * and tcp_v6_fill_cb() are going to be called again.
-     * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
-     */
-    memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
-        sizeof(struct inet6_skb_parm));
-}
-
 /* The socket must have it's spinlock held when we get
  * here, unless it is a TCP_LISTEN socket.
  *
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 51346fa70298..df71ba05f41d 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -454,7 +454,7 @@ try_again:
     return err;
 csum_copy_err:
-    if (!__sk_queue_drop_skb(sk, skb, flags)) {
+    if (!__sk_queue_drop_skb(sk, skb, flags, udp_skb_destructor)) {
         if (is_udp4) {
             UDP_INC_STATS(sock_net(sk),
                     UDP_MIB_CSUMERRORS, is_udplite);
diff --git a/net/mac80211/fils_aead.c b/net/mac80211/fils_aead.c
index ecfdd97758a3..5c3af5eb4052 100644
--- a/net/mac80211/fils_aead.c
+++ b/net/mac80211/fils_aead.c
@@ -124,7 +124,7 @@ static int aes_siv_encrypt(const u8 *key, size_t key_len,
     /* CTR */
-    tfm2 = crypto_alloc_skcipher("ctr(aes)", 0, 0);
+    tfm2 = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
     if (IS_ERR(tfm2)) {
         kfree(tmp);
         return PTR_ERR(tfm2);
@@ -183,7 +183,7 @@ static int aes_siv_decrypt(const u8 *key, size_t key_len,
     /* CTR */
-    tfm2 = crypto_alloc_skcipher("ctr(aes)", 0, 0);
+    tfm2 = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
     if (IS_ERR(tfm2))
         return PTR_ERR(tfm2);
     /* K2 for CTR */
@@ -272,7 +272,7 @@ int fils_encrypt_assoc_req(struct sk_buff *skb,
     crypt_len = skb->data + skb->len - encr;
     skb_put(skb, AES_BLOCK_SIZE);
     return aes_siv_encrypt(assoc_data->fils_kek, assoc_data->fils_kek_len,
-                   encr, crypt_len, 1, addr, len, encr);
+                   encr, crypt_len, 5, addr, len, encr);
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 9c23172feba0..c28b0af9c1f2 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -335,7 +335,7 @@ int mesh_add_vendor_ies(struct ieee80211_sub_if_data *sdata,
     /* fast-forward to vendor IEs */
     offset = ieee80211_ie_split_vendor(ifmsh->ie, ifmsh->ie_len, 0);
-    if (offset) {
+    if (offset < ifmsh->ie_len) {
         len = ifmsh->ie_len - offset;
         data = ifmsh->ie + offset;
         if (skb_tailroom(skb) < len)
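In the fils_aead.c hunks above, crypto_alloc_skcipher() is now called with CRYPTO_ALG_ASYNC in the mask argument. With the type argument left 0, a set mask bit means that bit must be clear in the chosen implementation, so the allocation is restricted to synchronous "ctr(aes)" providers, which matches how this code drives the transform without a completion callback. A minimal sketch of that allocation (not the patched function) is:

#include <crypto/skcipher.h>

/* Hedged example: ask the crypto API for a synchronous CTR(AES) transform.
 * May return an ERR_PTR(); callers must check with IS_ERR().
 */
static struct crypto_skcipher *my_alloc_sync_ctr_aes(void)
{
    return crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
}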
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index a4609a0be76d..a8b4252fe084 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -239,7 +239,7 @@ static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
     union sctp_addr *laddr = (union sctp_addr *)addr;
     struct sctp_transport *transport;
-    if (sctp_verify_addr(sk, laddr, af->sockaddr_len))
+    if (!af || sctp_verify_addr(sk, laddr, af->sockaddr_len))
         return NULL;
     addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
@@ -7540,7 +7540,8 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
          */
         release_sock(sk);
         current_timeo = schedule_timeout(current_timeo);
-        BUG_ON(sk != asoc->base.sk);
+        if (sk != asoc->base.sk)
+            goto do_error;
         lock_sock(sk);
         *timeo_p = current_timeo;
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
index dc6fb79a361f..25d9a9cf7b66 100644
--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
+++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
@@ -260,7 +260,7 @@ static int gssx_dec_option_array(struct xdr_stream *xdr,
     if (!oa->data)
         return -ENOMEM;
-    creds = kmalloc(sizeof(struct svc_cred), GFP_KERNEL);
+    creds = kzalloc(sizeof(struct svc_cred), GFP_KERNEL);
     if (!creds) {
         kfree(oa->data);
         return -ENOMEM;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 63dfa60a29ef..3aee94b0c6c5 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -5921,6 +5921,7 @@ do { \
             break;
         }
         cfg->ht_opmode = ht_opmode;
+        mask |= (1 << (NL80211_MESHCONF_HT_OPMODE - 1));
     }
     FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathToRootTimeout,
                   1, 65535, mask,
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index eadcd4d359d9..d883116ebaa4 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -164,6 +164,7 @@ cmd_gensymtypes_c = \
     $(CPP) -D__GENKSYMS__ $(c_flags) $< | \
     $(GENKSYMS) $(if $(1), -T $(2)) \
      $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX)) \
+     $(patsubst y,-R,$(CONFIG_MODULE_REL_CRCS)) \
     $(if $(KBUILD_PRESERVE),-p) \
     -r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null))
@@ -337,6 +338,7 @@ cmd_gensymtypes_S = \
     $(CPP) -D__GENKSYMS__ $(c_flags) -xc - | \
     $(GENKSYMS) $(if $(1), -T $(2)) \
      $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX)) \
+     $(patsubst y,-R,$(CONFIG_MODULE_REL_CRCS)) \
     $(if $(KBUILD_PRESERVE),-p) \
     -r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null))
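The gss_rpc_xdr.c hunk above swaps kmalloc() for kzalloc() when allocating the svc_cred, so every field starts out zeroed and any later error or cleanup path sees NULL pointers rather than uninitialized memory. The fragment below illustrates the idea with a hypothetical structure and helpers, not the real svc_cred:

#include <linux/cred.h>
#include <linux/slab.h>

struct my_cred {    /* hypothetical stand-in */
    char *principal;
    struct group_info *group_info;
};

static struct my_cred *my_cred_alloc(void)
{
    /* kzalloc(): both members start NULL, so a partially initialised
     * credential can always be handed to my_cred_free() safely.
     */
    return kzalloc(sizeof(struct my_cred), GFP_KERNEL);
}

static void my_cred_free(struct my_cred *cred)
{
    if (!cred)
        return;
    kfree(cred->principal);    /* kfree(NULL) is a no-op */
    if (cred->group_info)
        put_group_info(cred->group_info);
    kfree(cred);
}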
 static void genksyms_usage(void)
 {
-    fputs("Usage:\n" "genksyms [-adDTwqhV] > /path/to/.tmp_obj.ver\n" "\n"
+    fputs("Usage:\n" "genksyms [-adDTwqhVR] > /path/to/.tmp_obj.ver\n" "\n"
 #ifdef __GNU_LIBRARY__
           " -s, --symbol-prefix   Select symbol prefix\n"
           " -d, --debug           Increment the debug level (repeatable)\n"
@@ -742,6 +745,7 @@ static void genksyms_usage(void)
           " -q, --quiet           Disable warnings (default)\n"
           " -h, --help            Print this message\n"
           " -V, --version         Print the release version\n"
+          " -R, --relative-crc    Emit section relative symbol CRCs\n"
 #else /* __GNU_LIBRARY__ */
           " -s                    Select symbol prefix\n"
           " -d                    Increment the debug level (repeatable)\n"
@@ -753,6 +757,7 @@ static void genksyms_usage(void)
           " -q                    Disable warnings (default)\n"
           " -h                    Print this message\n"
           " -V                    Print the release version\n"
+          " -R                    Emit section relative symbol CRCs\n"
 #endif /* __GNU_LIBRARY__ */
           , stderr);
 }
@@ -774,13 +779,14 @@ int main(int argc, char **argv)
         {"preserve", 0, 0, 'p'},
         {"version", 0, 0, 'V'},
         {"help", 0, 0, 'h'},
+        {"relative-crc", 0, 0, 'R'},
         {0, 0, 0, 0}
     };
-    while ((o = getopt_long(argc, argv, "s:dwqVDr:T:ph",
+    while ((o = getopt_long(argc, argv, "s:dwqVDr:T:phR",
                 &long_opts[0], NULL)) != EOF)
 #else /* __GNU_LIBRARY__ */
-    while ((o = getopt(argc, argv, "s:dwqVDr:T:ph")) != EOF)
+    while ((o = getopt(argc, argv, "s:dwqVDr:T:phR")) != EOF)
 #endif /* __GNU_LIBRARY__ */
         switch (o) {
         case 's':
@@ -823,6 +829,9 @@ int main(int argc, char **argv)
         case 'h':
             genksyms_usage();
             return 0;
+        case 'R':
+            flag_rel_crcs = 1;
+            break;
         default:
             genksyms_usage();
             return 1;
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index 299b92ca1ae0..5d554419170b 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -219,6 +219,10 @@ static int symbol_valid(struct sym_entry *s)
         "_SDA2_BASE_",      /* ppc */
         NULL };
+    static char *special_prefixes[] = {
+        "__crc_",       /* modversions */
+        NULL };
+
     static char *special_suffixes[] = {
         "_veneer",      /* arm */
         "_from_arm",        /* arm */
@@ -259,6 +263,14 @@ static int symbol_valid(struct sym_entry *s)
         if (strcmp(sym_name, special_symbols[i]) == 0)
             return 0;
+    for (i = 0; special_prefixes[i]; i++) {
+        int l = strlen(special_prefixes[i]);
+
+        if (l <= strlen(sym_name) &&
+            strncmp(sym_name, special_prefixes[i], l) == 0)
+            return 0;
+    }
+
     for (i = 0; special_suffixes[i]; i++) {
         int l = strlen(sym_name) - strlen(special_suffixes[i]);
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 29c89a6bad3d..4dedd0d3d3a7 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -621,6 +621,16 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
         if (strncmp(symname, CRC_PFX, strlen(CRC_PFX)) == 0) {
             is_crc = true;
             crc = (unsigned int) sym->st_value;
+            if (sym->st_shndx != SHN_UNDEF && sym->st_shndx != SHN_ABS) {
+                unsigned int *crcp;
+
+                /* symbol points to the CRC in the ELF object */
+                crcp = (void *)info->hdr + sym->st_value +
+                       info->sechdrs[sym->st_shndx].sh_offset -
+                       (info->hdr->e_type != ET_REL ?
+                        info->sechdrs[sym->st_shndx].sh_addr : 0);
+                crc = *crcp;
+            }
             sym_update_crc(symname + strlen(CRC_PFX), mod, crc,
                        export);
         }
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index 5e0dea2cdc01..039636ffb6c8 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -150,9 +150,9 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
         *type = INSN_RETURN;
         break;
-    case 0xc5: /* iret */
     case 0xca: /* retf */
     case 0xcb: /* retf */
+    case 0xcf: /* iret */
         *type = INSN_CONTEXT_SWITCH;
         break;
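Taken together, the genksyms, kallsyms and modpost hunks above implement the CONFIG_MODULE_REL_CRCS scheme: with -R, genksyms emits each CRC as a 32-bit value placed in .rodata through a generated linker-script snippet instead of an absolute __crc_* symbol, kallsyms stops listing the __crc_ helper symbols, and modpost reads the CRC out of the section contents whenever the symbol is neither undefined nor absolute. The sketch below spells out the file-offset arithmetic the modpost hunk performs; elf_crc_value() is a hypothetical helper written against the standard <elf.h> types, not code from the patch.

#include <elf.h>

/* For a relocatable object (ET_REL) st_value is already an offset into the
 * symbol's section; for a linked object it is an address, so the section's
 * sh_addr must be subtracted before adding the section's file offset.
 */
static unsigned int elf_crc_value(const unsigned char *file,
                  const Elf64_Ehdr *ehdr,
                  const Elf64_Shdr *sec,
                  const Elf64_Sym *sym)
{
    unsigned long off = sec->sh_offset + sym->st_value;

    if (ehdr->e_type != ET_REL)
        off -= sec->sh_addr;

    return *(const unsigned int *)(file + off);
}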