740 files changed, 13009 insertions, 8827 deletions
@@ -198,6 +198,9 @@ Maxime Ripard <mripard@kernel.org> <maxime.ripard@free-electrons.com> Mayuresh Janorkar <mayur@ti.com> Michael Buesch <m@bues.ch> Michel Dänzer <michel@tungstengraphics.com> +Mike Rapoport <rppt@kernel.org> <mike@compulab.co.il> +Mike Rapoport <rppt@kernel.org> <mike.rapoport@gmail.com> +Mike Rapoport <rppt@kernel.org> <rppt@linux.ibm.com> Miodrag Dinic <miodrag.dinic@mips.com> <miodrag.dinic@imgtec.com> Miquel Raynal <miquel.raynal@bootlin.com> <miquel.raynal@free-electrons.com> Mitesh shah <mshah@teja.com> diff --git a/Documentation/ABI/testing/debugfs-driver-habanalabs b/Documentation/ABI/testing/debugfs-driver-habanalabs index f6d9c2a8d528..2e9ae311e02d 100644 --- a/Documentation/ABI/testing/debugfs-driver-habanalabs +++ b/Documentation/ABI/testing/debugfs-driver-habanalabs @@ -16,7 +16,16 @@ Description: Allow the root user to disable/enable in runtime the clock gating mechanism in Gaudi. Due to how Gaudi is built, the clock gating needs to be disabled in order to access the registers of the TPC and MME engines. This is sometimes needed - during debug by the user and hence the user needs this option + during debug by the user and hence the user needs this option. + The user can supply a bitmask value, each bit represents + a different engine to disable/enable its clock gating feature. + The bitmask is composed of 20 bits: + 0 - 7 : DMA channels + 8 - 11 : MME engines + 12 - 19 : TPC engines + The bit's location of a specific engine can be determined + using (1 << GAUDI_ENGINE_ID_*). GAUDI_ENGINE_ID_* values + are defined in uapi habanalabs.h file in enum gaudi_engine_id What: /sys/kernel/debug/habanalabs/hl<n>/command_buffers Date: Jan 2019 diff --git a/Documentation/admin-guide/ext4.rst b/Documentation/admin-guide/ext4.rst index 9443fcef1876..2162d7909970 100644 --- a/Documentation/admin-guide/ext4.rst +++ b/Documentation/admin-guide/ext4.rst @@ -395,6 +395,13 @@ When mounting an ext4 filesystem, the following option are accepted: Documentation/filesystems/dax.txt. Note that this option is incompatible with data=journal. + inlinecrypt + When possible, encrypt/decrypt the contents of encrypted files using the + blk-crypto framework rather than filesystem-layer encryption. This + allows the use of inline encryption hardware. The on-disk format is + unaffected. For more details, see + Documentation/block/inline-encryption.rst. + Data Mode ========= There are 3 different data modes: diff --git a/Documentation/core-api/padata.rst b/Documentation/core-api/padata.rst index 0830e5b0e821..35175710b43c 100644 --- a/Documentation/core-api/padata.rst +++ b/Documentation/core-api/padata.rst @@ -27,22 +27,11 @@ padata_instance structure for overall control of how jobs are to be run:: #include <linux/padata.h> - struct padata_instance *padata_alloc_possible(const char *name); + struct padata_instance *padata_alloc(const char *name); 'name' simply identifies the instance. -There are functions for enabling and disabling the instance:: - - int padata_start(struct padata_instance *pinst); - void padata_stop(struct padata_instance *pinst); - -These functions are setting or clearing the "PADATA_INIT" flag; if that flag is -not set, other functions will refuse to work. padata_start() returns zero on -success (flag set) or -EINVAL if the padata cpumask contains no active CPU -(flag not set). padata_stop() clears the flag and blocks until the padata -instance is unused. 
- -Finally, complete padata initialization by allocating a padata_shell:: +Then, complete padata initialization by allocating a padata_shell:: struct padata_shell *padata_alloc_shell(struct padata_instance *pinst); @@ -155,11 +144,10 @@ submitted. Destroying ---------- -Cleaning up a padata instance predictably involves calling the three free +Cleaning up a padata instance predictably involves calling the two free functions that correspond to the allocation in reverse:: void padata_free_shell(struct padata_shell *ps); - void padata_stop(struct padata_instance *pinst); void padata_free(struct padata_instance *pinst); It is the user's responsibility to ensure all outstanding jobs are complete diff --git a/Documentation/crypto/api-intro.txt b/Documentation/crypto/api-intro.txt index 45d943fcae5b..40137f93e04f 100644 --- a/Documentation/crypto/api-intro.txt +++ b/Documentation/crypto/api-intro.txt @@ -169,7 +169,7 @@ Portions of this API were derived from the following projects: and; - Nettle (http://www.lysator.liu.se/~nisse/nettle/) + Nettle (https://www.lysator.liu.se/~nisse/nettle/) Niels Möller Original developers of the crypto algorithms: diff --git a/Documentation/crypto/userspace-if.rst b/Documentation/crypto/userspace-if.rst index ff86befa61e0..52019e905900 100644 --- a/Documentation/crypto/userspace-if.rst +++ b/Documentation/crypto/userspace-if.rst @@ -23,7 +23,7 @@ user space, however. This includes the difference between synchronous and asynchronous invocations. The user space API call is fully synchronous. -[1] http://www.chronox.de/libkcapi.html +[1] https://www.chronox.de/libkcapi.html User Space API General Remarks ------------------------------ @@ -384,4 +384,4 @@ Please see [1] for libkcapi which provides an easy-to-use wrapper around the aforementioned Netlink kernel interface. [1] also contains a test application that invokes all libkcapi API calls. 
-[1] http://www.chronox.de/libkcapi.html +[1] https://www.chronox.de/libkcapi.html diff --git a/Documentation/devicetree/bindings/crypto/ti,sa2ul.yaml b/Documentation/devicetree/bindings/crypto/ti,sa2ul.yaml new file mode 100644 index 000000000000..85ef69ffebed --- /dev/null +++ b/Documentation/devicetree/bindings/crypto/ti,sa2ul.yaml @@ -0,0 +1,76 @@ +# SPDX-License-Identifier: (GPL-2.0-only or BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/crypto/ti,sa2ul.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: K3 SoC SA2UL crypto module + +maintainers: + - Tero Kristo <t-kristo@ti.com> + +properties: + compatible: + enum: + - ti,j721e-sa2ul + - ti,am654-sa2ul + + reg: + maxItems: 1 + + power-domains: + maxItems: 1 + + dmas: + items: + - description: TX DMA Channel + - description: RX DMA Channel #1 + - description: RX DMA Channel #2 + + dma-names: + items: + - const: tx + - const: rx1 + - const: rx2 + + dma-coherent: true + + "#address-cells": + const: 2 + + "#size-cells": + const: 2 + + ranges: + description: + Address translation for the possible RNG child node for SA2UL + +patternProperties: + "^rng@[a-f0-9]+$": + type: object + description: + Child RNG node for SA2UL + +required: + - compatible + - reg + - power-domains + - dmas + - dma-names + - dma-coherent + +additionalProperties: false + +examples: + - | + #include <dt-bindings/soc/ti,sci_pm_domain.h> + + main_crypto: crypto@4e00000 { + compatible = "ti,j721-sa2ul"; + reg = <0x0 0x4e00000 0x0 0x1200>; + power-domains = <&k3_pds 264 TI_SCI_PD_EXCLUSIVE>; + dmas = <&main_udmap 0xc000>, <&main_udmap 0x4000>, + <&main_udmap 0x4001>; + dma-names = "tx", "rx1", "rx2"; + dma-coherent; + }; diff --git a/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-video-engine.yaml b/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-video-engine.yaml index 526593c8c614..4cc1a670c986 100644 --- a/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-video-engine.yaml +++ b/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-video-engine.yaml @@ -47,6 +47,9 @@ properties: $ref: /schemas/types.yaml#/definitions/phandle-array description: Phandle to the device SRAM + iommus: + maxItems: 1 + memory-region: description: CMA pool to use for buffers allocation instead of the default diff --git a/Documentation/devicetree/bindings/rng/imx-rng.txt b/Documentation/devicetree/bindings/rng/imx-rng.txt index 405c2b00ccb0..659d4efdd664 100644 --- a/Documentation/devicetree/bindings/rng/imx-rng.txt +++ b/Documentation/devicetree/bindings/rng/imx-rng.txt @@ -5,6 +5,9 @@ Required properties: "fsl,imx21-rnga" "fsl,imx31-rnga" (backward compatible with "fsl,imx21-rnga") "fsl,imx25-rngb" + "fsl,imx6sl-rngb" (backward compatible with "fsl,imx25-rngb") + "fsl,imx6sll-rngb" (backward compatible with "fsl,imx25-rngb") + "fsl,imx6ull-rngb" (backward compatible with "fsl,imx25-rngb") "fsl,imx35-rngc" - reg : offset and length of the register set of this block - interrupts : the interrupt number for the RNG block diff --git a/Documentation/devicetree/bindings/rng/ingenic,rng.yaml b/Documentation/devicetree/bindings/rng/ingenic,rng.yaml new file mode 100644 index 000000000000..b2e4a6a7f93a --- /dev/null +++ b/Documentation/devicetree/bindings/rng/ingenic,rng.yaml @@ -0,0 +1,36 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/rng/ingenic,rng.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Bindings for RNG in Ingenic SoCs + 
+maintainers: + - 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com> + +description: + The Random Number Generator in Ingenic SoCs. + +properties: + compatible: + enum: + - ingenic,jz4780-rng + - ingenic,x1000-rng + + reg: + maxItems: 1 + +required: + - compatible + - reg + +additionalProperties: false + +examples: + - | + rng: rng@d8 { + compatible = "ingenic,jz4780-rng"; + reg = <0xd8 0x8>; + }; +... diff --git a/Documentation/devicetree/bindings/rng/silex-insight,ba431-rng.yaml b/Documentation/devicetree/bindings/rng/silex-insight,ba431-rng.yaml new file mode 100644 index 000000000000..48ab82abf50e --- /dev/null +++ b/Documentation/devicetree/bindings/rng/silex-insight,ba431-rng.yaml @@ -0,0 +1,36 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/rng/silex-insight,ba431-rng.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Silex Insight BA431 RNG bindings + +description: | + The BA431 hardware random number generator is an IP that is FIPS-140-2/3 + certified. + +maintainers: + - Olivier Sobrie <olivier.sobrie@silexinsight.com> + +properties: + compatible: + const: silex-insight,ba431-rng + + reg: + maxItems: 1 + +required: + - compatible + - reg + +additionalProperties: false + +examples: + - | + rng@42800000 { + compatible = "silex-insight,ba431-rng"; + reg = <0x42800000 0x1000>; + }; + +... diff --git a/Documentation/driver-api/ptp.rst b/Documentation/driver-api/ptp.rst index a15192e32347..664838ae7776 100644 --- a/Documentation/driver-api/ptp.rst +++ b/Documentation/driver-api/ptp.rst @@ -23,6 +23,7 @@ PTP hardware clock infrastructure for Linux + Ancillary clock features - Time stamp external events - Period output signals configurable from user space + - Low Pass Filter (LPF) access from user space - Synchronization of the Linux system time via the PPS subsystem PTP hardware clock kernel API @@ -94,3 +95,14 @@ Supported hardware - Auxiliary Slave/Master Mode Snapshot (optional interrupt) - Target Time (optional interrupt) + + * Renesas (IDT) ClockMatrix™ + + - Up to 4 independent PHC channels + - Integrated low pass filter (LPF), access via .adjPhase (compliant to ITU-T G.8273.2) + - Programmable output periodic signals + - Programmable inputs can time stamp external triggers + - Driver and/or hardware configuration through firmware (idtcm.bin) + - LPF settings (bandwidth, phase limiting, automatic holdover, physical layer assist (per ITU-T G.8273.2)) + - Programmable output PTP clocks, any frequency up to 1GHz (to other PHY/MAC time stampers, refclk to ASSPs/SoCs/FPGAs) + - Lock to GNSS input, automatic switching between GNSS and user-space PHC control (optional) diff --git a/Documentation/filesystems/f2fs.rst b/Documentation/filesystems/f2fs.rst index 099d45ac8d8f..8b4fac44f4e1 100644 --- a/Documentation/filesystems/f2fs.rst +++ b/Documentation/filesystems/f2fs.rst @@ -258,6 +258,13 @@ compress_extension=%s Support adding specified extension, so that f2fs can enab on compression extension list and enable compression on these file by default rather than to enable it via ioctl. For other files, we can still enable compression via ioctl. +inlinecrypt + When possible, encrypt/decrypt the contents of encrypted + files using the blk-crypto framework rather than + filesystem-layer encryption. This allows the use of + inline encryption hardware. The on-disk format is + unaffected. For more details, see + Documentation/block/inline-encryption.rst. 
====================== ============================================================ Debugfs Entries diff --git a/Documentation/filesystems/fscrypt.rst b/Documentation/filesystems/fscrypt.rst index f517af8ec11c..423c5a0daf45 100644 --- a/Documentation/filesystems/fscrypt.rst +++ b/Documentation/filesystems/fscrypt.rst @@ -1158,7 +1158,7 @@ setxattr() because of the special semantics of the encryption xattr. were to be added to or removed from anything other than an empty directory.) These structs are defined as follows:: - #define FS_KEY_DERIVATION_NONCE_SIZE 16 + #define FSCRYPT_FILE_NONCE_SIZE 16 #define FSCRYPT_KEY_DESCRIPTOR_SIZE 8 struct fscrypt_context_v1 { @@ -1167,7 +1167,7 @@ directory.) These structs are defined as follows:: u8 filenames_encryption_mode; u8 flags; u8 master_key_descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE]; - u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE]; + u8 nonce[FSCRYPT_FILE_NONCE_SIZE]; }; #define FSCRYPT_KEY_IDENTIFIER_SIZE 16 @@ -1178,7 +1178,7 @@ directory.) These structs are defined as follows:: u8 flags; u8 __reserved[4]; u8 master_key_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]; - u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE]; + u8 nonce[FSCRYPT_FILE_NONCE_SIZE]; }; The context structs contain the same information as the corresponding @@ -1204,6 +1204,18 @@ buffer. Some filesystems, such as UBIFS, already use temporary buffers regardless of encryption. Other filesystems, such as ext4 and F2FS, have to allocate bounce pages specially for encryption. +Fscrypt is also able to use inline encryption hardware instead of the +kernel crypto API for en/decryption of file contents. When possible, +and if directed to do so (by specifying the 'inlinecrypt' mount option +for an ext4/F2FS filesystem), it adds encryption contexts to bios and +uses blk-crypto to perform the en/decryption instead of making use of +the above read/write path changes. Of course, even if directed to +make use of inline encryption, fscrypt will only be able to do so if +either hardware inline encryption support is available for the +selected encryption algorithm or CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK +is selected. If neither is the case, fscrypt will fall back to using +the above mentioned read/write path changes for en/decryption. + Filename hashing and encoding ----------------------------- @@ -1250,11 +1262,14 @@ Tests To test fscrypt, use xfstests, which is Linux's de facto standard filesystem test suite. First, run all the tests in the "encrypt" -group on the relevant filesystem(s). For example, to test ext4 and +group on the relevant filesystem(s). One can also run the tests +with the 'inlinecrypt' mount option to test the implementation for +inline encryption support. For example, to test ext4 and f2fs encryption using `kvm-xfstests <https://github.com/tytso/xfstests-bld/blob/master/Documentation/kvm-quickstart.md>`_:: kvm-xfstests -c ext4,f2fs -g encrypt + kvm-xfstests -c ext4,f2fs -g encrypt -m inlinecrypt UBIFS encryption can also be tested this way, but it should be done in a separate command, and it takes some time for kvm-xfstests to set up @@ -1276,6 +1291,7 @@ This tests the encrypted I/O paths more thoroughly. 
To do this with kvm-xfstests, use the "encrypt" filesystem configuration:: kvm-xfstests -c ext4/encrypt,f2fs/encrypt -g auto + kvm-xfstests -c ext4/encrypt,f2fs/encrypt -g auto -m inlinecrypt Because this runs many more tests than "-g encrypt" does, it takes much longer to run; so also consider using `gce-xfstests @@ -1283,3 +1299,4 @@ much longer to run; so also consider using `gce-xfstests instead of kvm-xfstests:: gce-xfstests -c ext4/encrypt,f2fs/encrypt -g auto + gce-xfstests -c ext4/encrypt,f2fs/encrypt -g auto -m inlinecrypt diff --git a/Documentation/networking/bareudp.rst b/Documentation/networking/bareudp.rst index 465a8b251bfe..b9d04ee6dac1 100644 --- a/Documentation/networking/bareudp.rst +++ b/Documentation/networking/bareudp.rst @@ -8,9 +8,8 @@ There are various L3 encapsulation standards using UDP being discussed to leverage the UDP based load balancing capability of different networks. MPLSoUDP (__ https://tools.ietf.org/html/rfc7510) is one among them. -The Bareudp tunnel module provides a generic L3 encapsulation tunnelling -support for tunnelling different L3 protocols like MPLS, IP, NSH etc. inside -a UDP tunnel. +The Bareudp tunnel module provides a generic L3 encapsulation support for +tunnelling different L3 protocols like MPLS, IP, NSH etc. inside a UDP tunnel. Special Handling ---------------- @@ -26,7 +25,7 @@ Usage 1) Device creation & deletion - a) ip link add dev bareudp0 type bareudp dstport 6635 ethertype 0x8847. + a) ip link add dev bareudp0 type bareudp dstport 6635 ethertype mpls_uc This creates a bareudp tunnel device which tunnels L3 traffic with ethertype 0x8847 (MPLS traffic). The destination port of the UDP header will be set to @@ -34,14 +33,21 @@ Usage b) ip link delete bareudp0 -2) Device creation with multiple proto mode enabled +2) Device creation with multiproto mode enabled -There are two ways to create a bareudp device for MPLS & IP with multiproto mode -enabled. +The multiproto mode allows bareudp tunnels to handle several protocols of the +same family. It is currently only available for IP and MPLS. This mode has to +be enabled explicitly with the "multiproto" flag. - a) ip link add dev bareudp0 type bareudp dstport 6635 ethertype 0x8847 multiproto + a) ip link add dev bareudp0 type bareudp dstport 6635 ethertype ipv4 multiproto - b) ip link add dev bareudp0 type bareudp dstport 6635 ethertype mpls + For an IPv4 tunnel the multiproto mode allows the tunnel to also handle + IPv6. + + b) ip link add dev bareudp0 type bareudp dstport 6635 ethertype mpls_uc multiproto + + For MPLS, the multiproto mode allows the tunnel to handle both unicast + and multicast MPLS packets. 3) Device Usage diff --git a/Documentation/networking/devlink/devlink-trap.rst b/Documentation/networking/devlink/devlink-trap.rst index 1e3f3ffee248..2014307fbe63 100644 --- a/Documentation/networking/devlink/devlink-trap.rst +++ b/Documentation/networking/devlink/devlink-trap.rst @@ -486,6 +486,10 @@ narrow. 
The description of these groups must be added to the following table: - Contains packet traps for packets that should be locally delivered after routing, but do not match more specific packet traps (e.g., ``ipv4_bgp``) + * - ``external_delivery`` + - Contains packet traps for packets that should be routed through an + external interface (e.g., management interface) that does not belong to + the same device (e.g., switch ASIC) as the ingress interface * - ``ipv6`` - Contains packet traps for various IPv6 control packets (e.g., Router Advertisements) diff --git a/MAINTAINERS b/MAINTAINERS index 0887816d125e..de9b86cb2f36 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -782,7 +782,7 @@ F: include/dt-bindings/reset/altr,rst-mgr-a10sr.h F: include/linux/mfd/altera-a10sr.h ALTERA TRIPLE SPEED ETHERNET DRIVER -M: Thor Thayer <thor.thayer@linux.intel.com> +M: Joyce Ooi <joyce.ooi@intel.com> L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/altera/ @@ -830,11 +830,20 @@ F: include/uapi/rdma/efa-abi.h AMD CRYPTOGRAPHIC COPROCESSOR (CCP) DRIVER M: Tom Lendacky <thomas.lendacky@amd.com> +M: John Allen <john.allen@amd.com> L: linux-crypto@vger.kernel.org S: Supported F: drivers/crypto/ccp/ F: include/linux/ccp.h +AMD CRYPTOGRAPHIC COPROCESSOR (CCP) DRIVER - SEV SUPPORT +M: Brijesh Singh <brijesh.singh@amd.com> +M: Tom Lendacky <thomas.lendacky@amd.com> +L: linux-crypto@vger.kernel.org +S: Supported +F: drivers/crypto/ccp/sev* +F: include/uapi/linux/psp-sev.h + AMD DISPLAY CORE M: Harry Wentland <harry.wentland@amd.com> M: Leo Li <sunpeng.li@amd.com> @@ -1425,7 +1434,7 @@ F: arch/arm*/include/asm/perf_event.h F: arch/arm*/kernel/hw_breakpoint.c F: arch/arm*/kernel/perf_* F: arch/arm/oprofile/common.c -F: drivers/perf/* +F: drivers/perf/ F: include/linux/perf/arm_pmu.h ARM PORT @@ -9306,6 +9315,17 @@ F: Documentation/kbuild/kconfig* F: scripts/Kconfig.include F: scripts/kconfig/ +KCOV +R: Dmitry Vyukov <dvyukov@google.com> +R: Andrey Konovalov <andreyknvl@google.com> +L: kasan-dev@googlegroups.com +S: Maintained +F: Documentation/dev-tools/kcov.rst +F: include/linux/kcov.h +F: include/uapi/linux/kcov.h +F: kernel/kcov.c +F: scripts/Makefile.kcov + KCSAN M: Marco Elver <elver@google.com> R: Dmitry Vyukov <dvyukov@google.com> @@ -11241,7 +11261,7 @@ S: Maintained F: drivers/crypto/atmel-ecc.* MICROCHIP I2C DRIVER -M: Ludovic Desroches <ludovic.desroches@microchip.com> +M: Codrin Ciubotariu <codrin.ciubotariu@microchip.com> L: linux-i2c@vger.kernel.org S: Supported F: drivers/i2c/busses/i2c-at91-*.c @@ -14177,7 +14197,8 @@ F: Documentation/devicetree/bindings/net/qcom,ethqos.txt F: drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c QUALCOMM GENERIC INTERFACE I2C DRIVER -M: Alok Chauhan <alokc@codeaurora.org> +M: Akash Asthana <akashast@codeaurora.org> +M: Mukesh Savaliya <msavaliy@codeaurora.org> L: linux-i2c@vger.kernel.org L: linux-arm-msm@vger.kernel.org S: Supported @@ -14863,6 +14884,7 @@ F: drivers/s390/block/dasd* F: include/linux/dasd_mod.h S390 IOMMU (PCI) +M: Matthew Rosato <mjrosato@linux.ibm.com> M: Gerald Schaefer <gerald.schaefer@linux.ibm.com> L: linux-s390@vger.kernel.org S: Supported @@ -2,7 +2,7 @@ VERSION = 5 PATCHLEVEL = 8 SUBLEVEL = 0 -EXTRAVERSION = -rc6 +EXTRAVERSION = NAME = Kleptomaniac Octopus # *DOCUMENTATION* @@ -567,7 +567,7 @@ ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),) ifneq ($(CROSS_COMPILE),) CLANG_FLAGS += --target=$(notdir $(CROSS_COMPILE:%-=%)) GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit)) -CLANG_FLAGS += 
--prefix=$(GCC_TOOLCHAIN_DIR) +CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE)) GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..) endif ifneq ($(GCC_TOOLCHAIN),) @@ -1754,7 +1754,7 @@ PHONY += descend $(build-dirs) descend: $(build-dirs) $(build-dirs): prepare $(Q)$(MAKE) $(build)=$@ \ - single-build=$(if $(filter-out $@/, $(filter $@/%, $(single-no-ko))),1) \ + single-build=$(if $(filter-out $@/, $(filter $@/%, $(KBUILD_SINGLE_TARGETS))),1) \ need-builtin=1 need-modorder=1 clean-dirs := $(addprefix _clean_, $(clean-dirs)) diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi index 348116501aa2..9b1a24cc5e91 100644 --- a/arch/arm/boot/dts/armada-38x.dtsi +++ b/arch/arm/boot/dts/armada-38x.dtsi @@ -342,7 +342,8 @@ comphy: phy@18300 { compatible = "marvell,armada-380-comphy"; - reg = <0x18300 0x100>; + reg-names = "comphy", "conf"; + reg = <0x18300 0x100>, <0x18460 4>; #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm/boot/dts/imx6qdl-icore.dtsi b/arch/arm/boot/dts/imx6qdl-icore.dtsi index 756f3a9f1b4f..12997dae35d9 100644 --- a/arch/arm/boot/dts/imx6qdl-icore.dtsi +++ b/arch/arm/boot/dts/imx6qdl-icore.dtsi @@ -397,7 +397,7 @@ pinctrl_usbotg: usbotggrp { fsl,pins = < - MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059 + MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID 0x17059 >; }; @@ -409,6 +409,7 @@ MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x17070 MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x17070 MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x17070 + MX6QDL_PAD_GPIO_1__GPIO1_IO01 0x1b0b0 >; }; diff --git a/arch/arm/boot/dts/imx6sx-sabreauto.dts b/arch/arm/boot/dts/imx6sx-sabreauto.dts index 825924448ab4..14fd1de52a68 100644 --- a/arch/arm/boot/dts/imx6sx-sabreauto.dts +++ b/arch/arm/boot/dts/imx6sx-sabreauto.dts @@ -99,7 +99,7 @@ &fec2 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_enet2>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; phy-handle = <ðphy0>; fsl,magic-packet; status = "okay"; diff --git a/arch/arm/boot/dts/imx6sx-sdb.dtsi b/arch/arm/boot/dts/imx6sx-sdb.dtsi index 3e5fb72f21fc..c99aa273c296 100644 --- a/arch/arm/boot/dts/imx6sx-sdb.dtsi +++ b/arch/arm/boot/dts/imx6sx-sdb.dtsi @@ -213,7 +213,7 @@ &fec2 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_enet2>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; phy-handle = <ðphy2>; status = "okay"; }; diff --git a/arch/arm/boot/dts/keystone-k2g-evm.dts b/arch/arm/boot/dts/keystone-k2g-evm.dts index db640bab8c1d..8b3d64c913d8 100644 --- a/arch/arm/boot/dts/keystone-k2g-evm.dts +++ b/arch/arm/boot/dts/keystone-k2g-evm.dts @@ -402,7 +402,7 @@ &gbe0 { phy-handle = <ðphy0>; - phy-mode = "rgmii-id"; + phy-mode = "rgmii-rxid"; status = "okay"; }; diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi index bf531efc0610..0f95a6ef8543 100644 --- a/arch/arm/boot/dts/sun4i-a10.dtsi +++ b/arch/arm/boot/dts/sun4i-a10.dtsi @@ -198,7 +198,7 @@ default-pool { compatible = "shared-dma-pool"; size = <0x6000000>; - alloc-ranges = <0x4a000000 0x6000000>; + alloc-ranges = <0x40000000 0x10000000>; reusable; linux,cma-default; }; diff --git a/arch/arm/boot/dts/sun5i.dtsi b/arch/arm/boot/dts/sun5i.dtsi index e6b036734a64..c2b4fbf552a3 100644 --- a/arch/arm/boot/dts/sun5i.dtsi +++ b/arch/arm/boot/dts/sun5i.dtsi @@ -117,7 +117,7 @@ default-pool { compatible = "shared-dma-pool"; size = <0x6000000>; - alloc-ranges = <0x4a000000 0x6000000>; + alloc-ranges = <0x40000000 0x10000000>; reusable; linux,cma-default; }; diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi index 
ffe1d10a1a84..6d6a37940db2 100644 --- a/arch/arm/boot/dts/sun7i-a20.dtsi +++ b/arch/arm/boot/dts/sun7i-a20.dtsi @@ -181,7 +181,7 @@ default-pool { compatible = "shared-dma-pool"; size = <0x6000000>; - alloc-ranges = <0x4a000000 0x6000000>; + alloc-ranges = <0x40000000 0x10000000>; reusable; linux,cma-default; }; diff --git a/arch/arm/crypto/crc32-ce-core.S b/arch/arm/crypto/crc32-ce-core.S index 5cbd4a6fedad..3f13a76b9066 100644 --- a/arch/arm/crypto/crc32-ce-core.S +++ b/arch/arm/crypto/crc32-ce-core.S @@ -39,7 +39,7 @@ * CRC32 polynomial:0x04c11db7(BE)/0xEDB88320(LE) * PCLMULQDQ is a new instruction in Intel SSE4.2, the reference can be found * at: - * http://www.intel.com/products/processor/manuals/ + * https://www.intel.com/products/processor/manuals/ * Intel(R) 64 and IA-32 Architectures Software Developer's Manual * Volume 2B: Instruction Set Reference, N-Z * diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c index a00fd329255f..f13401f3e669 100644 --- a/arch/arm/crypto/ghash-ce-glue.c +++ b/arch/arm/crypto/ghash-ce-glue.c @@ -16,6 +16,7 @@ #include <crypto/gf128mul.h> #include <linux/cpufeature.h> #include <linux/crypto.h> +#include <linux/jump_label.h> #include <linux/module.h> MODULE_DESCRIPTION("GHASH hash function using ARMv8 Crypto Extensions"); @@ -27,12 +28,8 @@ MODULE_ALIAS_CRYPTO("ghash"); #define GHASH_DIGEST_SIZE 16 struct ghash_key { - u64 h[2]; - u64 h2[2]; - u64 h3[2]; - u64 h4[2]; - be128 k; + u64 h[][2]; }; struct ghash_desc_ctx { @@ -46,16 +43,12 @@ struct ghash_async_ctx { }; asmlinkage void pmull_ghash_update_p64(int blocks, u64 dg[], const char *src, - struct ghash_key const *k, - const char *head); + u64 const h[][2], const char *head); asmlinkage void pmull_ghash_update_p8(int blocks, u64 dg[], const char *src, - struct ghash_key const *k, - const char *head); + u64 const h[][2], const char *head); -static void (*pmull_ghash_update)(int blocks, u64 dg[], const char *src, - struct ghash_key const *k, - const char *head); +static __ro_after_init DEFINE_STATIC_KEY_FALSE(use_p64); static int ghash_init(struct shash_desc *desc) { @@ -70,7 +63,10 @@ static void ghash_do_update(int blocks, u64 dg[], const char *src, { if (likely(crypto_simd_usable())) { kernel_neon_begin(); - pmull_ghash_update(blocks, dg, src, key, head); + if (static_branch_likely(&use_p64)) + pmull_ghash_update_p64(blocks, dg, src, key->h, head); + else + pmull_ghash_update_p8(blocks, dg, src, key->h, head); kernel_neon_end(); } else { be128 dst = { cpu_to_be64(dg[1]), cpu_to_be64(dg[0]) }; @@ -161,25 +157,26 @@ static int ghash_setkey(struct crypto_shash *tfm, const u8 *inkey, unsigned int keylen) { struct ghash_key *key = crypto_shash_ctx(tfm); - be128 h; if (keylen != GHASH_BLOCK_SIZE) return -EINVAL; /* needed for the fallback */ memcpy(&key->k, inkey, GHASH_BLOCK_SIZE); - ghash_reflect(key->h, &key->k); + ghash_reflect(key->h[0], &key->k); - h = key->k; - gf128mul_lle(&h, &key->k); - ghash_reflect(key->h2, &h); + if (static_branch_likely(&use_p64)) { + be128 h = key->k; - gf128mul_lle(&h, &key->k); - ghash_reflect(key->h3, &h); + gf128mul_lle(&h, &key->k); + ghash_reflect(key->h[1], &h); - gf128mul_lle(&h, &key->k); - ghash_reflect(key->h4, &h); + gf128mul_lle(&h, &key->k); + ghash_reflect(key->h[2], &h); + gf128mul_lle(&h, &key->k); + ghash_reflect(key->h[3], &h); + } return 0; } @@ -195,7 +192,7 @@ static struct shash_alg ghash_alg = { .base.cra_driver_name = "ghash-ce-sync", .base.cra_priority = 300 - 1, .base.cra_blocksize = GHASH_BLOCK_SIZE, - .base.cra_ctxsize 
= sizeof(struct ghash_key), + .base.cra_ctxsize = sizeof(struct ghash_key) + sizeof(u64[2]), .base.cra_module = THIS_MODULE, }; @@ -354,10 +351,10 @@ static int __init ghash_ce_mod_init(void) if (!(elf_hwcap & HWCAP_NEON)) return -ENODEV; - if (elf_hwcap2 & HWCAP2_PMULL) - pmull_ghash_update = pmull_ghash_update_p64; - else - pmull_ghash_update = pmull_ghash_update_p8; + if (elf_hwcap2 & HWCAP2_PMULL) { + ghash_alg.base.cra_ctxsize += 3 * sizeof(u64[2]); + static_branch_enable(&use_p64); + } err = crypto_register_shash(&ghash_alg); if (err) diff --git a/arch/arm/crypto/sha1-armv4-large.S b/arch/arm/crypto/sha1-armv4-large.S index f82cd8cf5a09..1c8b685149f2 100644 --- a/arch/arm/crypto/sha1-armv4-large.S +++ b/arch/arm/crypto/sha1-armv4-large.S @@ -13,7 +13,7 @@ @ Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL @ project. The module is, however, dual licensed under OpenSSL and @ CRYPTOGAMS licenses depending on where you obtain it. For further -@ details see http://www.openssl.org/~appro/cryptogams/. +@ details see https://www.openssl.org/~appro/cryptogams/. @ ==================================================================== @ sha1_block procedure for ARMv4. diff --git a/arch/arm/crypto/sha256-armv4.pl b/arch/arm/crypto/sha256-armv4.pl index a03cf4dfb781..9f96ff48e4a8 100644 --- a/arch/arm/crypto/sha256-armv4.pl +++ b/arch/arm/crypto/sha256-armv4.pl @@ -13,7 +13,7 @@ # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL # project. The module is, however, dual licensed under OpenSSL and # CRYPTOGAMS licenses depending on where you obtain it. For further -# details see http://www.openssl.org/~appro/cryptogams/. +# details see https://www.openssl.org/~appro/cryptogams/. # ==================================================================== # SHA256 block procedure for ARMv4. May 2007. diff --git a/arch/arm/crypto/sha256-core.S_shipped b/arch/arm/crypto/sha256-core.S_shipped index 054aae0edfce..ea04b2ab0c33 100644 --- a/arch/arm/crypto/sha256-core.S_shipped +++ b/arch/arm/crypto/sha256-core.S_shipped @@ -12,7 +12,7 @@ @ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL @ project. The module is, however, dual licensed under OpenSSL and @ CRYPTOGAMS licenses depending on where you obtain it. For further -@ details see http://www.openssl.org/~appro/cryptogams/. +@ details see https://www.openssl.org/~appro/cryptogams/. @ ==================================================================== @ SHA256 block procedure for ARMv4. May 2007. diff --git a/arch/arm/crypto/sha512-armv4.pl b/arch/arm/crypto/sha512-armv4.pl index 788c17b56ecc..69df68981acd 100644 --- a/arch/arm/crypto/sha512-armv4.pl +++ b/arch/arm/crypto/sha512-armv4.pl @@ -13,7 +13,7 @@ # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL # project. The module is, however, dual licensed under OpenSSL and # CRYPTOGAMS licenses depending on where you obtain it. For further -# details see http://www.openssl.org/~appro/cryptogams/. +# details see https://www.openssl.org/~appro/cryptogams/. # ==================================================================== # SHA512 block procedure for ARMv4. September 2007. @@ -43,7 +43,7 @@ # terms it's 22.6 cycles per byte, which is disappointing result. # Technical writers asserted that 3-way S4 pipeline can sustain # multiple NEON instructions per cycle, but dual NEON issue could -# not be observed, see http://www.openssl.org/~appro/Snapdragon-S4.html +# not be observed, see https://www.openssl.org/~appro/Snapdragon-S4.html # for further details. 
On side note Cortex-A15 processes one byte in # 16 cycles. diff --git a/arch/arm/crypto/sha512-core.S_shipped b/arch/arm/crypto/sha512-core.S_shipped index 710ea309769e..cb147db5cbfe 100644 --- a/arch/arm/crypto/sha512-core.S_shipped +++ b/arch/arm/crypto/sha512-core.S_shipped @@ -12,7 +12,7 @@ @ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL @ project. The module is, however, dual licensed under OpenSSL and @ CRYPTOGAMS licenses depending on where you obtain it. For further -@ details see http://www.openssl.org/~appro/cryptogams/. +@ details see https://www.openssl.org/~appro/cryptogams/. @ ==================================================================== @ SHA512 block procedure for ARMv4. September 2007. @@ -42,7 +42,7 @@ @ terms it's 22.6 cycles per byte, which is disappointing result. @ Technical writers asserted that 3-way S4 pipeline can sustain @ multiple NEON instructions per cycle, but dual NEON issue could -@ not be observed, see http://www.openssl.org/~appro/Snapdragon-S4.html +@ not be observed, see https://www.openssl.org/~appro/Snapdragon-S4.html @ for further details. On side note Cortex-A15 processes one byte in @ 16 cycles. diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h index f44f448537f2..1a3eedbac4a2 100644 --- a/arch/arm/include/asm/percpu.h +++ b/arch/arm/include/asm/percpu.h @@ -5,6 +5,8 @@ #ifndef _ASM_ARM_PERCPU_H_ #define _ASM_ARM_PERCPU_H_ +#include <asm/thread_info.h> + /* * Same as asm-generic/percpu.h, except that we store the per cpu offset * in the TPIDRPRW. TPIDRPRW only exists on V6K and V7 diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 02ca7adf5375..7fff88e61252 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -683,6 +683,12 @@ static void disable_single_step(struct perf_event *bp) arch_install_hw_breakpoint(bp); } +static int watchpoint_fault_on_uaccess(struct pt_regs *regs, + struct arch_hw_breakpoint *info) +{ + return !user_mode(regs) && info->ctrl.privilege == ARM_BREAKPOINT_USER; +} + static void watchpoint_handler(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { @@ -742,16 +748,27 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr, } pr_debug("watchpoint fired: address = 0x%x\n", info->trigger); + + /* + * If we triggered a user watchpoint from a uaccess routine, + * then handle the stepping ourselves since userspace really + * can't help us with this. + */ + if (watchpoint_fault_on_uaccess(regs, info)) + goto step; + perf_bp_event(wp, regs); /* - * If no overflow handler is present, insert a temporary - * mismatch breakpoint so we can single-step over the - * watchpoint trigger. + * Defer stepping to the overflow handler if one is installed. + * Otherwise, insert a temporary mismatch breakpoint so that + * we can single-step over the watchpoint trigger. 
*/ - if (is_default_overflow_handler(wp)) - enable_single_step(wp, instruction_pointer(regs)); + if (!is_default_overflow_handler(wp)) + goto unlock; +step: + enable_single_step(wp, instruction_pointer(regs)); unlock: rcu_read_unlock(); } diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c index 6bfdca4769a7..fddd08a6e063 100644 --- a/arch/arm/kernel/vdso.c +++ b/arch/arm/kernel/vdso.c @@ -184,6 +184,7 @@ static void __init patch_vdso(void *ehdr) if (!cntvct_ok) { vdso_nullpatch_one(&einfo, "__vdso_gettimeofday"); vdso_nullpatch_one(&einfo, "__vdso_clock_gettime"); + vdso_nullpatch_one(&einfo, "__vdso_clock_gettime64"); } } diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 628028bfbb92..bcd82614c25d 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -966,7 +966,7 @@ void __init create_mapping_late(struct mm_struct *mm, struct map_desc *md, pud_t *pud; p4d = p4d_alloc(mm, pgd_offset(mm, md->virtual), md->virtual); - if (!WARN_ON(!p4d)) + if (WARN_ON(!p4d)) return; pud = pud_alloc(mm, p4d, md->virtual); if (WARN_ON(!pud)) diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index a0d94d063fa8..70f5905954dd 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -137,7 +137,7 @@ export TEXT_OFFSET core-y += arch/arm64/ libs-y := arch/arm64/lib/ $(libs-y) -core-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a +libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a # Default target when executing plain make boot := arch/arm64/boot diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi index 78b1361dfbb9..9ce78a7b117d 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi @@ -161,6 +161,7 @@ resets = <&ccu RST_BUS_VE>; interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>; allwinner,sram = <&ve_sram 1>; + iommus = <&iommu 3>; }; gpu: gpu@1800000 { diff --git a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts index c8243da71041..eb01cc96ba7a 100644 --- a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts +++ b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts @@ -454,10 +454,7 @@ status = "okay"; phy-mode = "2500base-x"; phys = <&cp1_comphy5 2>; - fixed-link { - speed = <2500>; - full-duplex; - }; + managed = "in-band-status"; }; &cp1_spi1 { diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c index 22831d3b7f62..da1034867aaa 100644 --- a/arch/arm64/crypto/ghash-ce-glue.c +++ b/arch/arm64/crypto/ghash-ce-glue.c @@ -31,12 +31,8 @@ MODULE_ALIAS_CRYPTO("ghash"); #define GCM_IV_SIZE 12 struct ghash_key { - u64 h[2]; - u64 h2[2]; - u64 h3[2]; - u64 h4[2]; - be128 k; + u64 h[][2]; }; struct ghash_desc_ctx { @@ -51,22 +47,18 @@ struct gcm_aes_ctx { }; asmlinkage void pmull_ghash_update_p64(int blocks, u64 dg[], const char *src, - struct ghash_key const *k, - const char *head); + u64 const h[][2], const char *head); asmlinkage void pmull_ghash_update_p8(int blocks, u64 dg[], const char *src, - struct ghash_key const *k, - const char *head); + u64 const h[][2], const char *head); asmlinkage void pmull_gcm_encrypt(int bytes, u8 dst[], const u8 src[], - struct ghash_key const *k, u64 dg[], - u8 ctr[], u32 const rk[], int rounds, - u8 tag[]); + u64 const h[][2], u64 dg[], u8 ctr[], + u32 const rk[], int rounds, u8 tag[]); asmlinkage void pmull_gcm_decrypt(int bytes, u8 dst[], const u8 src[], - struct ghash_key const *k, u64 dg[], - 
u8 ctr[], u32 const rk[], int rounds, - u8 tag[]); + u64 const h[][2], u64 dg[], u8 ctr[], + u32 const rk[], int rounds, u8 tag[]); static int ghash_init(struct shash_desc *desc) { @@ -77,48 +69,51 @@ static int ghash_init(struct shash_desc *desc) } static void ghash_do_update(int blocks, u64 dg[], const char *src, - struct ghash_key *key, const char *head, - void (*simd_update)(int blocks, u64 dg[], - const char *src, - struct ghash_key const *k, - const char *head)) + struct ghash_key *key, const char *head) { - if (likely(crypto_simd_usable() && simd_update)) { - kernel_neon_begin(); - simd_update(blocks, dg, src, key, head); - kernel_neon_end(); - } else { - be128 dst = { cpu_to_be64(dg[1]), cpu_to_be64(dg[0]) }; + be128 dst = { cpu_to_be64(dg[1]), cpu_to_be64(dg[0]) }; - do { - const u8 *in = src; - - if (head) { - in = head; - blocks++; - head = NULL; - } else { - src += GHASH_BLOCK_SIZE; - } + do { + const u8 *in = src; + + if (head) { + in = head; + blocks++; + head = NULL; + } else { + src += GHASH_BLOCK_SIZE; + } - crypto_xor((u8 *)&dst, in, GHASH_BLOCK_SIZE); - gf128mul_lle(&dst, &key->k); - } while (--blocks); + crypto_xor((u8 *)&dst, in, GHASH_BLOCK_SIZE); + gf128mul_lle(&dst, &key->k); + } while (--blocks); - dg[0] = be64_to_cpu(dst.b); - dg[1] = be64_to_cpu(dst.a); + dg[0] = be64_to_cpu(dst.b); + dg[1] = be64_to_cpu(dst.a); +} + +static __always_inline +void ghash_do_simd_update(int blocks, u64 dg[], const char *src, + struct ghash_key *key, const char *head, + void (*simd_update)(int blocks, u64 dg[], + const char *src, + u64 const h[][2], + const char *head)) +{ + if (likely(crypto_simd_usable())) { + kernel_neon_begin(); + simd_update(blocks, dg, src, key->h, head); + kernel_neon_end(); + } else { + ghash_do_update(blocks, dg, src, key, head); } } /* avoid hogging the CPU for too long */ #define MAX_BLOCKS (SZ_64K / GHASH_BLOCK_SIZE) -static int __ghash_update(struct shash_desc *desc, const u8 *src, - unsigned int len, - void (*simd_update)(int blocks, u64 dg[], - const char *src, - struct ghash_key const *k, - const char *head)) +static int ghash_update(struct shash_desc *desc, const u8 *src, + unsigned int len) { struct ghash_desc_ctx *ctx = shash_desc_ctx(desc); unsigned int partial = ctx->count % GHASH_BLOCK_SIZE; @@ -143,9 +138,9 @@ static int __ghash_update(struct shash_desc *desc, const u8 *src, do { int chunk = min(blocks, MAX_BLOCKS); - ghash_do_update(chunk, ctx->digest, src, key, - partial ? ctx->buf : NULL, - simd_update); + ghash_do_simd_update(chunk, ctx->digest, src, key, + partial ? 
ctx->buf : NULL, + pmull_ghash_update_p8); blocks -= chunk; src += chunk * GHASH_BLOCK_SIZE; @@ -157,39 +152,7 @@ static int __ghash_update(struct shash_desc *desc, const u8 *src, return 0; } -static int ghash_update_p8(struct shash_desc *desc, const u8 *src, - unsigned int len) -{ - return __ghash_update(desc, src, len, pmull_ghash_update_p8); -} - -static int ghash_update_p64(struct shash_desc *desc, const u8 *src, - unsigned int len) -{ - return __ghash_update(desc, src, len, pmull_ghash_update_p64); -} - -static int ghash_final_p8(struct shash_desc *desc, u8 *dst) -{ - struct ghash_desc_ctx *ctx = shash_desc_ctx(desc); - unsigned int partial = ctx->count % GHASH_BLOCK_SIZE; - - if (partial) { - struct ghash_key *key = crypto_shash_ctx(desc->tfm); - - memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial); - - ghash_do_update(1, ctx->digest, ctx->buf, key, NULL, - pmull_ghash_update_p8); - } - put_unaligned_be64(ctx->digest[1], dst); - put_unaligned_be64(ctx->digest[0], dst + 8); - - *ctx = (struct ghash_desc_ctx){}; - return 0; -} - -static int ghash_final_p64(struct shash_desc *desc, u8 *dst) +static int ghash_final(struct shash_desc *desc, u8 *dst) { struct ghash_desc_ctx *ctx = shash_desc_ctx(desc); unsigned int partial = ctx->count % GHASH_BLOCK_SIZE; @@ -199,8 +162,8 @@ static int ghash_final_p64(struct shash_desc *desc, u8 *dst) memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial); - ghash_do_update(1, ctx->digest, ctx->buf, key, NULL, - pmull_ghash_update_p64); + ghash_do_simd_update(1, ctx->digest, ctx->buf, key, NULL, + pmull_ghash_update_p8); } put_unaligned_be64(ctx->digest[1], dst); put_unaligned_be64(ctx->digest[0], dst + 8); @@ -220,29 +183,6 @@ static void ghash_reflect(u64 h[], const be128 *k) h[1] ^= 0xc200000000000000UL; } -static int __ghash_setkey(struct ghash_key *key, - const u8 *inkey, unsigned int keylen) -{ - be128 h; - - /* needed for the fallback */ - memcpy(&key->k, inkey, GHASH_BLOCK_SIZE); - - ghash_reflect(key->h, &key->k); - - h = key->k; - gf128mul_lle(&h, &key->k); - ghash_reflect(key->h2, &h); - - gf128mul_lle(&h, &key->k); - ghash_reflect(key->h3, &h); - - gf128mul_lle(&h, &key->k); - ghash_reflect(key->h4, &h); - - return 0; -} - static int ghash_setkey(struct crypto_shash *tfm, const u8 *inkey, unsigned int keylen) { @@ -251,38 +191,28 @@ static int ghash_setkey(struct crypto_shash *tfm, if (keylen != GHASH_BLOCK_SIZE) return -EINVAL; - return __ghash_setkey(key, inkey, keylen); + /* needed for the fallback */ + memcpy(&key->k, inkey, GHASH_BLOCK_SIZE); + + ghash_reflect(key->h[0], &key->k); + return 0; } -static struct shash_alg ghash_alg[] = {{ +static struct shash_alg ghash_alg = { .base.cra_name = "ghash", .base.cra_driver_name = "ghash-neon", .base.cra_priority = 150, .base.cra_blocksize = GHASH_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct ghash_key), - .base.cra_module = THIS_MODULE, - - .digestsize = GHASH_DIGEST_SIZE, - .init = ghash_init, - .update = ghash_update_p8, - .final = ghash_final_p8, - .setkey = ghash_setkey, - .descsize = sizeof(struct ghash_desc_ctx), -}, { - .base.cra_name = "ghash", - .base.cra_driver_name = "ghash-ce", - .base.cra_priority = 200, - .base.cra_blocksize = GHASH_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct ghash_key), + .base.cra_ctxsize = sizeof(struct ghash_key) + sizeof(u64[2]), .base.cra_module = THIS_MODULE, .digestsize = GHASH_DIGEST_SIZE, .init = ghash_init, - .update = ghash_update_p64, - .final = ghash_final_p64, + .update = ghash_update, + .final = ghash_final, .setkey = ghash_setkey, 
.descsize = sizeof(struct ghash_desc_ctx), -}}; +}; static int num_rounds(struct crypto_aes_ctx *ctx) { @@ -301,6 +231,7 @@ static int gcm_setkey(struct crypto_aead *tfm, const u8 *inkey, { struct gcm_aes_ctx *ctx = crypto_aead_ctx(tfm); u8 key[GHASH_BLOCK_SIZE]; + be128 h; int ret; ret = aes_expandkey(&ctx->aes_key, inkey, keylen); @@ -309,7 +240,22 @@ static int gcm_setkey(struct crypto_aead *tfm, const u8 *inkey, aes_encrypt(&ctx->aes_key, key, (u8[AES_BLOCK_SIZE]){}); - return __ghash_setkey(&ctx->ghash_key, key, sizeof(be128)); + /* needed for the fallback */ + memcpy(&ctx->ghash_key.k, key, GHASH_BLOCK_SIZE); + + ghash_reflect(ctx->ghash_key.h[0], &ctx->ghash_key.k); + + h = ctx->ghash_key.k; + gf128mul_lle(&h, &ctx->ghash_key.k); + ghash_reflect(ctx->ghash_key.h[1], &h); + + gf128mul_lle(&h, &ctx->ghash_key.k); + ghash_reflect(ctx->ghash_key.h[2], &h); + + gf128mul_lle(&h, &ctx->ghash_key.k); + ghash_reflect(ctx->ghash_key.h[3], &h); + + return 0; } static int gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) @@ -341,9 +287,9 @@ static void gcm_update_mac(u64 dg[], const u8 *src, int count, u8 buf[], if (count >= GHASH_BLOCK_SIZE || *buf_count == GHASH_BLOCK_SIZE) { int blocks = count / GHASH_BLOCK_SIZE; - ghash_do_update(blocks, dg, src, &ctx->ghash_key, - *buf_count ? buf : NULL, - pmull_ghash_update_p64); + ghash_do_simd_update(blocks, dg, src, &ctx->ghash_key, + *buf_count ? buf : NULL, + pmull_ghash_update_p64); src += blocks * GHASH_BLOCK_SIZE; count %= GHASH_BLOCK_SIZE; @@ -387,8 +333,8 @@ static void gcm_calculate_auth_mac(struct aead_request *req, u64 dg[]) if (buf_count) { memset(&buf[buf_count], 0, GHASH_BLOCK_SIZE - buf_count); - ghash_do_update(1, dg, buf, &ctx->ghash_key, NULL, - pmull_ghash_update_p64); + ghash_do_simd_update(1, dg, buf, &ctx->ghash_key, NULL, + pmull_ghash_update_p64); } } @@ -433,8 +379,8 @@ static int gcm_encrypt(struct aead_request *req) } kernel_neon_begin(); - pmull_gcm_encrypt(nbytes, dst, src, &ctx->ghash_key, dg, - iv, ctx->aes_key.key_enc, nrounds, + pmull_gcm_encrypt(nbytes, dst, src, ctx->ghash_key.h, + dg, iv, ctx->aes_key.key_enc, nrounds, tag); kernel_neon_end(); @@ -464,7 +410,7 @@ static int gcm_encrypt(struct aead_request *req) } while (--remaining > 0); ghash_do_update(blocks, dg, walk.dst.virt.addr, - &ctx->ghash_key, NULL, NULL); + &ctx->ghash_key, NULL); err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); @@ -483,7 +429,7 @@ static int gcm_encrypt(struct aead_request *req) tag = (u8 *)&lengths; ghash_do_update(1, dg, tag, &ctx->ghash_key, - walk.nbytes ? buf : NULL, NULL); + walk.nbytes ? buf : NULL); if (walk.nbytes) err = skcipher_walk_done(&walk, 0); @@ -547,8 +493,8 @@ static int gcm_decrypt(struct aead_request *req) } kernel_neon_begin(); - pmull_gcm_decrypt(nbytes, dst, src, &ctx->ghash_key, dg, - iv, ctx->aes_key.key_enc, nrounds, + pmull_gcm_decrypt(nbytes, dst, src, ctx->ghash_key.h, + dg, iv, ctx->aes_key.key_enc, nrounds, tag); kernel_neon_end(); @@ -568,7 +514,7 @@ static int gcm_decrypt(struct aead_request *req) u8 *dst = walk.dst.virt.addr; ghash_do_update(blocks, dg, walk.src.virt.addr, - &ctx->ghash_key, NULL, NULL); + &ctx->ghash_key, NULL); do { aes_encrypt(&ctx->aes_key, buf, iv); @@ -591,7 +537,7 @@ static int gcm_decrypt(struct aead_request *req) tag = (u8 *)&lengths; ghash_do_update(1, dg, tag, &ctx->ghash_key, - walk.nbytes ? buf : NULL, NULL); + walk.nbytes ? 
buf : NULL); if (walk.nbytes) { aes_encrypt(&ctx->aes_key, buf, iv); @@ -635,43 +581,28 @@ static struct aead_alg gcm_aes_alg = { .base.cra_driver_name = "gcm-aes-ce", .base.cra_priority = 300, .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct gcm_aes_ctx), + .base.cra_ctxsize = sizeof(struct gcm_aes_ctx) + + 4 * sizeof(u64[2]), .base.cra_module = THIS_MODULE, }; static int __init ghash_ce_mod_init(void) { - int ret; - if (!cpu_have_named_feature(ASIMD)) return -ENODEV; if (cpu_have_named_feature(PMULL)) - ret = crypto_register_shashes(ghash_alg, - ARRAY_SIZE(ghash_alg)); - else - /* only register the first array element */ - ret = crypto_register_shash(ghash_alg); + return crypto_register_aead(&gcm_aes_alg); - if (ret) - return ret; - - if (cpu_have_named_feature(PMULL)) { - ret = crypto_register_aead(&gcm_aes_alg); - if (ret) - crypto_unregister_shashes(ghash_alg, - ARRAY_SIZE(ghash_alg)); - } - return ret; + return crypto_register_shash(&ghash_alg); } static void __exit ghash_ce_mod_exit(void) { if (cpu_have_named_feature(PMULL)) - crypto_unregister_shashes(ghash_alg, ARRAY_SIZE(ghash_alg)); + crypto_unregister_aead(&gcm_aes_alg); else - crypto_unregister_shash(ghash_alg); - crypto_unregister_aead(&gcm_aes_alg); + crypto_unregister_shash(&ghash_alg); } static const struct cpu_feature ghash_cpu_feature[] = { diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h index 12f0eb56a1cc..619db9b4c9d5 100644 --- a/arch/arm64/include/asm/alternative.h +++ b/arch/arm64/include/asm/alternative.h @@ -77,9 +77,9 @@ static inline void apply_alternatives_module(void *start, size_t length) { } "663:\n\t" \ newinstr "\n" \ "664:\n\t" \ - ".previous\n\t" \ ".org . - (664b-663b) + (662b-661b)\n\t" \ - ".org . - (662b-661b) + (664b-663b)\n" \ + ".org . 
- (662b-661b) + (664b-663b)\n\t" \ + ".previous\n" \ ".endif\n" #define __ALTERNATIVE_CFG_CB(oldinstr, feature, cfg_enabled, cb) \ diff --git a/arch/arm64/include/asm/checksum.h b/arch/arm64/include/asm/checksum.h index b6f7bc6da5fb..93a161b3bf3f 100644 --- a/arch/arm64/include/asm/checksum.h +++ b/arch/arm64/include/asm/checksum.h @@ -24,16 +24,17 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) { __uint128_t tmp; u64 sum; + int n = ihl; /* we want it signed */ tmp = *(const __uint128_t *)iph; iph += 16; - ihl -= 4; + n -= 4; tmp += ((tmp >> 64) | (tmp << 64)); sum = tmp >> 64; do { sum += *(const u32 *)iph; iph += 4; - } while (--ihl); + } while (--n > 0); sum += ((sum >> 32) | (sum << 32)); return csum_fold((__force u32)(sum >> 32)); diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index c3e6fcc664b1..e21d4a01372f 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -380,9 +380,14 @@ struct kvm_vcpu_arch { #define vcpu_has_sve(vcpu) (system_supports_sve() && \ ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE)) -#define vcpu_has_ptrauth(vcpu) ((system_supports_address_auth() || \ - system_supports_generic_auth()) && \ - ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH)) +#ifdef CONFIG_ARM64_PTR_AUTH +#define vcpu_has_ptrauth(vcpu) \ + ((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) || \ + cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) && \ + (vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH) +#else +#define vcpu_has_ptrauth(vcpu) false +#endif #define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs) diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h index ea268d88b6f7..a0c8a0b65259 100644 --- a/arch/arm64/include/asm/smp.h +++ b/arch/arm64/include/asm/smp.h @@ -30,7 +30,6 @@ #include <linux/threads.h> #include <linux/cpumask.h> #include <linux/thread_info.h> -#include <asm/pointer_auth.h> DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number); diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile index d88148bef6b0..5139a5f19256 100644 --- a/arch/arm64/kernel/vdso32/Makefile +++ b/arch/arm64/kernel/vdso32/Makefile @@ -14,7 +14,7 @@ COMPAT_GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE_COMPAT)elfedit)) COMPAT_GCC_TOOLCHAIN := $(realpath $(COMPAT_GCC_TOOLCHAIN_DIR)/..) 
CC_COMPAT_CLANG_FLAGS := --target=$(notdir $(CROSS_COMPILE_COMPAT:%-=%)) -CC_COMPAT_CLANG_FLAGS += --prefix=$(COMPAT_GCC_TOOLCHAIN_DIR) +CC_COMPAT_CLANG_FLAGS += --prefix=$(COMPAT_GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE_COMPAT)) CC_COMPAT_CLANG_FLAGS += -no-integrated-as -Qunused-arguments ifneq ($(COMPAT_GCC_TOOLCHAIN),) CC_COMPAT_CLANG_FLAGS += --gcc-toolchain=$(COMPAT_GCC_TOOLCHAIN) diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 8c0035cab6b6..31058e6e7c2a 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -1326,7 +1326,7 @@ static bool stage2_get_leaf_entry(struct kvm *kvm, phys_addr_t addr, return true; } -static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr) +static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr, unsigned long sz) { pud_t *pudp; pmd_t *pmdp; @@ -1338,11 +1338,11 @@ static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr) return false; if (pudp) - return kvm_s2pud_exec(pudp); + return sz <= PUD_SIZE && kvm_s2pud_exec(pudp); else if (pmdp) - return kvm_s2pmd_exec(pmdp); + return sz <= PMD_SIZE && kvm_s2pmd_exec(pmdp); else - return kvm_s2pte_exec(ptep); + return sz == PAGE_SIZE && kvm_s2pte_exec(ptep); } static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, @@ -1958,7 +1958,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, * execute permissions, and we preserve whatever we have. */ needs_exec = exec_fault || - (fault_status == FSC_PERM && stage2_is_exec(kvm, fault_ipa)); + (fault_status == FSC_PERM && + stage2_is_exec(kvm, fault_ipa, vma_pagesize)); if (vma_pagesize == PUD_SIZE) { pud_t new_pud = kvm_pfn_pud(pfn, mem_type); diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h index 118953d41763..6dd4171c9530 100644 --- a/arch/parisc/include/asm/atomic.h +++ b/arch/parisc/include/asm/atomic.h @@ -212,6 +212,8 @@ atomic64_set(atomic64_t *v, s64 i) _atomic_spin_unlock_irqrestore(v, flags); } +#define atomic64_set_release(v, i) atomic64_set((v), (i)) + static __inline__ s64 atomic64_read(const atomic64_t *v) { diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h index ab5c215cf46c..068958575871 100644 --- a/arch/parisc/include/asm/cmpxchg.h +++ b/arch/parisc/include/asm/cmpxchg.h @@ -60,6 +60,7 @@ extern void __cmpxchg_called_with_bad_pointer(void); extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, unsigned int new_); extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_); +extern u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new_); /* don't worry...optimizer will get rid of most of this */ static inline unsigned long @@ -71,6 +72,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size) #endif case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int)old, (unsigned int)new_); + case 1: return __cmpxchg_u8((u8 *)ptr, (u8)old, (u8)new_); } __cmpxchg_called_with_bad_pointer(); return old; diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c index 70ffbcf889b8..2e4d1f05a926 100644 --- a/arch/parisc/lib/bitops.c +++ b/arch/parisc/lib/bitops.c @@ -79,3 +79,15 @@ unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsign _atomic_spin_unlock_irqrestore(ptr, flags); return (unsigned long)prev; } + +u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new) +{ + unsigned long flags; + u8 prev; + + _atomic_spin_lock_irqsave(ptr, flags); + if ((prev = *ptr) == old) + *ptr = new; + _atomic_spin_unlock_irqrestore(ptr, flags); + return 
prev; +} diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 0fc8bad878b2..446e54c3f71e 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -3072,10 +3072,18 @@ do_hash_page: ori r0,r0,DSISR_BAD_FAULT_64S@l and. r0,r5,r0 /* weird error? */ bne- handle_page_fault /* if not, try to insert a HPTE */ + + /* + * If we are in an "NMI" (e.g., an interrupt when soft-disabled), then + * don't call hash_page, just fail the fault. This is required to + * prevent re-entrancy problems in the hash code, namely perf + * interrupts hitting while something holds H_PAGE_BUSY, and taking a + * hash fault. See the comment in hash_preload(). + */ ld r11, PACA_THREAD_INFO(r13) - lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */ - andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */ - bne 77f /* then don't call hash_page now */ + lwz r0,TI_PREEMPT(r11) + andis. r0,r0,NMI_MASK@h + bne 77f /* * r3 contains the trap number diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c index 468169e33c86..9b9f92ad0e7a 100644 --- a/arch/powerpc/mm/book3s64/hash_utils.c +++ b/arch/powerpc/mm/book3s64/hash_utils.c @@ -1559,6 +1559,7 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea, pgd_t *pgdir; int rc, ssize, update_flags = 0; unsigned long access = _PAGE_PRESENT | _PAGE_READ | (is_exec ? _PAGE_EXEC : 0); + unsigned long flags; BUG_ON(get_region_id(ea) != USER_REGION_ID); @@ -1592,6 +1593,28 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea, return; #endif /* CONFIG_PPC_64K_PAGES */ + /* + * __hash_page_* must run with interrupts off, as it sets the + * H_PAGE_BUSY bit. It's possible for perf interrupts to hit at any + * time and may take a hash fault reading the user stack, see + * read_user_stack_slow() in the powerpc/perf code. + * + * If that takes a hash fault on the same page as we lock here, it + * will bail out when seeing H_PAGE_BUSY set, and retry the access + * leading to an infinite loop. + * + * Disabling interrupts here does not prevent perf interrupts, but it + * will prevent them taking hash faults (see the NMI test in + * do_hash_page), then read_user_stack's copy_from_user_nofault will + * fail and perf will fall back to read_user_stack_slow(), which + * walks the Linux page tables. + * + * Interrupts must also be off for the duration of the + * mm_is_thread_local test and update, to prevent preempt running the + * mm on another CPU (XXX: this may be racy vs kthread_use_mm). + */ + local_irq_save(flags); + /* Is that local to this CPU ? */ if (mm_is_thread_local(mm)) update_flags |= HPTE_LOCAL_UPDATE; @@ -1614,6 +1637,8 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea, mm_ctx_user_psize(&mm->context), mm_ctx_user_psize(&mm->context), pte_val(*ptep)); + + local_irq_restore(flags); } /* diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index cd6a742ac6ef..01d70280d287 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -2179,6 +2179,12 @@ static void __perf_event_interrupt(struct pt_regs *regs) perf_read_regs(regs); + /* + * If perf interrupts hit in a local_irq_disable (soft-masked) region, + * we consider them as NMIs. This is required to prevent hash faults on + * user addresses when reading callchains. See the NMI test in + * do_hash_page. 
+ */ nmi = perf_intr_is_nmi(regs); if (nmi) nmi_enter(); diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index f4adb3684f3d..79e9d55bdf1a 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -95,19 +95,40 @@ void __init mem_init(void) #ifdef CONFIG_BLK_DEV_INITRD static void __init setup_initrd(void) { + phys_addr_t start; unsigned long size; - if (initrd_start >= initrd_end) { - pr_info("initrd not found or empty"); + /* Ignore the virtul address computed during device tree parsing */ + initrd_start = initrd_end = 0; + + if (!phys_initrd_size) + return; + /* + * Round the memory region to page boundaries as per free_initrd_mem() + * This allows us to detect whether the pages overlapping the initrd + * are in use, but more importantly, reserves the entire set of pages + * as we don't want these pages allocated for other purposes. + */ + start = round_down(phys_initrd_start, PAGE_SIZE); + size = phys_initrd_size + (phys_initrd_start - start); + size = round_up(size, PAGE_SIZE); + + if (!memblock_is_region_memory(start, size)) { + pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region", + (u64)start, size); goto disable; } - if (__pa_symbol(initrd_end) > PFN_PHYS(max_low_pfn)) { - pr_err("initrd extends beyond end of memory"); + + if (memblock_is_region_reserved(start, size)) { + pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region\n", + (u64)start, size); goto disable; } - size = initrd_end - initrd_start; - memblock_reserve(__pa_symbol(initrd_start), size); + memblock_reserve(start, size); + /* Now convert initrd to virtual addresses */ + initrd_start = (unsigned long)__va(phys_initrd_start); + initrd_end = initrd_start + phys_initrd_size; initrd_below_start_ok = 1; pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n", @@ -126,33 +147,36 @@ void __init setup_bootmem(void) { struct memblock_region *reg; phys_addr_t mem_size = 0; + phys_addr_t total_mem = 0; + phys_addr_t mem_start, end = 0; phys_addr_t vmlinux_end = __pa_symbol(&_end); phys_addr_t vmlinux_start = __pa_symbol(&_start); /* Find the memory region containing the kernel */ for_each_memblock(memory, reg) { - phys_addr_t end = reg->base + reg->size; - - if (reg->base <= vmlinux_start && vmlinux_end <= end) { - mem_size = min(reg->size, (phys_addr_t)-PAGE_OFFSET); - - /* - * Remove memblock from the end of usable area to the - * end of region - */ - if (reg->base + mem_size < end) - memblock_remove(reg->base + mem_size, - end - reg->base - mem_size); - } + end = reg->base + reg->size; + if (!total_mem) + mem_start = reg->base; + if (reg->base <= vmlinux_start && vmlinux_end <= end) + BUG_ON(reg->size == 0); + total_mem = total_mem + reg->size; } - BUG_ON(mem_size == 0); + + /* + * Remove memblock from the end of usable area to the + * end of region + */ + mem_size = min(total_mem, (phys_addr_t)-PAGE_OFFSET); + if (mem_start + mem_size < end) + memblock_remove(mem_start + mem_size, + end - mem_start - mem_size); /* Reserve from the start of the kernel to the end of the kernel */ memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start); - set_max_mapnr(PFN_DOWN(mem_size)); max_pfn = PFN_DOWN(memblock_end_of_DRAM()); max_low_pfn = max_pfn; + set_max_mapnr(max_low_pfn); #ifdef CONFIG_BLK_DEV_INITRD setup_initrd(); diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c index 4a8b61806633..87b4ab3d3c77 100644 --- a/arch/riscv/mm/kasan_init.c +++ b/arch/riscv/mm/kasan_init.c @@ -44,7 +44,7 @@ asmlinkage void __init kasan_early_init(void) (__pa(((uintptr_t) kasan_early_shadow_pmd))), 
__pgprot(_PAGE_TABLE))); - flush_tlb_all(); + local_flush_tlb_all(); } static void __init populate(void *start, void *end) @@ -79,7 +79,7 @@ static void __init populate(void *start, void *end) pfn_pgd(PFN_DOWN(__pa(&pmd[offset])), __pgprot(_PAGE_TABLE))); - flush_tlb_all(); + local_flush_tlb_all(); memset(start, 0, end - start); } diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c index 1e3df52b2b65..37265f551a11 100644 --- a/arch/s390/kernel/perf_cpum_cf_events.c +++ b/arch/s390/kernel/perf_cpum_cf_events.c @@ -292,7 +292,7 @@ CPUMF_EVENT_ATTR(cf_z15, TX_C_TABORT_SPECIAL, 0x00f5); CPUMF_EVENT_ATTR(cf_z15, DFLT_ACCESS, 0x00f7); CPUMF_EVENT_ATTR(cf_z15, DFLT_CYCLES, 0x00fc); CPUMF_EVENT_ATTR(cf_z15, DFLT_CC, 0x00108); -CPUMF_EVENT_ATTR(cf_z15, DFLT_CCERROR, 0x00109); +CPUMF_EVENT_ATTR(cf_z15, DFLT_CCFINISH, 0x00109); CPUMF_EVENT_ATTR(cf_z15, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0); CPUMF_EVENT_ATTR(cf_z15, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1); @@ -629,7 +629,7 @@ static struct attribute *cpumcf_z15_pmu_event_attr[] __initdata = { CPUMF_EVENT_PTR(cf_z15, DFLT_ACCESS), CPUMF_EVENT_PTR(cf_z15, DFLT_CYCLES), CPUMF_EVENT_PTR(cf_z15, DFLT_CC), - CPUMF_EVENT_PTR(cf_z15, DFLT_CCERROR), + CPUMF_EVENT_PTR(cf_z15, DFLT_CCFINISH), CPUMF_EVENT_PTR(cf_z15, MT_DIAG_CYCLES_ONE_THR_ACTIVE), CPUMF_EVENT_PTR(cf_z15, MT_DIAG_CYCLES_TWO_THR_ACTIVE), NULL, diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h index 22d968bfe9bb..d770da3f8b6f 100644 --- a/arch/sh/include/asm/pgalloc.h +++ b/arch/sh/include/asm/pgalloc.h @@ -12,6 +12,7 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd); extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address); extern void pmd_free(struct mm_struct *mm, pmd_t *pmd); +#define __pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, (pmdp)) #endif static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, @@ -33,13 +34,4 @@ do { \ tlb_remove_page((tlb), (pte)); \ } while (0) -#if CONFIG_PGTABLE_LEVELS > 2 -#define __pmd_free_tlb(tlb, pmdp, addr) \ -do { \ - struct page *page = virt_to_page(pmdp); \ - pgtable_pmd_page_dtor(page); \ - tlb_remove_page((tlb), page); \ -} while (0); -#endif - #endif /* __ASM_SH_PGALLOC_H */ diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S index 956a7a03b0c8..9bac5bbb67f3 100644 --- a/arch/sh/kernel/entry-common.S +++ b/arch/sh/kernel/entry-common.S @@ -199,7 +199,7 @@ syscall_trace_entry: mov.l @(OFF_R7,r15), r7 ! arg3 mov.l @(OFF_R3,r15), r3 ! syscall_nr ! - mov.l 2f, r10 ! Number of syscalls + mov.l 6f, r10 ! Number of syscalls cmp/hs r10, r3 bf syscall_call mov #-ENOSYS, r0 @@ -353,7 +353,7 @@ ENTRY(system_call) tst r9, r8 bf syscall_trace_entry ! - mov.l 2f, r8 ! Number of syscalls + mov.l 6f, r8 ! Number of syscalls cmp/hs r8, r3 bt syscall_badsys ! 
@@ -392,7 +392,7 @@ syscall_exit: #if !defined(CONFIG_CPU_SH2) 1: .long TRA #endif -2: .long NR_syscalls +6: .long NR_syscalls 3: .long sys_call_table 7: .long do_syscall_trace_enter 8: .long do_syscall_trace_leave diff --git a/arch/sparc/crypto/sha256_glue.c b/arch/sparc/crypto/sha256_glue.c index 286bc8ecf15b..ca2547df9652 100644 --- a/arch/sparc/crypto/sha256_glue.c +++ b/arch/sparc/crypto/sha256_glue.c @@ -156,7 +156,7 @@ static int sha256_sparc64_import(struct shash_desc *desc, const void *in) return 0; } -static struct shash_alg sha256 = { +static struct shash_alg sha256_alg = { .digestsize = SHA256_DIGEST_SIZE, .init = sha256_sparc64_init, .update = sha256_sparc64_update, @@ -174,7 +174,7 @@ static struct shash_alg sha256 = { } }; -static struct shash_alg sha224 = { +static struct shash_alg sha224_alg = { .digestsize = SHA224_DIGEST_SIZE, .init = sha224_sparc64_init, .update = sha256_sparc64_update, @@ -206,13 +206,13 @@ static bool __init sparc64_has_sha256_opcode(void) static int __init sha256_sparc64_mod_init(void) { if (sparc64_has_sha256_opcode()) { - int ret = crypto_register_shash(&sha224); + int ret = crypto_register_shash(&sha224_alg); if (ret < 0) return ret; - ret = crypto_register_shash(&sha256); + ret = crypto_register_shash(&sha256_alg); if (ret < 0) { - crypto_unregister_shash(&sha224); + crypto_unregister_shash(&sha224_alg); return ret; } @@ -225,8 +225,8 @@ static int __init sha256_sparc64_mod_init(void) static void __exit sha256_sparc64_mod_fini(void) { - crypto_unregister_shash(&sha224); - crypto_unregister_shash(&sha256); + crypto_unregister_shash(&sha224_alg); + crypto_unregister_shash(&sha256_alg); } module_init(sha256_sparc64_mod_init); diff --git a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S index ec437db1fa54..3f0fc7dd87d7 100644 --- a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S +++ b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S @@ -63,7 +63,6 @@ */ #include <linux/linkage.h> -#include <asm/inst.h> #define VMOVDQ vmovdqu @@ -127,10 +126,6 @@ ddq_add_8: /* generate a unique variable for ddq_add_x */ -.macro setddq n - var_ddq_add = ddq_add_\n -.endm - /* generate a unique variable for xmm register */ .macro setxdata n var_xdata = %xmm\n @@ -140,9 +135,7 @@ ddq_add_8: .macro club name, id .altmacro - .if \name == DDQ_DATA - setddq %\id - .elseif \name == XDATA + .if \name == XDATA setxdata %\id .endif .noaltmacro @@ -165,9 +158,8 @@ ddq_add_8: .set i, 1 .rept (by - 1) - club DDQ_DATA, i club XDATA, i - vpaddq var_ddq_add(%rip), xcounter, var_xdata + vpaddq (ddq_add_1 + 16 * (i - 1))(%rip), xcounter, var_xdata vptest ddq_low_msk(%rip), var_xdata jnz 1f vpaddq ddq_high_add_1(%rip), var_xdata, var_xdata @@ -180,8 +172,7 @@ ddq_add_8: vmovdqa 1*16(p_keys), xkeyA vpxor xkey0, xdata0, xdata0 - club DDQ_DATA, by - vpaddq var_ddq_add(%rip), xcounter, xcounter + vpaddq (ddq_add_1 + 16 * (by - 1))(%rip), xcounter, xcounter vptest ddq_low_msk(%rip), xcounter jnz 1f vpaddq ddq_high_add_1(%rip), xcounter, xcounter diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S index 54e7d15dbd0d..1852b19a73a0 100644 --- a/arch/x86/crypto/aesni-intel_asm.S +++ b/arch/x86/crypto/aesni-intel_asm.S @@ -26,7 +26,6 @@ */ #include <linux/linkage.h> -#include <asm/inst.h> #include <asm/frame.h> #include <asm/nospec-branch.h> @@ -201,7 +200,7 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff mov \SUBKEY, %r12 movdqu (%r12), \TMP3 movdqa SHUF_MASK(%rip), \TMP2 - PSHUFB_XMM \TMP2, \TMP3 + pshufb \TMP2, \TMP3 # precompute HashKey<<1 mod 
poly from the HashKey (required for GHASH) @@ -263,10 +262,10 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff movdqu %xmm0, OrigIV(%arg2) # ctx_data.orig_IV = iv movdqa SHUF_MASK(%rip), %xmm2 - PSHUFB_XMM %xmm2, %xmm0 + pshufb %xmm2, %xmm0 movdqu %xmm0, CurCount(%arg2) # ctx_data.current_counter = iv - PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, + PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7 movdqu HashKey(%arg2), %xmm13 CALC_AAD_HASH %xmm13, \AAD, \AADLEN, %xmm0, %xmm1, %xmm2, %xmm3, \ @@ -347,7 +346,7 @@ _zero_cipher_left_\@: paddd ONE(%rip), %xmm0 # INCR CNT to get Yn movdqu %xmm0, CurCount(%arg2) movdqa SHUF_MASK(%rip), %xmm10 - PSHUFB_XMM %xmm10, %xmm0 + pshufb %xmm10, %xmm0 ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # Encrypt(K, Yn) movdqu %xmm0, PBlockEncKey(%arg2) @@ -377,7 +376,7 @@ _large_enough_update_\@: # get the appropriate shuffle mask movdqu (%r12), %xmm2 # shift right 16-r13 bytes - PSHUFB_XMM %xmm2, %xmm1 + pshufb %xmm2, %xmm1 _data_read_\@: lea ALL_F+16(%rip), %r12 @@ -393,12 +392,12 @@ _data_read_\@: .ifc \operation, dec pand %xmm1, %xmm2 movdqa SHUF_MASK(%rip), %xmm10 - PSHUFB_XMM %xmm10 ,%xmm2 + pshufb %xmm10 ,%xmm2 pxor %xmm2, %xmm8 .else movdqa SHUF_MASK(%rip), %xmm10 - PSHUFB_XMM %xmm10,%xmm0 + pshufb %xmm10,%xmm0 pxor %xmm0, %xmm8 .endif @@ -408,17 +407,17 @@ _data_read_\@: # GHASH computation for the last <16 byte block movdqa SHUF_MASK(%rip), %xmm10 # shuffle xmm0 back to output as ciphertext - PSHUFB_XMM %xmm10, %xmm0 + pshufb %xmm10, %xmm0 .endif # Output %r13 bytes - MOVQ_R64_XMM %xmm0, %rax + movq %xmm0, %rax cmp $8, %r13 jle _less_than_8_bytes_left_\@ mov %rax, (%arg3 , %r11, 1) add $8, %r11 psrldq $8, %xmm0 - MOVQ_R64_XMM %xmm0, %rax + movq %xmm0, %rax sub $8, %r13 _less_than_8_bytes_left_\@: mov %al, (%arg3, %r11, 1) @@ -449,7 +448,7 @@ _partial_done\@: movd %r12d, %xmm15 # len(A) in %xmm15 mov InLen(%arg2), %r12 shl $3, %r12 # len(C) in bits (*128) - MOVQ_R64_XMM %r12, %xmm1 + movq %r12, %xmm1 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000 pxor %xmm1, %xmm15 # %xmm15 = len(A)||len(C) @@ -457,7 +456,7 @@ _partial_done\@: GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6 # final GHASH computation movdqa SHUF_MASK(%rip), %xmm10 - PSHUFB_XMM %xmm10, %xmm8 + pshufb %xmm10, %xmm8 movdqu OrigIV(%arg2), %xmm0 # %xmm0 = Y0 ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # E(K, Y0) @@ -470,7 +469,7 @@ _return_T_\@: cmp $8, %r11 jl _T_4_\@ _T_8_\@: - MOVQ_R64_XMM %xmm0, %rax + movq %xmm0, %rax mov %rax, (%r10) add $8, %r10 sub $8, %r11 @@ -518,9 +517,9 @@ _return_T_done_\@: pshufd $78, \HK, \TMP3 pxor \GH, \TMP2 # TMP2 = a1+a0 pxor \HK, \TMP3 # TMP3 = b1+b0 - PCLMULQDQ 0x11, \HK, \TMP1 # TMP1 = a1*b1 - PCLMULQDQ 0x00, \HK, \GH # GH = a0*b0 - PCLMULQDQ 0x00, \TMP3, \TMP2 # TMP2 = (a0+a1)*(b1+b0) + pclmulqdq $0x11, \HK, \TMP1 # TMP1 = a1*b1 + pclmulqdq $0x00, \HK, \GH # GH = a0*b0 + pclmulqdq $0x00, \TMP3, \TMP2 # TMP2 = (a0+a1)*(b1+b0) pxor \GH, \TMP2 pxor \TMP1, \TMP2 # TMP2 = (a0*b0)+(a1*b0) movdqa \TMP2, \TMP3 @@ -570,7 +569,7 @@ _return_T_done_\@: cmp $8, \DLEN jl _read_lt8_\@ mov (\DPTR), %rax - MOVQ_R64_XMM %rax, \XMMDst + movq %rax, \XMMDst sub $8, \DLEN jz _done_read_partial_block_\@ xor %eax, %eax @@ -579,7 +578,7 @@ _read_next_byte_\@: mov 7(\DPTR, \DLEN, 1), %al dec \DLEN jnz _read_next_byte_\@ - MOVQ_R64_XMM %rax, \XMM1 + movq %rax, \XMM1 pslldq $8, \XMM1 por \XMM1, \XMMDst jmp _done_read_partial_block_\@ @@ -590,7 +589,7 @@ _read_next_byte_lt8_\@: mov -1(\DPTR, \DLEN, 1), %al dec \DLEN jnz 
_read_next_byte_lt8_\@ - MOVQ_R64_XMM %rax, \XMMDst + movq %rax, \XMMDst _done_read_partial_block_\@: .endm @@ -608,7 +607,7 @@ _done_read_partial_block_\@: jl _get_AAD_rest\@ _get_AAD_blocks\@: movdqu (%r10), \TMP7 - PSHUFB_XMM %xmm14, \TMP7 # byte-reflect the AAD data + pshufb %xmm14, \TMP7 # byte-reflect the AAD data pxor \TMP7, \TMP6 GHASH_MUL \TMP6, \HASHKEY, \TMP1, \TMP2, \TMP3, \TMP4, \TMP5 add $16, %r10 @@ -624,7 +623,7 @@ _get_AAD_rest\@: je _get_AAD_done\@ READ_PARTIAL_BLOCK %r10, %r11, \TMP1, \TMP7 - PSHUFB_XMM %xmm14, \TMP7 # byte-reflect the AAD data + pshufb %xmm14, \TMP7 # byte-reflect the AAD data pxor \TMP6, \TMP7 GHASH_MUL \TMP7, \HASHKEY, \TMP1, \TMP2, \TMP3, \TMP4, \TMP5 movdqu \TMP7, \TMP6 @@ -667,7 +666,7 @@ _data_read_\@: # Finished reading in data # r16-r13 is the number of bytes in plaintext mod 16) add %r13, %r12 movdqu (%r12), %xmm2 # get the appropriate shuffle mask - PSHUFB_XMM %xmm2, %xmm9 # shift right r13 bytes + pshufb %xmm2, %xmm9 # shift right r13 bytes .ifc \operation, dec movdqa %xmm1, %xmm3 @@ -689,8 +688,8 @@ _no_extra_mask_1_\@: pand %xmm1, %xmm3 movdqa SHUF_MASK(%rip), %xmm10 - PSHUFB_XMM %xmm10, %xmm3 - PSHUFB_XMM %xmm2, %xmm3 + pshufb %xmm10, %xmm3 + pshufb %xmm2, %xmm3 pxor %xmm3, \AAD_HASH cmp $0, %r10 @@ -724,8 +723,8 @@ _no_extra_mask_2_\@: pand %xmm1, %xmm9 movdqa SHUF_MASK(%rip), %xmm1 - PSHUFB_XMM %xmm1, %xmm9 - PSHUFB_XMM %xmm2, %xmm9 + pshufb %xmm1, %xmm9 + pshufb %xmm2, %xmm9 pxor %xmm9, \AAD_HASH cmp $0, %r10 @@ -744,8 +743,8 @@ _encode_done_\@: movdqa SHUF_MASK(%rip), %xmm10 # shuffle xmm9 back to output as ciphertext - PSHUFB_XMM %xmm10, %xmm9 - PSHUFB_XMM %xmm2, %xmm9 + pshufb %xmm10, %xmm9 + pshufb %xmm2, %xmm9 .endif # output encrypted Bytes cmp $0, %r10 @@ -759,14 +758,14 @@ _partial_fill_\@: mov \PLAIN_CYPH_LEN, %r13 _count_set_\@: movdqa %xmm9, %xmm0 - MOVQ_R64_XMM %xmm0, %rax + movq %xmm0, %rax cmp $8, %r13 jle _less_than_8_bytes_left_\@ mov %rax, (\CYPH_PLAIN_OUT, \DATA_OFFSET, 1) add $8, \DATA_OFFSET psrldq $8, %xmm0 - MOVQ_R64_XMM %xmm0, %rax + movq %xmm0, %rax sub $8, %r13 _less_than_8_bytes_left_\@: movb %al, (\CYPH_PLAIN_OUT, \DATA_OFFSET, 1) @@ -810,7 +809,7 @@ _partial_block_done_\@: .else MOVADQ \XMM0, %xmm\index .endif - PSHUFB_XMM %xmm14, %xmm\index # perform a 16 byte swap + pshufb %xmm14, %xmm\index # perform a 16 byte swap pxor \TMP2, %xmm\index .endr lea 0x10(%arg1),%r10 @@ -821,7 +820,7 @@ _partial_block_done_\@: aes_loop_initial_\@: MOVADQ (%r10),\TMP1 .irpc index, \i_seq - AESENC \TMP1, %xmm\index + aesenc \TMP1, %xmm\index .endr add $16,%r10 sub $1,%eax @@ -829,7 +828,7 @@ aes_loop_initial_\@: MOVADQ (%r10), \TMP1 .irpc index, \i_seq - AESENCLAST \TMP1, %xmm\index # Last Round + aesenclast \TMP1, %xmm\index # Last Round .endr .irpc index, \i_seq movdqu (%arg4 , %r11, 1), \TMP1 @@ -841,7 +840,7 @@ aes_loop_initial_\@: .ifc \operation, dec movdqa \TMP1, %xmm\index .endif - PSHUFB_XMM %xmm14, %xmm\index + pshufb %xmm14, %xmm\index # prepare plaintext/ciphertext for GHASH computation .endr @@ -876,19 +875,19 @@ aes_loop_initial_\@: MOVADQ ONE(%RIP),\TMP1 paddd \TMP1, \XMM0 # INCR Y0 MOVADQ \XMM0, \XMM1 - PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap + pshufb %xmm14, \XMM1 # perform a 16 byte swap paddd \TMP1, \XMM0 # INCR Y0 MOVADQ \XMM0, \XMM2 - PSHUFB_XMM %xmm14, \XMM2 # perform a 16 byte swap + pshufb %xmm14, \XMM2 # perform a 16 byte swap paddd \TMP1, \XMM0 # INCR Y0 MOVADQ \XMM0, \XMM3 - PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap + pshufb %xmm14, \XMM3 # perform a 16 byte swap paddd \TMP1, \XMM0 # 
INCR Y0 MOVADQ \XMM0, \XMM4 - PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap + pshufb %xmm14, \XMM4 # perform a 16 byte swap MOVADQ 0(%arg1),\TMP1 pxor \TMP1, \XMM1 @@ -897,17 +896,17 @@ aes_loop_initial_\@: pxor \TMP1, \XMM4 .irpc index, 1234 # do 4 rounds movaps 0x10*\index(%arg1), \TMP1 - AESENC \TMP1, \XMM1 - AESENC \TMP1, \XMM2 - AESENC \TMP1, \XMM3 - AESENC \TMP1, \XMM4 + aesenc \TMP1, \XMM1 + aesenc \TMP1, \XMM2 + aesenc \TMP1, \XMM3 + aesenc \TMP1, \XMM4 .endr .irpc index, 56789 # do next 5 rounds movaps 0x10*\index(%arg1), \TMP1 - AESENC \TMP1, \XMM1 - AESENC \TMP1, \XMM2 - AESENC \TMP1, \XMM3 - AESENC \TMP1, \XMM4 + aesenc \TMP1, \XMM1 + aesenc \TMP1, \XMM2 + aesenc \TMP1, \XMM3 + aesenc \TMP1, \XMM4 .endr lea 0xa0(%arg1),%r10 mov keysize,%eax @@ -918,7 +917,7 @@ aes_loop_initial_\@: aes_loop_pre_\@: MOVADQ (%r10),\TMP2 .irpc index, 1234 - AESENC \TMP2, %xmm\index + aesenc \TMP2, %xmm\index .endr add $16,%r10 sub $1,%eax @@ -926,10 +925,10 @@ aes_loop_pre_\@: aes_loop_pre_done\@: MOVADQ (%r10), \TMP2 - AESENCLAST \TMP2, \XMM1 - AESENCLAST \TMP2, \XMM2 - AESENCLAST \TMP2, \XMM3 - AESENCLAST \TMP2, \XMM4 + aesenclast \TMP2, \XMM1 + aesenclast \TMP2, \XMM2 + aesenclast \TMP2, \XMM3 + aesenclast \TMP2, \XMM4 movdqu 16*0(%arg4 , %r11 , 1), \TMP1 pxor \TMP1, \XMM1 .ifc \operation, dec @@ -961,12 +960,12 @@ aes_loop_pre_done\@: .endif add $64, %r11 - PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap + pshufb %xmm14, \XMM1 # perform a 16 byte swap pxor \XMMDst, \XMM1 # combine GHASHed value with the corresponding ciphertext - PSHUFB_XMM %xmm14, \XMM2 # perform a 16 byte swap - PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap - PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap + pshufb %xmm14, \XMM2 # perform a 16 byte swap + pshufb %xmm14, \XMM3 # perform a 16 byte swap + pshufb %xmm14, \XMM4 # perform a 16 byte swap _initial_blocks_done\@: @@ -978,7 +977,7 @@ _initial_blocks_done\@: * arg1, %arg3, %arg4 are used as pointers only, not modified * %r11 is the data offset value */ -.macro GHASH_4_ENCRYPT_4_PARALLEL_ENC TMP1 TMP2 TMP3 TMP4 TMP5 \ +.macro GHASH_4_ENCRYPT_4_PARALLEL_enc TMP1 TMP2 TMP3 TMP4 TMP5 \ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation movdqa \XMM1, \XMM5 @@ -994,7 +993,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation pxor \XMM5, \TMP6 paddd ONE(%rip), \XMM0 # INCR CNT movdqu HashKey_4(%arg2), \TMP5 - PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1 + pclmulqdq $0x11, \TMP5, \TMP4 # TMP4 = a1*b1 movdqa \XMM0, \XMM1 paddd ONE(%rip), \XMM0 # INCR CNT movdqa \XMM0, \XMM2 @@ -1002,51 +1001,51 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation movdqa \XMM0, \XMM3 paddd ONE(%rip), \XMM0 # INCR CNT movdqa \XMM0, \XMM4 - PSHUFB_XMM %xmm15, \XMM1 # perform a 16 byte swap - PCLMULQDQ 0x00, \TMP5, \XMM5 # XMM5 = a0*b0 - PSHUFB_XMM %xmm15, \XMM2 # perform a 16 byte swap - PSHUFB_XMM %xmm15, \XMM3 # perform a 16 byte swap - PSHUFB_XMM %xmm15, \XMM4 # perform a 16 byte swap + pshufb %xmm15, \XMM1 # perform a 16 byte swap + pclmulqdq $0x00, \TMP5, \XMM5 # XMM5 = a0*b0 + pshufb %xmm15, \XMM2 # perform a 16 byte swap + pshufb %xmm15, \XMM3 # perform a 16 byte swap + pshufb %xmm15, \XMM4 # perform a 16 byte swap pxor (%arg1), \XMM1 pxor (%arg1), \XMM2 pxor (%arg1), \XMM3 pxor (%arg1), \XMM4 movdqu HashKey_4_k(%arg2), \TMP5 - PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0) + pclmulqdq $0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0) movaps 0x10(%arg1), \TMP1 - AESENC \TMP1, \XMM1 # Round 1 - AESENC \TMP1, \XMM2 - AESENC \TMP1, \XMM3 - AESENC 
\TMP1, \XMM4 + aesenc \TMP1, \XMM1 # Round 1 + aesenc \TMP1, \XMM2 + aesenc \TMP1, \XMM3 + aesenc \TMP1, \XMM4 movaps 0x20(%arg1), \TMP1 - AESENC \TMP1, \XMM1 # Round 2 - AESENC \TMP1, \XMM2 - AESENC \TMP1, \XMM3 - AESENC \TMP1, \XMM4 + aesenc \TMP1, \XMM1 # Round 2 + aesenc \TMP1, \XMM2 + aesenc \TMP1, \XMM3 + aesenc \TMP1, \XMM4 movdqa \XMM6, \TMP1 pshufd $78, \XMM6, \TMP2 pxor \XMM6, \TMP2 movdqu HashKey_3(%arg2), \TMP5 - PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1 + pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1 * b1 movaps 0x30(%arg1), \TMP3 - AESENC \TMP3, \XMM1 # Round 3 - AESENC \TMP3, \XMM2 - AESENC \TMP3, \XMM3 - AESENC \TMP3, \XMM4 - PCLMULQDQ 0x00, \TMP5, \XMM6 # XMM6 = a0*b0 + aesenc \TMP3, \XMM1 # Round 3 + aesenc \TMP3, \XMM2 + aesenc \TMP3, \XMM3 + aesenc \TMP3, \XMM4 + pclmulqdq $0x00, \TMP5, \XMM6 # XMM6 = a0*b0 movaps 0x40(%arg1), \TMP3 - AESENC \TMP3, \XMM1 # Round 4 - AESENC \TMP3, \XMM2 - AESENC \TMP3, \XMM3 - AESENC \TMP3, \XMM4 + aesenc \TMP3, \XMM1 # Round 4 + aesenc \TMP3, \XMM2 + aesenc \TMP3, \XMM3 + aesenc \TMP3, \XMM4 movdqu HashKey_3_k(%arg2), \TMP5 - PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) + pclmulqdq $0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) movaps 0x50(%arg1), \TMP3 - AESENC \TMP3, \XMM1 # Round 5 - AESENC \TMP3, \XMM2 - AESENC \TMP3, \XMM3 - AESENC \TMP3, \XMM4 + aesenc \TMP3, \XMM1 # Round 5 + aesenc \TMP3, \XMM2 + aesenc \TMP3, \XMM3 + aesenc \TMP3, \XMM4 pxor \TMP1, \TMP4 # accumulate the results in TMP4:XMM5, TMP6 holds the middle part pxor \XMM6, \XMM5 @@ -1058,25 +1057,25 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation # Multiply TMP5 * HashKey using karatsuba - PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 + pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1*b1 movaps 0x60(%arg1), \TMP3 - AESENC \TMP3, \XMM1 # Round 6 - AESENC \TMP3, \XMM2 - AESENC \TMP3, \XMM3 - AESENC \TMP3, \XMM4 - PCLMULQDQ 0x00, \TMP5, \XMM7 # XMM7 = a0*b0 + aesenc \TMP3, \XMM1 # Round 6 + aesenc \TMP3, \XMM2 + aesenc \TMP3, \XMM3 + aesenc \TMP3, \XMM4 + pclmulqdq $0x00, \TMP5, \XMM7 # XMM7 = a0*b0 movaps 0x70(%arg1), \TMP3 - AESENC \TMP3, \XMM1 # Round 7 - AESENC \TMP3, \XMM2 - AESENC \TMP3, \XMM3 - AESENC \TMP3, \XMM4 + aesenc \TMP3, \XMM1 # Round 7 + aesenc \TMP3, \XMM2 + aesenc \TMP3, \XMM3 + aesenc \TMP3, \XMM4 movdqu HashKey_2_k(%arg2), \TMP5 - PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) + pclmulqdq $0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) movaps 0x80(%arg1), \TMP3 - AESENC \TMP3, \XMM1 # Round 8 - AESENC \TMP3, \XMM2 - AESENC \TMP3, \XMM3 - AESENC \TMP3, \XMM4 + aesenc \TMP3, \XMM1 # Round 8 + aesenc \TMP3, \XMM2 + aesenc \TMP3, \XMM3 + aesenc \TMP3, \XMM4 pxor \TMP1, \TMP4 # accumulate the results in TMP4:XMM5, TMP6 holds the middle part pxor \XMM7, \XMM5 @@ -1089,13 +1088,13 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation pshufd $78, \XMM8, \TMP2 pxor \XMM8, \TMP2 movdqu HashKey(%arg2), \TMP5 - PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 + pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1*b1 movaps 0x90(%arg1), \TMP3 - AESENC \TMP3, \XMM1 # Round 9 - AESENC \TMP3, \XMM2 - AESENC \TMP3, \XMM3 - AESENC \TMP3, \XMM4 - PCLMULQDQ 0x00, \TMP5, \XMM8 # XMM8 = a0*b0 + aesenc \TMP3, \XMM1 # Round 9 + aesenc \TMP3, \XMM2 + aesenc \TMP3, \XMM3 + aesenc \TMP3, \XMM4 + pclmulqdq $0x00, \TMP5, \XMM8 # XMM8 = a0*b0 lea 0xa0(%arg1),%r10 mov keysize,%eax shr $2,%eax # 128->4, 192->6, 256->8 @@ -1105,7 +1104,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation aes_loop_par_enc\@: MOVADQ (%r10),\TMP3 .irpc index, 1234 - AESENC \TMP3, 
%xmm\index + aesenc \TMP3, %xmm\index .endr add $16,%r10 sub $1,%eax @@ -1113,12 +1112,12 @@ aes_loop_par_enc\@: aes_loop_par_enc_done\@: MOVADQ (%r10), \TMP3 - AESENCLAST \TMP3, \XMM1 # Round 10 - AESENCLAST \TMP3, \XMM2 - AESENCLAST \TMP3, \XMM3 - AESENCLAST \TMP3, \XMM4 + aesenclast \TMP3, \XMM1 # Round 10 + aesenclast \TMP3, \XMM2 + aesenclast \TMP3, \XMM3 + aesenclast \TMP3, \XMM4 movdqu HashKey_k(%arg2), \TMP5 - PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) + pclmulqdq $0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) movdqu (%arg4,%r11,1), \TMP3 pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK movdqu 16(%arg4,%r11,1), \TMP3 @@ -1131,10 +1130,10 @@ aes_loop_par_enc_done\@: movdqu \XMM2, 16(%arg3,%r11,1) # Write to the ciphertext buffer movdqu \XMM3, 32(%arg3,%r11,1) # Write to the ciphertext buffer movdqu \XMM4, 48(%arg3,%r11,1) # Write to the ciphertext buffer - PSHUFB_XMM %xmm15, \XMM1 # perform a 16 byte swap - PSHUFB_XMM %xmm15, \XMM2 # perform a 16 byte swap - PSHUFB_XMM %xmm15, \XMM3 # perform a 16 byte swap - PSHUFB_XMM %xmm15, \XMM4 # perform a 16 byte swap + pshufb %xmm15, \XMM1 # perform a 16 byte swap + pshufb %xmm15, \XMM2 # perform a 16 byte swap + pshufb %xmm15, \XMM3 # perform a 16 byte swap + pshufb %xmm15, \XMM4 # perform a 16 byte swap pxor \TMP4, \TMP1 pxor \XMM8, \XMM5 @@ -1186,7 +1185,7 @@ aes_loop_par_enc_done\@: * arg1, %arg3, %arg4 are used as pointers only, not modified * %r11 is the data offset value */ -.macro GHASH_4_ENCRYPT_4_PARALLEL_DEC TMP1 TMP2 TMP3 TMP4 TMP5 \ +.macro GHASH_4_ENCRYPT_4_PARALLEL_dec TMP1 TMP2 TMP3 TMP4 TMP5 \ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation movdqa \XMM1, \XMM5 @@ -1202,7 +1201,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation pxor \XMM5, \TMP6 paddd ONE(%rip), \XMM0 # INCR CNT movdqu HashKey_4(%arg2), \TMP5 - PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1 + pclmulqdq $0x11, \TMP5, \TMP4 # TMP4 = a1*b1 movdqa \XMM0, \XMM1 paddd ONE(%rip), \XMM0 # INCR CNT movdqa \XMM0, \XMM2 @@ -1210,51 +1209,51 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation movdqa \XMM0, \XMM3 paddd ONE(%rip), \XMM0 # INCR CNT movdqa \XMM0, \XMM4 - PSHUFB_XMM %xmm15, \XMM1 # perform a 16 byte swap - PCLMULQDQ 0x00, \TMP5, \XMM5 # XMM5 = a0*b0 - PSHUFB_XMM %xmm15, \XMM2 # perform a 16 byte swap - PSHUFB_XMM %xmm15, \XMM3 # perform a 16 byte swap - PSHUFB_XMM %xmm15, \XMM4 # perform a 16 byte swap + pshufb %xmm15, \XMM1 # perform a 16 byte swap + pclmulqdq $0x00, \TMP5, \XMM5 # XMM5 = a0*b0 + pshufb %xmm15, \XMM2 # perform a 16 byte swap + pshufb %xmm15, \XMM3 # perform a 16 byte swap + pshufb %xmm15, \XMM4 # perform a 16 byte swap pxor (%arg1), \XMM1 pxor (%arg1), \XMM2 pxor (%arg1), \XMM3 pxor (%arg1), \XMM4 movdqu HashKey_4_k(%arg2), \TMP5 - PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0) + pclmulqdq $0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0) movaps 0x10(%arg1), \TMP1 - AESENC \TMP1, \XMM1 # Round 1 - AESENC \TMP1, \XMM2 - AESENC \TMP1, \XMM3 - AESENC \TMP1, \XMM4 + aesenc \TMP1, \XMM1 # Round 1 + aesenc \TMP1, \XMM2 + aesenc \TMP1, \XMM3 + aesenc \TMP1, \XMM4 movaps 0x20(%arg1), \TMP1 - AESENC \TMP1, \XMM1 # Round 2 - AESENC \TMP1, \XMM2 - AESENC \TMP1, \XMM3 - AESENC \TMP1, \XMM4 + aesenc \TMP1, \XMM1 # Round 2 + aesenc \TMP1, \XMM2 + aesenc \TMP1, \XMM3 + aesenc \TMP1, \XMM4 movdqa \XMM6, \TMP1 pshufd $78, \XMM6, \TMP2 pxor \XMM6, \TMP2 movdqu HashKey_3(%arg2), \TMP5 - PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1 + pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1 * b1 movaps 0x30(%arg1), \TMP3 - AESENC 
\TMP3, \XMM1 # Round 3 - AESENC \TMP3, \XMM2 - AESENC \TMP3, \XMM3 - AESENC \TMP3, \XMM4 - PCLMULQDQ 0x00, \TMP5, \XMM6 # XMM6 = a0*b0 + aesenc \TMP3, \XMM1 # Round 3 + aesenc \TMP3, \XMM2 + aesenc \TMP3, \XMM3 + aesenc \TMP3, \XMM4 + pclmulqdq $0x00, \TMP5, \XMM6 # XMM6 = a0*b0 movaps 0x40(%arg1), \TMP3 - AESENC \TMP3, \XMM1 # Round 4 - AESENC \TMP3, \XMM2 - AESENC \TMP3, \XMM3 - AESENC \TMP3, \XMM4 + aesenc \TMP3, \XMM1 # Round 4 + aesenc \TMP3, \XMM2 + aesenc \TMP3, \XMM3 + aesenc \TMP3, \XMM4 movdqu HashKey_3_k(%arg2), \TMP5 - PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) + pclmulqdq $0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) movaps 0x50(%arg1), \TMP3 - AESENC \TMP3, \XMM1 # Round 5 - AESENC \TMP3, \XMM2 - AESENC \TMP3, \XMM3 - AESENC \TMP3, \XMM4 + aesenc \TMP3, \XMM1 # Round 5 + aesenc \TMP3, \XMM2 + aesenc \TMP3, \XMM3 + aesenc \TMP3, \XMM4 pxor \TMP1, \TMP4 # accumulate the results in TMP4:XMM5, TMP6 holds the middle part pxor \XMM6, \XMM5 @@ -1266,25 +1265,25 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation # Multiply TMP5 * HashKey using karatsuba - PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 + pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1*b1 movaps 0x60(%arg1), \TMP3 - AESENC \TMP3, \XMM1 # Round 6 - AESENC \TMP3, \XMM2 - AESENC \TMP3, \XMM3 - AESENC \TMP3, \XMM4 - PCLMULQDQ 0x00, \TMP5, \XMM7 # XMM7 = a0*b0 + aesenc \TMP3, \XMM1 # Round 6 + aesenc \TMP3, \XMM2 + aesenc \TMP3, \XMM3 + aesenc \TMP3, \XMM4 + pclmulqdq $0x00, \TMP5, \XMM7 # XMM7 = a0*b0 movaps 0x70(%arg1), \TMP3 - AESENC \TMP3, \XMM1 # Round 7 - AESENC \TMP3, \XMM2 - AESENC \TMP3, \XMM3 - AESENC \TMP3, \XMM4 + aesenc \TMP3, \XMM1 # Round 7 + aesenc \TMP3, \XMM2 + aesenc \TMP3, \XMM3 + aesenc \TMP3, \XMM4 movdqu HashKey_2_k(%arg2), \TMP5 - PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) + pclmulqdq $0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) movaps 0x80(%arg1), \TMP3 - AESENC \TMP3, \XMM1 # Round 8 - AESENC \TMP3, \XMM2 - AESENC \TMP3, \XMM3 - AESENC \TMP3, \XMM4 + aesenc \TMP3, \XMM1 # Round 8 + aesenc \TMP3, \XMM2 + aesenc \TMP3, \XMM3 + aesenc \TMP3, \XMM4 pxor \TMP1, \TMP4 # accumulate the results in TMP4:XMM5, TMP6 holds the middle part pxor \XMM7, \XMM5 @@ -1297,13 +1296,13 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation pshufd $78, \XMM8, \TMP2 pxor \XMM8, \TMP2 movdqu HashKey(%arg2), \TMP5 - PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 + pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1*b1 movaps 0x90(%arg1), \TMP3 - AESENC \TMP3, \XMM1 # Round 9 - AESENC \TMP3, \XMM2 - AESENC \TMP3, \XMM3 - AESENC \TMP3, \XMM4 - PCLMULQDQ 0x00, \TMP5, \XMM8 # XMM8 = a0*b0 + aesenc \TMP3, \XMM1 # Round 9 + aesenc \TMP3, \XMM2 + aesenc \TMP3, \XMM3 + aesenc \TMP3, \XMM4 + pclmulqdq $0x00, \TMP5, \XMM8 # XMM8 = a0*b0 lea 0xa0(%arg1),%r10 mov keysize,%eax shr $2,%eax # 128->4, 192->6, 256->8 @@ -1313,7 +1312,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation aes_loop_par_dec\@: MOVADQ (%r10),\TMP3 .irpc index, 1234 - AESENC \TMP3, %xmm\index + aesenc \TMP3, %xmm\index .endr add $16,%r10 sub $1,%eax @@ -1321,12 +1320,12 @@ aes_loop_par_dec\@: aes_loop_par_dec_done\@: MOVADQ (%r10), \TMP3 - AESENCLAST \TMP3, \XMM1 # last round - AESENCLAST \TMP3, \XMM2 - AESENCLAST \TMP3, \XMM3 - AESENCLAST \TMP3, \XMM4 + aesenclast \TMP3, \XMM1 # last round + aesenclast \TMP3, \XMM2 + aesenclast \TMP3, \XMM3 + aesenclast \TMP3, \XMM4 movdqu HashKey_k(%arg2), \TMP5 - PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) + pclmulqdq $0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) movdqu (%arg4,%r11,1), 
\TMP3 pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK movdqu \XMM1, (%arg3,%r11,1) # Write to plaintext buffer @@ -1343,10 +1342,10 @@ aes_loop_par_dec_done\@: pxor \TMP3, \XMM4 # Ciphertext/Plaintext XOR EK movdqu \XMM4, 48(%arg3,%r11,1) # Write to plaintext buffer movdqa \TMP3, \XMM4 - PSHUFB_XMM %xmm15, \XMM1 # perform a 16 byte swap - PSHUFB_XMM %xmm15, \XMM2 # perform a 16 byte swap - PSHUFB_XMM %xmm15, \XMM3 # perform a 16 byte swap - PSHUFB_XMM %xmm15, \XMM4 # perform a 16 byte swap + pshufb %xmm15, \XMM1 # perform a 16 byte swap + pshufb %xmm15, \XMM2 # perform a 16 byte swap + pshufb %xmm15, \XMM3 # perform a 16 byte swap + pshufb %xmm15, \XMM4 # perform a 16 byte swap pxor \TMP4, \TMP1 pxor \XMM8, \XMM5 @@ -1402,10 +1401,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst pshufd $78, \XMM1, \TMP2 pxor \XMM1, \TMP2 movdqu HashKey_4(%arg2), \TMP5 - PCLMULQDQ 0x11, \TMP5, \TMP6 # TMP6 = a1*b1 - PCLMULQDQ 0x00, \TMP5, \XMM1 # XMM1 = a0*b0 + pclmulqdq $0x11, \TMP5, \TMP6 # TMP6 = a1*b1 + pclmulqdq $0x00, \TMP5, \XMM1 # XMM1 = a0*b0 movdqu HashKey_4_k(%arg2), \TMP4 - PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) + pclmulqdq $0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) movdqa \XMM1, \XMMDst movdqa \TMP2, \XMM1 # result in TMP6, XMMDst, XMM1 @@ -1415,10 +1414,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst pshufd $78, \XMM2, \TMP2 pxor \XMM2, \TMP2 movdqu HashKey_3(%arg2), \TMP5 - PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 - PCLMULQDQ 0x00, \TMP5, \XMM2 # XMM2 = a0*b0 + pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1*b1 + pclmulqdq $0x00, \TMP5, \XMM2 # XMM2 = a0*b0 movdqu HashKey_3_k(%arg2), \TMP4 - PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) + pclmulqdq $0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) pxor \TMP1, \TMP6 pxor \XMM2, \XMMDst pxor \TMP2, \XMM1 @@ -1430,10 +1429,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst pshufd $78, \XMM3, \TMP2 pxor \XMM3, \TMP2 movdqu HashKey_2(%arg2), \TMP5 - PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 - PCLMULQDQ 0x00, \TMP5, \XMM3 # XMM3 = a0*b0 + pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1*b1 + pclmulqdq $0x00, \TMP5, \XMM3 # XMM3 = a0*b0 movdqu HashKey_2_k(%arg2), \TMP4 - PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) + pclmulqdq $0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) pxor \TMP1, \TMP6 pxor \XMM3, \XMMDst pxor \TMP2, \XMM1 # results accumulated in TMP6, XMMDst, XMM1 @@ -1443,10 +1442,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst pshufd $78, \XMM4, \TMP2 pxor \XMM4, \TMP2 movdqu HashKey(%arg2), \TMP5 - PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 - PCLMULQDQ 0x00, \TMP5, \XMM4 # XMM4 = a0*b0 + pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1*b1 + pclmulqdq $0x00, \TMP5, \XMM4 # XMM4 = a0*b0 movdqu HashKey_k(%arg2), \TMP4 - PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) + pclmulqdq $0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) pxor \TMP1, \TMP6 pxor \XMM4, \XMMDst pxor \XMM1, \TMP2 @@ -1504,13 +1503,13 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst _esb_loop_\@: MOVADQ (%r10),\TMP1 - AESENC \TMP1,\XMM0 + aesenc \TMP1,\XMM0 add $16,%r10 sub $1,%eax jnz _esb_loop_\@ MOVADQ (%r10),\TMP1 - AESENCLAST \TMP1,\XMM0 + aesenclast \TMP1,\XMM0 .endm /***************************************************************************** * void aesni_gcm_dec(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary. 
@@ -1849,72 +1848,72 @@ SYM_FUNC_START(aesni_set_key) movups 0x10(UKEYP), %xmm2 # other user key movaps %xmm2, (TKEYP) add $0x10, TKEYP - AESKEYGENASSIST 0x1 %xmm2 %xmm1 # round 1 + aeskeygenassist $0x1, %xmm2, %xmm1 # round 1 call _key_expansion_256a - AESKEYGENASSIST 0x1 %xmm0 %xmm1 + aeskeygenassist $0x1, %xmm0, %xmm1 call _key_expansion_256b - AESKEYGENASSIST 0x2 %xmm2 %xmm1 # round 2 + aeskeygenassist $0x2, %xmm2, %xmm1 # round 2 call _key_expansion_256a - AESKEYGENASSIST 0x2 %xmm0 %xmm1 + aeskeygenassist $0x2, %xmm0, %xmm1 call _key_expansion_256b - AESKEYGENASSIST 0x4 %xmm2 %xmm1 # round 3 + aeskeygenassist $0x4, %xmm2, %xmm1 # round 3 call _key_expansion_256a - AESKEYGENASSIST 0x4 %xmm0 %xmm1 + aeskeygenassist $0x4, %xmm0, %xmm1 call _key_expansion_256b - AESKEYGENASSIST 0x8 %xmm2 %xmm1 # round 4 + aeskeygenassist $0x8, %xmm2, %xmm1 # round 4 call _key_expansion_256a - AESKEYGENASSIST 0x8 %xmm0 %xmm1 + aeskeygenassist $0x8, %xmm0, %xmm1 call _key_expansion_256b - AESKEYGENASSIST 0x10 %xmm2 %xmm1 # round 5 + aeskeygenassist $0x10, %xmm2, %xmm1 # round 5 call _key_expansion_256a - AESKEYGENASSIST 0x10 %xmm0 %xmm1 + aeskeygenassist $0x10, %xmm0, %xmm1 call _key_expansion_256b - AESKEYGENASSIST 0x20 %xmm2 %xmm1 # round 6 + aeskeygenassist $0x20, %xmm2, %xmm1 # round 6 call _key_expansion_256a - AESKEYGENASSIST 0x20 %xmm0 %xmm1 + aeskeygenassist $0x20, %xmm0, %xmm1 call _key_expansion_256b - AESKEYGENASSIST 0x40 %xmm2 %xmm1 # round 7 + aeskeygenassist $0x40, %xmm2, %xmm1 # round 7 call _key_expansion_256a jmp .Ldec_key .Lenc_key192: movq 0x10(UKEYP), %xmm2 # other user key - AESKEYGENASSIST 0x1 %xmm2 %xmm1 # round 1 + aeskeygenassist $0x1, %xmm2, %xmm1 # round 1 call _key_expansion_192a - AESKEYGENASSIST 0x2 %xmm2 %xmm1 # round 2 + aeskeygenassist $0x2, %xmm2, %xmm1 # round 2 call _key_expansion_192b - AESKEYGENASSIST 0x4 %xmm2 %xmm1 # round 3 + aeskeygenassist $0x4, %xmm2, %xmm1 # round 3 call _key_expansion_192a - AESKEYGENASSIST 0x8 %xmm2 %xmm1 # round 4 + aeskeygenassist $0x8, %xmm2, %xmm1 # round 4 call _key_expansion_192b - AESKEYGENASSIST 0x10 %xmm2 %xmm1 # round 5 + aeskeygenassist $0x10, %xmm2, %xmm1 # round 5 call _key_expansion_192a - AESKEYGENASSIST 0x20 %xmm2 %xmm1 # round 6 + aeskeygenassist $0x20, %xmm2, %xmm1 # round 6 call _key_expansion_192b - AESKEYGENASSIST 0x40 %xmm2 %xmm1 # round 7 + aeskeygenassist $0x40, %xmm2, %xmm1 # round 7 call _key_expansion_192a - AESKEYGENASSIST 0x80 %xmm2 %xmm1 # round 8 + aeskeygenassist $0x80, %xmm2, %xmm1 # round 8 call _key_expansion_192b jmp .Ldec_key .Lenc_key128: - AESKEYGENASSIST 0x1 %xmm0 %xmm1 # round 1 + aeskeygenassist $0x1, %xmm0, %xmm1 # round 1 call _key_expansion_128 - AESKEYGENASSIST 0x2 %xmm0 %xmm1 # round 2 + aeskeygenassist $0x2, %xmm0, %xmm1 # round 2 call _key_expansion_128 - AESKEYGENASSIST 0x4 %xmm0 %xmm1 # round 3 + aeskeygenassist $0x4, %xmm0, %xmm1 # round 3 call _key_expansion_128 - AESKEYGENASSIST 0x8 %xmm0 %xmm1 # round 4 + aeskeygenassist $0x8, %xmm0, %xmm1 # round 4 call _key_expansion_128 - AESKEYGENASSIST 0x10 %xmm0 %xmm1 # round 5 + aeskeygenassist $0x10, %xmm0, %xmm1 # round 5 call _key_expansion_128 - AESKEYGENASSIST 0x20 %xmm0 %xmm1 # round 6 + aeskeygenassist $0x20, %xmm0, %xmm1 # round 6 call _key_expansion_128 - AESKEYGENASSIST 0x40 %xmm0 %xmm1 # round 7 + aeskeygenassist $0x40, %xmm0, %xmm1 # round 7 call _key_expansion_128 - AESKEYGENASSIST 0x80 %xmm0 %xmm1 # round 8 + aeskeygenassist $0x80, %xmm0, %xmm1 # round 8 call _key_expansion_128 - AESKEYGENASSIST 0x1b %xmm0 %xmm1 # round 9 + 
aeskeygenassist $0x1b, %xmm0, %xmm1 # round 9 call _key_expansion_128 - AESKEYGENASSIST 0x36 %xmm0 %xmm1 # round 10 + aeskeygenassist $0x36, %xmm0, %xmm1 # round 10 call _key_expansion_128 .Ldec_key: sub $0x10, TKEYP @@ -1927,7 +1926,7 @@ SYM_FUNC_START(aesni_set_key) .align 4 .Ldec_key_loop: movaps (KEYP), %xmm0 - AESIMC %xmm0 %xmm1 + aesimc %xmm0, %xmm1 movaps %xmm1, (UKEYP) add $0x10, KEYP sub $0x10, UKEYP @@ -1988,37 +1987,37 @@ SYM_FUNC_START_LOCAL(_aesni_enc1) je .Lenc192 add $0x20, TKEYP movaps -0x60(TKEYP), KEY - AESENC KEY STATE + aesenc KEY, STATE movaps -0x50(TKEYP), KEY - AESENC KEY STATE + aesenc KEY, STATE .align 4 .Lenc192: movaps -0x40(TKEYP), KEY - AESENC KEY STATE + aesenc KEY, STATE movaps -0x30(TKEYP), KEY - AESENC KEY STATE + aesenc KEY, STATE .align 4 .Lenc128: movaps -0x20(TKEYP), KEY - AESENC KEY STATE + aesenc KEY, STATE movaps -0x10(TKEYP), KEY - AESENC KEY STATE + aesenc KEY, STATE movaps (TKEYP), KEY - AESENC KEY STATE + aesenc KEY, STATE movaps 0x10(TKEYP), KEY - AESENC KEY STATE + aesenc KEY, STATE movaps 0x20(TKEYP), KEY - AESENC KEY STATE + aesenc KEY, STATE movaps 0x30(TKEYP), KEY - AESENC KEY STATE + aesenc KEY, STATE movaps 0x40(TKEYP), KEY - AESENC KEY STATE + aesenc KEY, STATE movaps 0x50(TKEYP), KEY - AESENC KEY STATE + aesenc KEY, STATE movaps 0x60(TKEYP), KEY - AESENC KEY STATE + aesenc KEY, STATE movaps 0x70(TKEYP), KEY - AESENCLAST KEY STATE + aesenclast KEY, STATE ret SYM_FUNC_END(_aesni_enc1) @@ -2054,79 +2053,79 @@ SYM_FUNC_START_LOCAL(_aesni_enc4) je .L4enc192 add $0x20, TKEYP movaps -0x60(TKEYP), KEY - AESENC KEY STATE1 - AESENC KEY STATE2 - AESENC KEY STATE3 - AESENC KEY STATE4 + aesenc KEY, STATE1 + aesenc KEY, STATE2 + aesenc KEY, STATE3 + aesenc KEY, STATE4 movaps -0x50(TKEYP), KEY - AESENC KEY STATE1 - AESENC KEY STATE2 - AESENC KEY STATE3 - AESENC KEY STATE4 + aesenc KEY, STATE1 + aesenc KEY, STATE2 + aesenc KEY, STATE3 + aesenc KEY, STATE4 #.align 4 .L4enc192: movaps -0x40(TKEYP), KEY - AESENC KEY STATE1 - AESENC KEY STATE2 - AESENC KEY STATE3 - AESENC KEY STATE4 + aesenc KEY, STATE1 + aesenc KEY, STATE2 + aesenc KEY, STATE3 + aesenc KEY, STATE4 movaps -0x30(TKEYP), KEY - AESENC KEY STATE1 - AESENC KEY STATE2 - AESENC KEY STATE3 - AESENC KEY STATE4 + aesenc KEY, STATE1 + aesenc KEY, STATE2 + aesenc KEY, STATE3 + aesenc KEY, STATE4 #.align 4 .L4enc128: movaps -0x20(TKEYP), KEY - AESENC KEY STATE1 - AESENC KEY STATE2 - AESENC KEY STATE3 - AESENC KEY STATE4 + aesenc KEY, STATE1 + aesenc KEY, STATE2 + aesenc KEY, STATE3 + aesenc KEY, STATE4 movaps -0x10(TKEYP), KEY - AESENC KEY STATE1 - AESENC KEY STATE2 - AESENC KEY STATE3 - AESENC KEY STATE4 + aesenc KEY, STATE1 + aesenc KEY, STATE2 + aesenc KEY, STATE3 + aesenc KEY, STATE4 movaps (TKEYP), KEY - AESENC KEY STATE1 - AESENC KEY STATE2 - AESENC KEY STATE3 - AESENC KEY STATE4 + aesenc KEY, STATE1 + aesenc KEY, STATE2 + aesenc KEY, STATE3 + aesenc KEY, STATE4 movaps 0x10(TKEYP), KEY - AESENC KEY STATE1 - AESENC KEY STATE2 - AESENC KEY STATE3 - AESENC KEY STATE4 + aesenc KEY, STATE1 + aesenc KEY, STATE2 + aesenc KEY, STATE3 + aesenc KEY, STATE4 movaps 0x20(TKEYP), KEY - AESENC KEY STATE1 - AESENC KEY STATE2 - AESENC KEY STATE3 - AESENC KEY STATE4 + aesenc KEY, STATE1 + aesenc KEY, STATE2 + aesenc KEY, STATE3 + aesenc KEY, STATE4 movaps 0x30(TKEYP), KEY - AESENC KEY STATE1 - AESENC KEY STATE2 - AESENC KEY STATE3 - AESENC KEY STATE4 + aesenc KEY, STATE1 + aesenc KEY, STATE2 + aesenc KEY, STATE3 + aesenc KEY, STATE4 movaps 0x40(TKEYP), KEY - AESENC KEY STATE1 - AESENC KEY STATE2 - AESENC KEY 
STATE3 - AESENC KEY STATE4 + aesenc KEY, STATE1 + aesenc KEY, STATE2 + aesenc KEY, STATE3 + aesenc KEY, STATE4 movaps 0x50(TKEYP), KEY - AESENC KEY STATE1 - AESENC KEY STATE2 - AESENC KEY STATE3 - AESENC KEY STATE4 + aesenc KEY, STATE1 + aesenc KEY, STATE2 + aesenc KEY, STATE3 + aesenc KEY, STATE4 movaps 0x60(TKEYP), KEY - AESENC KEY STATE1 - AESENC KEY STATE2 - AESENC KEY STATE3 - AESENC KEY STATE4 + aesenc KEY, STATE1 + aesenc KEY, STATE2 + aesenc KEY, STATE3 + aesenc KEY, STATE4 movaps 0x70(TKEYP), KEY - AESENCLAST KEY STATE1 # last round - AESENCLAST KEY STATE2 - AESENCLAST KEY STATE3 - AESENCLAST KEY STATE4 + aesenclast KEY, STATE1 # last round + aesenclast KEY, STATE2 + aesenclast KEY, STATE3 + aesenclast KEY, STATE4 ret SYM_FUNC_END(_aesni_enc4) @@ -2178,37 +2177,37 @@ SYM_FUNC_START_LOCAL(_aesni_dec1) je .Ldec192 add $0x20, TKEYP movaps -0x60(TKEYP), KEY - AESDEC KEY STATE + aesdec KEY, STATE movaps -0x50(TKEYP), KEY - AESDEC KEY STATE + aesdec KEY, STATE .align 4 .Ldec192: movaps -0x40(TKEYP), KEY - AESDEC KEY STATE + aesdec KEY, STATE movaps -0x30(TKEYP), KEY - AESDEC KEY STATE + aesdec KEY, STATE .align 4 .Ldec128: movaps -0x20(TKEYP), KEY - AESDEC KEY STATE + aesdec KEY, STATE movaps -0x10(TKEYP), KEY - AESDEC KEY STATE + aesdec KEY, STATE movaps (TKEYP), KEY - AESDEC KEY STATE + aesdec KEY, STATE movaps 0x10(TKEYP), KEY - AESDEC KEY STATE + aesdec KEY, STATE movaps 0x20(TKEYP), KEY - AESDEC KEY STATE + aesdec KEY, STATE movaps 0x30(TKEYP), KEY - AESDEC KEY STATE + aesdec KEY, STATE movaps 0x40(TKEYP), KEY - AESDEC KEY STATE + aesdec KEY, STATE movaps 0x50(TKEYP), KEY - AESDEC KEY STATE + aesdec KEY, STATE movaps 0x60(TKEYP), KEY - AESDEC KEY STATE + aesdec KEY, STATE movaps 0x70(TKEYP), KEY - AESDECLAST KEY STATE + aesdeclast KEY, STATE ret SYM_FUNC_END(_aesni_dec1) @@ -2244,79 +2243,79 @@ SYM_FUNC_START_LOCAL(_aesni_dec4) je .L4dec192 add $0x20, TKEYP movaps -0x60(TKEYP), KEY - AESDEC KEY STATE1 - AESDEC KEY STATE2 - AESDEC KEY STATE3 - AESDEC KEY STATE4 + aesdec KEY, STATE1 + aesdec KEY, STATE2 + aesdec KEY, STATE3 + aesdec KEY, STATE4 movaps -0x50(TKEYP), KEY - AESDEC KEY STATE1 - AESDEC KEY STATE2 - AESDEC KEY STATE3 - AESDEC KEY STATE4 + aesdec KEY, STATE1 + aesdec KEY, STATE2 + aesdec KEY, STATE3 + aesdec KEY, STATE4 .align 4 .L4dec192: movaps -0x40(TKEYP), KEY - AESDEC KEY STATE1 - AESDEC KEY STATE2 - AESDEC KEY STATE3 - AESDEC KEY STATE4 + aesdec KEY, STATE1 + aesdec KEY, STATE2 + aesdec KEY, STATE3 + aesdec KEY, STATE4 movaps -0x30(TKEYP), KEY - AESDEC KEY STATE1 - AESDEC KEY STATE2 - AESDEC KEY STATE3 - AESDEC KEY STATE4 + aesdec KEY, STATE1 + aesdec KEY, STATE2 + aesdec KEY, STATE3 + aesdec KEY, STATE4 .align 4 .L4dec128: movaps -0x20(TKEYP), KEY - AESDEC KEY STATE1 - AESDEC KEY STATE2 - AESDEC KEY STATE3 - AESDEC KEY STATE4 + aesdec KEY, STATE1 + aesdec KEY, STATE2 + aesdec KEY, STATE3 + aesdec KEY, STATE4 movaps -0x10(TKEYP), KEY - AESDEC KEY STATE1 - AESDEC KEY STATE2 - AESDEC KEY STATE3 - AESDEC KEY STATE4 + aesdec KEY, STATE1 + aesdec KEY, STATE2 + aesdec KEY, STATE3 + aesdec KEY, STATE4 movaps (TKEYP), KEY - AESDEC KEY STATE1 - AESDEC KEY STATE2 - AESDEC KEY STATE3 - AESDEC KEY STATE4 + aesdec KEY, STATE1 + aesdec KEY, STATE2 + aesdec KEY, STATE3 + aesdec KEY, STATE4 movaps 0x10(TKEYP), KEY - AESDEC KEY STATE1 - AESDEC KEY STATE2 - AESDEC KEY STATE3 - AESDEC KEY STATE4 + aesdec KEY, STATE1 + aesdec KEY, STATE2 + aesdec KEY, STATE3 + aesdec KEY, STATE4 movaps 0x20(TKEYP), KEY - AESDEC KEY STATE1 - AESDEC KEY STATE2 - AESDEC KEY STATE3 - AESDEC KEY STATE4 + 
aesdec KEY, STATE1 + aesdec KEY, STATE2 + aesdec KEY, STATE3 + aesdec KEY, STATE4 movaps 0x30(TKEYP), KEY - AESDEC KEY STATE1 - AESDEC KEY STATE2 - AESDEC KEY STATE3 - AESDEC KEY STATE4 + aesdec KEY, STATE1 + aesdec KEY, STATE2 + aesdec KEY, STATE3 + aesdec KEY, STATE4 movaps 0x40(TKEYP), KEY - AESDEC KEY STATE1 - AESDEC KEY STATE2 - AESDEC KEY STATE3 - AESDEC KEY STATE4 + aesdec KEY, STATE1 + aesdec KEY, STATE2 + aesdec KEY, STATE3 + aesdec KEY, STATE4 movaps 0x50(TKEYP), KEY - AESDEC KEY STATE1 - AESDEC KEY STATE2 - AESDEC KEY STATE3 - AESDEC KEY STATE4 + aesdec KEY, STATE1 + aesdec KEY, STATE2 + aesdec KEY, STATE3 + aesdec KEY, STATE4 movaps 0x60(TKEYP), KEY - AESDEC KEY STATE1 - AESDEC KEY STATE2 - AESDEC KEY STATE3 - AESDEC KEY STATE4 + aesdec KEY, STATE1 + aesdec KEY, STATE2 + aesdec KEY, STATE3 + aesdec KEY, STATE4 movaps 0x70(TKEYP), KEY - AESDECLAST KEY STATE1 # last round - AESDECLAST KEY STATE2 - AESDECLAST KEY STATE3 - AESDECLAST KEY STATE4 + aesdeclast KEY, STATE1 # last round + aesdeclast KEY, STATE2 + aesdeclast KEY, STATE3 + aesdeclast KEY, STATE4 ret SYM_FUNC_END(_aesni_dec4) @@ -2599,10 +2598,10 @@ SYM_FUNC_END(aesni_cbc_dec) SYM_FUNC_START_LOCAL(_aesni_inc_init) movaps .Lbswap_mask, BSWAP_MASK movaps IV, CTR - PSHUFB_XMM BSWAP_MASK CTR + pshufb BSWAP_MASK, CTR mov $1, TCTR_LOW - MOVQ_R64_XMM TCTR_LOW INC - MOVQ_R64_XMM CTR TCTR_LOW + movq TCTR_LOW, INC + movq CTR, TCTR_LOW ret SYM_FUNC_END(_aesni_inc_init) @@ -2630,7 +2629,7 @@ SYM_FUNC_START_LOCAL(_aesni_inc) psrldq $8, INC .Linc_low: movaps CTR, IV - PSHUFB_XMM BSWAP_MASK IV + pshufb BSWAP_MASK, IV ret SYM_FUNC_END(_aesni_inc) diff --git a/arch/x86/crypto/aesni-intel_avx-x86_64.S b/arch/x86/crypto/aesni-intel_avx-x86_64.S index 0cea33295287..5fee47956f3b 100644 --- a/arch/x86/crypto/aesni-intel_avx-x86_64.S +++ b/arch/x86/crypto/aesni-intel_avx-x86_64.S @@ -120,7 +120,6 @@ ## #include <linux/linkage.h> -#include <asm/inst.h> # constants in mergeable sections, linker can reorder and merge .section .rodata.cst16.POLY, "aM", @progbits, 16 diff --git a/arch/x86/crypto/chacha-ssse3-x86_64.S b/arch/x86/crypto/chacha-ssse3-x86_64.S index a38ab2512a6f..ca1788bfee16 100644 --- a/arch/x86/crypto/chacha-ssse3-x86_64.S +++ b/arch/x86/crypto/chacha-ssse3-x86_64.S @@ -120,10 +120,10 @@ SYM_FUNC_START(chacha_block_xor_ssse3) FRAME_BEGIN # x0..3 = s0..3 - movdqa 0x00(%rdi),%xmm0 - movdqa 0x10(%rdi),%xmm1 - movdqa 0x20(%rdi),%xmm2 - movdqa 0x30(%rdi),%xmm3 + movdqu 0x00(%rdi),%xmm0 + movdqu 0x10(%rdi),%xmm1 + movdqu 0x20(%rdi),%xmm2 + movdqu 0x30(%rdi),%xmm3 movdqa %xmm0,%xmm8 movdqa %xmm1,%xmm9 movdqa %xmm2,%xmm10 @@ -205,10 +205,10 @@ SYM_FUNC_START(hchacha_block_ssse3) # %edx: nrounds FRAME_BEGIN - movdqa 0x00(%rdi),%xmm0 - movdqa 0x10(%rdi),%xmm1 - movdqa 0x20(%rdi),%xmm2 - movdqa 0x30(%rdi),%xmm3 + movdqu 0x00(%rdi),%xmm0 + movdqu 0x10(%rdi),%xmm1 + movdqu 0x20(%rdi),%xmm2 + movdqu 0x30(%rdi),%xmm3 mov %edx,%r8d call chacha_permute diff --git a/arch/x86/crypto/chacha_glue.c b/arch/x86/crypto/chacha_glue.c index 22250091cdbe..e67a59130025 100644 --- a/arch/x86/crypto/chacha_glue.c +++ b/arch/x86/crypto/chacha_glue.c @@ -14,8 +14,6 @@ #include <linux/module.h> #include <asm/simd.h> -#define CHACHA_STATE_ALIGN 16 - asmlinkage void chacha_block_xor_ssse3(u32 *state, u8 *dst, const u8 *src, unsigned int len, int nrounds); asmlinkage void chacha_4block_xor_ssse3(u32 *state, u8 *dst, const u8 *src, @@ -124,8 +122,6 @@ static void chacha_dosimd(u32 *state, u8 *dst, const u8 *src, void hchacha_block_arch(const u32 *state, u32 *stream, int 
nrounds) { - state = PTR_ALIGN(state, CHACHA_STATE_ALIGN); - if (!static_branch_likely(&chacha_use_simd) || !crypto_simd_usable()) { hchacha_block_generic(state, stream, nrounds); } else { @@ -138,8 +134,6 @@ EXPORT_SYMBOL(hchacha_block_arch); void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv) { - state = PTR_ALIGN(state, CHACHA_STATE_ALIGN); - chacha_init_generic(state, key, iv); } EXPORT_SYMBOL(chacha_init_arch); @@ -147,8 +141,6 @@ EXPORT_SYMBOL(chacha_init_arch); void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes, int nrounds) { - state = PTR_ALIGN(state, CHACHA_STATE_ALIGN); - if (!static_branch_likely(&chacha_use_simd) || !crypto_simd_usable() || bytes <= CHACHA_BLOCK_SIZE) return chacha_crypt_generic(state, dst, src, bytes, nrounds); @@ -170,15 +162,12 @@ EXPORT_SYMBOL(chacha_crypt_arch); static int chacha_simd_stream_xor(struct skcipher_request *req, const struct chacha_ctx *ctx, const u8 *iv) { - u32 *state, state_buf[16 + 2] __aligned(8); + u32 state[CHACHA_STATE_WORDS] __aligned(8); struct skcipher_walk walk; int err; err = skcipher_walk_virt(&walk, req, false); - BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16); - state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN); - chacha_init_generic(state, ctx->key, iv); while (walk.nbytes > 0) { @@ -217,12 +206,10 @@ static int xchacha_simd(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); - u32 *state, state_buf[16 + 2] __aligned(8); + u32 state[CHACHA_STATE_WORDS] __aligned(8); struct chacha_ctx subctx; u8 real_iv[16]; - BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16); - state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN); chacha_init_generic(state, ctx->key, req->iv); if (req->cryptlen > CHACHA_BLOCK_SIZE && crypto_simd_usable()) { diff --git a/arch/x86/crypto/crc32-pclmul_asm.S b/arch/x86/crypto/crc32-pclmul_asm.S index 9fd28ff65bc2..6e7d4c4d3208 100644 --- a/arch/x86/crypto/crc32-pclmul_asm.S +++ b/arch/x86/crypto/crc32-pclmul_asm.S @@ -38,7 +38,6 @@ */ #include <linux/linkage.h> -#include <asm/inst.h> .section .rodata @@ -129,17 +128,17 @@ loop_64:/* 64 bytes Full cache line folding */ #ifdef __x86_64__ movdqa %xmm4, %xmm8 #endif - PCLMULQDQ 00, CONSTANT, %xmm1 - PCLMULQDQ 00, CONSTANT, %xmm2 - PCLMULQDQ 00, CONSTANT, %xmm3 + pclmulqdq $0x00, CONSTANT, %xmm1 + pclmulqdq $0x00, CONSTANT, %xmm2 + pclmulqdq $0x00, CONSTANT, %xmm3 #ifdef __x86_64__ - PCLMULQDQ 00, CONSTANT, %xmm4 + pclmulqdq $0x00, CONSTANT, %xmm4 #endif - PCLMULQDQ 0x11, CONSTANT, %xmm5 - PCLMULQDQ 0x11, CONSTANT, %xmm6 - PCLMULQDQ 0x11, CONSTANT, %xmm7 + pclmulqdq $0x11, CONSTANT, %xmm5 + pclmulqdq $0x11, CONSTANT, %xmm6 + pclmulqdq $0x11, CONSTANT, %xmm7 #ifdef __x86_64__ - PCLMULQDQ 0x11, CONSTANT, %xmm8 + pclmulqdq $0x11, CONSTANT, %xmm8 #endif pxor %xmm5, %xmm1 pxor %xmm6, %xmm2 @@ -149,8 +148,8 @@ loop_64:/* 64 bytes Full cache line folding */ #else /* xmm8 unsupported for x32 */ movdqa %xmm4, %xmm5 - PCLMULQDQ 00, CONSTANT, %xmm4 - PCLMULQDQ 0x11, CONSTANT, %xmm5 + pclmulqdq $0x00, CONSTANT, %xmm4 + pclmulqdq $0x11, CONSTANT, %xmm5 pxor %xmm5, %xmm4 #endif @@ -172,20 +171,20 @@ less_64:/* Folding cache line into 128bit */ prefetchnta (BUF) movdqa %xmm1, %xmm5 - PCLMULQDQ 0x00, CONSTANT, %xmm1 - PCLMULQDQ 0x11, CONSTANT, %xmm5 + pclmulqdq $0x00, CONSTANT, %xmm1 + pclmulqdq $0x11, CONSTANT, %xmm5 pxor %xmm5, %xmm1 pxor %xmm2, %xmm1 movdqa %xmm1, %xmm5 - PCLMULQDQ 0x00, CONSTANT, %xmm1 - PCLMULQDQ 0x11, CONSTANT, %xmm5 + pclmulqdq $0x00, CONSTANT, 
%xmm1 + pclmulqdq $0x11, CONSTANT, %xmm5 pxor %xmm5, %xmm1 pxor %xmm3, %xmm1 movdqa %xmm1, %xmm5 - PCLMULQDQ 0x00, CONSTANT, %xmm1 - PCLMULQDQ 0x11, CONSTANT, %xmm5 + pclmulqdq $0x00, CONSTANT, %xmm1 + pclmulqdq $0x11, CONSTANT, %xmm5 pxor %xmm5, %xmm1 pxor %xmm4, %xmm1 @@ -193,8 +192,8 @@ less_64:/* Folding cache line into 128bit */ jb fold_64 loop_16:/* Folding rest buffer into 128bit */ movdqa %xmm1, %xmm5 - PCLMULQDQ 0x00, CONSTANT, %xmm1 - PCLMULQDQ 0x11, CONSTANT, %xmm5 + pclmulqdq $0x00, CONSTANT, %xmm1 + pclmulqdq $0x11, CONSTANT, %xmm5 pxor %xmm5, %xmm1 pxor (BUF), %xmm1 sub $0x10, LEN @@ -205,7 +204,7 @@ loop_16:/* Folding rest buffer into 128bit */ fold_64: /* perform the last 64 bit fold, also adds 32 zeroes * to the input stream */ - PCLMULQDQ 0x01, %xmm1, CONSTANT /* R4 * xmm1.low */ + pclmulqdq $0x01, %xmm1, CONSTANT /* R4 * xmm1.low */ psrldq $0x08, %xmm1 pxor CONSTANT, %xmm1 @@ -220,7 +219,7 @@ fold_64: #endif psrldq $0x04, %xmm2 pand %xmm3, %xmm1 - PCLMULQDQ 0x00, CONSTANT, %xmm1 + pclmulqdq $0x00, CONSTANT, %xmm1 pxor %xmm2, %xmm1 /* Finish up with the bit-reversed barrett reduction 64 ==> 32 bits */ @@ -231,11 +230,11 @@ fold_64: #endif movdqa %xmm1, %xmm2 pand %xmm3, %xmm1 - PCLMULQDQ 0x10, CONSTANT, %xmm1 + pclmulqdq $0x10, CONSTANT, %xmm1 pand %xmm3, %xmm1 - PCLMULQDQ 0x00, CONSTANT, %xmm1 + pclmulqdq $0x00, CONSTANT, %xmm1 pxor %xmm2, %xmm1 - PEXTRD 0x01, %xmm1, %eax + pextrd $0x01, %xmm1, %eax ret SYM_FUNC_END(crc32_pclmul_le_16) diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S index 8501ec4532f4..884dc767b051 100644 --- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S +++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S @@ -43,7 +43,6 @@ * SOFTWARE. */ -#include <asm/inst.h> #include <linux/linkage.h> #include <asm/nospec-branch.h> @@ -170,7 +169,7 @@ continue_block: ## branch into array lea jump_table(%rip), %bufp - movzxw (%bufp, %rax, 2), len + movzwq (%bufp, %rax, 2), len lea crc_array(%rip), %bufp lea (%bufp, len, 1), %bufp JMP_NOSPEC bufp @@ -225,10 +224,10 @@ LABEL crc_ %i subq %rax, tmp # tmp -= rax*24 movq crc_init, %xmm1 # CRC for block 1 - PCLMULQDQ 0x00,%xmm0,%xmm1 # Multiply by K2 + pclmulqdq $0x00, %xmm0, %xmm1 # Multiply by K2 movq crc1, %xmm2 # CRC for block 2 - PCLMULQDQ 0x10, %xmm0, %xmm2 # Multiply by K1 + pclmulqdq $0x10, %xmm0, %xmm2 # Multiply by K1 pxor %xmm2,%xmm1 movq %xmm1, %rax diff --git a/arch/x86/crypto/curve25519-x86_64.c b/arch/x86/crypto/curve25519-x86_64.c index 8a17621f7d3a..8acbb6584a37 100644 --- a/arch/x86/crypto/curve25519-x86_64.c +++ b/arch/x86/crypto/curve25519-x86_64.c @@ -948,10 +948,8 @@ static void store_felem(u64 *b, u64 *f) { u64 f30 = f[3U]; u64 top_bit0 = f30 >> (u32)63U; - u64 carry0; u64 f31; u64 top_bit; - u64 carry; u64 f0; u64 f1; u64 f2; @@ -970,11 +968,11 @@ static void store_felem(u64 *b, u64 *f) u64 o2; u64 o3; f[3U] = f30 & (u64)0x7fffffffffffffffU; - carry0 = add_scalar(f, f, (u64)19U * top_bit0); + add_scalar(f, f, (u64)19U * top_bit0); f31 = f[3U]; top_bit = f31 >> (u32)63U; f[3U] = f31 & (u64)0x7fffffffffffffffU; - carry = add_scalar(f, f, (u64)19U * top_bit); + add_scalar(f, f, (u64)19U * top_bit); f0 = f[0U]; f1 = f[1U]; f2 = f[2U]; diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S index bb9735fbb865..99ac25e18e09 100644 --- a/arch/x86/crypto/ghash-clmulni-intel_asm.S +++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S @@ -14,7 +14,6 @@ */ #include <linux/linkage.h> -#include <asm/inst.h> #include <asm/frame.h> 
.section .rodata.cst16.bswap_mask, "aM", @progbits, 16 @@ -51,9 +50,9 @@ SYM_FUNC_START_LOCAL(__clmul_gf128mul_ble) pxor DATA, T2 pxor SHASH, T3 - PCLMULQDQ 0x00 SHASH DATA # DATA = a0 * b0 - PCLMULQDQ 0x11 SHASH T1 # T1 = a1 * b1 - PCLMULQDQ 0x00 T3 T2 # T2 = (a1 + a0) * (b1 + b0) + pclmulqdq $0x00, SHASH, DATA # DATA = a0 * b0 + pclmulqdq $0x11, SHASH, T1 # T1 = a1 * b1 + pclmulqdq $0x00, T3, T2 # T2 = (a1 + a0) * (b1 + b0) pxor DATA, T2 pxor T1, T2 # T2 = a0 * b1 + a1 * b0 @@ -95,9 +94,9 @@ SYM_FUNC_START(clmul_ghash_mul) movups (%rdi), DATA movups (%rsi), SHASH movaps .Lbswap_mask, BSWAP - PSHUFB_XMM BSWAP DATA + pshufb BSWAP, DATA call __clmul_gf128mul_ble - PSHUFB_XMM BSWAP DATA + pshufb BSWAP, DATA movups DATA, (%rdi) FRAME_END ret @@ -114,18 +113,18 @@ SYM_FUNC_START(clmul_ghash_update) movaps .Lbswap_mask, BSWAP movups (%rdi), DATA movups (%rcx), SHASH - PSHUFB_XMM BSWAP DATA + pshufb BSWAP, DATA .align 4 .Lupdate_loop: movups (%rsi), IN1 - PSHUFB_XMM BSWAP IN1 + pshufb BSWAP, IN1 pxor IN1, DATA call __clmul_gf128mul_ble sub $16, %rdx add $16, %rsi cmp $16, %rdx jge .Lupdate_loop - PSHUFB_XMM BSWAP DATA + pshufb BSWAP, DATA movups DATA, (%rdi) .Lupdate_just_ret: FRAME_END diff --git a/arch/x86/include/asm/inst.h b/arch/x86/include/asm/inst.h index f5a796da07f8..438ccd4f3cc4 100644 --- a/arch/x86/include/asm/inst.h +++ b/arch/x86/include/asm/inst.h @@ -12,7 +12,6 @@ #define REG_TYPE_R32 0 #define REG_TYPE_R64 1 -#define REG_TYPE_XMM 2 #define REG_TYPE_INVALID 100 .macro R32_NUM opd r32 @@ -123,77 +122,18 @@ #endif .endm - .macro XMM_NUM opd xmm - \opd = REG_NUM_INVALID - .ifc \xmm,%xmm0 - \opd = 0 - .endif - .ifc \xmm,%xmm1 - \opd = 1 - .endif - .ifc \xmm,%xmm2 - \opd = 2 - .endif - .ifc \xmm,%xmm3 - \opd = 3 - .endif - .ifc \xmm,%xmm4 - \opd = 4 - .endif - .ifc \xmm,%xmm5 - \opd = 5 - .endif - .ifc \xmm,%xmm6 - \opd = 6 - .endif - .ifc \xmm,%xmm7 - \opd = 7 - .endif - .ifc \xmm,%xmm8 - \opd = 8 - .endif - .ifc \xmm,%xmm9 - \opd = 9 - .endif - .ifc \xmm,%xmm10 - \opd = 10 - .endif - .ifc \xmm,%xmm11 - \opd = 11 - .endif - .ifc \xmm,%xmm12 - \opd = 12 - .endif - .ifc \xmm,%xmm13 - \opd = 13 - .endif - .ifc \xmm,%xmm14 - \opd = 14 - .endif - .ifc \xmm,%xmm15 - \opd = 15 - .endif - .endm - .macro REG_TYPE type reg R32_NUM reg_type_r32 \reg R64_NUM reg_type_r64 \reg - XMM_NUM reg_type_xmm \reg .if reg_type_r64 <> REG_NUM_INVALID \type = REG_TYPE_R64 .elseif reg_type_r32 <> REG_NUM_INVALID \type = REG_TYPE_R32 - .elseif reg_type_xmm <> REG_NUM_INVALID - \type = REG_TYPE_XMM .else \type = REG_TYPE_INVALID .endif .endm - .macro PFX_OPD_SIZE - .byte 0x66 - .endm - .macro PFX_REX opd1 opd2 W=0 .if ((\opd1 | \opd2) & 8) || \W .byte 0x40 | ((\opd1 & 8) >> 3) | ((\opd2 & 8) >> 1) | (\W << 3) @@ -203,109 +143,6 @@ .macro MODRM mod opd1 opd2 .byte \mod | (\opd1 & 7) | ((\opd2 & 7) << 3) .endm - - .macro PSHUFB_XMM xmm1 xmm2 - XMM_NUM pshufb_opd1 \xmm1 - XMM_NUM pshufb_opd2 \xmm2 - PFX_OPD_SIZE - PFX_REX pshufb_opd1 pshufb_opd2 - .byte 0x0f, 0x38, 0x00 - MODRM 0xc0 pshufb_opd1 pshufb_opd2 - .endm - - .macro PCLMULQDQ imm8 xmm1 xmm2 - XMM_NUM clmul_opd1 \xmm1 - XMM_NUM clmul_opd2 \xmm2 - PFX_OPD_SIZE - PFX_REX clmul_opd1 clmul_opd2 - .byte 0x0f, 0x3a, 0x44 - MODRM 0xc0 clmul_opd1 clmul_opd2 - .byte \imm8 - .endm - - .macro PEXTRD imm8 xmm gpr - R32_NUM extrd_opd1 \gpr - XMM_NUM extrd_opd2 \xmm - PFX_OPD_SIZE - PFX_REX extrd_opd1 extrd_opd2 - .byte 0x0f, 0x3a, 0x16 - MODRM 0xc0 extrd_opd1 extrd_opd2 - .byte \imm8 - .endm - - .macro AESKEYGENASSIST rcon xmm1 xmm2 - XMM_NUM aeskeygen_opd1 \xmm1 - 
XMM_NUM aeskeygen_opd2 \xmm2 - PFX_OPD_SIZE - PFX_REX aeskeygen_opd1 aeskeygen_opd2 - .byte 0x0f, 0x3a, 0xdf - MODRM 0xc0 aeskeygen_opd1 aeskeygen_opd2 - .byte \rcon - .endm - - .macro AESIMC xmm1 xmm2 - XMM_NUM aesimc_opd1 \xmm1 - XMM_NUM aesimc_opd2 \xmm2 - PFX_OPD_SIZE - PFX_REX aesimc_opd1 aesimc_opd2 - .byte 0x0f, 0x38, 0xdb - MODRM 0xc0 aesimc_opd1 aesimc_opd2 - .endm - - .macro AESENC xmm1 xmm2 - XMM_NUM aesenc_opd1 \xmm1 - XMM_NUM aesenc_opd2 \xmm2 - PFX_OPD_SIZE - PFX_REX aesenc_opd1 aesenc_opd2 - .byte 0x0f, 0x38, 0xdc - MODRM 0xc0 aesenc_opd1 aesenc_opd2 - .endm - - .macro AESENCLAST xmm1 xmm2 - XMM_NUM aesenclast_opd1 \xmm1 - XMM_NUM aesenclast_opd2 \xmm2 - PFX_OPD_SIZE - PFX_REX aesenclast_opd1 aesenclast_opd2 - .byte 0x0f, 0x38, 0xdd - MODRM 0xc0 aesenclast_opd1 aesenclast_opd2 - .endm - - .macro AESDEC xmm1 xmm2 - XMM_NUM aesdec_opd1 \xmm1 - XMM_NUM aesdec_opd2 \xmm2 - PFX_OPD_SIZE - PFX_REX aesdec_opd1 aesdec_opd2 - .byte 0x0f, 0x38, 0xde - MODRM 0xc0 aesdec_opd1 aesdec_opd2 - .endm - - .macro AESDECLAST xmm1 xmm2 - XMM_NUM aesdeclast_opd1 \xmm1 - XMM_NUM aesdeclast_opd2 \xmm2 - PFX_OPD_SIZE - PFX_REX aesdeclast_opd1 aesdeclast_opd2 - .byte 0x0f, 0x38, 0xdf - MODRM 0xc0 aesdeclast_opd1 aesdeclast_opd2 - .endm - - .macro MOVQ_R64_XMM opd1 opd2 - REG_TYPE movq_r64_xmm_opd1_type \opd1 - .if movq_r64_xmm_opd1_type == REG_TYPE_XMM - XMM_NUM movq_r64_xmm_opd1 \opd1 - R64_NUM movq_r64_xmm_opd2 \opd2 - .else - R64_NUM movq_r64_xmm_opd1 \opd1 - XMM_NUM movq_r64_xmm_opd2 \opd2 - .endif - PFX_OPD_SIZE - PFX_REX movq_r64_xmm_opd1 movq_r64_xmm_opd2 1 - .if movq_r64_xmm_opd1_type == REG_TYPE_XMM - .byte 0x0f, 0x7e - .else - .byte 0x0f, 0x6e - .endif - MODRM 0xc0 movq_r64_xmm_opd1 movq_r64_xmm_opd2 - .endm #endif #endif diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index b037cfa7c0c5..7401cc12c3cc 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -71,6 +71,22 @@ static void printk_stack_address(unsigned long address, int reliable, printk("%s %s%pB\n", log_lvl, reliable ? "" : "? ", (void *)address); } +static int copy_code(struct pt_regs *regs, u8 *buf, unsigned long src, + unsigned int nbytes) +{ + if (!user_mode(regs)) + return copy_from_kernel_nofault(buf, (u8 *)src, nbytes); + + /* + * Make sure userspace isn't trying to trick us into dumping kernel + * memory by pointing the userspace instruction pointer at it. + */ + if (__chk_range_not_ok(src, nbytes, TASK_SIZE_MAX)) + return -EINVAL; + + return copy_from_user_nmi(buf, (void __user *)src, nbytes); +} + /* * There are a couple of reasons for the 2/3rd prologue, courtesy of Linus: * @@ -97,17 +113,8 @@ void show_opcodes(struct pt_regs *regs, const char *loglvl) #define OPCODE_BUFSIZE (PROLOGUE_SIZE + 1 + EPILOGUE_SIZE) u8 opcodes[OPCODE_BUFSIZE]; unsigned long prologue = regs->ip - PROLOGUE_SIZE; - bool bad_ip; - - /* - * Make sure userspace isn't trying to trick us into dumping kernel - * memory by pointing the userspace instruction pointer at it. 
- */ - bad_ip = user_mode(regs) && - __chk_range_not_ok(prologue, OPCODE_BUFSIZE, TASK_SIZE_MAX); - if (bad_ip || copy_from_kernel_nofault(opcodes, (u8 *)prologue, - OPCODE_BUFSIZE)) { + if (copy_code(regs, opcodes, prologue, sizeof(opcodes))) { printk("%sCode: Bad RIP value.\n", loglvl); } else { printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %" diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c index f3c76252247d..282b4ee1339f 100644 --- a/arch/x86/kernel/i8259.c +++ b/arch/x86/kernel/i8259.c @@ -207,7 +207,7 @@ spurious_8259A_irq: * lets ACK and report it. [once per IRQ] */ if (!(spurious_irq_mask & irqmask)) { - printk(KERN_DEBUG + printk_deferred(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq); spurious_irq_mask |= irqmask; } diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c index 6ad43fc44556..2fd698e28e4d 100644 --- a/arch/x86/kernel/stacktrace.c +++ b/arch/x86/kernel/stacktrace.c @@ -58,7 +58,6 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, * or a page fault), which can make frame pointers * unreliable. */ - if (IS_ENABLED(CONFIG_FRAME_POINTER)) return -EINVAL; } @@ -81,10 +80,6 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, if (unwind_error(&state)) return -EINVAL; - /* Success path for non-user tasks, i.e. kthreads and idle tasks */ - if (!(task->flags & (PF_KTHREAD | PF_IDLE))) - return -EINVAL; - return 0; } diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c index 7f969b2d240f..ec88bbe08a32 100644 --- a/arch/x86/kernel/unwind_orc.c +++ b/arch/x86/kernel/unwind_orc.c @@ -440,8 +440,11 @@ bool unwind_next_frame(struct unwind_state *state) /* * Find the orc_entry associated with the text address. * - * Decrement call return addresses by one so they work for sibling - * calls and calls to noreturn functions. + * For a call frame (as opposed to a signal frame), state->ip points to + * the instruction after the call. That instruction's stack layout + * could be different from the call instruction's layout, for example + * if the call was to a noreturn function. So get the ORC data for the + * call instruction itself. */ orc = orc_find(state->signal ? state->ip : state->ip - 1); if (!orc) { @@ -662,6 +665,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task, state->sp = task->thread.sp; state->bp = READ_ONCE_NOCHECK(frame->bp); state->ip = READ_ONCE_NOCHECK(frame->ret_addr); + state->signal = (void *)state->ip == ret_from_fork; } if (get_stack_info((unsigned long *)state->sp, state->task, diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 3bfc8dd8a43d..9a03e5b23135 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -358,6 +358,7 @@ SECTIONS .bss : AT(ADDR(.bss) - LOAD_OFFSET) { __bss_start = .; *(.bss..page_aligned) + . = ALIGN(PAGE_SIZE); *(BSS_MAIN) BSS_DECRYPTED . 
= ALIGN(PAGE_SIZE); diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 5bf72fc86a8e..4ce2ddd26c0b 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -2195,7 +2195,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data) { struct kvm_lapic *apic = vcpu->arch.apic; - if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) || + if (!kvm_apic_present(vcpu) || apic_lvtt_oneshot(apic) || apic_lvtt_period(apic)) return; diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index c0da4dd78ac5..5bbf76189afa 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -1090,7 +1090,7 @@ static void init_vmcb(struct vcpu_svm *svm) svm->nested.vmcb = 0; svm->vcpu.arch.hflags = 0; - if (pause_filter_count) { + if (!kvm_pause_in_guest(svm->vcpu.kvm)) { control->pause_filter_count = pause_filter_count; if (pause_filter_thresh) control->pause_filter_thresh = pause_filter_thresh; @@ -2693,7 +2693,7 @@ static int pause_interception(struct vcpu_svm *svm) struct kvm_vcpu *vcpu = &svm->vcpu; bool in_kernel = (svm_get_cpl(vcpu) == 0); - if (pause_filter_thresh) + if (!kvm_pause_in_guest(vcpu->kvm)) grow_ple_window(vcpu); kvm_vcpu_on_spin(vcpu, in_kernel); @@ -3780,7 +3780,7 @@ static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu) static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu) { - if (pause_filter_thresh) + if (!kvm_pause_in_guest(vcpu->kvm)) shrink_ple_window(vcpu); } @@ -3958,6 +3958,9 @@ static void svm_vm_destroy(struct kvm *kvm) static int svm_vm_init(struct kvm *kvm) { + if (!pause_filter_count || !pause_filter_thresh) + kvm->arch.pause_in_guest = true; + if (avic) { int ret = avic_vm_init(kvm); if (ret) diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index d4a4cec034d0..11e4df560018 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -6079,6 +6079,9 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu, ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON)) return -EINVAL; + if (kvm_state->hdr.vmx.flags & ~KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) + return -EINVAL; + /* * SMM temporarily disables VMX, so we cannot be in guest mode, * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags @@ -6108,9 +6111,16 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu, if (ret) return ret; - /* Empty 'VMXON' state is permitted */ - if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12)) - return 0; + /* Empty 'VMXON' state is permitted if no VMCS loaded */ + if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12)) { + /* See vmx_has_valid_vmcs12. */ + if ((kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE) || + (kvm_state->flags & KVM_STATE_NESTED_EVMCS) || + (kvm_state->hdr.vmx.vmcs12_pa != -1ull)) + return -EINVAL; + else + return 0; + } if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) { if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa || diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h index 758bccc26cf9..197148d76b8f 100644 --- a/arch/x86/kvm/vmx/nested.h +++ b/arch/x86/kvm/vmx/nested.h @@ -47,6 +47,11 @@ static inline struct vmcs12 *get_shadow_vmcs12(struct kvm_vcpu *vcpu) return to_vmx(vcpu)->nested.cached_shadow_vmcs12; } +/* + * Note: the same condition is checked against the state provided by userspace + * in vmx_set_nested_state; if it is satisfied, the nested state must include + * the VMCS12. 
+ */ static inline int vmx_has_valid_vmcs12(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); diff --git a/arch/xtensa/include/asm/checksum.h b/arch/xtensa/include/asm/checksum.h index d8292cc9ebdf..243a5fe79d3c 100644 --- a/arch/xtensa/include/asm/checksum.h +++ b/arch/xtensa/include/asm/checksum.h @@ -57,7 +57,7 @@ static inline __wsum csum_and_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr) { - if (access_ok(dst, len)) + if (access_ok(src, len)) return csum_partial_copy_generic((__force const void *)src, dst, len, sum, err_ptr, NULL); if (len) diff --git a/crypto/Kconfig b/crypto/Kconfig index 091c0a0bbf26..1b57419fa2e7 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -548,7 +548,7 @@ config CRYPTO_XCBC select CRYPTO_MANAGER help XCBC: Keyed-Hashing with encryption algorithm - http://www.ietf.org/rfc/rfc3566.txt + https://www.ietf.org/rfc/rfc3566.txt http://csrc.nist.gov/encryption/modes/proposedmodes/ xcbc-mac/xcbc-mac-spec.pdf @@ -561,7 +561,7 @@ config CRYPTO_VMAC very high speed on 64-bit architectures. See also: - <http://fastcrypto.org/vmac> + <https://fastcrypto.org/vmac> comment "Digest" @@ -816,7 +816,7 @@ config CRYPTO_RMD128 RIPEMD-160 should be used. Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. - See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html> + See <https://homes.esat.kuleuven.be/~bosselae/ripemd160.html> config CRYPTO_RMD160 tristate "RIPEMD-160 digest algorithm" @@ -833,7 +833,7 @@ config CRYPTO_RMD160 against RIPEMD-160. Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. - See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html> + See <https://homes.esat.kuleuven.be/~bosselae/ripemd160.html> config CRYPTO_RMD256 tristate "RIPEMD-256 digest algorithm" @@ -845,7 +845,7 @@ config CRYPTO_RMD256 (than RIPEMD-128). Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. - See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html> + See <https://homes.esat.kuleuven.be/~bosselae/ripemd160.html> config CRYPTO_RMD320 tristate "RIPEMD-320 digest algorithm" @@ -857,7 +857,7 @@ config CRYPTO_RMD320 (than RIPEMD-160). Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. - See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html> + See <https://homes.esat.kuleuven.be/~bosselae/ripemd160.html> config CRYPTO_SHA1 tristate "SHA1 digest algorithm" @@ -1045,7 +1045,7 @@ config CRYPTO_TGR192 Tiger was developed by Ross Anderson and Eli Biham. See also: - <http://www.cs.technion.ac.il/~biham/Reports/Tiger/>. + <https://www.cs.technion.ac.il/~biham/Reports/Tiger/>. config CRYPTO_WP512 tristate "Whirlpool digest algorithms" @@ -1221,7 +1221,7 @@ config CRYPTO_BLOWFISH designed for use on "large microprocessors". See also: - <http://www.schneier.com/blowfish.html> + <https://www.schneier.com/blowfish.html> config CRYPTO_BLOWFISH_COMMON tristate @@ -1230,7 +1230,7 @@ config CRYPTO_BLOWFISH_COMMON generic c and the assembler implementations. See also: - <http://www.schneier.com/blowfish.html> + <https://www.schneier.com/blowfish.html> config CRYPTO_BLOWFISH_X86_64 tristate "Blowfish cipher algorithm (x86_64)" @@ -1245,7 +1245,7 @@ config CRYPTO_BLOWFISH_X86_64 designed for use on "large microprocessors". See also: - <http://www.schneier.com/blowfish.html> + <https://www.schneier.com/blowfish.html> config CRYPTO_CAMELLIA tristate "Camellia cipher algorithms" @@ -1441,10 +1441,10 @@ config CRYPTO_SALSA20 Salsa20 stream cipher algorithm. 
Salsa20 is a stream cipher submitted to eSTREAM, the ECRYPT - Stream Cipher Project. See <http://www.ecrypt.eu.org/stream/> + Stream Cipher Project. See <https://www.ecrypt.eu.org/stream/> The Salsa20 stream cipher algorithm is designed by Daniel J. - Bernstein <djb@cr.yp.to>. See <http://cr.yp.to/snuffle.html> + Bernstein <djb@cr.yp.to>. See <https://cr.yp.to/snuffle.html> config CRYPTO_CHACHA20 tristate "ChaCha stream cipher algorithms" @@ -1456,7 +1456,7 @@ config CRYPTO_CHACHA20 ChaCha20 is a 256-bit high-speed stream cipher designed by Daniel J. Bernstein and further specified in RFC7539 for use in IETF protocols. This is the portable C implementation of ChaCha20. See also: - <http://cr.yp.to/chacha/chacha-20080128.pdf> + <https://cr.yp.to/chacha/chacha-20080128.pdf> XChaCha20 is the application of the XSalsa20 construction to ChaCha20 rather than to Salsa20. XChaCha20 extends ChaCha20's nonce length @@ -1509,7 +1509,7 @@ config CRYPTO_SERPENT variant of Serpent for compatibility with old kerneli.org code. See also: - <http://www.cl.cam.ac.uk/~rja14/serpent.html> + <https://www.cl.cam.ac.uk/~rja14/serpent.html> config CRYPTO_SERPENT_SSE2_X86_64 tristate "Serpent cipher algorithm (x86_64/SSE2)" @@ -1528,7 +1528,7 @@ config CRYPTO_SERPENT_SSE2_X86_64 blocks parallel using SSE2 instruction set. See also: - <http://www.cl.cam.ac.uk/~rja14/serpent.html> + <https://www.cl.cam.ac.uk/~rja14/serpent.html> config CRYPTO_SERPENT_SSE2_586 tristate "Serpent cipher algorithm (i586/SSE2)" @@ -1547,7 +1547,7 @@ config CRYPTO_SERPENT_SSE2_586 blocks parallel using SSE2 instruction set. See also: - <http://www.cl.cam.ac.uk/~rja14/serpent.html> + <https://www.cl.cam.ac.uk/~rja14/serpent.html> config CRYPTO_SERPENT_AVX_X86_64 tristate "Serpent cipher algorithm (x86_64/AVX)" @@ -1567,7 +1567,7 @@ config CRYPTO_SERPENT_AVX_X86_64 eight blocks parallel using the AVX instruction set. See also: - <http://www.cl.cam.ac.uk/~rja14/serpent.html> + <https://www.cl.cam.ac.uk/~rja14/serpent.html> config CRYPTO_SERPENT_AVX2_X86_64 tristate "Serpent cipher algorithm (x86_64/AVX2)" @@ -1583,7 +1583,7 @@ config CRYPTO_SERPENT_AVX2_X86_64 blocks parallel using AVX2 instruction set. See also: - <http://www.cl.cam.ac.uk/~rja14/serpent.html> + <https://www.cl.cam.ac.uk/~rja14/serpent.html> config CRYPTO_SM4 tristate "SM4 cipher algorithm" @@ -1640,7 +1640,7 @@ config CRYPTO_TWOFISH bits. See also: - <http://www.schneier.com/twofish.html> + <https://www.schneier.com/twofish.html> config CRYPTO_TWOFISH_COMMON tristate @@ -1662,7 +1662,7 @@ config CRYPTO_TWOFISH_586 bits. See also: - <http://www.schneier.com/twofish.html> + <https://www.schneier.com/twofish.html> config CRYPTO_TWOFISH_X86_64 tristate "Twofish cipher algorithm (x86_64)" @@ -1678,7 +1678,7 @@ config CRYPTO_TWOFISH_X86_64 bits. See also: - <http://www.schneier.com/twofish.html> + <https://www.schneier.com/twofish.html> config CRYPTO_TWOFISH_X86_64_3WAY tristate "Twofish cipher algorithm (x86_64, 3-way parallel)" @@ -1699,7 +1699,7 @@ config CRYPTO_TWOFISH_X86_64_3WAY blocks parallel, utilizing resources of out-of-order CPUs better. See also: - <http://www.schneier.com/twofish.html> + <https://www.schneier.com/twofish.html> config CRYPTO_TWOFISH_AVX_X86_64 tristate "Twofish cipher algorithm (x86_64/AVX)" @@ -1722,7 +1722,7 @@ config CRYPTO_TWOFISH_AVX_X86_64 eight blocks parallel using the AVX Instruction Set. 
See also: - <http://www.schneier.com/twofish.html> + <https://www.schneier.com/twofish.html> comment "Compression" diff --git a/crypto/acompress.c b/crypto/acompress.c index 84a76723e851..c32c72048a1c 100644 --- a/crypto/acompress.c +++ b/crypto/acompress.c @@ -109,6 +109,14 @@ struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type, } EXPORT_SYMBOL_GPL(crypto_alloc_acomp); +struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type, + u32 mask, int node) +{ + return crypto_alloc_tfm_node(alg_name, &crypto_acomp_type, type, mask, + node); +} +EXPORT_SYMBOL_GPL(crypto_alloc_acomp_node); + struct acomp_req *acomp_request_alloc(struct crypto_acomp *acomp) { struct crypto_tfm *tfm = crypto_acomp_tfm(acomp); diff --git a/crypto/adiantum.c b/crypto/adiantum.c index cf2b9f4103dd..7fbdc3270984 100644 --- a/crypto/adiantum.c +++ b/crypto/adiantum.c @@ -490,7 +490,6 @@ static bool adiantum_supported_algorithms(struct skcipher_alg *streamcipher_alg, static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; const char *nhpoly1305_name; struct skcipher_instance *inst; @@ -500,14 +499,9 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) struct shash_alg *hash_alg; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL); if (!inst) @@ -565,8 +559,6 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) hash_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = streamcipher_alg->base.cra_flags & - CRYPTO_ALG_ASYNC; inst->alg.base.cra_blocksize = BLOCKCIPHER_BLOCK_SIZE; inst->alg.base.cra_ctxsize = sizeof(struct adiantum_tfm_ctx); inst->alg.base.cra_alignmask = streamcipher_alg->base.cra_alignmask | diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 28fc323e3fe3..5882ed46f1ad 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -635,6 +635,7 @@ void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst, if (!ctx->used) ctx->merge = 0; + ctx->init = ctx->more; } EXPORT_SYMBOL_GPL(af_alg_pull_tsgl); @@ -734,9 +735,10 @@ EXPORT_SYMBOL_GPL(af_alg_wmem_wakeup); * * @sk socket of connection to user space * @flags If MSG_DONTWAIT is set, then only report if function would sleep + * @min Set to minimum request size if partial requests are allowed. 
* @return 0 when writable memory is available, < 0 upon error */ -int af_alg_wait_for_data(struct sock *sk, unsigned flags) +int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min) { DEFINE_WAIT_FUNC(wait, woken_wake_function); struct alg_sock *ask = alg_sk(sk); @@ -754,7 +756,9 @@ int af_alg_wait_for_data(struct sock *sk, unsigned flags) if (signal_pending(current)) break; timeout = MAX_SCHEDULE_TIMEOUT; - if (sk_wait_event(sk, &timeout, (ctx->used || !ctx->more), + if (sk_wait_event(sk, &timeout, + ctx->init && (!ctx->more || + (min && ctx->used >= min)), &wait)) { err = 0; break; @@ -843,10 +847,11 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, } lock_sock(sk); - if (!ctx->more && ctx->used) { + if (ctx->init && (init || !ctx->more)) { err = -EINVAL; goto unlock; } + ctx->init = true; if (init) { ctx->enc = enc; diff --git a/crypto/algapi.c b/crypto/algapi.c index 92abdf675992..fdabf2675b63 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -690,6 +690,8 @@ int crypto_grab_spawn(struct crypto_spawn *spawn, struct crypto_instance *inst, spawn->mask = mask; spawn->next = inst->spawns; inst->spawns = spawn; + inst->alg.cra_flags |= + (alg->cra_flags & CRYPTO_ALG_INHERITED_FLAGS); err = 0; } up_write(&crypto_alg_sem); @@ -816,7 +818,23 @@ struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb) } EXPORT_SYMBOL_GPL(crypto_get_attr_type); -int crypto_check_attr_type(struct rtattr **tb, u32 type) +/** + * crypto_check_attr_type() - check algorithm type and compute inherited mask + * @tb: the template parameters + * @type: the algorithm type the template would be instantiated as + * @mask_ret: (output) the mask that should be passed to crypto_grab_*() + * to restrict the flags of any inner algorithms + * + * Validate that the algorithm type the user requested is compatible with the + * one the template would actually be instantiated as. E.g., if the user is + * doing crypto_alloc_shash("cbc(aes)", ...), this would return an error because + * the "cbc" template creates an "skcipher" algorithm, not an "shash" algorithm. + * + * Also compute the mask to use to restrict the flags of any inner algorithms. 
+ * + * Return: 0 on success; -errno on failure + */ +int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret) { struct crypto_attr_type *algt; @@ -827,6 +845,7 @@ int crypto_check_attr_type(struct rtattr **tb, u32 type) if ((algt->type ^ type) & algt->mask) return -EINVAL; + *mask_ret = crypto_algt_inherited_mask(algt); return 0; } EXPORT_SYMBOL_GPL(crypto_check_attr_type); diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index 0ae000a61c7f..d48d2156e621 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -106,8 +106,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t usedpages = 0; /* [in] RX bufs to be used from user */ size_t processed = 0; /* [in] TX bufs to be consumed */ - if (!ctx->used) { - err = af_alg_wait_for_data(sk, flags); + if (!ctx->init || ctx->more) { + err = af_alg_wait_for_data(sk, flags, 0); if (err) return err; } diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index ec5567c87a6d..a51ba22fef58 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -61,8 +61,8 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, int err = 0; size_t len = 0; - if (!ctx->used) { - err = af_alg_wait_for_data(sk, flags); + if (!ctx->init || (ctx->more && ctx->used < bs)) { + err = af_alg_wait_for_data(sk, flags, bs); if (err) return err; } diff --git a/crypto/api.c b/crypto/api.c index edcf690800d4..5d8fe60b36c1 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -433,8 +433,9 @@ err: } EXPORT_SYMBOL_GPL(crypto_alloc_base); -void *crypto_create_tfm(struct crypto_alg *alg, - const struct crypto_type *frontend) +void *crypto_create_tfm_node(struct crypto_alg *alg, + const struct crypto_type *frontend, + int node) { char *mem; struct crypto_tfm *tfm = NULL; @@ -445,12 +446,13 @@ void *crypto_create_tfm(struct crypto_alg *alg, tfmsize = frontend->tfmsize; total = tfmsize + sizeof(*tfm) + frontend->extsize(alg); - mem = kzalloc(total, GFP_KERNEL); + mem = kzalloc_node(total, GFP_KERNEL, node); if (mem == NULL) goto out_err; tfm = (struct crypto_tfm *)(mem + tfmsize); tfm->__crt_alg = alg; + tfm->node = node; err = frontend->init_tfm(tfm); if (err) @@ -472,7 +474,7 @@ out_err: out: return mem; } -EXPORT_SYMBOL_GPL(crypto_create_tfm); +EXPORT_SYMBOL_GPL(crypto_create_tfm_node); struct crypto_alg *crypto_find_alg(const char *alg_name, const struct crypto_type *frontend, @@ -490,11 +492,13 @@ struct crypto_alg *crypto_find_alg(const char *alg_name, EXPORT_SYMBOL_GPL(crypto_find_alg); /* - * crypto_alloc_tfm - Locate algorithm and allocate transform + * crypto_alloc_tfm_node - Locate algorithm and allocate transform * @alg_name: Name of algorithm * @frontend: Frontend algorithm type * @type: Type of algorithm * @mask: Mask for type comparison + * @node: NUMA node in which users desire to put requests, if node is + * NUMA_NO_NODE, it means users have no special requirement. * * crypto_alloc_tfm() will first attempt to locate an already loaded * algorithm. If that fails and the kernel supports dynamically loadable @@ -509,8 +513,10 @@ EXPORT_SYMBOL_GPL(crypto_find_alg); * * In case of error the return value is an error pointer. 
*/ -void *crypto_alloc_tfm(const char *alg_name, - const struct crypto_type *frontend, u32 type, u32 mask) + +void *crypto_alloc_tfm_node(const char *alg_name, + const struct crypto_type *frontend, u32 type, u32 mask, + int node) { void *tfm; int err; @@ -524,7 +530,7 @@ void *crypto_alloc_tfm(const char *alg_name, goto err; } - tfm = crypto_create_tfm(alg, frontend); + tfm = crypto_create_tfm_node(alg, frontend, node); if (!IS_ERR(tfm)) return tfm; @@ -542,7 +548,7 @@ err: return ERR_PTR(err); } -EXPORT_SYMBOL_GPL(crypto_alloc_tfm); +EXPORT_SYMBOL_GPL(crypto_alloc_tfm_node); /* * crypto_destroy_tfm - Free crypto transform diff --git a/crypto/authenc.c b/crypto/authenc.c index 775e7138fd10..670bf1a01d00 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -372,7 +372,6 @@ static void crypto_authenc_free(struct aead_instance *inst) static int crypto_authenc_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct authenc_instance_ctx *ctx; @@ -381,14 +380,9 @@ static int crypto_authenc_create(struct crypto_template *tmpl, struct skcipher_alg *enc; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -423,8 +417,6 @@ static int crypto_authenc_create(struct crypto_template *tmpl, enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = (auth_base->cra_flags | - enc->base.cra_flags) & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = enc->base.cra_priority * 10 + auth_base->cra_priority; inst->alg.base.cra_blocksize = enc->base.cra_blocksize; diff --git a/crypto/authencesn.c b/crypto/authencesn.c index 149b70df2a91..b60e61b1904c 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c @@ -390,7 +390,6 @@ static void crypto_authenc_esn_free(struct aead_instance *inst) static int crypto_authenc_esn_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct authenc_esn_instance_ctx *ctx; @@ -399,14 +398,9 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl, struct skcipher_alg *enc; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -437,8 +431,6 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl, enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = (auth_base->cra_flags | - enc->base.cra_flags) & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = enc->base.cra_priority * 10 + auth_base->cra_priority; inst->alg.base.cra_blocksize = enc->base.cra_blocksize; diff --git a/crypto/blake2b_generic.c b/crypto/blake2b_generic.c index 0ffd8d92e308..a2ffe60e06d3 100644 --- a/crypto/blake2b_generic.c +++ b/crypto/blake2b_generic.c @@ -8,7 +8,7 @@ * * - CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0 * - OpenSSL license : 
https://www.openssl.org/source/license.html - * - Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0 + * - Apache 2.0 : https://www.apache.org/licenses/LICENSE-2.0 * * More information about the BLAKE2 hash function can be found at * https://blake2.net. diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c index 9a5783e5196a..0b9f409f7370 100644 --- a/crypto/camellia_generic.c +++ b/crypto/camellia_generic.c @@ -6,7 +6,7 @@ /* * Algorithm Specification - * http://info.isl.ntt.co.jp/crypt/eng/camellia/specifications.html + * https://info.isl.ntt.co.jp/crypt/eng/camellia/specifications.html */ /* diff --git a/crypto/ccm.c b/crypto/ccm.c index d1fb01bbc814..494d70901186 100644 --- a/crypto/ccm.c +++ b/crypto/ccm.c @@ -447,7 +447,6 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, const char *ctr_name, const char *mac_name) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct ccm_instance_ctx *ictx; @@ -455,14 +454,9 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, struct hash_alg_common *mac; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL); if (!inst) @@ -470,7 +464,7 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, ictx = aead_instance_ctx(inst); err = crypto_grab_ahash(&ictx->mac, aead_crypto_instance(inst), - mac_name, 0, CRYPTO_ALG_ASYNC); + mac_name, 0, mask | CRYPTO_ALG_ASYNC); if (err) goto err_free_inst; mac = crypto_spawn_ahash_alg(&ictx->mac); @@ -507,7 +501,6 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, mac->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = ctr->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = (mac->base.cra_priority + ctr->base.cra_priority) / 2; inst->alg.base.cra_blocksize = 1; @@ -712,21 +705,15 @@ static void crypto_rfc4309_free(struct aead_instance *inst) static int crypto_rfc4309_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct crypto_aead_spawn *spawn; struct aead_alg *alg; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) @@ -759,7 +746,6 @@ static int crypto_rfc4309_create(struct crypto_template *tmpl, CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = 1; inst->alg.base.cra_alignmask = alg->base.cra_alignmask; @@ -878,9 +864,10 @@ static int cbcmac_create(struct crypto_template *tmpl, struct rtattr **tb) struct shash_instance *inst; struct crypto_cipher_spawn *spawn; struct crypto_alg *alg; + u32 mask; int err; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask); if (err) return err; @@ -890,7 +877,7 @@ static int 
cbcmac_create(struct crypto_template *tmpl, struct rtattr **tb) spawn = shash_instance_ctx(inst); err = crypto_grab_cipher(spawn, shash_crypto_instance(inst), - crypto_attr_alg_name(tb[1]), 0, 0); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; alg = crypto_spawn_cipher_alg(spawn); diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c index ccaea5cb66d1..97bbb135e9a6 100644 --- a/crypto/chacha20poly1305.c +++ b/crypto/chacha20poly1305.c @@ -555,7 +555,6 @@ static void chachapoly_free(struct aead_instance *inst) static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, const char *name, unsigned int ivsize) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct chachapoly_instance_ctx *ctx; @@ -566,14 +565,9 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, if (ivsize > CHACHAPOLY_IV_SIZE) return -EINVAL; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -613,8 +607,6 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, poly->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = (chacha->base.cra_flags | - poly->base.cra_flags) & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = (chacha->base.cra_priority + poly->base.cra_priority) / 2; inst->alg.base.cra_blocksize = 1; diff --git a/crypto/cmac.c b/crypto/cmac.c index 143a6544c873..df36be1efb81 100644 --- a/crypto/cmac.c +++ b/crypto/cmac.c @@ -225,9 +225,10 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb) struct crypto_cipher_spawn *spawn; struct crypto_alg *alg; unsigned long alignmask; + u32 mask; int err; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask); if (err) return err; @@ -237,7 +238,7 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb) spawn = shash_instance_ctx(inst); err = crypto_grab_cipher(spawn, shash_crypto_instance(inst), - crypto_attr_alg_name(tb[1]), 0, 0); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; alg = crypto_spawn_cipher_alg(spawn); diff --git a/crypto/cryptd.c b/crypto/cryptd.c index 283212262adb..a1bea0f4baa8 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -191,17 +191,20 @@ static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm) return ictx->queue; } -static inline void cryptd_check_internal(struct rtattr **tb, u32 *type, - u32 *mask) +static void cryptd_type_and_mask(struct crypto_attr_type *algt, + u32 *type, u32 *mask) { - struct crypto_attr_type *algt; + /* + * cryptd is allowed to wrap internal algorithms, but in that case the + * resulting cryptd instance will be marked as internal as well. + */ + *type = algt->type & CRYPTO_ALG_INTERNAL; + *mask = algt->mask & CRYPTO_ALG_INTERNAL; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return; + /* No point in cryptd wrapping an algorithm that's already async. 
*/ + *mask |= CRYPTO_ALG_ASYNC; - *type |= algt->type & CRYPTO_ALG_INTERNAL; - *mask |= algt->mask & CRYPTO_ALG_INTERNAL; + *mask |= crypto_algt_inherited_mask(algt); } static int cryptd_init_instance(struct crypto_instance *inst, @@ -364,6 +367,7 @@ static void cryptd_skcipher_free(struct skcipher_instance *inst) static int cryptd_create_skcipher(struct crypto_template *tmpl, struct rtattr **tb, + struct crypto_attr_type *algt, struct cryptd_queue *queue) { struct skcipherd_instance_ctx *ctx; @@ -373,10 +377,7 @@ static int cryptd_create_skcipher(struct crypto_template *tmpl, u32 mask; int err; - type = 0; - mask = CRYPTO_ALG_ASYNC; - - cryptd_check_internal(tb, &type, &mask); + cryptd_type_and_mask(algt, &type, &mask); inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -395,9 +396,8 @@ static int cryptd_create_skcipher(struct crypto_template *tmpl, if (err) goto err_free_inst; - inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC | - (alg->base.cra_flags & CRYPTO_ALG_INTERNAL); - + inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC | + (alg->base.cra_flags & CRYPTO_ALG_INTERNAL); inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg); inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg); inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg); @@ -633,16 +633,17 @@ static void cryptd_hash_free(struct ahash_instance *inst) } static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, + struct crypto_attr_type *algt, struct cryptd_queue *queue) { struct hashd_instance_ctx *ctx; struct ahash_instance *inst; struct shash_alg *alg; - u32 type = 0; - u32 mask = 0; + u32 type; + u32 mask; int err; - cryptd_check_internal(tb, &type, &mask); + cryptd_type_and_mask(algt, &type, &mask); inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -661,10 +662,9 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, if (err) goto err_free_inst; - inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC | - (alg->base.cra_flags & (CRYPTO_ALG_INTERNAL | + inst->alg.halg.base.cra_flags |= CRYPTO_ALG_ASYNC | + (alg->base.cra_flags & (CRYPTO_ALG_INTERNAL| CRYPTO_ALG_OPTIONAL_KEY)); - inst->alg.halg.digestsize = alg->digestsize; inst->alg.halg.statesize = alg->statesize; inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx); @@ -820,16 +820,17 @@ static void cryptd_aead_free(struct aead_instance *inst) static int cryptd_create_aead(struct crypto_template *tmpl, struct rtattr **tb, + struct crypto_attr_type *algt, struct cryptd_queue *queue) { struct aead_instance_ctx *ctx; struct aead_instance *inst; struct aead_alg *alg; - u32 type = 0; - u32 mask = CRYPTO_ALG_ASYNC; + u32 type; + u32 mask; int err; - cryptd_check_internal(tb, &type, &mask); + cryptd_type_and_mask(algt, &type, &mask); inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -848,8 +849,8 @@ static int cryptd_create_aead(struct crypto_template *tmpl, if (err) goto err_free_inst; - inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC | - (alg->base.cra_flags & CRYPTO_ALG_INTERNAL); + inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC | + (alg->base.cra_flags & CRYPTO_ALG_INTERNAL); inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx); inst->alg.ivsize = crypto_aead_alg_ivsize(alg); @@ -884,11 +885,11 @@ static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb) switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { case CRYPTO_ALG_TYPE_SKCIPHER: - return cryptd_create_skcipher(tmpl, tb, &queue); + return cryptd_create_skcipher(tmpl, tb, 
algt, &queue); case CRYPTO_ALG_TYPE_HASH: - return cryptd_create_hash(tmpl, tb, &queue); + return cryptd_create_hash(tmpl, tb, algt, &queue); case CRYPTO_ALG_TYPE_AEAD: - return cryptd_create_aead(tmpl, tb, &queue); + return cryptd_create_aead(tmpl, tb, algt, &queue); } return -EINVAL; diff --git a/crypto/ctr.c b/crypto/ctr.c index 31ac4ae598e1..c39fcffba27f 100644 --- a/crypto/ctr.c +++ b/crypto/ctr.c @@ -256,29 +256,20 @@ static void crypto_rfc3686_free(struct skcipher_instance *inst) static int crypto_rfc3686_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; struct skcipher_instance *inst; struct skcipher_alg *alg; struct crypto_skcipher_spawn *spawn; u32 mask; - int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) - return -EINVAL; + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) return -ENOMEM; - mask = crypto_requires_sync(algt->type, algt->mask) | - crypto_requires_off(algt->type, algt->mask, - CRYPTO_ALG_NEED_FALLBACK); - spawn = skcipher_instance_ctx(inst); err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst), @@ -310,8 +301,6 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl, inst->alg.base.cra_blocksize = 1; inst->alg.base.cra_alignmask = alg->base.cra_alignmask; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; - inst->alg.ivsize = CTR_RFC3686_IV_SIZE; inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg); inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) + diff --git a/crypto/cts.c b/crypto/cts.c index 5e005c4f0221..3766d47ebcc0 100644 --- a/crypto/cts.c +++ b/crypto/cts.c @@ -325,19 +325,13 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb) { struct crypto_skcipher_spawn *spawn; struct skcipher_instance *inst; - struct crypto_attr_type *algt; struct skcipher_alg *alg; u32 mask; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) @@ -364,7 +358,6 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb) if (err) goto err_free_inst; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = alg->base.cra_blocksize; inst->alg.base.cra_alignmask = alg->base.cra_alignmask; diff --git a/crypto/dh.c b/crypto/dh.c index 566f624a2de2..cd4f32092e5c 100644 --- a/crypto/dh.c +++ b/crypto/dh.c @@ -9,6 +9,7 @@ #include <crypto/internal/kpp.h> #include <crypto/kpp.h> #include <crypto/dh.h> +#include <linux/fips.h> #include <linux/mpi.h> struct dh_ctx { @@ -179,6 +180,43 @@ static int dh_compute_value(struct kpp_request *req) if (ret) goto err_free_base; + if (fips_enabled) { + /* SP800-56A rev3 5.7.1.1 check: Validation of shared secret */ + if (req->src) { + MPI pone; + + /* z <= 1 */ + if (mpi_cmp_ui(val, 1) < 1) { + ret = -EBADMSG; + goto err_free_base; + } + + /* z == p - 1 */ + pone = mpi_alloc(0); + + if (!pone) { + ret = -ENOMEM; + goto err_free_base; + } + + ret = mpi_sub_ui(pone, ctx->p, 1); + 
if (!ret && !mpi_cmp(pone, val)) + ret = -EBADMSG; + + mpi_free(pone); + + if (ret) + goto err_free_base; + + /* SP800-56A rev 3 5.6.2.1.3 key check */ + } else { + if (dh_is_pubkey_valid(ctx, val)) { + ret = -EAGAIN; + goto err_free_val; + } + } + } + ret = mpi_write_to_sgl(val, req->dst, req->dst_len, &sign); if (ret) goto err_free_base; diff --git a/crypto/ecc.c b/crypto/ecc.c index 02d35be7702b..8acf8433ca29 100644 --- a/crypto/ecc.c +++ b/crypto/ecc.c @@ -940,7 +940,7 @@ static bool ecc_point_is_zero(const struct ecc_point *point) } /* Point multiplication algorithm using Montgomery's ladder with co-Z - * coordinates. From http://eprint.iacr.org/2011/338.pdf + * coordinates. From https://eprint.iacr.org/2011/338.pdf */ /* Double in place */ @@ -1404,7 +1404,9 @@ int ecc_make_pub_key(unsigned int curve_id, unsigned int ndigits, } ecc_point_mult(pk, &curve->g, priv, NULL, curve, ndigits); - if (ecc_point_is_zero(pk)) { + + /* SP800-56A rev 3 5.6.2.1.3 key check */ + if (ecc_is_pubkey_valid_full(curve, pk)) { ret = -EAGAIN; goto err_free_point; } @@ -1452,6 +1454,33 @@ int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve, } EXPORT_SYMBOL(ecc_is_pubkey_valid_partial); +/* SP800-56A section 5.6.2.3.3 full verification */ +int ecc_is_pubkey_valid_full(const struct ecc_curve *curve, + struct ecc_point *pk) +{ + struct ecc_point *nQ; + + /* Checks 1 through 3 */ + int ret = ecc_is_pubkey_valid_partial(curve, pk); + + if (ret) + return ret; + + /* Check 4: Verify that nQ is the zero point. */ + nQ = ecc_alloc_point(pk->ndigits); + if (!nQ) + return -ENOMEM; + + ecc_point_mult(nQ, pk, curve->n, NULL, curve, pk->ndigits); + if (!ecc_point_is_zero(nQ)) + ret = -EINVAL; + + ecc_free_point(nQ); + + return ret; +} +EXPORT_SYMBOL(ecc_is_pubkey_valid_full); + int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits, const u64 *private_key, const u64 *public_key, u64 *secret) @@ -1495,11 +1524,16 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits, ecc_point_mult(product, pk, priv, rand_z, curve, ndigits); - ecc_swap_digits(product->x, secret, ndigits); - - if (ecc_point_is_zero(product)) + if (ecc_point_is_zero(product)) { ret = -EFAULT; + goto err_validity; + } + + ecc_swap_digits(product->x, secret, ndigits); +err_validity: + memzero_explicit(priv, sizeof(priv)); + memzero_explicit(rand_z, sizeof(rand_z)); ecc_free_point(product); err_alloc_product: ecc_free_point(pk); diff --git a/crypto/ecc.h b/crypto/ecc.h index ab0eb70b9c09..d4e546b9ad79 100644 --- a/crypto/ecc.h +++ b/crypto/ecc.h @@ -148,6 +148,20 @@ int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve, struct ecc_point *pk); /** + * ecc_is_pubkey_valid_full() - Full public key validation + * + * @curve: elliptic curve domain parameters + * @pk: public key as a point + * + * Valdiate public key according to SP800-56A section 5.6.2.3.3 ECC Full + * Public-Key Validation Routine. + * + * Return: 0 if validation is successful, -EINVAL if validation is failed. + */ +int ecc_is_pubkey_valid_full(const struct ecc_curve *curve, + struct ecc_point *pk); + +/** * vli_is_zero() - Determine is vli is zero * * @vli: vli to check. 
diff --git a/crypto/echainiv.c b/crypto/echainiv.c index 4a2f02baba14..69686668625e 100644 --- a/crypto/echainiv.c +++ b/crypto/echainiv.c @@ -115,7 +115,7 @@ static int echainiv_aead_create(struct crypto_template *tmpl, struct aead_instance *inst; int err; - inst = aead_geniv_alloc(tmpl, tb, 0, 0); + inst = aead_geniv_alloc(tmpl, tb); if (IS_ERR(inst)) return PTR_ERR(inst); diff --git a/crypto/essiv.c b/crypto/essiv.c index a7f45dbc4ee2..d012be23d496 100644 --- a/crypto/essiv.c +++ b/crypto/essiv.c @@ -466,7 +466,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) return PTR_ERR(shash_name); type = algt->type & algt->mask; - mask = crypto_requires_sync(algt->type, algt->mask); + mask = crypto_algt_inherited_mask(algt); switch (type) { case CRYPTO_ALG_TYPE_SKCIPHER: @@ -525,7 +525,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) /* Synchronous hash, e.g., "sha256" */ _hash_alg = crypto_alg_mod_lookup(shash_name, CRYPTO_ALG_TYPE_SHASH, - CRYPTO_ALG_TYPE_MASK); + CRYPTO_ALG_TYPE_MASK | mask); if (IS_ERR(_hash_alg)) { err = PTR_ERR(_hash_alg); goto out_drop_skcipher; @@ -557,7 +557,12 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) hash_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto out_free_hash; - base->cra_flags = block_base->cra_flags & CRYPTO_ALG_ASYNC; + /* + * hash_alg wasn't gotten via crypto_grab*(), so we need to inherit its + * flags manually. + */ + base->cra_flags |= (hash_alg->base.cra_flags & + CRYPTO_ALG_INHERITED_FLAGS); base->cra_blocksize = block_base->cra_blocksize; base->cra_ctxsize = sizeof(struct essiv_tfm_ctx); base->cra_alignmask = block_base->cra_alignmask; diff --git a/crypto/gcm.c b/crypto/gcm.c index 0103d28c541e..3a36a9533c96 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -578,7 +578,6 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, const char *ctr_name, const char *ghash_name) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct gcm_instance_ctx *ctx; @@ -586,14 +585,9 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, struct hash_alg_common *ghash; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -635,8 +629,6 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = (ghash->base.cra_flags | - ctr->base.cra_flags) & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = (ghash->base.cra_priority + ctr->base.cra_priority) / 2; inst->alg.base.cra_blocksize = 1; @@ -835,21 +827,15 @@ static void crypto_rfc4106_free(struct aead_instance *inst) static int crypto_rfc4106_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct crypto_aead_spawn *spawn; struct aead_alg *alg; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*spawn), 
GFP_KERNEL); if (!inst) @@ -882,7 +868,6 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl, CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = 1; inst->alg.base.cra_alignmask = alg->base.cra_alignmask; @@ -1057,21 +1042,15 @@ static void crypto_rfc4543_free(struct aead_instance *inst) static int crypto_rfc4543_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct aead_alg *alg; struct crypto_rfc4543_instance_ctx *ctx; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -1104,7 +1083,6 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl, CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = 1; inst->alg.base.cra_alignmask = alg->base.cra_alignmask; diff --git a/crypto/geniv.c b/crypto/geniv.c index 6a90c52d49ad..bee4621b4f12 100644 --- a/crypto/geniv.c +++ b/crypto/geniv.c @@ -39,22 +39,19 @@ static void aead_geniv_free(struct aead_instance *inst) } struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, - struct rtattr **tb, u32 type, u32 mask) + struct rtattr **tb) { struct crypto_aead_spawn *spawn; - struct crypto_attr_type *algt; struct aead_instance *inst; struct aead_alg *alg; unsigned int ivsize; unsigned int maxauthsize; + u32 mask; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return ERR_CAST(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return ERR_PTR(-EINVAL); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return ERR_PTR(err); inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) @@ -62,11 +59,8 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, spawn = aead_instance_ctx(inst); - /* Ignore async algorithms if necessary. 
*/ - mask |= crypto_requires_sync(algt->type, algt->mask); - err = crypto_grab_aead(spawn, aead_crypto_instance(inst), - crypto_attr_alg_name(tb[1]), type, mask); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; @@ -89,7 +83,6 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = alg->base.cra_blocksize; inst->alg.base.cra_alignmask = alg->base.cra_alignmask; diff --git a/crypto/hmac.c b/crypto/hmac.c index e38bfb948278..25856aa7ccbf 100644 --- a/crypto/hmac.c +++ b/crypto/hmac.c @@ -168,11 +168,12 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) struct crypto_shash_spawn *spawn; struct crypto_alg *alg; struct shash_alg *salg; + u32 mask; int err; int ds; int ss; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask); if (err) return err; @@ -182,7 +183,7 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) spawn = shash_instance_ctx(inst); err = crypto_grab_shash(spawn, shash_crypto_instance(inst), - crypto_attr_alg_name(tb[1]), 0, 0); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; salg = crypto_spawn_shash_alg(spawn); diff --git a/crypto/internal.h b/crypto/internal.h index ff06a3bd1ca1..1b92a5a61852 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -68,13 +68,28 @@ void crypto_remove_final(struct list_head *list); void crypto_shoot_alg(struct crypto_alg *alg); struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, u32 mask); -void *crypto_create_tfm(struct crypto_alg *alg, - const struct crypto_type *frontend); +void *crypto_create_tfm_node(struct crypto_alg *alg, + const struct crypto_type *frontend, int node); + +static inline void *crypto_create_tfm(struct crypto_alg *alg, + const struct crypto_type *frontend) +{ + return crypto_create_tfm_node(alg, frontend, NUMA_NO_NODE); +} + struct crypto_alg *crypto_find_alg(const char *alg_name, const struct crypto_type *frontend, u32 type, u32 mask); -void *crypto_alloc_tfm(const char *alg_name, - const struct crypto_type *frontend, u32 type, u32 mask); + +void *crypto_alloc_tfm_node(const char *alg_name, + const struct crypto_type *frontend, u32 type, u32 mask, + int node); + +static inline void *crypto_alloc_tfm(const char *alg_name, + const struct crypto_type *frontend, u32 type, u32 mask) +{ + return crypto_alloc_tfm_node(alg_name, frontend, type, mask, NUMA_NO_NODE); +} int crypto_probing_notify(unsigned long val, void *v); diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c index 57f4a1ac738b..6e147c43fc18 100644 --- a/crypto/jitterentropy.c +++ b/crypto/jitterentropy.c @@ -7,7 +7,7 @@ * Design * ====== * - * See http://www.chronox.de/jent.html + * See https://www.chronox.de/jent.html * * License * ======= @@ -47,7 +47,7 @@ /* * This Jitterentropy RNG is based on the jitterentropy library - * version 2.2.0 provided at http://www.chronox.de/jent.html + * version 2.2.0 provided at https://www.chronox.de/jent.html */ #ifdef __OPTIMIZE__ diff --git a/crypto/lrw.c b/crypto/lrw.c index 5b07a7c09296..bcf09fbc750a 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -9,7 +9,7 @@ */ /* This implementation is checked against the test vectors in the above * document and by a test vector provided by Ken Buchanan at - * 
http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html + * https://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html * * The test vectors are included in the testing module tcrypt.[ch] */ @@ -27,7 +27,7 @@ #define LRW_BLOCK_SIZE 16 -struct priv { +struct lrw_tfm_ctx { struct crypto_skcipher *child; /* @@ -49,12 +49,12 @@ struct priv { be128 mulinc[128]; }; -struct rctx { +struct lrw_request_ctx { be128 t; struct skcipher_request subreq; }; -static inline void setbit128_bbe(void *b, int bit) +static inline void lrw_setbit128_bbe(void *b, int bit) { __set_bit(bit ^ (0x80 - #ifdef __BIG_ENDIAN @@ -65,10 +65,10 @@ static inline void setbit128_bbe(void *b, int bit) ), b); } -static int setkey(struct crypto_skcipher *parent, const u8 *key, - unsigned int keylen) +static int lrw_setkey(struct crypto_skcipher *parent, const u8 *key, + unsigned int keylen) { - struct priv *ctx = crypto_skcipher_ctx(parent); + struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(parent); struct crypto_skcipher *child = ctx->child; int err, bsize = LRW_BLOCK_SIZE; const u8 *tweak = key + keylen - bsize; @@ -92,7 +92,7 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key, /* initialize optimization table */ for (i = 0; i < 128; i++) { - setbit128_bbe(&tmp, i); + lrw_setbit128_bbe(&tmp, i); ctx->mulinc[i] = tmp; gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table); } @@ -108,10 +108,10 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key, * For example: * * u32 counter[4] = { 0xFFFFFFFF, 0x1, 0x0, 0x0 }; - * int i = next_index(&counter); + * int i = lrw_next_index(&counter); * // i == 33, counter == { 0x0, 0x2, 0x0, 0x0 } */ -static int next_index(u32 *counter) +static int lrw_next_index(u32 *counter) { int i, res = 0; @@ -135,14 +135,14 @@ static int next_index(u32 *counter) * We compute the tweak masks twice (both before and after the ECB encryption or * decryption) to avoid having to allocate a temporary buffer and/or make * mutliple calls to the 'ecb(..)' instance, which usually would be slower than - * just doing the next_index() calls again. + * just doing the lrw_next_index() calls again. 
*/ -static int xor_tweak(struct skcipher_request *req, bool second_pass) +static int lrw_xor_tweak(struct skcipher_request *req, bool second_pass) { const int bs = LRW_BLOCK_SIZE; struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct priv *ctx = crypto_skcipher_ctx(tfm); - struct rctx *rctx = skcipher_request_ctx(req); + const struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); + struct lrw_request_ctx *rctx = skcipher_request_ctx(req); be128 t = rctx->t; struct skcipher_walk w; __be32 *iv; @@ -178,7 +178,8 @@ static int xor_tweak(struct skcipher_request *req, bool second_pass) /* T <- I*Key2, using the optimization * discussed in the specification */ - be128_xor(&t, &t, &ctx->mulinc[next_index(counter)]); + be128_xor(&t, &t, + &ctx->mulinc[lrw_next_index(counter)]); } while ((avail -= bs) >= bs); if (second_pass && w.nbytes == w.total) { @@ -194,38 +195,40 @@ static int xor_tweak(struct skcipher_request *req, bool second_pass) return err; } -static int xor_tweak_pre(struct skcipher_request *req) +static int lrw_xor_tweak_pre(struct skcipher_request *req) { - return xor_tweak(req, false); + return lrw_xor_tweak(req, false); } -static int xor_tweak_post(struct skcipher_request *req) +static int lrw_xor_tweak_post(struct skcipher_request *req) { - return xor_tweak(req, true); + return lrw_xor_tweak(req, true); } -static void crypt_done(struct crypto_async_request *areq, int err) +static void lrw_crypt_done(struct crypto_async_request *areq, int err) { struct skcipher_request *req = areq->data; if (!err) { - struct rctx *rctx = skcipher_request_ctx(req); + struct lrw_request_ctx *rctx = skcipher_request_ctx(req); rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - err = xor_tweak_post(req); + err = lrw_xor_tweak_post(req); } skcipher_request_complete(req, err); } -static void init_crypt(struct skcipher_request *req) +static void lrw_init_crypt(struct skcipher_request *req) { - struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); - struct rctx *rctx = skcipher_request_ctx(req); + const struct lrw_tfm_ctx *ctx = + crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); + struct lrw_request_ctx *rctx = skcipher_request_ctx(req); struct skcipher_request *subreq = &rctx->subreq; skcipher_request_set_tfm(subreq, ctx->child); - skcipher_request_set_callback(subreq, req->base.flags, crypt_done, req); + skcipher_request_set_callback(subreq, req->base.flags, lrw_crypt_done, + req); /* pass req->iv as IV (will be used by xor_tweak, ECB will ignore it) */ skcipher_request_set_crypt(subreq, req->dst, req->dst, req->cryptlen, req->iv); @@ -237,33 +240,33 @@ static void init_crypt(struct skcipher_request *req) gf128mul_64k_bbe(&rctx->t, ctx->table); } -static int encrypt(struct skcipher_request *req) +static int lrw_encrypt(struct skcipher_request *req) { - struct rctx *rctx = skcipher_request_ctx(req); + struct lrw_request_ctx *rctx = skcipher_request_ctx(req); struct skcipher_request *subreq = &rctx->subreq; - init_crypt(req); - return xor_tweak_pre(req) ?: + lrw_init_crypt(req); + return lrw_xor_tweak_pre(req) ?: crypto_skcipher_encrypt(subreq) ?: - xor_tweak_post(req); + lrw_xor_tweak_post(req); } -static int decrypt(struct skcipher_request *req) +static int lrw_decrypt(struct skcipher_request *req) { - struct rctx *rctx = skcipher_request_ctx(req); + struct lrw_request_ctx *rctx = skcipher_request_ctx(req); struct skcipher_request *subreq = &rctx->subreq; - init_crypt(req); - return xor_tweak_pre(req) ?: + lrw_init_crypt(req); + return lrw_xor_tweak_pre(req) ?: 
crypto_skcipher_decrypt(subreq) ?: - xor_tweak_post(req); + lrw_xor_tweak_post(req); } -static int init_tfm(struct crypto_skcipher *tfm) +static int lrw_init_tfm(struct crypto_skcipher *tfm) { struct skcipher_instance *inst = skcipher_alg_instance(tfm); struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst); - struct priv *ctx = crypto_skcipher_ctx(tfm); + struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); struct crypto_skcipher *cipher; cipher = crypto_spawn_skcipher(spawn); @@ -273,45 +276,39 @@ static int init_tfm(struct crypto_skcipher *tfm) ctx->child = cipher; crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(cipher) + - sizeof(struct rctx)); + sizeof(struct lrw_request_ctx)); return 0; } -static void exit_tfm(struct crypto_skcipher *tfm) +static void lrw_exit_tfm(struct crypto_skcipher *tfm) { - struct priv *ctx = crypto_skcipher_ctx(tfm); + struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); if (ctx->table) gf128mul_free_64k(ctx->table); crypto_free_skcipher(ctx->child); } -static void crypto_lrw_free(struct skcipher_instance *inst) +static void lrw_free_instance(struct skcipher_instance *inst) { crypto_drop_skcipher(skcipher_instance_ctx(inst)); kfree(inst); } -static int create(struct crypto_template *tmpl, struct rtattr **tb) +static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb) { struct crypto_skcipher_spawn *spawn; struct skcipher_instance *inst; - struct crypto_attr_type *algt; struct skcipher_alg *alg; const char *cipher_name; char ecb_name[CRYPTO_MAX_ALG_NAME]; u32 mask; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); + if (err) + return err; cipher_name = crypto_attr_alg_name(tb[1]); if (IS_ERR(cipher_name)) @@ -379,7 +376,6 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) } else goto err_free_inst; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE; inst->alg.base.cra_alignmask = alg->base.cra_alignmask | @@ -391,43 +387,43 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) + LRW_BLOCK_SIZE; - inst->alg.base.cra_ctxsize = sizeof(struct priv); + inst->alg.base.cra_ctxsize = sizeof(struct lrw_tfm_ctx); - inst->alg.init = init_tfm; - inst->alg.exit = exit_tfm; + inst->alg.init = lrw_init_tfm; + inst->alg.exit = lrw_exit_tfm; - inst->alg.setkey = setkey; - inst->alg.encrypt = encrypt; - inst->alg.decrypt = decrypt; + inst->alg.setkey = lrw_setkey; + inst->alg.encrypt = lrw_encrypt; + inst->alg.decrypt = lrw_decrypt; - inst->free = crypto_lrw_free; + inst->free = lrw_free_instance; err = skcipher_register_instance(tmpl, inst); if (err) { err_free_inst: - crypto_lrw_free(inst); + lrw_free_instance(inst); } return err; } -static struct crypto_template crypto_tmpl = { +static struct crypto_template lrw_tmpl = { .name = "lrw", - .create = create, + .create = lrw_create, .module = THIS_MODULE, }; -static int __init crypto_module_init(void) +static int __init lrw_module_init(void) { - return crypto_register_template(&crypto_tmpl); + return crypto_register_template(&lrw_tmpl); } -static void __exit crypto_module_exit(void) +static void __exit lrw_module_exit(void) { - 
crypto_unregister_template(&crypto_tmpl); + crypto_unregister_template(&lrw_tmpl); } -subsys_initcall(crypto_module_init); -module_exit(crypto_module_exit); +subsys_initcall(lrw_module_init); +module_exit(lrw_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("LRW block cipher mode"); diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index 8bddc65cd509..d569c7ed6c80 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -226,18 +226,14 @@ static int pcrypt_init_instance(struct crypto_instance *inst, } static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb, - u32 type, u32 mask) + struct crypto_attr_type *algt) { struct pcrypt_instance_ctx *ctx; - struct crypto_attr_type *algt; struct aead_instance *inst; struct aead_alg *alg; + u32 mask = crypto_algt_inherited_mask(algt); int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) return -ENOMEM; @@ -254,7 +250,7 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb, goto err_free_inst; err = crypto_grab_aead(&ctx->spawn, aead_crypto_instance(inst), - crypto_attr_alg_name(tb[1]), 0, 0); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; @@ -263,7 +259,7 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb, if (err) goto err_free_inst; - inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC; + inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC; inst->alg.ivsize = crypto_aead_alg_ivsize(alg); inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg); @@ -298,7 +294,7 @@ static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb) switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { case CRYPTO_ALG_TYPE_AEAD: - return pcrypt_create_aead(tmpl, tb, algt->type, algt->mask); + return pcrypt_create_aead(tmpl, tb, algt); } return -EINVAL; @@ -320,7 +316,7 @@ static int pcrypt_init_padata(struct padata_instance **pinst, const char *name) { int ret = -ENOMEM; - *pinst = padata_alloc_possible(name); + *pinst = padata_alloc(name); if (!*pinst) return ret; @@ -331,12 +327,6 @@ static int pcrypt_init_padata(struct padata_instance **pinst, const char *name) return ret; } -static void pcrypt_fini_padata(struct padata_instance *pinst) -{ - padata_stop(pinst); - padata_free(pinst); -} - static struct crypto_template pcrypt_tmpl = { .name = "pcrypt", .create = pcrypt_create, @@ -359,13 +349,10 @@ static int __init pcrypt_init(void) if (err) goto err_deinit_pencrypt; - padata_start(pencrypt); - padata_start(pdecrypt); - return crypto_register_template(&pcrypt_tmpl); err_deinit_pencrypt: - pcrypt_fini_padata(pencrypt); + padata_free(pencrypt); err_unreg_kset: kset_unregister(pcrypt_kset); err: @@ -376,8 +363,8 @@ static void __exit pcrypt_exit(void) { crypto_unregister_template(&pcrypt_tmpl); - pcrypt_fini_padata(pencrypt); - pcrypt_fini_padata(pdecrypt); + padata_free(pencrypt); + padata_free(pdecrypt); kset_unregister(pcrypt_kset); } diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c index d31031de51bc..4983b2b4a223 100644 --- a/crypto/rsa-pkcs1pad.c +++ b/crypto/rsa-pkcs1pad.c @@ -596,7 +596,6 @@ static void pkcs1pad_free(struct akcipher_instance *inst) static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; struct akcipher_instance *inst; struct pkcs1pad_inst_ctx *ctx; @@ -604,14 +603,9 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb) const char 
*hash_name; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AKCIPHER) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AKCIPHER, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -658,7 +652,6 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb) goto err_free_inst; } - inst->alg.base.cra_flags = rsa_alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = rsa_alg->base.cra_priority; inst->alg.base.cra_ctxsize = sizeof(struct pkcs1pad_ctx); diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c index c81a44404086..3418869dabef 100644 --- a/crypto/salsa20_generic.c +++ b/crypto/salsa20_generic.c @@ -9,8 +9,8 @@ * Salsa20 is a stream cipher candidate in eSTREAM, the ECRYPT Stream * Cipher Project. It is designed by Daniel J. Bernstein <djb@cr.yp.to>. * More information about eSTREAM and Salsa20 can be found here: - * http://www.ecrypt.eu.org/stream/ - * http://cr.yp.to/snuffle.html + * https://www.ecrypt.eu.org/stream/ + * https://cr.yp.to/snuffle.html * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free diff --git a/crypto/seqiv.c b/crypto/seqiv.c index f124b9b54e15..23e22d8b63e6 100644 --- a/crypto/seqiv.c +++ b/crypto/seqiv.c @@ -138,7 +138,7 @@ static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb) struct aead_instance *inst; int err; - inst = aead_geniv_alloc(tmpl, tb, 0, 0); + inst = aead_geniv_alloc(tmpl, tb); if (IS_ERR(inst)) return PTR_ERR(inst); @@ -164,23 +164,9 @@ free_inst: return err; } -static int seqiv_create(struct crypto_template *tmpl, struct rtattr **tb) -{ - struct crypto_attr_type *algt; - - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK) - return -EINVAL; - - return seqiv_aead_create(tmpl, tb); -} - static struct crypto_template seqiv_tmpl = { .name = "seqiv", - .create = seqiv_create, + .create = seqiv_aead_create, .module = THIS_MODULE, }; diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c index 44e263e25599..3e4069935b53 100644 --- a/crypto/sha3_generic.c +++ b/crypto/sha3_generic.c @@ -3,7 +3,7 @@ * Cryptographic API. 
* * SHA-3, as specified in - * http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf + * https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf * * SHA-3 code by Jeff Garzik <jeff@garzik.org> * Ard Biesheuvel <ard.biesheuvel@linaro.org> diff --git a/crypto/simd.c b/crypto/simd.c index 56885af49c24..edaa479a1ec5 100644 --- a/crypto/simd.c +++ b/crypto/simd.c @@ -171,7 +171,8 @@ struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname, drvname) >= CRYPTO_MAX_ALG_NAME) goto out_free_salg; - alg->base.cra_flags = CRYPTO_ALG_ASYNC; + alg->base.cra_flags = CRYPTO_ALG_ASYNC | + (ialg->base.cra_flags & CRYPTO_ALG_INHERITED_FLAGS); alg->base.cra_priority = ialg->base.cra_priority; alg->base.cra_blocksize = ialg->base.cra_blocksize; alg->base.cra_alignmask = ialg->base.cra_alignmask; @@ -417,7 +418,8 @@ struct simd_aead_alg *simd_aead_create_compat(const char *algname, drvname) >= CRYPTO_MAX_ALG_NAME) goto out_free_salg; - alg->base.cra_flags = CRYPTO_ALG_ASYNC; + alg->base.cra_flags = CRYPTO_ALG_ASYNC | + (ialg->base.cra_flags & CRYPTO_ALG_INHERITED_FLAGS); alg->base.cra_priority = ialg->base.cra_priority; alg->base.cra_blocksize = ialg->base.cra_blocksize; alg->base.cra_alignmask = ialg->base.cra_alignmask; diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 7221def7b9a7..467af525848a 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -934,22 +934,15 @@ static void skcipher_free_instance_simple(struct skcipher_instance *inst) struct skcipher_instance *skcipher_alloc_instance_simple( struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; struct skcipher_instance *inst; struct crypto_cipher_spawn *spawn; struct crypto_alg *cipher_alg; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return ERR_CAST(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) - return ERR_PTR(-EINVAL); - - mask = crypto_requires_off(algt->type, algt->mask, - CRYPTO_ALG_NEED_FALLBACK); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); + if (err) + return ERR_PTR(err); inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) diff --git a/crypto/testmgr.h b/crypto/testmgr.h index d29983908c38..b9a2d73d9f8d 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -3916,7 +3916,7 @@ static const struct hash_testvec hmac_sm3_tv_template[] = { }; /* - * SHA1 test vectors from from FIPS PUB 180-1 + * SHA1 test vectors from FIPS PUB 180-1 * Long vector from CAVS 5.0 */ static const struct hash_testvec sha1_tv_template[] = { @@ -4103,7 +4103,7 @@ static const struct hash_testvec sha1_tv_template[] = { /* - * SHA224 test vectors from from FIPS PUB 180-2 + * SHA224 test vectors from FIPS PUB 180-2 */ static const struct hash_testvec sha224_tv_template[] = { { @@ -4273,7 +4273,7 @@ static const struct hash_testvec sha224_tv_template[] = { }; /* - * SHA256 test vectors from from NIST + * SHA256 test vectors from NIST */ static const struct hash_testvec sha256_tv_template[] = { { @@ -4442,7 +4442,7 @@ static const struct hash_testvec sha256_tv_template[] = { }; /* - * SHA384 test vectors from from NIST and kerneli + * SHA384 test vectors from NIST and kerneli */ static const struct hash_testvec sha384_tv_template[] = { { @@ -4632,7 +4632,7 @@ static const struct hash_testvec sha384_tv_template[] = { }; /* - * SHA512 test vectors from from NIST and kerneli + * SHA512 test vectors from NIST and kerneli */ static const struct hash_testvec sha512_tv_template[] = { { diff --git a/crypto/vmac.c b/crypto/vmac.c 
index 2d906830df96..9b565d1040d6 100644 --- a/crypto/vmac.c +++ b/crypto/vmac.c @@ -620,9 +620,10 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb) struct shash_instance *inst; struct crypto_cipher_spawn *spawn; struct crypto_alg *alg; + u32 mask; int err; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask); if (err) return err; @@ -632,7 +633,7 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb) spawn = shash_instance_ctx(inst); err = crypto_grab_cipher(spawn, shash_crypto_instance(inst), - crypto_attr_alg_name(tb[1]), 0, 0); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; alg = crypto_spawn_cipher_alg(spawn); diff --git a/crypto/xcbc.c b/crypto/xcbc.c index 598ec88abf0f..af3b7eb5d7c7 100644 --- a/crypto/xcbc.c +++ b/crypto/xcbc.c @@ -191,9 +191,10 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb) struct crypto_cipher_spawn *spawn; struct crypto_alg *alg; unsigned long alignmask; + u32 mask; int err; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask); if (err) return err; @@ -203,7 +204,7 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb) spawn = shash_instance_ctx(inst); err = crypto_grab_cipher(spawn, shash_crypto_instance(inst), - crypto_attr_alg_name(tb[1]), 0, 0); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; alg = crypto_spawn_cipher_alg(spawn); diff --git a/crypto/xts.c b/crypto/xts.c index 3565f3b863a6..ad45b009774b 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -20,7 +20,7 @@ #include <crypto/b128ops.h> #include <crypto/gf128mul.h> -struct priv { +struct xts_tfm_ctx { struct crypto_skcipher *child; struct crypto_cipher *tweak; }; @@ -30,17 +30,17 @@ struct xts_instance_ctx { char name[CRYPTO_MAX_ALG_NAME]; }; -struct rctx { +struct xts_request_ctx { le128 t; struct scatterlist *tail; struct scatterlist sg[2]; struct skcipher_request subreq; }; -static int setkey(struct crypto_skcipher *parent, const u8 *key, - unsigned int keylen) +static int xts_setkey(struct crypto_skcipher *parent, const u8 *key, + unsigned int keylen) { - struct priv *ctx = crypto_skcipher_ctx(parent); + struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(parent); struct crypto_skcipher *child; struct crypto_cipher *tweak; int err; @@ -78,9 +78,10 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key, * mutliple calls to the 'ecb(..)' instance, which usually would be slower than * just doing the gf128mul_x_ble() calls again. 
*/ -static int xor_tweak(struct skcipher_request *req, bool second_pass, bool enc) +static int xts_xor_tweak(struct skcipher_request *req, bool second_pass, + bool enc) { - struct rctx *rctx = skcipher_request_ctx(req); + struct xts_request_ctx *rctx = skcipher_request_ctx(req); struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); const bool cts = (req->cryptlen % XTS_BLOCK_SIZE); const int bs = XTS_BLOCK_SIZE; @@ -128,23 +129,23 @@ static int xor_tweak(struct skcipher_request *req, bool second_pass, bool enc) return err; } -static int xor_tweak_pre(struct skcipher_request *req, bool enc) +static int xts_xor_tweak_pre(struct skcipher_request *req, bool enc) { - return xor_tweak(req, false, enc); + return xts_xor_tweak(req, false, enc); } -static int xor_tweak_post(struct skcipher_request *req, bool enc) +static int xts_xor_tweak_post(struct skcipher_request *req, bool enc) { - return xor_tweak(req, true, enc); + return xts_xor_tweak(req, true, enc); } -static void cts_done(struct crypto_async_request *areq, int err) +static void xts_cts_done(struct crypto_async_request *areq, int err) { struct skcipher_request *req = areq->data; le128 b; if (!err) { - struct rctx *rctx = skcipher_request_ctx(req); + struct xts_request_ctx *rctx = skcipher_request_ctx(req); scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 0); le128_xor(&b, &rctx->t, &b); @@ -154,12 +155,13 @@ static void cts_done(struct crypto_async_request *areq, int err) skcipher_request_complete(req, err); } -static int cts_final(struct skcipher_request *req, - int (*crypt)(struct skcipher_request *req)) +static int xts_cts_final(struct skcipher_request *req, + int (*crypt)(struct skcipher_request *req)) { - struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); + const struct xts_tfm_ctx *ctx = + crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); int offset = req->cryptlen & ~(XTS_BLOCK_SIZE - 1); - struct rctx *rctx = skcipher_request_ctx(req); + struct xts_request_ctx *rctx = skcipher_request_ctx(req); struct skcipher_request *subreq = &rctx->subreq; int tail = req->cryptlen % XTS_BLOCK_SIZE; le128 b[2]; @@ -169,7 +171,7 @@ static int cts_final(struct skcipher_request *req, offset - XTS_BLOCK_SIZE); scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0); - memcpy(b + 1, b, tail); + b[1] = b[0]; scatterwalk_map_and_copy(b, req->src, offset, tail, 0); le128_xor(b, &rctx->t, b); @@ -177,7 +179,8 @@ static int cts_final(struct skcipher_request *req, scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE + tail, 1); skcipher_request_set_tfm(subreq, ctx->child); - skcipher_request_set_callback(subreq, req->base.flags, cts_done, req); + skcipher_request_set_callback(subreq, req->base.flags, xts_cts_done, + req); skcipher_request_set_crypt(subreq, rctx->tail, rctx->tail, XTS_BLOCK_SIZE, NULL); @@ -192,18 +195,18 @@ static int cts_final(struct skcipher_request *req, return 0; } -static void encrypt_done(struct crypto_async_request *areq, int err) +static void xts_encrypt_done(struct crypto_async_request *areq, int err) { struct skcipher_request *req = areq->data; if (!err) { - struct rctx *rctx = skcipher_request_ctx(req); + struct xts_request_ctx *rctx = skcipher_request_ctx(req); rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - err = xor_tweak_post(req, true); + err = xts_xor_tweak_post(req, true); if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) { - err = cts_final(req, crypto_skcipher_encrypt); + err = xts_cts_final(req, crypto_skcipher_encrypt); if (err == -EINPROGRESS) return; } @@ 
-212,18 +215,18 @@ static void encrypt_done(struct crypto_async_request *areq, int err) skcipher_request_complete(req, err); } -static void decrypt_done(struct crypto_async_request *areq, int err) +static void xts_decrypt_done(struct crypto_async_request *areq, int err) { struct skcipher_request *req = areq->data; if (!err) { - struct rctx *rctx = skcipher_request_ctx(req); + struct xts_request_ctx *rctx = skcipher_request_ctx(req); rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - err = xor_tweak_post(req, false); + err = xts_xor_tweak_post(req, false); if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) { - err = cts_final(req, crypto_skcipher_decrypt); + err = xts_cts_final(req, crypto_skcipher_decrypt); if (err == -EINPROGRESS) return; } @@ -232,10 +235,12 @@ static void decrypt_done(struct crypto_async_request *areq, int err) skcipher_request_complete(req, err); } -static int init_crypt(struct skcipher_request *req, crypto_completion_t compl) +static int xts_init_crypt(struct skcipher_request *req, + crypto_completion_t compl) { - struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); - struct rctx *rctx = skcipher_request_ctx(req); + const struct xts_tfm_ctx *ctx = + crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); + struct xts_request_ctx *rctx = skcipher_request_ctx(req); struct skcipher_request *subreq = &rctx->subreq; if (req->cryptlen < XTS_BLOCK_SIZE) @@ -252,45 +257,45 @@ static int init_crypt(struct skcipher_request *req, crypto_completion_t compl) return 0; } -static int encrypt(struct skcipher_request *req) +static int xts_encrypt(struct skcipher_request *req) { - struct rctx *rctx = skcipher_request_ctx(req); + struct xts_request_ctx *rctx = skcipher_request_ctx(req); struct skcipher_request *subreq = &rctx->subreq; int err; - err = init_crypt(req, encrypt_done) ?: - xor_tweak_pre(req, true) ?: + err = xts_init_crypt(req, xts_encrypt_done) ?: + xts_xor_tweak_pre(req, true) ?: crypto_skcipher_encrypt(subreq) ?: - xor_tweak_post(req, true); + xts_xor_tweak_post(req, true); if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0)) return err; - return cts_final(req, crypto_skcipher_encrypt); + return xts_cts_final(req, crypto_skcipher_encrypt); } -static int decrypt(struct skcipher_request *req) +static int xts_decrypt(struct skcipher_request *req) { - struct rctx *rctx = skcipher_request_ctx(req); + struct xts_request_ctx *rctx = skcipher_request_ctx(req); struct skcipher_request *subreq = &rctx->subreq; int err; - err = init_crypt(req, decrypt_done) ?: - xor_tweak_pre(req, false) ?: + err = xts_init_crypt(req, xts_decrypt_done) ?: + xts_xor_tweak_pre(req, false) ?: crypto_skcipher_decrypt(subreq) ?: - xor_tweak_post(req, false); + xts_xor_tweak_post(req, false); if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0)) return err; - return cts_final(req, crypto_skcipher_decrypt); + return xts_cts_final(req, crypto_skcipher_decrypt); } -static int init_tfm(struct crypto_skcipher *tfm) +static int xts_init_tfm(struct crypto_skcipher *tfm) { struct skcipher_instance *inst = skcipher_alg_instance(tfm); struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst); - struct priv *ctx = crypto_skcipher_ctx(tfm); + struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); struct crypto_skcipher *child; struct crypto_cipher *tweak; @@ -309,41 +314,39 @@ static int init_tfm(struct crypto_skcipher *tfm) ctx->tweak = tweak; crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) + - sizeof(struct rctx)); + sizeof(struct xts_request_ctx)); return 0; } -static 
void exit_tfm(struct crypto_skcipher *tfm) +static void xts_exit_tfm(struct crypto_skcipher *tfm) { - struct priv *ctx = crypto_skcipher_ctx(tfm); + struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); crypto_free_skcipher(ctx->child); crypto_free_cipher(ctx->tweak); } -static void crypto_xts_free(struct skcipher_instance *inst) +static void xts_free_instance(struct skcipher_instance *inst) { - crypto_drop_skcipher(skcipher_instance_ctx(inst)); + struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst); + + crypto_drop_skcipher(&ictx->spawn); kfree(inst); } -static int create(struct crypto_template *tmpl, struct rtattr **tb) +static int xts_create(struct crypto_template *tmpl, struct rtattr **tb) { struct skcipher_instance *inst; - struct crypto_attr_type *algt; struct xts_instance_ctx *ctx; struct skcipher_alg *alg; const char *cipher_name; u32 mask; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) - return -EINVAL; + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); + if (err) + return err; cipher_name = crypto_attr_alg_name(tb[1]); if (IS_ERR(cipher_name)) @@ -355,10 +358,6 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) ctx = skcipher_instance_ctx(inst); - mask = crypto_requires_off(algt->type, algt->mask, - CRYPTO_ALG_NEED_FALLBACK | - CRYPTO_ALG_ASYNC); - err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst), cipher_name, 0, mask); if (err == -ENOENT) { @@ -415,7 +414,6 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) } else goto err_free_inst; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE; inst->alg.base.cra_alignmask = alg->base.cra_alignmask | @@ -425,43 +423,43 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) * 2; inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) * 2; - inst->alg.base.cra_ctxsize = sizeof(struct priv); + inst->alg.base.cra_ctxsize = sizeof(struct xts_tfm_ctx); - inst->alg.init = init_tfm; - inst->alg.exit = exit_tfm; + inst->alg.init = xts_init_tfm; + inst->alg.exit = xts_exit_tfm; - inst->alg.setkey = setkey; - inst->alg.encrypt = encrypt; - inst->alg.decrypt = decrypt; + inst->alg.setkey = xts_setkey; + inst->alg.encrypt = xts_encrypt; + inst->alg.decrypt = xts_decrypt; - inst->free = crypto_xts_free; + inst->free = xts_free_instance; err = skcipher_register_instance(tmpl, inst); if (err) { err_free_inst: - crypto_xts_free(inst); + xts_free_instance(inst); } return err; } -static struct crypto_template crypto_tmpl = { +static struct crypto_template xts_tmpl = { .name = "xts", - .create = create, + .create = xts_create, .module = THIS_MODULE, }; -static int __init crypto_module_init(void) +static int __init xts_module_init(void) { - return crypto_register_template(&crypto_tmpl); + return crypto_register_template(&xts_tmpl); } -static void __exit crypto_module_exit(void) +static void __exit xts_module_exit(void) { - crypto_unregister_template(&crypto_tmpl); + crypto_unregister_template(&xts_tmpl); } -subsys_initcall(crypto_module_init); -module_exit(crypto_module_exit); +subsys_initcall(xts_module_init); +module_exit(xts_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("XTS block cipher mode"); diff --git a/drivers/android/binder_alloc.c 
b/drivers/android/binder_alloc.c index 42c672f1584e..cbe6aa77d50d 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -947,7 +947,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item, trace_binder_unmap_user_end(alloc, index); } mmap_read_unlock(mm); - mmput(mm); + mmput_async(mm); trace_binder_unmap_kernel_start(alloc, index); diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c index d9fd70280482..7f814da3c2d0 100644 --- a/drivers/atm/atmtcp.c +++ b/drivers/atm/atmtcp.c @@ -433,9 +433,15 @@ static int atmtcp_remove_persistent(int itf) return -EMEDIUMTYPE; } dev_data = PRIV(dev); - if (!dev_data->persist) return 0; + if (!dev_data->persist) { + atm_dev_put(dev); + return 0; + } dev_data->persist = 0; - if (PRIV(dev)->vcc) return 0; + if (PRIV(dev)->vcc) { + atm_dev_put(dev); + return 0; + } kfree(dev_data); atm_dev_put(dev); atm_dev_deregister(dev); diff --git a/drivers/base/property.c b/drivers/base/property.c index 1e6d75e65938..d58aa98fe964 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -721,7 +721,7 @@ struct fwnode_handle *device_get_next_child_node(struct device *dev, return next; /* When no more children in primary, continue with secondary */ - if (!IS_ERR_OR_NULL(fwnode->secondary)) + if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary)) next = fwnode_get_next_child_node(fwnode->secondary, child); return next; diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 4f513fa3362f..191c97b84715 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -2865,6 +2865,24 @@ static int sysc_check_disabled_devices(struct sysc *ddata) return error; } +/* + * Ignore timers tagged with no-reset and no-idle. These are likely in use, + * for example by drivers/clocksource/timer-ti-dm-systimer.c. If more checks + * are needed, we could also look at the timer register configuration. + */ +static int sysc_check_active_timer(struct sysc *ddata) +{ + if (ddata->cap->type != TI_SYSC_OMAP2_TIMER && + ddata->cap->type != TI_SYSC_OMAP4_TIMER) + return 0; + + if ((ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) && + (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE)) + return -EBUSY; + + return 0; +} + static const struct of_device_id sysc_match_table[] = { { .compatible = "simple-bus", }, { /* sentinel */ }, @@ -2921,6 +2939,10 @@ static int sysc_probe(struct platform_device *pdev) if (error) return error; + error = sysc_check_active_timer(ddata); + if (error) + return error; + error = sysc_get_clocks(ddata); if (error) return error; diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 0ad17efc96df..f976a49e1fb5 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -74,6 +74,16 @@ config HW_RANDOM_ATMEL If unsure, say Y. +config HW_RANDOM_BA431 + tristate "Silex Insight BA431 Random Number Generator support" + depends on HAS_IOMEM + help + This driver provides kernel-side support for the Random Number + Generator hardware based on Silex Insight BA431 IP. + + To compile this driver as a module, choose M here: the + module will be called ba431-rng. 
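[Editorial illustration, not part of the patch: once a backend such as the new ba431-rng registers with the hw_random core, the core exposes it through the generic /dev/hwrng character device and the /sys/class/misc/hw_random/rng_current selector. The sketch below is a minimal user-space consumer of that generic interface; it assumes nothing BA431-specific beyond the core's standard device node.]

/*
 * Minimal sketch: read a few bytes from whichever hwrng backend the
 * core has currently selected (see /sys/class/misc/hw_random/rng_current).
 * Generic to any hw_random driver, including ba431-rng.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	ssize_t n, i;
	int fd = open("/dev/hwrng", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/hwrng");
		return EXIT_FAILURE;
	}

	/* Blocks until the selected hardware RNG has data available. */
	n = read(fd, buf, sizeof(buf));
	if (n < 0) {
		perror("read");
		close(fd);
		return EXIT_FAILURE;
	}

	for (i = 0; i < n; i++)
		printf("%02x", buf[i]);
	printf("\n");

	close(fd);
	return EXIT_SUCCESS;
}
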
+ config HW_RANDOM_BCM2835 tristate "Broadcom BCM2835/BCM63xx Random Number Generator support" depends on ARCH_BCM2835 || ARCH_BCM_NSP || ARCH_BCM_5301X || \ @@ -245,7 +255,7 @@ config HW_RANDOM_MXC_RNGA config HW_RANDOM_IMX_RNGC tristate "Freescale i.MX RNGC Random Number Generator" depends on HAS_IOMEM && HAVE_CLK - depends on SOC_IMX25 || COMPILE_TEST + depends on SOC_IMX25 || SOC_IMX6SL || SOC_IMX6SLL || SOC_IMX6UL || COMPILE_TEST default HW_RANDOM help This driver provides kernel-side support for the Random Number @@ -257,6 +267,21 @@ config HW_RANDOM_IMX_RNGC If unsure, say Y. +config HW_RANDOM_INGENIC_RNG + tristate "Ingenic Random Number Generator support" + depends on HW_RANDOM + depends on MACH_JZ4780 || MACH_X1000 + default HW_RANDOM + help + This driver provides kernel-side support for the Random Number Generator + hardware found in ingenic JZ4780 and X1000 SoC. MIPS Creator CI20 uses + JZ4780 SoC, YSH & ATIL CU1000-Neo uses X1000 SoC. + + To compile this driver as a module, choose M here: the + module will be called ingenic-rng. + + If unsure, say Y. + config HW_RANDOM_NOMADIK tristate "ST-Ericsson Nomadik Random Number Generator support" depends on ARCH_NOMADIK diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile index 2c6724735345..26ae06844f09 100644 --- a/drivers/char/hw_random/Makefile +++ b/drivers/char/hw_random/Makefile @@ -9,6 +9,7 @@ obj-$(CONFIG_HW_RANDOM_TIMERIOMEM) += timeriomem-rng.o obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o obj-$(CONFIG_HW_RANDOM_ATMEL) += atmel-rng.o +obj-$(CONFIG_HW_RANDOM_BA431) += ba431-rng.o obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o obj-$(CONFIG_HW_RANDOM_N2RNG) += n2-rng.o n2-rng-y := n2-drv.o n2-asm.o @@ -22,6 +23,7 @@ obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o obj-$(CONFIG_HW_RANDOM_IMX_RNGC) += imx-rngc.o +obj-$(CONFIG_HW_RANDOM_INGENIC_RNG) += ingenic-rng.o obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o diff --git a/drivers/char/hw_random/ba431-rng.c b/drivers/char/hw_random/ba431-rng.c new file mode 100644 index 000000000000..410b50b05e21 --- /dev/null +++ b/drivers/char/hw_random/ba431-rng.c @@ -0,0 +1,235 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2020 Silex Insight + +#include <linux/delay.h> +#include <linux/hw_random.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/workqueue.h> + +#define BA431_RESET_DELAY 1 /* usec */ +#define BA431_RESET_READ_STATUS_TIMEOUT 1000 /* usec */ +#define BA431_RESET_READ_STATUS_INTERVAL 10 /* usec */ +#define BA431_READ_RETRY_INTERVAL 1 /* usec */ + +#define BA431_REG_CTRL 0x00 +#define BA431_REG_FIFO_LEVEL 0x04 +#define BA431_REG_STATUS 0x30 +#define BA431_REG_FIFODATA 0x80 + +#define BA431_CTRL_ENABLE BIT(0) +#define BA431_CTRL_SOFTRESET BIT(8) + +#define BA431_STATUS_STATE_MASK (BIT(1) | BIT(2) | BIT(3)) +#define BA431_STATUS_STATE_OFFSET 1 + +enum ba431_state { + BA431_STATE_RESET, + BA431_STATE_STARTUP, + BA431_STATE_FIFOFULLON, + BA431_STATE_FIFOFULLOFF, + BA431_STATE_RUNNING, + BA431_STATE_ERROR +}; + +struct ba431_trng { + struct device *dev; + void __iomem *base; + struct hwrng rng; + atomic_t reset_pending; + struct work_struct reset_work; +}; + +static inline u32 
ba431_trng_read_reg(struct ba431_trng *ba431, u32 reg) +{ + return ioread32(ba431->base + reg); +} + +static inline void ba431_trng_write_reg(struct ba431_trng *ba431, u32 reg, + u32 val) +{ + iowrite32(val, ba431->base + reg); +} + +static inline enum ba431_state ba431_trng_get_state(struct ba431_trng *ba431) +{ + u32 status = ba431_trng_read_reg(ba431, BA431_REG_STATUS); + + return (status & BA431_STATUS_STATE_MASK) >> BA431_STATUS_STATE_OFFSET; +} + +static int ba431_trng_is_in_error(struct ba431_trng *ba431) +{ + enum ba431_state state = ba431_trng_get_state(ba431); + + if ((state < BA431_STATE_STARTUP) || + (state >= BA431_STATE_ERROR)) + return 1; + + return 0; +} + +static int ba431_trng_reset(struct ba431_trng *ba431) +{ + int ret; + + /* Disable interrupts, random generation and enable the softreset */ + ba431_trng_write_reg(ba431, BA431_REG_CTRL, BA431_CTRL_SOFTRESET); + udelay(BA431_RESET_DELAY); + ba431_trng_write_reg(ba431, BA431_REG_CTRL, BA431_CTRL_ENABLE); + + /* Wait until the state changed */ + if (readx_poll_timeout(ba431_trng_is_in_error, ba431, ret, !ret, + BA431_RESET_READ_STATUS_INTERVAL, + BA431_RESET_READ_STATUS_TIMEOUT)) { + dev_err(ba431->dev, "reset failed (state: %d)\n", + ba431_trng_get_state(ba431)); + return -ETIMEDOUT; + } + + dev_info(ba431->dev, "reset done\n"); + + return 0; +} + +static void ba431_trng_reset_work(struct work_struct *work) +{ + struct ba431_trng *ba431 = container_of(work, struct ba431_trng, + reset_work); + ba431_trng_reset(ba431); + atomic_set(&ba431->reset_pending, 0); +} + +static void ba431_trng_schedule_reset(struct ba431_trng *ba431) +{ + if (atomic_cmpxchg(&ba431->reset_pending, 0, 1)) + return; + + schedule_work(&ba431->reset_work); +} + +static int ba431_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait) +{ + struct ba431_trng *ba431 = container_of(rng, struct ba431_trng, rng); + u32 *data = buf; + unsigned int level, i; + int n = 0; + + while (max > 0) { + level = ba431_trng_read_reg(ba431, BA431_REG_FIFO_LEVEL); + if (!level) { + if (ba431_trng_is_in_error(ba431)) { + ba431_trng_schedule_reset(ba431); + break; + } + + if (!wait) + break; + + udelay(BA431_READ_RETRY_INTERVAL); + continue; + } + + i = level; + do { + data[n++] = ba431_trng_read_reg(ba431, + BA431_REG_FIFODATA); + max -= sizeof(*data); + } while (--i && (max > 0)); + + if (ba431_trng_is_in_error(ba431)) { + n -= (level - i); + ba431_trng_schedule_reset(ba431); + break; + } + } + + n *= sizeof(data); + return (n || !wait) ? 
n : -EIO; +} + +static void ba431_trng_cleanup(struct hwrng *rng) +{ + struct ba431_trng *ba431 = container_of(rng, struct ba431_trng, rng); + + ba431_trng_write_reg(ba431, BA431_REG_CTRL, 0); + cancel_work_sync(&ba431->reset_work); +} + +static int ba431_trng_init(struct hwrng *rng) +{ + struct ba431_trng *ba431 = container_of(rng, struct ba431_trng, rng); + + return ba431_trng_reset(ba431); +} + +static int ba431_trng_probe(struct platform_device *pdev) +{ + struct ba431_trng *ba431; + struct resource *res; + int ret; + + ba431 = devm_kzalloc(&pdev->dev, sizeof(*ba431), GFP_KERNEL); + if (!ba431) + return -ENOMEM; + + ba431->dev = &pdev->dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ba431->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(ba431->base)) + return PTR_ERR(ba431->base); + + atomic_set(&ba431->reset_pending, 0); + INIT_WORK(&ba431->reset_work, ba431_trng_reset_work); + ba431->rng.name = pdev->name; + ba431->rng.init = ba431_trng_init; + ba431->rng.cleanup = ba431_trng_cleanup; + ba431->rng.read = ba431_trng_read; + + platform_set_drvdata(pdev, ba431); + + ret = hwrng_register(&ba431->rng); + if (ret) { + dev_err(&pdev->dev, "BA431 registration failed (%d)\n", ret); + return ret; + } + + dev_info(&pdev->dev, "BA431 TRNG registered\n"); + + return 0; +} + +static int ba431_trng_remove(struct platform_device *pdev) +{ + struct ba431_trng *ba431 = platform_get_drvdata(pdev); + + hwrng_unregister(&ba431->rng); + + return 0; +} + +static const struct of_device_id ba431_trng_dt_ids[] = { + { .compatible = "silex-insight,ba431-rng", .data = NULL }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, ba431_trng_dt_ids); + +static struct platform_driver ba431_trng_driver = { + .driver = { + .name = "ba431-rng", + .of_match_table = ba431_trng_dt_ids, + }, + .probe = ba431_trng_probe, + .remove = ba431_trng_remove, +}; + +module_platform_driver(ba431_trng_driver); + +MODULE_AUTHOR("Olivier Sobrie <olivier@sobrie.be>"); +MODULE_DESCRIPTION("TRNG driver for Silex Insight BA431"); +MODULE_LICENSE("GPL"); diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c index cbf5eaea662c..1a7c43b43c6b 100644 --- a/drivers/char/hw_random/bcm2835-rng.c +++ b/drivers/char/hw_random/bcm2835-rng.c @@ -139,7 +139,6 @@ static int bcm2835_rng_probe(struct platform_device *pdev) { const struct bcm2835_rng_of_data *of_data; struct device *dev = &pdev->dev; - struct device_node *np = dev->of_node; const struct of_device_id *rng_id; struct bcm2835_rng_priv *priv; int err; @@ -166,7 +165,7 @@ static int bcm2835_rng_probe(struct platform_device *pdev) priv->rng.cleanup = bcm2835_rng_cleanup; if (dev_of_node(dev)) { - rng_id = of_match_node(bcm2835_rng_of_match, np); + rng_id = of_match_node(bcm2835_rng_of_match, dev->of_node); if (!rng_id) return -EINVAL; @@ -188,7 +187,7 @@ static int bcm2835_rng_probe(struct platform_device *pdev) MODULE_DEVICE_TABLE(of, bcm2835_rng_of_match); -static struct platform_device_id bcm2835_rng_devtype[] = { +static const struct platform_device_id bcm2835_rng_devtype[] = { { .name = "bcm2835-rng" }, { .name = "bcm63xx-rng" }, { /* sentinel */ } diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index d2d7a42d7e0d..8c1c47dd9f46 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -611,7 +611,7 @@ EXPORT_SYMBOL_GPL(devm_hwrng_unregister); static int __init hwrng_modinit(void) { - int ret = -ENOMEM; + int ret; /* kmalloc makes this safe for virt_to_page() in virtio_rng.c */ 
rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL); diff --git a/drivers/char/hw_random/hisi-rng.c b/drivers/char/hw_random/hisi-rng.c index 6815e17a9834..96438f85cafa 100644 --- a/drivers/char/hw_random/hisi-rng.c +++ b/drivers/char/hw_random/hisi-rng.c @@ -99,7 +99,7 @@ static int hisi_rng_probe(struct platform_device *pdev) return 0; } -static const struct of_device_id hisi_rng_dt_ids[] = { +static const struct of_device_id hisi_rng_dt_ids[] __maybe_unused = { { .compatible = "hisilicon,hip04-rng" }, { .compatible = "hisilicon,hip05-rng" }, { } diff --git a/drivers/char/hw_random/ingenic-rng.c b/drivers/char/hw_random/ingenic-rng.c new file mode 100644 index 000000000000..d704cef64b64 --- /dev/null +++ b/drivers/char/hw_random/ingenic-rng.c @@ -0,0 +1,154 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Ingenic Random Number Generator driver + * Copyright (c) 2017 PrasannaKumar Muralidharan <prasannatsmkumar@gmail.com> + * Copyright (c) 2020 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com> + */ + +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/hw_random.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +/* RNG register offsets */ +#define RNG_REG_ERNG_OFFSET 0x0 +#define RNG_REG_RNG_OFFSET 0x4 + +/* bits within the ERND register */ +#define ERNG_READY BIT(31) +#define ERNG_ENABLE BIT(0) + +enum ingenic_rng_version { + ID_JZ4780, + ID_X1000, +}; + +/* Device associated memory */ +struct ingenic_rng { + enum ingenic_rng_version version; + + void __iomem *base; + struct hwrng rng; +}; + +static int ingenic_rng_init(struct hwrng *rng) +{ + struct ingenic_rng *priv = container_of(rng, struct ingenic_rng, rng); + + writel(ERNG_ENABLE, priv->base + RNG_REG_ERNG_OFFSET); + + return 0; +} + +static void ingenic_rng_cleanup(struct hwrng *rng) +{ + struct ingenic_rng *priv = container_of(rng, struct ingenic_rng, rng); + + writel(0, priv->base + RNG_REG_ERNG_OFFSET); +} + +static int ingenic_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) +{ + struct ingenic_rng *priv = container_of(rng, struct ingenic_rng, rng); + u32 *data = buf; + u32 status; + int ret; + + if (priv->version >= ID_X1000) { + ret = readl_poll_timeout(priv->base + RNG_REG_ERNG_OFFSET, status, + status & ERNG_READY, 10, 1000); + if (ret == -ETIMEDOUT) { + pr_err("%s: Wait for RNG data ready timeout\n", __func__); + return ret; + } + } else { + /* + * A delay is required so that the current RNG data is not bit shifted + * version of previous RNG data which could happen if random data is + * read continuously from this device. 
+ */ + udelay(20); + } + + *data = readl(priv->base + RNG_REG_RNG_OFFSET); + + return 4; +} + +static int ingenic_rng_probe(struct platform_device *pdev) +{ + struct ingenic_rng *priv; + int ret; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->base)) { + pr_err("%s: Failed to map RNG registers\n", __func__); + ret = PTR_ERR(priv->base); + goto err_free_rng; + } + + priv->version = (enum ingenic_rng_version)of_device_get_match_data(&pdev->dev); + + priv->rng.name = pdev->name; + priv->rng.init = ingenic_rng_init; + priv->rng.cleanup = ingenic_rng_cleanup; + priv->rng.read = ingenic_rng_read; + + ret = hwrng_register(&priv->rng); + if (ret) { + dev_err(&pdev->dev, "Failed to register hwrng\n"); + goto err_free_rng; + } + + platform_set_drvdata(pdev, priv); + + dev_info(&pdev->dev, "Ingenic RNG driver registered\n"); + return 0; + +err_free_rng: + kfree(priv); + return ret; +} + +static int ingenic_rng_remove(struct platform_device *pdev) +{ + struct ingenic_rng *priv = platform_get_drvdata(pdev); + + hwrng_unregister(&priv->rng); + + writel(0, priv->base + RNG_REG_ERNG_OFFSET); + + return 0; +} + +static const struct of_device_id ingenic_rng_of_match[] = { + { .compatible = "ingenic,jz4780-rng", .data = (void *) ID_JZ4780 }, + { .compatible = "ingenic,x1000-rng", .data = (void *) ID_X1000 }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, ingenic_rng_of_match); + +static struct platform_driver ingenic_rng_driver = { + .probe = ingenic_rng_probe, + .remove = ingenic_rng_remove, + .driver = { + .name = "ingenic-rng", + .of_match_table = ingenic_rng_of_match, + }, +}; + +module_platform_driver(ingenic_rng_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("PrasannaKumar Muralidharan <prasannatsmkumar@gmail.com>"); +MODULE_AUTHOR("周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>"); +MODULE_DESCRIPTION("Ingenic Random Number Generator driver"); diff --git a/drivers/char/hw_random/ks-sa-rng.c b/drivers/char/hw_random/ks-sa-rng.c index 001617033d6a..8f1d47ff9799 100644 --- a/drivers/char/hw_random/ks-sa-rng.c +++ b/drivers/char/hw_random/ks-sa-rng.c @@ -2,7 +2,7 @@ /* * Random Number Generator driver for the Keystone SOC * - * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2016 Texas Instruments Incorporated - https://www.ti.com * * Authors: Sandeep Nair * Vitaly Andrianov diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c index 74ed29f42e4f..b0ded41eb865 100644 --- a/drivers/char/hw_random/nomadik-rng.c +++ b/drivers/char/hw_random/nomadik-rng.c @@ -76,7 +76,7 @@ static int nmk_rng_remove(struct amba_device *dev) return 0; } -static struct amba_id nmk_rng_ids[] = { +static const struct amba_id nmk_rng_ids[] = { { .id = 0x000805e1, .mask = 0x000fffff, /* top bits are rev and cfg: accept all */ diff --git a/drivers/char/hw_random/npcm-rng.c b/drivers/char/hw_random/npcm-rng.c index 01d04404d8c0..5d0d13f891b7 100644 --- a/drivers/char/hw_random/npcm-rng.c +++ b/drivers/char/hw_random/npcm-rng.c @@ -161,7 +161,7 @@ static const struct dev_pm_ops npcm_rng_pm_ops = { pm_runtime_force_resume) }; -static const struct of_device_id rng_dt_id[] = { +static const struct of_device_id rng_dt_id[] __maybe_unused = { { .compatible = "nuvoton,npcm750-rng", }, {}, }; diff --git a/drivers/char/hw_random/octeon-rng.c b/drivers/char/hw_random/octeon-rng.c index 7be8067ac4e8..8561a09b4681 100644 --- 
a/drivers/char/hw_random/octeon-rng.c +++ b/drivers/char/hw_random/octeon-rng.c @@ -33,7 +33,7 @@ static int octeon_rng_init(struct hwrng *rng) ctl.u64 = 0; ctl.s.ent_en = 1; /* Enable the entropy source. */ ctl.s.rng_en = 1; /* Enable the RNG hardware. */ - cvmx_write_csr((u64)p->control_status, ctl.u64); + cvmx_write_csr((__force u64)p->control_status, ctl.u64); return 0; } @@ -44,14 +44,14 @@ static void octeon_rng_cleanup(struct hwrng *rng) ctl.u64 = 0; /* Disable everything. */ - cvmx_write_csr((u64)p->control_status, ctl.u64); + cvmx_write_csr((__force u64)p->control_status, ctl.u64); } static int octeon_rng_data_read(struct hwrng *rng, u32 *data) { struct octeon_rng *p = container_of(rng, struct octeon_rng, ops); - *data = cvmx_read64_uint32((u64)p->result); + *data = cvmx_read64_uint32((__force u64)p->result); return sizeof(u32); } diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c index 7290c603fcb8..5cc5fc504968 100644 --- a/drivers/char/hw_random/omap-rng.c +++ b/drivers/char/hw_random/omap-rng.c @@ -22,6 +22,7 @@ #include <linux/platform_device.h> #include <linux/hw_random.h> #include <linux/delay.h> +#include <linux/kernel.h> #include <linux/slab.h> #include <linux/pm_runtime.h> #include <linux/of.h> @@ -243,7 +244,6 @@ static struct omap_rng_pdata omap2_rng_pdata = { .cleanup = omap2_rng_cleanup, }; -#if defined(CONFIG_OF) static inline u32 omap4_rng_data_present(struct omap_rng_dev *priv) { return omap_rng_read(priv, RNG_STATUS_REG) & RNG_REG_STATUS_RDY; @@ -358,7 +358,7 @@ static struct omap_rng_pdata eip76_rng_pdata = { .cleanup = omap4_rng_cleanup, }; -static const struct of_device_id omap_rng_of_match[] = { +static const struct of_device_id omap_rng_of_match[] __maybe_unused = { { .compatible = "ti,omap2-rng", .data = &omap2_rng_pdata, @@ -418,13 +418,6 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv, } return 0; } -#else -static int of_get_omap_rng_device_details(struct omap_rng_dev *omap_rng, - struct platform_device *pdev) -{ - return -EINVAL; -} -#endif static int get_omap_rng_device_details(struct omap_rng_dev *omap_rng) { diff --git a/drivers/char/hw_random/pic32-rng.c b/drivers/char/hw_random/pic32-rng.c index 81080cb2294e..e8210c1715cf 100644 --- a/drivers/char/hw_random/pic32-rng.c +++ b/drivers/char/hw_random/pic32-rng.c @@ -119,7 +119,7 @@ static int pic32_rng_remove(struct platform_device *pdev) return 0; } -static const struct of_device_id pic32_rng_of_match[] = { +static const struct of_device_id pic32_rng_of_match[] __maybe_unused = { { .compatible = "microchip,pic32mzda-rng", }, { /* sentinel */ } }; diff --git a/drivers/char/hw_random/st-rng.c b/drivers/char/hw_random/st-rng.c index 783c24e3f8b7..15ba1e6fae4d 100644 --- a/drivers/char/hw_random/st-rng.c +++ b/drivers/char/hw_random/st-rng.c @@ -12,6 +12,7 @@ #include <linux/delay.h> #include <linux/hw_random.h> #include <linux/io.h> +#include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> @@ -121,7 +122,7 @@ static int st_rng_remove(struct platform_device *pdev) return 0; } -static const struct of_device_id st_rng_match[] = { +static const struct of_device_id st_rng_match[] __maybe_unused = { { .compatible = "st,rng" }, {}, }; diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c index 79a6e47b5fbc..a90001e02bf7 100644 --- a/drivers/char/hw_random/virtio-rng.c +++ b/drivers/char/hw_random/virtio-rng.c @@ -195,7 +195,7 @@ static int virtrng_restore(struct 
virtio_device *vdev) } #endif -static struct virtio_device_id id_table[] = { +static const struct virtio_device_id id_table[] = { { VIRTIO_ID_RNG, VIRTIO_DEV_ANY_ID }, { 0 }, }; diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 934c92dcb9ab..687d4af6945d 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -814,7 +814,8 @@ static struct inode *devmem_inode; #ifdef CONFIG_IO_STRICT_DEVMEM void revoke_devmem(struct resource *res) { - struct inode *inode = READ_ONCE(devmem_inode); + /* pairs with smp_store_release() in devmem_init_inode() */ + struct inode *inode = smp_load_acquire(&devmem_inode); /* * Check that the initialization has completed. Losing the race @@ -1028,8 +1029,11 @@ static int devmem_init_inode(void) return rc; } - /* publish /dev/mem initialized */ - WRITE_ONCE(devmem_inode, inode); + /* + * Publish /dev/mem initialized. + * Pairs with smp_load_acquire() in revoke_devmem(). + */ + smp_store_release(&devmem_inode, inode); return 0; } diff --git a/drivers/char/random.c b/drivers/char/random.c index 2a41b21623ae..d20ba1b104ca 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1277,6 +1277,7 @@ void add_interrupt_randomness(int irq, int irq_flags) fast_mix(fast_pool); add_interrupt_bench(cycles); + this_cpu_add(net_rand_state.s1, fast_pool->pool[cycles & 3]); if (unlikely(crng_init == 0)) { if ((fast_pool->count >= 64) && diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c index 63ada5e53f13..3633ed70f48f 100644 --- a/drivers/char/tpm/eventlog/acpi.c +++ b/drivers/char/tpm/eventlog/acpi.c @@ -49,9 +49,9 @@ int tpm_read_log_acpi(struct tpm_chip *chip) void __iomem *virt; u64 len, start; struct tpm_bios_log *log; - - if (chip->flags & TPM_CHIP_FLAG_TPM2) - return -ENODEV; + struct acpi_table_tpm2 *tbl; + struct acpi_tpm2_phy *tpm2_phy; + int format; log = &chip->log; @@ -61,23 +61,44 @@ int tpm_read_log_acpi(struct tpm_chip *chip) if (!chip->acpi_dev_handle) return -ENODEV; - /* Find TCPA entry in RSDT (ACPI_LOGICAL_ADDRESSING) */ - status = acpi_get_table(ACPI_SIG_TCPA, 1, - (struct acpi_table_header **)&buff); - - if (ACPI_FAILURE(status)) - return -ENODEV; - - switch(buff->platform_class) { - case BIOS_SERVER: - len = buff->server.log_max_len; - start = buff->server.log_start_addr; - break; - case BIOS_CLIENT: - default: - len = buff->client.log_max_len; - start = buff->client.log_start_addr; - break; + if (chip->flags & TPM_CHIP_FLAG_TPM2) { + status = acpi_get_table("TPM2", 1, + (struct acpi_table_header **)&tbl); + if (ACPI_FAILURE(status)) + return -ENODEV; + + if (tbl->header.length < + sizeof(*tbl) + sizeof(struct acpi_tpm2_phy)) + return -ENODEV; + + tpm2_phy = (void *)tbl + sizeof(*tbl); + len = tpm2_phy->log_area_minimum_length; + + start = tpm2_phy->log_area_start_address; + if (!start || !len) + return -ENODEV; + + format = EFI_TCG2_EVENT_LOG_FORMAT_TCG_2; + } else { + /* Find TCPA entry in RSDT (ACPI_LOGICAL_ADDRESSING) */ + status = acpi_get_table(ACPI_SIG_TCPA, 1, + (struct acpi_table_header **)&buff); + if (ACPI_FAILURE(status)) + return -ENODEV; + + switch (buff->platform_class) { + case BIOS_SERVER: + len = buff->server.log_max_len; + start = buff->server.log_start_addr; + break; + case BIOS_CLIENT: + default: + len = buff->client.log_max_len; + start = buff->client.log_start_addr; + break; + } + + format = EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2; } if (!len) { dev_warn(&chip->dev, "%s: TCPA log area empty\n", __func__); @@ -98,7 +119,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip) 
memcpy_fromio(log->bios_event_log, virt, len); acpi_os_unmap_iomem(virt, len); - return EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2; + return format; err: kfree(log->bios_event_log); diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index 8c77e88012e9..ddaeceb7e109 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -386,13 +386,8 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev, chip->cdev.owner = THIS_MODULE; chip->cdevs.owner = THIS_MODULE; - chip->work_space.context_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); - if (!chip->work_space.context_buf) { - rc = -ENOMEM; - goto out; - } - chip->work_space.session_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); - if (!chip->work_space.session_buf) { + rc = tpm2_init_space(&chip->work_space, TPM2_SPACE_BUFFER_SIZE); + if (rc) { rc = -ENOMEM; goto out; } diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index 0fbcede241ea..947d1db0a5cc 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -59,6 +59,9 @@ enum tpm_addr { #define TPM_TAG_RQU_COMMAND 193 +/* TPM2 specific constants. */ +#define TPM2_SPACE_BUFFER_SIZE 16384 /* 16 kB */ + struct stclear_flags_t { __be16 tag; u8 deactivated; @@ -228,7 +231,7 @@ unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal); int tpm2_probe(struct tpm_chip *chip); int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip); int tpm2_find_cc(struct tpm_chip *chip, u32 cc); -int tpm2_init_space(struct tpm_space *space); +int tpm2_init_space(struct tpm_space *space, unsigned int buf_size); void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space); void tpm2_flush_space(struct tpm_chip *chip); int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd, diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c index 982d341d8837..784b8b3cb903 100644 --- a/drivers/char/tpm/tpm2-space.c +++ b/drivers/char/tpm/tpm2-space.c @@ -38,18 +38,21 @@ static void tpm2_flush_sessions(struct tpm_chip *chip, struct tpm_space *space) } } -int tpm2_init_space(struct tpm_space *space) +int tpm2_init_space(struct tpm_space *space, unsigned int buf_size) { - space->context_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); + space->context_buf = kzalloc(buf_size, GFP_KERNEL); if (!space->context_buf) return -ENOMEM; - space->session_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); + space->session_buf = kzalloc(buf_size, GFP_KERNEL); if (space->session_buf == NULL) { kfree(space->context_buf); + /* Prevent caller getting a dangling pointer. 
*/ + space->context_buf = NULL; return -ENOMEM; } + space->buf_size = buf_size; return 0; } @@ -311,8 +314,10 @@ int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd, sizeof(space->context_tbl)); memcpy(&chip->work_space.session_tbl, &space->session_tbl, sizeof(space->session_tbl)); - memcpy(chip->work_space.context_buf, space->context_buf, PAGE_SIZE); - memcpy(chip->work_space.session_buf, space->session_buf, PAGE_SIZE); + memcpy(chip->work_space.context_buf, space->context_buf, + space->buf_size); + memcpy(chip->work_space.session_buf, space->session_buf, + space->buf_size); rc = tpm2_load_space(chip); if (rc) { @@ -492,7 +497,7 @@ static int tpm2_save_space(struct tpm_chip *chip) continue; rc = tpm2_save_context(chip, space->context_tbl[i], - space->context_buf, PAGE_SIZE, + space->context_buf, space->buf_size, &offset); if (rc == -ENOENT) { space->context_tbl[i] = 0; @@ -509,9 +514,8 @@ static int tpm2_save_space(struct tpm_chip *chip) continue; rc = tpm2_save_context(chip, space->session_tbl[i], - space->session_buf, PAGE_SIZE, + space->session_buf, space->buf_size, &offset); - if (rc == -ENOENT) { /* handle error saving session, just forget it */ space->session_tbl[i] = 0; @@ -557,8 +561,10 @@ int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space, sizeof(space->context_tbl)); memcpy(&space->session_tbl, &chip->work_space.session_tbl, sizeof(space->session_tbl)); - memcpy(space->context_buf, chip->work_space.context_buf, PAGE_SIZE); - memcpy(space->session_buf, chip->work_space.session_buf, PAGE_SIZE); + memcpy(space->context_buf, chip->work_space.context_buf, + space->buf_size); + memcpy(space->session_buf, chip->work_space.session_buf, + space->buf_size); return 0; out: diff --git a/drivers/char/tpm/tpmrm-dev.c b/drivers/char/tpm/tpmrm-dev.c index 7a0a7051a06f..eef0fb06ea83 100644 --- a/drivers/char/tpm/tpmrm-dev.c +++ b/drivers/char/tpm/tpmrm-dev.c @@ -21,7 +21,7 @@ static int tpmrm_open(struct inode *inode, struct file *file) if (priv == NULL) return -ENOMEM; - rc = tpm2_init_space(&priv->space); + rc = tpm2_init_space(&priv->space, TPM2_SPACE_BUFFER_SIZE); if (rc) { kfree(priv); return -ENOMEM; diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c index 6fd1f219a512..f6fd1c1cc527 100644 --- a/drivers/clocksource/timer-ti-dm-systimer.c +++ b/drivers/clocksource/timer-ti-dm-systimer.c @@ -19,7 +19,7 @@ /* For type1, set SYSC_OMAP2_CLOCKACTIVITY for fck off on idle, l4 clock on */ #define DMTIMER_TYPE1_ENABLE ((1 << 9) | (SYSC_IDLE_SMART << 3) | \ SYSC_OMAP2_ENAWAKEUP | SYSC_OMAP2_AUTOIDLE) - +#define DMTIMER_TYPE1_DISABLE (SYSC_OMAP2_SOFTRESET | SYSC_OMAP2_AUTOIDLE) #define DMTIMER_TYPE2_ENABLE (SYSC_IDLE_SMART_WKUP << 2) #define DMTIMER_RESET_WAIT 100000 @@ -44,6 +44,8 @@ struct dmtimer_systimer { u8 ctrl; u8 wakeup; u8 ifctrl; + struct clk *fck; + struct clk *ick; unsigned long rate; }; @@ -298,16 +300,20 @@ static void __init dmtimer_systimer_select_best(void) } /* Interface clocks are only available on some SoCs variants */ -static int __init dmtimer_systimer_init_clock(struct device_node *np, +static int __init dmtimer_systimer_init_clock(struct dmtimer_systimer *t, + struct device_node *np, const char *name, unsigned long *rate) { struct clk *clock; unsigned long r; + bool is_ick = false; int error; + is_ick = !strncmp(name, "ick", 3); + clock = of_clk_get_by_name(np, name); - if ((PTR_ERR(clock) == -EINVAL) && !strncmp(name, "ick", 3)) + if ((PTR_ERR(clock) == -EINVAL) && is_ick) return 
0; else if (IS_ERR(clock)) return PTR_ERR(clock); @@ -320,6 +326,11 @@ static int __init dmtimer_systimer_init_clock(struct device_node *np, if (!r) return -ENODEV; + if (is_ick) + t->ick = clock; + else + t->fck = clock; + *rate = r; return 0; @@ -339,7 +350,10 @@ static void dmtimer_systimer_enable(struct dmtimer_systimer *t) static void dmtimer_systimer_disable(struct dmtimer_systimer *t) { - writel_relaxed(0, t->base + t->sysc); + if (!dmtimer_systimer_revision1(t)) + return; + + writel_relaxed(DMTIMER_TYPE1_DISABLE, t->base + t->sysc); } static int __init dmtimer_systimer_setup(struct device_node *np, @@ -366,13 +380,13 @@ static int __init dmtimer_systimer_setup(struct device_node *np, pr_err("%s: clock source init failed: %i\n", __func__, error); /* For ti-sysc, we have timer clocks at the parent module level */ - error = dmtimer_systimer_init_clock(np->parent, "fck", &rate); + error = dmtimer_systimer_init_clock(t, np->parent, "fck", &rate); if (error) goto err_unmap; t->rate = rate; - error = dmtimer_systimer_init_clock(np->parent, "ick", &rate); + error = dmtimer_systimer_init_clock(t, np->parent, "ick", &rate); if (error) goto err_unmap; @@ -496,12 +510,18 @@ static void omap_clockevent_idle(struct clock_event_device *evt) struct dmtimer_systimer *t = &clkevt->t; dmtimer_systimer_disable(t); + clk_disable(t->fck); } static void omap_clockevent_unidle(struct clock_event_device *evt) { struct dmtimer_clockevent *clkevt = to_dmtimer_clockevent(evt); struct dmtimer_systimer *t = &clkevt->t; + int error; + + error = clk_enable(t->fck); + if (error) + pr_err("could not enable timer fck on resume: %i\n", error); dmtimer_systimer_enable(t); writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_ena); @@ -570,8 +590,8 @@ static int __init dmtimer_clockevent_init(struct device_node *np) 3, /* Timer internal resynch latency */ 0xffffffff); - if (of_device_is_compatible(np, "ti,am33xx") || - of_device_is_compatible(np, "ti,am43")) { + if (of_machine_is_compatible("ti,am33xx") || + of_machine_is_compatible("ti,am43")) { dev->suspend = omap_clockevent_idle; dev->resume = omap_clockevent_unidle; } @@ -616,12 +636,18 @@ static void dmtimer_clocksource_suspend(struct clocksource *cs) clksrc->loadval = readl_relaxed(t->base + t->counter); dmtimer_systimer_disable(t); + clk_disable(t->fck); } static void dmtimer_clocksource_resume(struct clocksource *cs) { struct dmtimer_clocksource *clksrc = to_dmtimer_clocksource(cs); struct dmtimer_systimer *t = &clksrc->t; + int error; + + error = clk_enable(t->fck); + if (error) + pr_err("could not enable timer fck on resume: %i\n", error); dmtimer_systimer_enable(t); writel_relaxed(clksrc->loadval, t->base + t->counter); @@ -653,8 +679,8 @@ static int __init dmtimer_clocksource_init(struct device_node *np) dev->mask = CLOCKSOURCE_MASK(32); dev->flags = CLOCK_SOURCE_IS_CONTINUOUS; - if (of_device_is_compatible(np, "ti,am33xx") || - of_device_is_compatible(np, "ti,am43")) { + /* Unlike for clockevent, legacy code sets suspend only for am4 */ + if (of_machine_is_compatible("ti,am43")) { dev->suspend = dmtimer_clocksource_suspend; dev->resume = dmtimer_clocksource_resume; } diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 802b9ada4e9e..aa3a4ed07a66 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -624,6 +624,8 @@ config CRYPTO_DEV_QCE_SKCIPHER config CRYPTO_DEV_QCE_SHA bool depends on CRYPTO_DEV_QCE + select CRYPTO_SHA1 + select CRYPTO_SHA256 choice prompt "Algorithms enabled for QCE acceleration" @@ -756,10 +758,9 @@ 
config CRYPTO_DEV_ZYNQMP_AES config CRYPTO_DEV_MEDIATEK tristate "MediaTek's EIP97 Cryptographic Engine driver" depends on (ARM && ARCH_MEDIATEK) || COMPILE_TEST - select CRYPTO_AES + select CRYPTO_LIB_AES select CRYPTO_AEAD select CRYPTO_SKCIPHER - select CRYPTO_CTR select CRYPTO_SHA1 select CRYPTO_SHA256 select CRYPTO_SHA512 @@ -865,4 +866,18 @@ source "drivers/crypto/hisilicon/Kconfig" source "drivers/crypto/amlogic/Kconfig" +config CRYPTO_DEV_SA2UL + tristate "Support for TI security accelerator" + depends on ARCH_K3 || COMPILE_TEST + select ARM64_CRYPTO + select CRYPTO_AES + select CRYPTO_AES_ARM64 + select CRYPTO_ALGAPI + select HW_RANDOM + select SG_SPLIT + help + K3 devices include a security accelerator engine that may be + used for crypto offload. Select this if you want to use hardware + acceleration for cryptographic algorithms on these devices. + endif # CRYPTO_HW diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index 944ed7226e37..53fc115cf459 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -38,6 +38,7 @@ obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/ obj-$(CONFIG_CRYPTO_DEV_QCOM_RNG) += qcom-rng.o obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/ obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o +obj-$(CONFIG_CRYPTO_DEV_SA2UL) += sa2ul.o obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o obj-$(CONFIG_ARCH_STM32) += stm32/ obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c index 7f22d305178e..b72de8939497 100644 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c @@ -122,19 +122,17 @@ static int noinline_for_stack sun4i_ss_cipher_poll_fallback(struct skcipher_requ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq); - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm); int err; - skcipher_request_set_sync_tfm(subreq, op->fallback_tfm); - skcipher_request_set_callback(subreq, areq->base.flags, NULL, - NULL); - skcipher_request_set_crypt(subreq, areq->src, areq->dst, + skcipher_request_set_tfm(&ctx->fallback_req, op->fallback_tfm); + skcipher_request_set_callback(&ctx->fallback_req, areq->base.flags, + areq->base.complete, areq->base.data); + skcipher_request_set_crypt(&ctx->fallback_req, areq->src, areq->dst, areq->cryptlen, areq->iv); if (ctx->mode & SS_DECRYPTION) - err = crypto_skcipher_decrypt(subreq); + err = crypto_skcipher_decrypt(&ctx->fallback_req); else - err = crypto_skcipher_encrypt(subreq); - skcipher_request_zero(subreq); + err = crypto_skcipher_encrypt(&ctx->fallback_req); return err; } @@ -494,23 +492,25 @@ int sun4i_ss_cipher_init(struct crypto_tfm *tfm) alg.crypto.base); op->ss = algt->ss; - crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm), - sizeof(struct sun4i_cipher_req_ctx)); - - op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); + op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(op->fallback_tfm)) { dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n", name, PTR_ERR(op->fallback_tfm)); return PTR_ERR(op->fallback_tfm); } + crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm), + sizeof(struct sun4i_cipher_req_ctx) + + crypto_skcipher_reqsize(op->fallback_tfm)); + + err = pm_runtime_get_sync(op->ss->dev); if (err < 0) goto error_pm; return 0; error_pm: - 
crypto_free_sync_skcipher(op->fallback_tfm); + crypto_free_skcipher(op->fallback_tfm); return err; } @@ -518,7 +518,7 @@ void sun4i_ss_cipher_exit(struct crypto_tfm *tfm) { struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm); - crypto_free_sync_skcipher(op->fallback_tfm); + crypto_free_skcipher(op->fallback_tfm); pm_runtime_put(op->ss->dev); } @@ -546,10 +546,10 @@ int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, op->keylen = keylen; memcpy(op->key, key, keylen); - crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); - crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); + crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); - return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen); + return crypto_skcipher_setkey(op->fallback_tfm, key, keylen); } /* check and set the DES key, prepare the mode to be used */ @@ -566,10 +566,10 @@ int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key, op->keylen = keylen; memcpy(op->key, key, keylen); - crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); - crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); + crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); - return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen); + return crypto_skcipher_setkey(op->fallback_tfm, key, keylen); } /* check and set the 3DES key, prepare the mode to be used */ @@ -586,9 +586,9 @@ int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, op->keylen = keylen; memcpy(op->key, key, keylen); - crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); - crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); + crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); - return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen); + return crypto_skcipher_setkey(op->fallback_tfm, key, keylen); } diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h index 2b4c6333eb67..163962f9e284 100644 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h @@ -170,11 +170,12 @@ struct sun4i_tfm_ctx { u32 keylen; u32 keymode; struct sun4i_ss_ctx *ss; - struct crypto_sync_skcipher *fallback_tfm; + struct crypto_skcipher *fallback_tfm; }; struct sun4i_cipher_req_ctx { u32 mode; + struct skcipher_request fallback_req; // keep at the end }; struct sun4i_req_ctx { diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c index a6abb701bfc6..1e4f9a58bb24 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c @@ -58,23 +58,20 @@ static int sun8i_ce_cipher_fallback(struct skcipher_request *areq) #ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG struct skcipher_alg *alg = crypto_skcipher_alg(tfm); struct sun8i_ce_alg_template *algt; -#endif - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm); -#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher); algt->stat_fb++; #endif - 
skcipher_request_set_sync_tfm(subreq, op->fallback_tfm); - skcipher_request_set_callback(subreq, areq->base.flags, NULL, NULL); - skcipher_request_set_crypt(subreq, areq->src, areq->dst, + skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm); + skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags, + areq->base.complete, areq->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst, areq->cryptlen, areq->iv); if (rctx->op_dir & CE_DECRYPTION) - err = crypto_skcipher_decrypt(subreq); + err = crypto_skcipher_decrypt(&rctx->fallback_req); else - err = crypto_skcipher_encrypt(subreq); - skcipher_request_zero(subreq); + err = crypto_skcipher_encrypt(&rctx->fallback_req); return err; } @@ -335,18 +332,20 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm) algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher); op->ce = algt->ce; - sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx); - - op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); + op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(op->fallback_tfm)) { dev_err(op->ce->dev, "ERROR: Cannot allocate fallback for %s %ld\n", name, PTR_ERR(op->fallback_tfm)); return PTR_ERR(op->fallback_tfm); } + sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx) + + crypto_skcipher_reqsize(op->fallback_tfm); + + dev_info(op->ce->dev, "Fallback for %s is %s\n", crypto_tfm_alg_driver_name(&sktfm->base), - crypto_tfm_alg_driver_name(crypto_skcipher_tfm(&op->fallback_tfm->base))); + crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm))); op->enginectx.op.do_one_request = sun8i_ce_handle_cipher_request; op->enginectx.op.prepare_request = NULL; @@ -358,7 +357,8 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm) return 0; error_pm: - crypto_free_sync_skcipher(op->fallback_tfm); + pm_runtime_put_noidle(op->ce->dev); + crypto_free_skcipher(op->fallback_tfm); return err; } @@ -370,7 +370,7 @@ void sun8i_ce_cipher_exit(struct crypto_tfm *tfm) memzero_explicit(op->key, op->keylen); kfree(op->key); } - crypto_free_sync_skcipher(op->fallback_tfm); + crypto_free_skcipher(op->fallback_tfm); pm_runtime_put_sync_suspend(op->ce->dev); } @@ -400,10 +400,10 @@ int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, if (!op->key) return -ENOMEM; - crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); - crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); + crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); - return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen); + return crypto_skcipher_setkey(op->fallback_tfm, key, keylen); } int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, @@ -425,8 +425,8 @@ int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, if (!op->key) return -ENOMEM; - crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); - crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); + crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); - return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen); + return crypto_skcipher_setkey(op->fallback_tfm, key, keylen); } diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c 
b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c index b957061424a1..138759dc8190 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c @@ -185,7 +185,8 @@ static struct sun8i_ce_alg_template ce_algs[] = { .cra_priority = 400, .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 0xf, @@ -211,7 +212,8 @@ static struct sun8i_ce_alg_template ce_algs[] = { .cra_priority = 400, .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 0xf, @@ -236,7 +238,8 @@ static struct sun8i_ce_alg_template ce_algs[] = { .cra_priority = 400, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 0xf, @@ -262,7 +265,8 @@ static struct sun8i_ce_alg_template ce_algs[] = { .cra_priority = 400, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 0xf, diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h index 0e9eac397e1b..963645fe4adb 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h @@ -181,12 +181,14 @@ struct sun8i_ce_dev { /* * struct sun8i_cipher_req_ctx - context for a skcipher request - * @op_dir: direction (encrypt vs decrypt) for this request - * @flow: the flow to use for this request + * @op_dir: direction (encrypt vs decrypt) for this request + * @flow: the flow to use for this request + * @fallback_req: request struct for invoking the fallback skcipher TFM */ struct sun8i_cipher_req_ctx { u32 op_dir; int flow; + struct skcipher_request fallback_req; // keep at the end }; /* @@ -202,7 +204,7 @@ struct sun8i_cipher_tfm_ctx { u32 *key; u32 keylen; struct sun8i_ce_dev *ce; - struct crypto_sync_skcipher *fallback_tfm; + struct crypto_skcipher *fallback_tfm; }; /* diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c index c89cb2ee2496..7a131675a41c 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c @@ -73,7 +73,6 @@ static int sun8i_ss_cipher_fallback(struct skcipher_request *areq) struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); int err; - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm); #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG struct skcipher_alg *alg = crypto_skcipher_alg(tfm); struct sun8i_ss_alg_template *algt; @@ -81,15 +80,15 @@ static int sun8i_ss_cipher_fallback(struct skcipher_request *areq) algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher); algt->stat_fb++; #endif - 
skcipher_request_set_sync_tfm(subreq, op->fallback_tfm); - skcipher_request_set_callback(subreq, areq->base.flags, NULL, NULL); - skcipher_request_set_crypt(subreq, areq->src, areq->dst, + skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm); + skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags, + areq->base.complete, areq->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst, areq->cryptlen, areq->iv); if (rctx->op_dir & SS_DECRYPTION) - err = crypto_skcipher_decrypt(subreq); + err = crypto_skcipher_decrypt(&rctx->fallback_req); else - err = crypto_skcipher_encrypt(subreq); - skcipher_request_zero(subreq); + err = crypto_skcipher_encrypt(&rctx->fallback_req); return err; } @@ -334,18 +333,20 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm) algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher); op->ss = algt->ss; - sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx); - - op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); + op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(op->fallback_tfm)) { dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n", name, PTR_ERR(op->fallback_tfm)); return PTR_ERR(op->fallback_tfm); } + sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx) + + crypto_skcipher_reqsize(op->fallback_tfm); + + dev_info(op->ss->dev, "Fallback for %s is %s\n", crypto_tfm_alg_driver_name(&sktfm->base), - crypto_tfm_alg_driver_name(crypto_skcipher_tfm(&op->fallback_tfm->base))); + crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm))); op->enginectx.op.do_one_request = sun8i_ss_handle_cipher_request; op->enginectx.op.prepare_request = NULL; @@ -359,7 +360,7 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm) return 0; error_pm: - crypto_free_sync_skcipher(op->fallback_tfm); + crypto_free_skcipher(op->fallback_tfm); return err; } @@ -371,7 +372,7 @@ void sun8i_ss_cipher_exit(struct crypto_tfm *tfm) memzero_explicit(op->key, op->keylen); kfree(op->key); } - crypto_free_sync_skcipher(op->fallback_tfm); + crypto_free_skcipher(op->fallback_tfm); pm_runtime_put_sync(op->ss->dev); } @@ -401,10 +402,10 @@ int sun8i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, if (!op->key) return -ENOMEM; - crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); - crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); + crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); - return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen); + return crypto_skcipher_setkey(op->fallback_tfm, key, keylen); } int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, @@ -427,8 +428,8 @@ int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, if (!op->key) return -ENOMEM; - crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); - crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); + crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); - return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen); + return crypto_skcipher_setkey(op->fallback_tfm, key, keylen); } diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c index 
5d9d0fedcb06..9a23515783a6 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c @@ -169,7 +169,8 @@ static struct sun8i_ss_alg_template ss_algs[] = { .cra_priority = 400, .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 0xf, @@ -195,7 +196,8 @@ static struct sun8i_ss_alg_template ss_algs[] = { .cra_priority = 400, .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 0xf, @@ -220,7 +222,8 @@ static struct sun8i_ss_alg_template ss_algs[] = { .cra_priority = 400, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 0xf, @@ -246,7 +249,8 @@ static struct sun8i_ss_alg_template ss_algs[] = { .cra_priority = 400, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 0xf, diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h index 29c44f279112..0405767f1f7e 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h @@ -135,17 +135,18 @@ struct sun8i_ss_dev { /* * struct sun8i_cipher_req_ctx - context for a skcipher request - * @t_src: list of mapped SGs with their size - * @t_dst: list of mapped SGs with their size - * @p_key: DMA address of the key - * @p_iv: DMA address of the IV - * @method: current algorithm for this request - * @op_mode: op_mode for this request - * @op_dir: direction (encrypt vs decrypt) for this request - * @flow: the flow to use for this request - * @ivlen: size of biv - * @keylen: keylen for this request - * @biv: buffer which contain the IV + * @t_src: list of mapped SGs with their size + * @t_dst: list of mapped SGs with their size + * @p_key: DMA address of the key + * @p_iv: DMA address of the IV + * @method: current algorithm for this request + * @op_mode: op_mode for this request + * @op_dir: direction (encrypt vs decrypt) for this request + * @flow: the flow to use for this request + * @ivlen: size of biv + * @keylen: keylen for this request + * @biv: buffer which contain the IV + * @fallback_req: request struct for invoking the fallback skcipher TFM */ struct sun8i_cipher_req_ctx { struct sginfo t_src[MAX_SG]; @@ -159,6 +160,7 @@ struct sun8i_cipher_req_ctx { unsigned int ivlen; unsigned int keylen; void *biv; + struct skcipher_request fallback_req; // keep at the end }; /* @@ -174,7 +176,7 @@ struct sun8i_cipher_tfm_ctx { u32 *key; u32 keylen; struct sun8i_ss_dev *ss; - struct crypto_sync_skcipher *fallback_tfm; + struct crypto_skcipher *fallback_tfm; }; /* diff --git a/drivers/crypto/amlogic/Kconfig b/drivers/crypto/amlogic/Kconfig 
index cf9547602670..cf2c676a7093 100644 --- a/drivers/crypto/amlogic/Kconfig +++ b/drivers/crypto/amlogic/Kconfig @@ -1,7 +1,7 @@ config CRYPTO_DEV_AMLOGIC_GXL tristate "Support for amlogic cryptographic offloader" depends on HAS_IOMEM - default y if ARCH_MESON + default m if ARCH_MESON select CRYPTO_SKCIPHER select CRYPTO_ENGINE select CRYPTO_ECB diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c index 9819dd50fbad..5880b94dcb32 100644 --- a/drivers/crypto/amlogic/amlogic-gxl-cipher.c +++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c @@ -64,22 +64,20 @@ static int meson_cipher_do_fallback(struct skcipher_request *areq) #ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG struct skcipher_alg *alg = crypto_skcipher_alg(tfm); struct meson_alg_template *algt; -#endif - SYNC_SKCIPHER_REQUEST_ON_STACK(req, op->fallback_tfm); -#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG algt = container_of(alg, struct meson_alg_template, alg.skcipher); algt->stat_fb++; #endif - skcipher_request_set_sync_tfm(req, op->fallback_tfm); - skcipher_request_set_callback(req, areq->base.flags, NULL, NULL); - skcipher_request_set_crypt(req, areq->src, areq->dst, + skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm); + skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags, + areq->base.complete, areq->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst, areq->cryptlen, areq->iv); + if (rctx->op_dir == MESON_DECRYPT) - err = crypto_skcipher_decrypt(req); + err = crypto_skcipher_decrypt(&rctx->fallback_req); else - err = crypto_skcipher_encrypt(req); - skcipher_request_zero(req); + err = crypto_skcipher_encrypt(&rctx->fallback_req); return err; } @@ -321,15 +319,16 @@ int meson_cipher_init(struct crypto_tfm *tfm) algt = container_of(alg, struct meson_alg_template, alg.skcipher); op->mc = algt->mc; - sktfm->reqsize = sizeof(struct meson_cipher_req_ctx); - - op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); + op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(op->fallback_tfm)) { dev_err(op->mc->dev, "ERROR: Cannot allocate fallback for %s %ld\n", name, PTR_ERR(op->fallback_tfm)); return PTR_ERR(op->fallback_tfm); } + sktfm->reqsize = sizeof(struct meson_cipher_req_ctx) + + crypto_skcipher_reqsize(op->fallback_tfm); + op->enginectx.op.do_one_request = meson_handle_cipher_request; op->enginectx.op.prepare_request = NULL; op->enginectx.op.unprepare_request = NULL; @@ -345,7 +344,7 @@ void meson_cipher_exit(struct crypto_tfm *tfm) memzero_explicit(op->key, op->keylen); kfree(op->key); } - crypto_free_sync_skcipher(op->fallback_tfm); + crypto_free_skcipher(op->fallback_tfm); } int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, @@ -377,5 +376,5 @@ int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, if (!op->key) return -ENOMEM; - return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen); + return crypto_skcipher_setkey(op->fallback_tfm, key, keylen); } diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c index 411857fad8ba..466552acbbbb 100644 --- a/drivers/crypto/amlogic/amlogic-gxl-core.c +++ b/drivers/crypto/amlogic/amlogic-gxl-core.c @@ -54,7 +54,8 @@ static struct meson_alg_template mc_algs[] = { .cra_priority = 400, .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY 
| + CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct meson_cipher_tfm_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 0xf, @@ -79,7 +80,8 @@ static struct meson_alg_template mc_algs[] = { .cra_priority = 400, .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct meson_cipher_tfm_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 0xf, diff --git a/drivers/crypto/amlogic/amlogic-gxl.h b/drivers/crypto/amlogic/amlogic-gxl.h index b7f2de91ab76..dc0f142324a3 100644 --- a/drivers/crypto/amlogic/amlogic-gxl.h +++ b/drivers/crypto/amlogic/amlogic-gxl.h @@ -109,6 +109,7 @@ struct meson_dev { struct meson_cipher_req_ctx { u32 op_dir; int flow; + struct skcipher_request fallback_req; // keep at the end }; /* @@ -126,7 +127,7 @@ struct meson_cipher_tfm_ctx { u32 keylen; u32 keymode; struct meson_dev *mc; - struct crypto_sync_skcipher *fallback_tfm; + struct crypto_skcipher *fallback_tfm; }; /* diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c index 62ba0325a618..1a46eeddf082 100644 --- a/drivers/crypto/axis/artpec6_crypto.c +++ b/drivers/crypto/axis/artpec6_crypto.c @@ -2630,7 +2630,8 @@ static struct ahash_alg hash_algos[] = { .cra_name = "sha1", .cra_driver_name = "artpec-sha1", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct artpec6_hashalg_context), .cra_alignmask = 3, @@ -2653,7 +2654,8 @@ static struct ahash_alg hash_algos[] = { .cra_name = "sha256", .cra_driver_name = "artpec-sha256", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct artpec6_hashalg_context), .cra_alignmask = 3, @@ -2677,7 +2679,8 @@ static struct ahash_alg hash_algos[] = { .cra_name = "hmac(sha256)", .cra_driver_name = "artpec-hmac-sha256", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct artpec6_hashalg_context), .cra_alignmask = 3, @@ -2696,7 +2699,8 @@ static struct skcipher_alg crypto_algos[] = { .cra_name = "ecb(aes)", .cra_driver_name = "artpec6-ecb-aes", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context), .cra_alignmask = 3, @@ -2717,6 +2721,7 @@ static struct skcipher_alg crypto_algos[] = { .cra_driver_name = "artpec6-ctr-aes", .cra_priority = 300, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context), @@ -2738,7 +2743,8 @@ static struct skcipher_alg crypto_algos[] = { .cra_name = "cbc(aes)", .cra_driver_name = "artpec6-cbc-aes", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context), .cra_alignmask = 3, @@ -2759,7 +2765,8 @@ static struct skcipher_alg crypto_algos[] = { .cra_name = "xts(aes)", .cra_driver_name = "artpec6-xts-aes", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_ASYNC, + 
.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context), .cra_alignmask = 3, @@ -2790,6 +2797,7 @@ static struct aead_alg aead_algos[] = { .cra_driver_name = "artpec-gcm-aes", .cra_priority = 300, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context), diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index a353217a0d33..8a7fa1ae1ade 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c @@ -3233,7 +3233,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(md5),cbc(aes))", .cra_driver_name = "authenc-hmac-md5-cbc-aes-iproc", .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = AES_BLOCK_SIZE, @@ -3256,7 +3258,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(sha1),cbc(aes))", .cra_driver_name = "authenc-hmac-sha1-cbc-aes-iproc", .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = AES_BLOCK_SIZE, @@ -3279,7 +3283,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(sha256),cbc(aes))", .cra_driver_name = "authenc-hmac-sha256-cbc-aes-iproc", .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = AES_BLOCK_SIZE, @@ -3302,7 +3308,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(md5),cbc(des))", .cra_driver_name = "authenc-hmac-md5-cbc-des-iproc", .cra_blocksize = DES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES_BLOCK_SIZE, @@ -3325,7 +3333,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(sha1),cbc(des))", .cra_driver_name = "authenc-hmac-sha1-cbc-des-iproc", .cra_blocksize = DES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES_BLOCK_SIZE, @@ -3348,7 +3358,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(sha224),cbc(des))", .cra_driver_name = "authenc-hmac-sha224-cbc-des-iproc", .cra_blocksize = DES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES_BLOCK_SIZE, @@ -3371,7 +3383,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(sha256),cbc(des))", .cra_driver_name = "authenc-hmac-sha256-cbc-des-iproc", .cra_blocksize = DES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES_BLOCK_SIZE, @@ -3394,7 +3408,9 @@ static struct iproc_alg_s 
driver_algs[] = { .cra_name = "authenc(hmac(sha384),cbc(des))", .cra_driver_name = "authenc-hmac-sha384-cbc-des-iproc", .cra_blocksize = DES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES_BLOCK_SIZE, @@ -3417,7 +3433,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(sha512),cbc(des))", .cra_driver_name = "authenc-hmac-sha512-cbc-des-iproc", .cra_blocksize = DES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES_BLOCK_SIZE, @@ -3440,7 +3458,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(md5),cbc(des3_ede))", .cra_driver_name = "authenc-hmac-md5-cbc-des3-iproc", .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES3_EDE_BLOCK_SIZE, @@ -3463,7 +3483,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(sha1),cbc(des3_ede))", .cra_driver_name = "authenc-hmac-sha1-cbc-des3-iproc", .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES3_EDE_BLOCK_SIZE, @@ -3486,7 +3508,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(sha224),cbc(des3_ede))", .cra_driver_name = "authenc-hmac-sha224-cbc-des3-iproc", .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES3_EDE_BLOCK_SIZE, @@ -3509,7 +3533,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(sha256),cbc(des3_ede))", .cra_driver_name = "authenc-hmac-sha256-cbc-des3-iproc", .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES3_EDE_BLOCK_SIZE, @@ -3532,7 +3558,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(sha384),cbc(des3_ede))", .cra_driver_name = "authenc-hmac-sha384-cbc-des3-iproc", .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES3_EDE_BLOCK_SIZE, @@ -3555,7 +3583,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(sha512),cbc(des3_ede))", .cra_driver_name = "authenc-hmac-sha512-cbc-des3-iproc", .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES3_EDE_BLOCK_SIZE, @@ -3811,7 +3841,8 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "md5", .cra_driver_name = "md5-iproc", .cra_blocksize = MD5_BLOCK_WORDS * 4, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = 
CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, } }, .cipher_info = { @@ -4508,7 +4539,9 @@ static int spu_register_skcipher(struct iproc_alg_s *driver_alg) crypto->base.cra_priority = cipher_pri; crypto->base.cra_alignmask = 0; crypto->base.cra_ctxsize = sizeof(struct iproc_ctx_s); - crypto->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; + crypto->base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY; crypto->init = skcipher_init_tfm; crypto->exit = skcipher_exit_tfm; @@ -4547,7 +4580,8 @@ static int spu_register_ahash(struct iproc_alg_s *driver_alg) hash->halg.base.cra_ctxsize = sizeof(struct iproc_ctx_s); hash->halg.base.cra_init = ahash_cra_init; hash->halg.base.cra_exit = generic_cra_exit; - hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC; + hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY; hash->halg.statesize = sizeof(struct spu_hash_export_s); if (driver_alg->auth_info.mode != HASH_MODE_HMAC) { @@ -4591,7 +4625,7 @@ static int spu_register_aead(struct iproc_alg_s *driver_alg) aead->base.cra_alignmask = 0; aead->base.cra_ctxsize = sizeof(struct iproc_ctx_s); - aead->base.cra_flags |= CRYPTO_ALG_ASYNC; + aead->base.cra_flags |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY; /* setkey set in alg initialization */ aead->setauthsize = aead_setauthsize; aead->encrypt = aead_encrypt; diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index b2f9882bc010..91feda5b63f6 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -810,12 +810,6 @@ static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher, return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off); } -static int arc4_skcipher_setkey(struct crypto_skcipher *skcipher, - const u8 *key, unsigned int keylen) -{ - return skcipher_setkey(skcipher, key, keylen, 0); -} - static int des_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen) { @@ -838,7 +832,7 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, u32 *desc; if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { - dev_err(jrdev, "key size mismatch\n"); + dev_dbg(jrdev, "key size mismatch\n"); return -EINVAL; } @@ -1967,21 +1961,6 @@ static struct caam_skcipher_alg driver_algs[] = { }, .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_ECB, }, - { - .skcipher = { - .base = { - .cra_name = "ecb(arc4)", - .cra_driver_name = "ecb-arc4-caam", - .cra_blocksize = ARC4_BLOCK_SIZE, - }, - .setkey = arc4_skcipher_setkey, - .encrypt = skcipher_encrypt, - .decrypt = skcipher_decrypt, - .min_keysize = ARC4_MIN_KEY_SIZE, - .max_keysize = ARC4_MAX_KEY_SIZE, - }, - .caam.class1_alg_type = OP_ALG_ALGSEL_ARC4 | OP_ALG_AAI_ECB, - }, }; static struct caam_aead_alg driver_aeads[] = { @@ -3433,7 +3412,8 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg) alg->base.cra_module = THIS_MODULE; alg->base.cra_priority = CAAM_CRA_PRIORITY; alg->base.cra_ctxsize = sizeof(struct caam_ctx); - alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY; alg->init = caam_cra_init; alg->exit = caam_cra_exit; @@ -3446,7 +3426,8 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg) alg->base.cra_module = THIS_MODULE; alg->base.cra_priority = CAAM_CRA_PRIORITY; alg->base.cra_ctxsize = sizeof(struct caam_ctx); - alg->base.cra_flags = CRYPTO_ALG_ASYNC | 
CRYPTO_ALG_KERN_DRIVER_ONLY; + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY; alg->init = caam_aead_init; alg->exit = caam_aead_exit; @@ -3457,7 +3438,6 @@ int caam_algapi_init(struct device *ctrldev) struct caam_drv_private *priv = dev_get_drvdata(ctrldev); int i = 0, err = 0; u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst; - u32 arc4_inst; unsigned int md_limit = SHA512_DIGEST_SIZE; bool registered = false, gcm_support; @@ -3477,8 +3457,6 @@ int caam_algapi_init(struct device *ctrldev) CHA_ID_LS_DES_SHIFT; aes_inst = cha_inst & CHA_ID_LS_AES_MASK; md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; - arc4_inst = (cha_inst & CHA_ID_LS_ARC4_MASK) >> - CHA_ID_LS_ARC4_SHIFT; ccha_inst = 0; ptha_inst = 0; @@ -3499,7 +3477,6 @@ int caam_algapi_init(struct device *ctrldev) md_inst = mdha & CHA_VER_NUM_MASK; ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK; ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK; - arc4_inst = rd_reg32(&priv->ctrl->vreg.afha) & CHA_VER_NUM_MASK; gcm_support = aesa & CHA_VER_MISC_AES_GCM; } @@ -3522,10 +3499,6 @@ int caam_algapi_init(struct device *ctrldev) if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES)) continue; - /* Skip ARC4 algorithms if not supported by device */ - if (!arc4_inst && alg_sel == OP_ALG_ALGSEL_ARC4) - continue; - /* * Check support for AES modes not available * on LP devices. diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index 27e36bdf6163..bb1c0106a95c 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c @@ -728,7 +728,7 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, int ret = 0; if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { - dev_err(jrdev, "key size mismatch\n"); + dev_dbg(jrdev, "key size mismatch\n"); return -EINVAL; } @@ -2502,7 +2502,8 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg) alg->base.cra_module = THIS_MODULE; alg->base.cra_priority = CAAM_CRA_PRIORITY; alg->base.cra_ctxsize = sizeof(struct caam_ctx); - alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY; alg->init = caam_cra_init; alg->exit = caam_cra_exit; @@ -2515,7 +2516,8 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg) alg->base.cra_module = THIS_MODULE; alg->base.cra_priority = CAAM_CRA_PRIORITY; alg->base.cra_ctxsize = sizeof(struct caam_ctx); - alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY; alg->init = caam_aead_init; alg->exit = caam_aead_exit; diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index 28669cbecf77..66ae1d581168 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -1058,7 +1058,7 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, u32 *desc; if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { - dev_err(dev, "key size mismatch\n"); + dev_dbg(dev, "key size mismatch\n"); return -EINVAL; } @@ -2912,7 +2912,8 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg) alg->base.cra_module = THIS_MODULE; alg->base.cra_priority = CAAM_CRA_PRIORITY; alg->base.cra_ctxsize = sizeof(struct caam_ctx); - 
alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY; alg->init = caam_cra_init_skcipher; alg->exit = caam_cra_exit; @@ -2925,7 +2926,8 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg) alg->base.cra_module = THIS_MODULE; alg->base.cra_priority = CAAM_CRA_PRIORITY; alg->base.cra_ctxsize = sizeof(struct caam_ctx); - alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY; alg->init = caam_cra_init_aead; alg->exit = caam_cra_exit_aead; @@ -4004,7 +4006,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req) int digestsize = crypto_ahash_digestsize(ahash); struct ahash_edesc *edesc; struct dpaa2_sg_entry *sg_table; - int ret; + int ret = -ENOMEM; src_nents = sg_nents_for_len(req->src, req->nbytes); if (src_nents < 0) { @@ -4017,7 +4019,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req) DMA_TO_DEVICE); if (!mapped_nents) { dev_err(ctx->dev, "unable to DMA map source\n"); - return -ENOMEM; + return ret; } } else { mapped_nents = 0; @@ -4027,7 +4029,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req) edesc = qi_cache_zalloc(GFP_DMA | flags); if (!edesc) { dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); - return -ENOMEM; + return ret; } edesc->src_nents = src_nents; @@ -4082,7 +4084,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req) unmap: ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE); qi_cache_free(edesc); - return -ENOMEM; + return ret; } static int ahash_update_first(struct ahash_request *req) @@ -4498,7 +4500,11 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), sizeof(struct caam_hash_state)); - return ahash_set_sh_desc(ahash); + /* + * For keyed hash algorithms shared descriptors + * will be created later in setkey() callback + */ + return alg->setkey ? 
0 : ahash_set_sh_desc(ahash); } static void caam_hash_cra_exit(struct crypto_tfm *tfm) @@ -4547,7 +4553,7 @@ static struct caam_hash_alg *caam_hash_alloc(struct device *dev, alg->cra_priority = CAAM_CRA_PRIORITY; alg->cra_blocksize = template->blocksize; alg->cra_alignmask = 0; - alg->cra_flags = CRYPTO_ALG_ASYNC; + alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY; t_alg->alg_type = template->alg_type; t_alg->dev = dev; @@ -4697,6 +4703,13 @@ static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv) { struct device *dev = priv->dev; struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev); + int err; + + if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) { + err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle); + if (err) + dev_err(dev, "dpseci_reset() failed\n"); + } dpaa2_dpseci_congestion_free(priv); dpseci_close(priv->mc_io, 0, ls_dev->mc_handle); @@ -4894,6 +4907,14 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev) dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver); + if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) { + err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle); + if (err) { + dev_err(dev, "dpseci_reset() failed\n"); + goto err_get_vers; + } + } + err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle, &priv->dpseci_attr); if (err) { @@ -5221,7 +5242,7 @@ static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev) if (IS_ERR(t_alg)) { err = PTR_ERR(t_alg); dev_warn(dev, "%s hash alg allocation failed: %d\n", - alg->driver_name, err); + alg->hmac_driver_name, err); continue; } @@ -5384,6 +5405,7 @@ static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = { }, { .vendor = 0x0 } }; +MODULE_DEVICE_TABLE(fslmc, dpaa2_caam_match_id_table); static struct fsl_mc_driver dpaa2_caam_driver = { .driver = { diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 27ff4a3d037e..e8a6d8bc43b5 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -1927,7 +1927,7 @@ caam_hash_alloc(struct caam_hash_template *template, alg->cra_priority = CAAM_CRA_PRIORITY; alg->cra_blocksize = template->blocksize; alg->cra_alignmask = 0; - alg->cra_flags = CRYPTO_ALG_ASYNC; + alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY; t_alg->alg_type = template->alg_type; diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h index 60e2a54c19f1..c3c22a8de4c0 100644 --- a/drivers/crypto/caam/compat.h +++ b/drivers/crypto/caam/compat.h @@ -43,7 +43,6 @@ #include <crypto/akcipher.h> #include <crypto/scatterwalk.h> #include <crypto/skcipher.h> -#include <crypto/arc4.h> #include <crypto/internal/skcipher.h> #include <crypto/internal/hash.h> #include <crypto/internal/rsa.h> diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index f3d20b7645e0..94502f1d4b48 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -469,7 +469,7 @@ static int caam_get_era(struct caam_ctrl __iomem *ctrl) * pipeline to a depth of 1 (from it's default of 4) to preclude this situation * from occurring. 
*/ -static void handle_imx6_err005766(u32 *mcr) +static void handle_imx6_err005766(u32 __iomem *mcr) { if (of_machine_is_compatible("fsl,imx6q") || of_machine_is_compatible("fsl,imx6dl") || @@ -527,11 +527,21 @@ static const struct caam_imx_data caam_imx6ul_data = { .num_clks = ARRAY_SIZE(caam_imx6ul_clks), }; +static const struct clk_bulk_data caam_vf610_clks[] = { + { .id = "ipg" }, +}; + +static const struct caam_imx_data caam_vf610_data = { + .clks = caam_vf610_clks, + .num_clks = ARRAY_SIZE(caam_vf610_clks), +}; + static const struct soc_device_attribute caam_imx_soc_table[] = { { .soc_id = "i.MX6UL", .data = &caam_imx6ul_data }, { .soc_id = "i.MX6*", .data = &caam_imx6_data }, { .soc_id = "i.MX7*", .data = &caam_imx7_data }, { .soc_id = "i.MX8M*", .data = &caam_imx7_data }, + { .soc_id = "VF*", .data = &caam_vf610_data }, { .family = "Freescale i.MX" }, { /* sentinel */ } }; diff --git a/drivers/crypto/caam/dpseci.c b/drivers/crypto/caam/dpseci.c index 8a68531ded0b..039df6c5790c 100644 --- a/drivers/crypto/caam/dpseci.c +++ b/drivers/crypto/caam/dpseci.c @@ -104,6 +104,24 @@ int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) } /** + * dpseci_reset() - Reset the DPSECI, returns the object to initial state + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSECI object + * + * Return: '0' on success, error code otherwise + */ +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) +{ + struct fsl_mc_command cmd = { 0 }; + + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET, + cmd_flags, + token); + return mc_send_command(mc_io, &cmd); +} + +/** * dpseci_is_enabled() - Check if the DPSECI is enabled. * @mc_io: Pointer to MC portal's I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' diff --git a/drivers/crypto/caam/dpseci.h b/drivers/crypto/caam/dpseci.h index 4550e134d166..6dcd9be8144b 100644 --- a/drivers/crypto/caam/dpseci.h +++ b/drivers/crypto/caam/dpseci.h @@ -59,6 +59,8 @@ int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); + int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, int *en); diff --git a/drivers/crypto/caam/dpseci_cmd.h b/drivers/crypto/caam/dpseci_cmd.h index 6ab77ead6e3d..71a007c85adb 100644 --- a/drivers/crypto/caam/dpseci_cmd.h +++ b/drivers/crypto/caam/dpseci_cmd.h @@ -33,6 +33,7 @@ #define DPSECI_CMDID_ENABLE DPSECI_CMD_V1(0x002) #define DPSECI_CMDID_DISABLE DPSECI_CMD_V1(0x003) #define DPSECI_CMDID_GET_ATTR DPSECI_CMD_V1(0x004) +#define DPSECI_CMDID_RESET DPSECI_CMD_V1(0x005) #define DPSECI_CMDID_IS_ENABLED DPSECI_CMD_V1(0x006) #define DPSECI_CMDID_SET_RX_QUEUE DPSECI_CMD_V1(0x194) diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c index 17c6108b6d41..72db90176b1a 100644 --- a/drivers/crypto/caam/error.c +++ b/drivers/crypto/caam/error.c @@ -212,6 +212,9 @@ static const char * const rng_err_id_list[] = { "Prediction resistance and test request", "Uninstantiate", "Secure key generation", + "", + "Hardware error", + "Continuous check" }; static int report_ccb_status(struct device *jrdev, const u32 status, diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index 4af22e7ceb4f..bf6b03b17251 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c @@ -339,8 +339,7 @@ EXPORT_SYMBOL(caam_jr_free); * caam_jr_enqueue() 
- Enqueue a job descriptor head. Returns -EINPROGRESS * if OK, -ENOSPC if the queue is full, -EIO if it cannot map the caller's * descriptor. - * @dev: device of the job ring to be used. This device should have - * been assigned prior by caam_jr_register(). + * @dev: struct device of the job ring to be used * @desc: points to a job descriptor that execute our request. All * descriptors (and all referenced data) must be in a DMAable * region, and all data references must be physical addresses diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index 0f810bc13b2b..af61f3a2c0d4 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h @@ -173,9 +173,14 @@ static inline u64 rd_reg64(void __iomem *reg) static inline u64 cpu_to_caam_dma64(dma_addr_t value) { - if (caam_imx) - return (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | - (u64)cpu_to_caam32(upper_32_bits(value))); + if (caam_imx) { + u64 ret_val = (u64)cpu_to_caam32(lower_32_bits(value)) << 32; + + if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) + ret_val |= (u64)cpu_to_caam32(upper_32_bits(value)); + + return ret_val; + } return cpu_to_caam64(value); } diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c index 1be1adffff1d..5af0dc2a8909 100644 --- a/drivers/crypto/cavium/cpt/cptvf_algs.c +++ b/drivers/crypto/cavium/cpt/cptvf_algs.c @@ -99,10 +99,10 @@ static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc, struct cvm_enc_ctx *ctx = crypto_skcipher_ctx(tfm); struct cvm_req_ctx *rctx = skcipher_request_ctx(req); struct fc_context *fctx = &rctx->fctx; - u64 *offset_control = &rctx->control_word; u32 enc_iv_len = crypto_skcipher_ivsize(tfm); struct cpt_request_info *req_info = &rctx->cpt_req; - u64 *ctrl_flags = NULL; + __be64 *ctrl_flags = NULL; + __be64 *offset_control; req_info->ctrl.s.grp = 0; req_info->ctrl.s.dma_mode = DMA_GATHER_SCATTER; @@ -126,9 +126,10 @@ static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc, memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2); else memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len); - ctrl_flags = (u64 *)&fctx->enc.enc_ctrl.flags; - *ctrl_flags = cpu_to_be64(*ctrl_flags); + ctrl_flags = (__be64 *)&fctx->enc.enc_ctrl.flags; + *ctrl_flags = cpu_to_be64(fctx->enc.enc_ctrl.flags); + offset_control = (__be64 *)&rctx->control_word; *offset_control = cpu_to_be64(((u64)(enc_iv_len) << 16)); /* Storing Packet Data Information in offset * Control Word First 8 bytes @@ -200,6 +201,7 @@ static inline int cvm_enc_dec(struct skcipher_request *req, u32 enc) int status; memset(req_info, 0, sizeof(struct cpt_request_info)); + req_info->may_sleep = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) != 0; memset(fctx, 0, sizeof(struct fc_context)); create_input_list(req, enc, enc_iv_len); create_output_list(req, enc_iv_len); @@ -339,7 +341,8 @@ static int cvm_enc_dec_init(struct crypto_skcipher *tfm) } static struct skcipher_alg algs[] = { { - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct cvm_enc_ctx), .base.cra_alignmask = 7, @@ -356,7 +359,8 @@ static struct skcipher_alg algs[] = { { .decrypt = cvm_decrypt, .init = cvm_enc_dec_init, }, { - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct cvm_enc_ctx), .base.cra_alignmask = 7, @@ -373,7 +377,8 @@ 
static struct skcipher_alg algs[] = { { .decrypt = cvm_decrypt, .init = cvm_enc_dec_init, }, { - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct cvm_enc_ctx), .base.cra_alignmask = 7, @@ -389,7 +394,8 @@ static struct skcipher_alg algs[] = { { .decrypt = cvm_decrypt, .init = cvm_enc_dec_init, }, { - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct cvm_enc_ctx), .base.cra_alignmask = 7, @@ -406,7 +412,8 @@ static struct skcipher_alg algs[] = { { .decrypt = cvm_decrypt, .init = cvm_enc_dec_init, }, { - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct cvm_des3_ctx), .base.cra_alignmask = 7, @@ -423,7 +430,8 @@ static struct skcipher_alg algs[] = { { .decrypt = cvm_decrypt, .init = cvm_enc_dec_init, }, { - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct cvm_des3_ctx), .base.cra_alignmask = 7, diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c index 7a24019356b5..3878b01e19e1 100644 --- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c +++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c @@ -4,6 +4,7 @@ */ #include "cptvf.h" +#include "cptvf_algs.h" #include "request_manager.h" /** @@ -133,7 +134,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf, /* Setup gather (input) components */ g_sz_bytes = ((req->incnt + 3) / 4) * sizeof(struct sglist_component); - info->gather_components = kzalloc(g_sz_bytes, GFP_KERNEL); + info->gather_components = kzalloc(g_sz_bytes, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC); if (!info->gather_components) { ret = -ENOMEM; goto scatter_gather_clean; @@ -150,7 +151,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf, /* Setup scatter (output) components */ s_sz_bytes = ((req->outcnt + 3) / 4) * sizeof(struct sglist_component); - info->scatter_components = kzalloc(s_sz_bytes, GFP_KERNEL); + info->scatter_components = kzalloc(s_sz_bytes, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC); if (!info->scatter_components) { ret = -ENOMEM; goto scatter_gather_clean; @@ -167,17 +168,16 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf, /* Create and initialize DPTR */ info->dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE; - info->in_buffer = kzalloc(info->dlen, GFP_KERNEL); + info->in_buffer = kzalloc(info->dlen, req->may_sleep ? 
GFP_KERNEL : GFP_ATOMIC); if (!info->in_buffer) { ret = -ENOMEM; goto scatter_gather_clean; } - ((u16 *)info->in_buffer)[0] = req->outcnt; - ((u16 *)info->in_buffer)[1] = req->incnt; - ((u16 *)info->in_buffer)[2] = 0; - ((u16 *)info->in_buffer)[3] = 0; - *(u64 *)info->in_buffer = cpu_to_be64p((u64 *)info->in_buffer); + ((__be16 *)info->in_buffer)[0] = cpu_to_be16(req->outcnt); + ((__be16 *)info->in_buffer)[1] = cpu_to_be16(req->incnt); + ((__be16 *)info->in_buffer)[2] = 0; + ((__be16 *)info->in_buffer)[3] = 0; memcpy(&info->in_buffer[8], info->gather_components, g_sz_bytes); @@ -195,7 +195,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf, } /* Create and initialize RPTR */ - info->out_buffer = kzalloc(COMPLETION_CODE_SIZE, GFP_KERNEL); + info->out_buffer = kzalloc(COMPLETION_CODE_SIZE, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC); if (!info->out_buffer) { ret = -ENOMEM; goto scatter_gather_clean; @@ -421,7 +421,7 @@ int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req) struct cpt_vq_command vq_cmd; union cpt_inst_s cptinst; - info = kzalloc(sizeof(*info), GFP_KERNEL); + info = kzalloc(sizeof(*info), req->may_sleep ? GFP_KERNEL : GFP_ATOMIC); if (unlikely(!info)) { dev_err(&pdev->dev, "Unable to allocate memory for info_buffer\n"); return -ENOMEM; @@ -443,7 +443,7 @@ int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req) * Get buffer for union cpt_res_s response * structure and its physical address */ - info->completion_addr = kzalloc(sizeof(union cpt_res_s), GFP_KERNEL); + info->completion_addr = kzalloc(sizeof(union cpt_res_s), req->may_sleep ? GFP_KERNEL : GFP_ATOMIC); if (unlikely(!info->completion_addr)) { dev_err(&pdev->dev, "Unable to allocate memory for completion_addr\n"); ret = -ENOMEM; @@ -470,8 +470,6 @@ int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req) vq_cmd.cmd.s.param2 = cpu_to_be16(cpt_req->param2); vq_cmd.cmd.s.dlen = cpu_to_be16(cpt_req->dlen); - /* 64-bit swap for microcode data reads, not needed for addresses*/ - vq_cmd.cmd.u64 = cpu_to_be64(vq_cmd.cmd.u64); vq_cmd.dptr = info->dptr_baddr; vq_cmd.rptr = info->rptr_baddr; vq_cmd.cptr.u64 = 0; diff --git a/drivers/crypto/cavium/cpt/request_manager.h b/drivers/crypto/cavium/cpt/request_manager.h index 3514b082eca7..8d40e4ba3af1 100644 --- a/drivers/crypto/cavium/cpt/request_manager.h +++ b/drivers/crypto/cavium/cpt/request_manager.h @@ -62,6 +62,8 @@ struct cpt_request_info { union ctrl_info ctrl; /* User control information */ struct cptvf_request req; /* Request Information (Core specific) */ + bool may_sleep; + struct buf_ptr in[MAX_BUF_CNT]; struct buf_ptr out[MAX_BUF_CNT]; @@ -73,16 +75,16 @@ struct sglist_component { union { u64 len; struct { - u16 len0; - u16 len1; - u16 len2; - u16 len3; + __be16 len0; + __be16 len1; + __be16 len2; + __be16 len3; } s; } u; - u64 ptr0; - u64 ptr1; - u64 ptr2; - u64 ptr3; + __be64 ptr0; + __be64 ptr1; + __be64 ptr2; + __be64 ptr3; }; struct cpt_info_buffer { @@ -112,10 +114,10 @@ struct cpt_info_buffer { union vq_cmd_word0 { u64 u64; struct { - u16 opcode; - u16 param1; - u16 param2; - u16 dlen; + __be16 opcode; + __be16 param1; + __be16 param2; + __be16 dlen; } s; }; diff --git a/drivers/crypto/cavium/nitrox/nitrox_aead.c b/drivers/crypto/cavium/nitrox/nitrox_aead.c index dce5423a5883..1be2571363fe 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_aead.c +++ b/drivers/crypto/cavium/nitrox/nitrox_aead.c @@ -522,7 +522,7 @@ static struct aead_alg nitrox_aeads[] = { { .cra_name = "gcm(aes)", .cra_driver_name = 
"n5_aes_gcm", .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), .cra_alignmask = 0, @@ -541,7 +541,7 @@ static struct aead_alg nitrox_aeads[] = { { .cra_name = "rfc4106(gcm(aes))", .cra_driver_name = "n5_rfc4106", .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), .cra_alignmask = 0, diff --git a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c index 18088b0a2257..a553ac65f324 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c +++ b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c @@ -388,7 +388,7 @@ static struct skcipher_alg nitrox_skciphers[] = { { .cra_name = "cbc(aes)", .cra_driver_name = "n5_cbc(aes)", .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), .cra_alignmask = 0, @@ -407,7 +407,7 @@ static struct skcipher_alg nitrox_skciphers[] = { { .cra_name = "ecb(aes)", .cra_driver_name = "n5_ecb(aes)", .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), .cra_alignmask = 0, @@ -426,7 +426,7 @@ static struct skcipher_alg nitrox_skciphers[] = { { .cra_name = "cfb(aes)", .cra_driver_name = "n5_cfb(aes)", .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), .cra_alignmask = 0, @@ -445,7 +445,7 @@ static struct skcipher_alg nitrox_skciphers[] = { { .cra_name = "xts(aes)", .cra_driver_name = "n5_xts(aes)", .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), .cra_alignmask = 0, @@ -464,7 +464,7 @@ static struct skcipher_alg nitrox_skciphers[] = { { .cra_name = "rfc3686(ctr(aes))", .cra_driver_name = "n5_rfc3686(ctr(aes))", .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), .cra_alignmask = 0, @@ -483,7 +483,7 @@ static struct skcipher_alg nitrox_skciphers[] = { { .cra_name = "cts(cbc(aes))", .cra_driver_name = "n5_cts(cbc(aes))", .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), .cra_alignmask = 0, @@ -502,7 +502,7 @@ static struct skcipher_alg nitrox_skciphers[] = { { .cra_name = "cbc(des3_ede)", .cra_driver_name = "n5_cbc(des3_ede)", .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), .cra_alignmask = 0, @@ -521,7 +521,7 @@ static struct skcipher_alg nitrox_skciphers[] = { { .cra_name = "ecb(des3_ede)", .cra_driver_name = "n5_ecb(des3_ede)", .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, 
.cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), .cra_alignmask = 0, diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c index 5eba7ee49e81..11a305fa19e6 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c +++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c @@ -378,6 +378,7 @@ int ccp_register_aes_cmac_algs(struct list_head *head) snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "cmac(aes)"); snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "cmac-aes-ccp"); base->cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK; base->cra_blocksize = AES_BLOCK_SIZE; diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c index 9e8f07c1afac..1c1c939f5c39 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c +++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c @@ -172,6 +172,7 @@ static struct aead_alg ccp_aes_gcm_defaults = { .maxauthsize = AES_BLOCK_SIZE, .base = { .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = AES_BLOCK_SIZE, diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c index 04b2517df955..6849261ca47d 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c +++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c @@ -98,7 +98,7 @@ static int ccp_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, ctx->u.aes.key_len = key_len / 2; sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len); - return crypto_sync_skcipher_setkey(ctx->u.aes.tfm_skcipher, key, key_len); + return crypto_skcipher_setkey(ctx->u.aes.tfm_skcipher, key, key_len); } static int ccp_aes_xts_crypt(struct skcipher_request *req, @@ -145,20 +145,19 @@ static int ccp_aes_xts_crypt(struct skcipher_request *req, (ctx->u.aes.key_len != AES_KEYSIZE_256)) fallback = 1; if (fallback) { - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, - ctx->u.aes.tfm_skcipher); - /* Use the fallback to process the request for any * unsupported unit sizes or key sizes */ - skcipher_request_set_sync_tfm(subreq, ctx->u.aes.tfm_skcipher); - skcipher_request_set_callback(subreq, req->base.flags, - NULL, NULL); - skcipher_request_set_crypt(subreq, req->src, req->dst, - req->cryptlen, req->iv); - ret = encrypt ? crypto_skcipher_encrypt(subreq) : - crypto_skcipher_decrypt(subreq); - skcipher_request_zero(subreq); + skcipher_request_set_tfm(&rctx->fallback_req, + ctx->u.aes.tfm_skcipher); + skcipher_request_set_callback(&rctx->fallback_req, + req->base.flags, + req->base.complete, + req->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, req->src, + req->dst, req->cryptlen, req->iv); + ret = encrypt ? 
crypto_skcipher_encrypt(&rctx->fallback_req) : + crypto_skcipher_decrypt(&rctx->fallback_req); return ret; } @@ -198,13 +197,12 @@ static int ccp_aes_xts_decrypt(struct skcipher_request *req) static int ccp_aes_xts_init_tfm(struct crypto_skcipher *tfm) { struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm); - struct crypto_sync_skcipher *fallback_tfm; + struct crypto_skcipher *fallback_tfm; ctx->complete = ccp_aes_xts_complete; ctx->u.aes.key_len = 0; - fallback_tfm = crypto_alloc_sync_skcipher("xts(aes)", 0, - CRYPTO_ALG_ASYNC | + fallback_tfm = crypto_alloc_skcipher("xts(aes)", 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(fallback_tfm)) { pr_warn("could not load fallback driver xts(aes)\n"); @@ -212,7 +210,8 @@ static int ccp_aes_xts_init_tfm(struct crypto_skcipher *tfm) } ctx->u.aes.tfm_skcipher = fallback_tfm; - crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx)); + crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx) + + crypto_skcipher_reqsize(fallback_tfm)); return 0; } @@ -221,7 +220,7 @@ static void ccp_aes_xts_exit_tfm(struct crypto_skcipher *tfm) { struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm); - crypto_free_sync_skcipher(ctx->u.aes.tfm_skcipher); + crypto_free_skcipher(ctx->u.aes.tfm_skcipher); } static int ccp_register_aes_xts_alg(struct list_head *head, @@ -243,6 +242,7 @@ static int ccp_register_aes_xts_alg(struct list_head *head, snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", def->drv_name); alg->base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK; alg->base.cra_blocksize = AES_BLOCK_SIZE; diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c index 51e12fbd1159..e6dcd8cedd53 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes.c +++ b/drivers/crypto/ccp/ccp-crypto-aes.c @@ -212,6 +212,7 @@ static const struct skcipher_alg ccp_aes_defaults = { .init = ccp_aes_init_tfm, .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = AES_BLOCK_SIZE, @@ -229,6 +230,7 @@ static const struct skcipher_alg ccp_aes_rfc3686_defaults = { .init = ccp_aes_rfc3686_init_tfm, .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = CTR_RFC3686_BLOCK_SIZE, diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c index 9c129defdb50..ec97daf0fcb7 100644 --- a/drivers/crypto/ccp/ccp-crypto-des3.c +++ b/drivers/crypto/ccp/ccp-crypto-des3.c @@ -136,6 +136,7 @@ static const struct skcipher_alg ccp_des3_defaults = { .init = ccp_des3_init_tfm, .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c index b0cc2bd73af8..8fbfdb9e8cd3 100644 --- a/drivers/crypto/ccp/ccp-crypto-sha.c +++ b/drivers/crypto/ccp/ccp-crypto-sha.c @@ -19,6 +19,7 @@ #include <crypto/internal/hash.h> #include <crypto/sha.h> #include <crypto/scatterwalk.h> +#include <linux/string.h> #include "ccp-crypto.h" @@ -424,7 +425,7 @@ static int ccp_register_hmac_alg(struct list_head *head, *ccp_alg = *base_alg; INIT_LIST_HEAD(&ccp_alg->entry); - strncpy(ccp_alg->child_alg, def->name, CRYPTO_MAX_ALG_NAME); + strscpy(ccp_alg->child_alg, def->name, CRYPTO_MAX_ALG_NAME); alg = &ccp_alg->alg; alg->setkey = 
ccp_sha_setkey; @@ -486,6 +487,7 @@ static int ccp_register_sha_alg(struct list_head *head, snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", def->drv_name); base->cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK; base->cra_blocksize = def->block_size; diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h index 90a009e6b5c1..aed3d2192d01 100644 --- a/drivers/crypto/ccp/ccp-crypto.h +++ b/drivers/crypto/ccp/ccp-crypto.h @@ -89,7 +89,7 @@ static inline struct ccp_crypto_ahash_alg * /***** AES related defines *****/ struct ccp_aes_ctx { /* Fallback cipher for XTS with unsupported unit sizes */ - struct crypto_sync_skcipher *tfm_skcipher; + struct crypto_skcipher *tfm_skcipher; enum ccp_engine engine; enum ccp_aes_type type; @@ -121,6 +121,8 @@ struct ccp_aes_req_ctx { u8 rfc3686_iv[AES_BLOCK_SIZE]; struct ccp_cmd cmd; + + struct skcipher_request fallback_req; // keep at the end }; struct ccp_aes_cmac_req_ctx { diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index 82ac4c14c04c..7838f63bab32 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -221,8 +221,8 @@ static unsigned int ccp5_get_free_slots(struct ccp_cmd_queue *cmd_q) static int ccp5_do_cmd(struct ccp5_desc *desc, struct ccp_cmd_queue *cmd_q) { - u32 *mP; - __le32 *dP; + __le32 *mP; + u32 *dP; u32 tail; int i; int ret = 0; @@ -235,8 +235,8 @@ static int ccp5_do_cmd(struct ccp5_desc *desc, } mutex_lock(&cmd_q->q_mutex); - mP = (u32 *) &cmd_q->qbase[cmd_q->qidx]; - dP = (__le32 *) desc; + mP = (__le32 *)&cmd_q->qbase[cmd_q->qidx]; + dP = (u32 *)desc; for (i = 0; i < 8; i++) mP[i] = cpu_to_le32(dP[i]); /* handle endianness */ diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c index 19ac509ed76e..0971ee60f840 100644 --- a/drivers/crypto/ccp/ccp-dev.c +++ b/drivers/crypto/ccp/ccp-dev.c @@ -531,7 +531,6 @@ int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait) return len; } -#ifdef CONFIG_PM bool ccp_queues_suspended(struct ccp_device *ccp) { unsigned int suspended = 0; @@ -549,7 +548,7 @@ bool ccp_queues_suspended(struct ccp_device *ccp) return ccp->cmd_q_count == suspended; } -int ccp_dev_suspend(struct sp_device *sp, pm_message_t state) +int ccp_dev_suspend(struct sp_device *sp) { struct ccp_device *ccp = sp->ccp_data; unsigned long flags; @@ -601,7 +600,6 @@ int ccp_dev_resume(struct sp_device *sp) return 0; } -#endif int ccp_dev_init(struct sp_device *sp) { diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 3f68262d9ab4..a5d9123a22ea 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -469,6 +469,7 @@ struct ccp_sg_workarea { unsigned int sg_used; struct scatterlist *dma_sg; + struct scatterlist *dma_sg_head; struct device *dma_dev; unsigned int dma_count; enum dma_data_direction dma_dir; @@ -596,8 +597,8 @@ struct dword3 { }; union dword4 { - __le32 dst_lo; /* NON-SHA */ - __le32 sha_len_lo; /* SHA */ + u32 dst_lo; /* NON-SHA */ + u32 sha_len_lo; /* SHA */ }; union dword5 { @@ -607,7 +608,7 @@ union dword5 { unsigned int rsvd1:13; unsigned int fixed:1; } fields; - __le32 sha_len_hi; + u32 sha_len_hi; }; struct dword7 { @@ -618,12 +619,12 @@ struct dword7 { struct ccp5_desc { struct dword0 dw0; - __le32 length; - __le32 src_lo; + u32 length; + u32 src_lo; struct dword3 dw3; union dword4 dw4; union dword5 dw5; - __le32 key_lo; + u32 key_lo; struct dword7 dw7; }; diff --git 
a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index 422193690fd4..bd270e66185e 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -63,7 +63,7 @@ static u32 ccp_gen_jobid(struct ccp_device *ccp) static void ccp_sg_free(struct ccp_sg_workarea *wa) { if (wa->dma_count) - dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir); + dma_unmap_sg(wa->dma_dev, wa->dma_sg_head, wa->nents, wa->dma_dir); wa->dma_count = 0; } @@ -92,6 +92,7 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev, return 0; wa->dma_sg = sg; + wa->dma_sg_head = sg; wa->dma_dev = dev; wa->dma_dir = dma_dir; wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir); @@ -104,14 +105,28 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev, static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len) { unsigned int nbytes = min_t(u64, len, wa->bytes_left); + unsigned int sg_combined_len = 0; if (!wa->sg) return; wa->sg_used += nbytes; wa->bytes_left -= nbytes; - if (wa->sg_used == wa->sg->length) { - wa->sg = sg_next(wa->sg); + if (wa->sg_used == sg_dma_len(wa->dma_sg)) { + /* Advance to the next DMA scatterlist entry */ + wa->dma_sg = sg_next(wa->dma_sg); + + /* In the case that the DMA mapped scatterlist has entries + * that have been merged, the non-DMA mapped scatterlist + * must be advanced multiple times for each merged entry. + * This ensures that the current non-DMA mapped entry + * corresponds to the current DMA mapped entry. + */ + do { + sg_combined_len += wa->sg->length; + wa->sg = sg_next(wa->sg); + } while (wa->sg_used > sg_combined_len); + wa->sg_used = 0; } } @@ -299,7 +314,7 @@ static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from) /* Update the structures and generate the count */ buf_count = 0; while (sg_wa->bytes_left && (buf_count < dm_wa->length)) { - nbytes = min(sg_wa->sg->length - sg_wa->sg_used, + nbytes = min(sg_dma_len(sg_wa->dma_sg) - sg_wa->sg_used, dm_wa->length - buf_count); nbytes = min_t(u64, sg_wa->bytes_left, nbytes); @@ -331,11 +346,11 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst, * and destination. The resulting len values will always be <= UINT_MAX * because the dma length is an unsigned int. 
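The dma_sg/dma_sg_head split above exists because dma_map_sg() may coalesce adjacent entries, so the mapped list has to be walked with the DMA accessors rather than the original entry lengths. A short background sketch of that rule (standard DMA API usage, not ccp-specific)::

    struct scatterlist *sg;
    int i, nents_mapped;

    nents_mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
    if (!nents_mapped)
            return -EIO;

    /* nents_mapped may be smaller than nents: iterate the mapped count
     * and use sg_dma_address()/sg_dma_len(), not sg->offset/sg->length.
     */
    for_each_sg(sgl, sg, nents_mapped, i) {
            dma_addr_t addr = sg_dma_address(sg);
            unsigned int len = sg_dma_len(sg);

            /* program addr/len into one hardware descriptor */
    }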
*/ - sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used; + sg_src_len = sg_dma_len(src->sg_wa.dma_sg) - src->sg_wa.sg_used; sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len); if (dst) { - sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used; + sg_dst_len = sg_dma_len(dst->sg_wa.dma_sg) - dst->sg_wa.sg_used; sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len); op_len = min(sg_src_len, sg_dst_len); } else { @@ -365,7 +380,7 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst, /* Enough data in the sg element, but we need to * adjust for any previously copied data */ - op->src.u.dma.address = sg_dma_address(src->sg_wa.sg); + op->src.u.dma.address = sg_dma_address(src->sg_wa.dma_sg); op->src.u.dma.offset = src->sg_wa.sg_used; op->src.u.dma.length = op_len & ~(block_size - 1); @@ -386,7 +401,7 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst, /* Enough room in the sg element, but we need to * adjust for any previously used area */ - op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg); + op->dst.u.dma.address = sg_dma_address(dst->sg_wa.dma_sg); op->dst.u.dma.offset = dst->sg_wa.sg_used; op->dst.u.dma.length = op->src.u.dma.length; } @@ -617,13 +632,12 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) struct ccp_data src, dst; struct ccp_data aad; struct ccp_op op; - - unsigned long long *final; unsigned int dm_offset; unsigned int authsize; unsigned int jobid; unsigned int ilen; bool in_place = true; /* Default value */ + __be64 *final; int ret; struct scatterlist *p_inp, sg_inp[2]; @@ -825,7 +839,7 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) DMA_BIDIRECTIONAL); if (ret) goto e_dst; - final = (unsigned long long *) final_wa.address; + final = (__be64 *)final_wa.address; final[0] = cpu_to_be64(aes->aad_len * 8); final[1] = cpu_to_be64(ilen * 8); @@ -1308,7 +1322,6 @@ ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) return -EINVAL; } - ret = -EIO; /* Zero out all the fields of the command desc */ memset(&op, 0, sizeof(op)); @@ -2028,7 +2041,7 @@ ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) dst.sg_wa.sg_used = 0; for (i = 1; i <= src.sg_wa.dma_count; i++) { if (!dst.sg_wa.sg || - (dst.sg_wa.sg->length < src.sg_wa.sg->length)) { + (sg_dma_len(dst.sg_wa.sg) < sg_dma_len(src.sg_wa.sg))) { ret = -EINVAL; goto e_dst; } @@ -2054,8 +2067,8 @@ ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) goto e_dst; } - dst.sg_wa.sg_used += src.sg_wa.sg->length; - if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) { + dst.sg_wa.sg_used += sg_dma_len(src.sg_wa.sg); + if (dst.sg_wa.sg_used == sg_dma_len(dst.sg_wa.sg)) { dst.sg_wa.sg = sg_next(dst.sg_wa.sg); dst.sg_wa.sg_used = 0; } diff --git a/drivers/crypto/ccp/sp-dev.c b/drivers/crypto/ccp/sp-dev.c index ce42675d3274..6284a15e5047 100644 --- a/drivers/crypto/ccp/sp-dev.c +++ b/drivers/crypto/ccp/sp-dev.c @@ -211,13 +211,12 @@ void sp_destroy(struct sp_device *sp) sp_del_device(sp); } -#ifdef CONFIG_PM -int sp_suspend(struct sp_device *sp, pm_message_t state) +int sp_suspend(struct sp_device *sp) { int ret; if (sp->dev_vdata->ccp_vdata) { - ret = ccp_dev_suspend(sp, state); + ret = ccp_dev_suspend(sp); if (ret) return ret; } @@ -237,7 +236,6 @@ int sp_resume(struct sp_device *sp) return 0; } -#endif struct sp_device *sp_get_psp_master_device(void) { diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h index f913f1494af9..0218d0670eee 100644 --- 
a/drivers/crypto/ccp/sp-dev.h +++ b/drivers/crypto/ccp/sp-dev.h @@ -119,7 +119,7 @@ int sp_init(struct sp_device *sp); void sp_destroy(struct sp_device *sp); struct sp_device *sp_get_master(void); -int sp_suspend(struct sp_device *sp, pm_message_t state); +int sp_suspend(struct sp_device *sp); int sp_resume(struct sp_device *sp); int sp_request_ccp_irq(struct sp_device *sp, irq_handler_t handler, const char *name, void *data); @@ -134,7 +134,7 @@ struct sp_device *sp_get_psp_master_device(void); int ccp_dev_init(struct sp_device *sp); void ccp_dev_destroy(struct sp_device *sp); -int ccp_dev_suspend(struct sp_device *sp, pm_message_t state); +int ccp_dev_suspend(struct sp_device *sp); int ccp_dev_resume(struct sp_device *sp); #else /* !CONFIG_CRYPTO_DEV_SP_CCP */ @@ -145,7 +145,7 @@ static inline int ccp_dev_init(struct sp_device *sp) } static inline void ccp_dev_destroy(struct sp_device *sp) { } -static inline int ccp_dev_suspend(struct sp_device *sp, pm_message_t state) +static inline int ccp_dev_suspend(struct sp_device *sp) { return 0; } diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c index cb6cb47053f4..f471dbaef1fb 100644 --- a/drivers/crypto/ccp/sp-pci.c +++ b/drivers/crypto/ccp/sp-pci.c @@ -252,23 +252,19 @@ static void sp_pci_remove(struct pci_dev *pdev) sp_free_irqs(sp); } -#ifdef CONFIG_PM -static int sp_pci_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused sp_pci_suspend(struct device *dev) { - struct device *dev = &pdev->dev; struct sp_device *sp = dev_get_drvdata(dev); - return sp_suspend(sp, state); + return sp_suspend(sp); } -static int sp_pci_resume(struct pci_dev *pdev) +static int __maybe_unused sp_pci_resume(struct device *dev) { - struct device *dev = &pdev->dev; struct sp_device *sp = dev_get_drvdata(dev); return sp_resume(sp); } -#endif #ifdef CONFIG_CRYPTO_DEV_SP_PSP static const struct sev_vdata sevv1 = { @@ -365,15 +361,14 @@ static const struct pci_device_id sp_pci_table[] = { }; MODULE_DEVICE_TABLE(pci, sp_pci_table); +static SIMPLE_DEV_PM_OPS(sp_pci_pm_ops, sp_pci_suspend, sp_pci_resume); + static struct pci_driver sp_pci_driver = { .name = "ccp", .id_table = sp_pci_table, .probe = sp_pci_probe, .remove = sp_pci_remove, -#ifdef CONFIG_PM - .suspend = sp_pci_suspend, - .resume = sp_pci_resume, -#endif + .driver.pm = &sp_pci_pm_ops, }; int sp_pci_init(void) diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c index 831aac1393a2..9dba52fbee99 100644 --- a/drivers/crypto/ccp/sp-platform.c +++ b/drivers/crypto/ccp/sp-platform.c @@ -207,7 +207,7 @@ static int sp_platform_suspend(struct platform_device *pdev, struct device *dev = &pdev->dev; struct sp_device *sp = dev_get_drvdata(dev); - return sp_suspend(sp, state); + return sp_suspend(sp); } static int sp_platform_resume(struct platform_device *pdev) diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c index 872ea3ff1c6b..076669dc1035 100644 --- a/drivers/crypto/ccree/cc_cipher.c +++ b/drivers/crypto/ccree/cc_cipher.c @@ -45,7 +45,6 @@ enum cc_key_type { struct cc_cipher_ctx { struct cc_drvdata *drvdata; int keylen; - int key_round_number; int cipher_mode; int flow_mode; unsigned int flags; @@ -56,6 +55,8 @@ struct cc_cipher_ctx { struct cc_cpp_key_info cpp; }; struct crypto_shash *shash_tfm; + struct crypto_skcipher *fallback_tfm; + bool fallback_on; }; static void cc_cipher_complete(struct device *dev, void *cc_req, int err); @@ -75,7 +76,6 @@ static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 
size) case CC_AES_128_BIT_KEY_SIZE: case CC_AES_192_BIT_KEY_SIZE: if (ctx_p->cipher_mode != DRV_CIPHER_XTS && - ctx_p->cipher_mode != DRV_CIPHER_ESSIV && ctx_p->cipher_mode != DRV_CIPHER_BITLOCKER) return 0; break; @@ -159,22 +159,49 @@ static int cc_cipher_init(struct crypto_tfm *tfm) skcipher_alg.base); struct device *dev = drvdata_to_dev(cc_alg->drvdata); unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize; - int rc = 0; + unsigned int fallback_req_size = 0; dev_dbg(dev, "Initializing context @%p for %s\n", ctx_p, crypto_tfm_alg_name(tfm)); - crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm), - sizeof(struct cipher_req_ctx)); - ctx_p->cipher_mode = cc_alg->cipher_mode; ctx_p->flow_mode = cc_alg->flow_mode; ctx_p->drvdata = cc_alg->drvdata; + if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) { + const char *name = crypto_tfm_alg_name(tfm); + + /* Alloc hash tfm for essiv */ + ctx_p->shash_tfm = crypto_alloc_shash("sha256", 0, 0); + if (IS_ERR(ctx_p->shash_tfm)) { + dev_err(dev, "Error allocating hash tfm for ESSIV.\n"); + return PTR_ERR(ctx_p->shash_tfm); + } + max_key_buf_size <<= 1; + + /* Alloc fallback tfm for essiv when key size != 256 bit */ + ctx_p->fallback_tfm = + crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC); + + if (IS_ERR(ctx_p->fallback_tfm)) { + /* Note we're still allowing registration with no fallback since it's + * better to have most modes supported than none at all. + */ + dev_warn(dev, "Error allocating fallback algo %s. Some modes may be available.\n", + name); + ctx_p->fallback_tfm = NULL; + } else { + fallback_req_size = crypto_skcipher_reqsize(ctx_p->fallback_tfm); + } + } + + crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm), + sizeof(struct cipher_req_ctx) + fallback_req_size); + /* Allocate key buffer, cache line aligned */ - ctx_p->user.key = kmalloc(max_key_buf_size, GFP_KERNEL); + ctx_p->user.key = kzalloc(max_key_buf_size, GFP_KERNEL); if (!ctx_p->user.key) - return -ENOMEM; + goto free_fallback; dev_dbg(dev, "Allocated key buffer in context.
key=@%p\n", ctx_p->user.key); @@ -186,21 +213,20 @@ static int cc_cipher_init(struct crypto_tfm *tfm) if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) { dev_err(dev, "Mapping Key %u B at va=%pK for DMA failed\n", max_key_buf_size, ctx_p->user.key); - return -ENOMEM; + goto free_key; } dev_dbg(dev, "Mapped key %u B at va=%pK to dma=%pad\n", max_key_buf_size, ctx_p->user.key, &ctx_p->user.key_dma_addr); - if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) { - /* Alloc hash tfm for essiv */ - ctx_p->shash_tfm = crypto_alloc_shash("sha256-generic", 0, 0); - if (IS_ERR(ctx_p->shash_tfm)) { - dev_err(dev, "Error allocating hash tfm for ESSIV.\n"); - return PTR_ERR(ctx_p->shash_tfm); - } - } + return 0; - return rc; +free_key: + kfree(ctx_p->user.key); +free_fallback: + crypto_free_skcipher(ctx_p->fallback_tfm); + crypto_free_shash(ctx_p->shash_tfm); + + return -ENOMEM; } static void cc_cipher_exit(struct crypto_tfm *tfm) @@ -220,6 +246,8 @@ static void cc_cipher_exit(struct crypto_tfm *tfm) /* Free hash tfm for essiv */ crypto_free_shash(ctx_p->shash_tfm); ctx_p->shash_tfm = NULL; + crypto_free_skcipher(ctx_p->fallback_tfm); + ctx_p->fallback_tfm = NULL; } /* Unmap key buffer */ @@ -303,6 +331,7 @@ static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key, } ctx_p->keylen = keylen; + ctx_p->fallback_on = false; switch (cc_slot_to_key_type(hki.hw_key1)) { case CC_HW_PROTECTED_KEY: @@ -388,10 +417,33 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key, /* STAT_PHASE_0: Init and sanity checks */ if (validate_keys_sizes(ctx_p, keylen)) { - dev_dbg(dev, "Unsupported key size %d.\n", keylen); + dev_dbg(dev, "Invalid key size %d.\n", keylen); return -EINVAL; } + if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) { + + /* We only support 256 bit ESSIV-CBC-AES keys */ + if (keylen != AES_KEYSIZE_256) { + unsigned int flags = crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_MASK; + + if (likely(ctx_p->fallback_tfm)) { + ctx_p->fallback_on = true; + crypto_skcipher_clear_flags(ctx_p->fallback_tfm, + CRYPTO_TFM_REQ_MASK); + crypto_skcipher_clear_flags(ctx_p->fallback_tfm, flags); + return crypto_skcipher_setkey(ctx_p->fallback_tfm, key, keylen); + } + + dev_dbg(dev, "Unsupported key size %d and no fallback.\n", keylen); + return -EINVAL; + } + + /* Internal ESSIV key buffer is double sized */ + max_key_buf_size <<= 1; + } + + ctx_p->fallback_on = false; ctx_p->key_type = CC_UNPROTECTED_KEY; /* @@ -419,21 +471,20 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key, max_key_buf_size, DMA_TO_DEVICE); memcpy(ctx_p->user.key, key, keylen); - if (keylen == 24) - memset(ctx_p->user.key + 24, 0, CC_AES_KEY_SIZE_MAX - 24); if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) { /* sha256 for key2 - use sw implementation */ - int key_len = keylen >> 1; int err; err = crypto_shash_tfm_digest(ctx_p->shash_tfm, - ctx_p->user.key, key_len, - ctx_p->user.key + key_len); + ctx_p->user.key, keylen, + ctx_p->user.key + keylen); if (err) { dev_err(dev, "Failed to hash ESSIV key.\n"); return err; } + + keylen <<= 1; } dma_sync_single_for_device(dev, ctx_p->user.key_dma_addr, max_key_buf_size, DMA_TO_DEVICE); @@ -571,9 +622,10 @@ static void cc_setup_xex_state_desc(struct crypto_tfm *tfm, int flow_mode = ctx_p->flow_mode; int direction = req_ctx->gen_ctx.op_type; dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr; - unsigned int key_len = ctx_p->keylen; + unsigned int key_len = (ctx_p->keylen / 2); dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr; unsigned int du_size = nbytes; + 
unsigned int key_offset = key_len; struct cc_crypto_alg *cc_alg = container_of(tfm->__crt_alg, struct cc_crypto_alg, @@ -593,6 +645,10 @@ static void cc_setup_xex_state_desc(struct crypto_tfm *tfm, case DRV_CIPHER_XTS: case DRV_CIPHER_ESSIV: case DRV_CIPHER_BITLOCKER: + + if (cipher_mode == DRV_CIPHER_ESSIV) + key_len = SHA256_DIGEST_SIZE; + /* load XEX key */ hw_desc_init(&desc[*seq_size]); set_cipher_mode(&desc[*seq_size], cipher_mode); @@ -602,12 +658,12 @@ static void cc_setup_xex_state_desc(struct crypto_tfm *tfm, ctx_p->hw.key2_slot); } else { set_din_type(&desc[*seq_size], DMA_DLLI, - (key_dma_addr + (key_len / 2)), - (key_len / 2), NS_BIT); + (key_dma_addr + key_offset), + key_len, NS_BIT); } set_xex_data_unit_size(&desc[*seq_size], du_size); set_flow_mode(&desc[*seq_size], S_DIN_to_AES2); - set_key_size_aes(&desc[*seq_size], (key_len / 2)); + set_key_size_aes(&desc[*seq_size], key_len); set_setup_mode(&desc[*seq_size], SETUP_LOAD_XEX_KEY); (*seq_size)++; @@ -616,7 +672,7 @@ static void cc_setup_xex_state_desc(struct crypto_tfm *tfm, set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1); set_cipher_mode(&desc[*seq_size], cipher_mode); set_cipher_config0(&desc[*seq_size], direction); - set_key_size_aes(&desc[*seq_size], (key_len / 2)); + set_key_size_aes(&desc[*seq_size], key_len); set_flow_mode(&desc[*seq_size], flow_mode); set_din_type(&desc[*seq_size], DMA_DLLI, iv_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT); @@ -867,6 +923,17 @@ static int cc_cipher_process(struct skcipher_request *req, goto exit_process; } + if (ctx_p->fallback_on) { + struct skcipher_request *subreq = skcipher_request_ctx(req); + + *subreq = *req; + skcipher_request_set_tfm(subreq, ctx_p->fallback_tfm); + if (direction == DRV_CRYPTO_DIRECTION_ENCRYPT) + return crypto_skcipher_encrypt(subreq); + else + return crypto_skcipher_decrypt(subreq); + } + /* The IV we are handed may be allocted from the stack so * we must copy it to a DMAable buffer before use. 
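With the rename to essiv(cbc(aes),sha256) the ccree driver now accepts a single AES key and derives the ESSIV tweak key itself: setkey hashes the user key with SHA-256, stores the digest directly after it, and the XEX/ESSIV state setup loads those SHA256_DIGEST_SIZE bytes at key_offset as key2. A condensed sketch of the setkey side, assuming the key buffer was sized at twice max_keysize as in cc_cipher_init() above::

    /* ESSIV key layout in ctx_p->user.key: [ user key | SHA-256(user key) ] */
    memcpy(ctx_p->user.key, key, keylen);

    if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
            int err;

            err = crypto_shash_tfm_digest(ctx_p->shash_tfm,
                                          ctx_p->user.key, keylen,
                                          ctx_p->user.key + keylen);
            if (err)
                    return err;

            keylen <<= 1;   /* hardware sees key1 followed by key2 */
    }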
*/ @@ -1010,7 +1077,7 @@ static const struct cc_alg_template skcipher_algs[] = { .sec_func = true, }, { - .name = "essiv(paes)", + .name = "essiv(cbc(paes),sha256)", .driver_name = "essiv-paes-ccree", .blocksize = AES_BLOCK_SIZE, .template_skcipher = { @@ -1028,7 +1095,7 @@ static const struct cc_alg_template skcipher_algs[] = { .sec_func = true, }, { - .name = "essiv512(paes)", + .name = "essiv512(cbc(paes),sha256)", .driver_name = "essiv-paes-du512-ccree", .blocksize = AES_BLOCK_SIZE, .template_skcipher = { @@ -1047,7 +1114,7 @@ static const struct cc_alg_template skcipher_algs[] = { .sec_func = true, }, { - .name = "essiv4096(paes)", + .name = "essiv4096(cbc(paes),sha256)", .driver_name = "essiv-paes-du4096-ccree", .blocksize = AES_BLOCK_SIZE, .template_skcipher = { @@ -1269,15 +1336,15 @@ static const struct cc_alg_template skcipher_algs[] = { .std_body = CC_STD_NIST, }, { - .name = "essiv(aes)", + .name = "essiv(cbc(aes),sha256)", .driver_name = "essiv-aes-ccree", .blocksize = AES_BLOCK_SIZE, .template_skcipher = { .setkey = cc_cipher_setkey, .encrypt = cc_cipher_encrypt, .decrypt = cc_cipher_decrypt, - .min_keysize = AES_MIN_KEY_SIZE * 2, - .max_keysize = AES_MAX_KEY_SIZE * 2, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, }, .cipher_mode = DRV_CIPHER_ESSIV, @@ -1286,15 +1353,15 @@ static const struct cc_alg_template skcipher_algs[] = { .std_body = CC_STD_NIST, }, { - .name = "essiv512(aes)", + .name = "essiv512(cbc(aes),sha256)", .driver_name = "essiv-aes-du512-ccree", .blocksize = AES_BLOCK_SIZE, .template_skcipher = { .setkey = cc_cipher_setkey, .encrypt = cc_cipher_encrypt, .decrypt = cc_cipher_decrypt, - .min_keysize = AES_MIN_KEY_SIZE * 2, - .max_keysize = AES_MAX_KEY_SIZE * 2, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, }, .cipher_mode = DRV_CIPHER_ESSIV, @@ -1304,15 +1371,15 @@ static const struct cc_alg_template skcipher_algs[] = { .std_body = CC_STD_NIST, }, { - .name = "essiv4096(aes)", + .name = "essiv4096(cbc(aes),sha256)", .driver_name = "essiv-aes-du4096-ccree", .blocksize = AES_BLOCK_SIZE, .template_skcipher = { .setkey = cc_cipher_setkey, .encrypt = cc_cipher_encrypt, .decrypt = cc_cipher_decrypt, - .min_keysize = AES_MIN_KEY_SIZE * 2, - .max_keysize = AES_MAX_KEY_SIZE * 2, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, }, .cipher_mode = DRV_CIPHER_ESSIV, diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index 4c2553672b6f..13b908ea4873 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c @@ -690,26 +690,22 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src, return min(srclen, dstlen); } -static int chcr_cipher_fallback(struct crypto_sync_skcipher *cipher, - u32 flags, - struct scatterlist *src, - struct scatterlist *dst, - unsigned int nbytes, +static int chcr_cipher_fallback(struct crypto_skcipher *cipher, + struct skcipher_request *req, u8 *iv, unsigned short op_type) { + struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req); int err; - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, cipher); - - skcipher_request_set_sync_tfm(subreq, cipher); - skcipher_request_set_callback(subreq, flags, NULL, NULL); - skcipher_request_set_crypt(subreq, src, dst, - nbytes, iv); + skcipher_request_set_tfm(&reqctx->fallback_req, cipher); + skcipher_request_set_callback(&reqctx->fallback_req, req->base.flags, + req->base.complete, req->base.data); + 
skcipher_request_set_crypt(&reqctx->fallback_req, req->src, req->dst, + req->cryptlen, iv); - err = op_type ? crypto_skcipher_decrypt(subreq) : - crypto_skcipher_encrypt(subreq); - skcipher_request_zero(subreq); + err = op_type ? crypto_skcipher_decrypt(&reqctx->fallback_req) : + crypto_skcipher_encrypt(&reqctx->fallback_req); return err; @@ -924,11 +920,11 @@ static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher, { struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher)); - crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher, + crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK); - crypto_sync_skcipher_set_flags(ablkctx->sw_cipher, + crypto_skcipher_set_flags(ablkctx->sw_cipher, cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK); - return crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen); + return crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen); } static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher, @@ -1206,13 +1202,8 @@ static int chcr_handle_cipher_resp(struct skcipher_request *req, req); memcpy(req->iv, reqctx->init_iv, IV); atomic_inc(&adap->chcr_stats.fallback); - err = chcr_cipher_fallback(ablkctx->sw_cipher, - req->base.flags, - req->src, - req->dst, - req->cryptlen, - req->iv, - reqctx->op); + err = chcr_cipher_fallback(ablkctx->sw_cipher, req, req->iv, + reqctx->op); goto complete; } @@ -1224,7 +1215,7 @@ static int chcr_handle_cipher_resp(struct skcipher_request *req, wrparam.bytes = bytes; skb = create_cipher_wr(&wrparam); if (IS_ERR(skb)) { - pr_err("chcr : %s : Failed to form WR. No memory\n", __func__); + pr_err("%s : Failed to form WR. No memory\n", __func__); err = PTR_ERR(skb); goto unmap; } @@ -1341,11 +1332,7 @@ static int process_cipher(struct skcipher_request *req, chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req); fallback: atomic_inc(&adap->chcr_stats.fallback); - err = chcr_cipher_fallback(ablkctx->sw_cipher, - req->base.flags, - req->src, - req->dst, - req->cryptlen, + err = chcr_cipher_fallback(ablkctx->sw_cipher, req, subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 ? 
reqctx->iv : req->iv, @@ -1486,14 +1473,15 @@ static int chcr_init_tfm(struct crypto_skcipher *tfm) struct chcr_context *ctx = crypto_skcipher_ctx(tfm); struct ablk_ctx *ablkctx = ABLK_CTX(ctx); - ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->base.cra_name, 0, + ablkctx->sw_cipher = crypto_alloc_skcipher(alg->base.cra_name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(ablkctx->sw_cipher)) { pr_err("failed to allocate fallback for %s\n", alg->base.cra_name); return PTR_ERR(ablkctx->sw_cipher); } init_completion(&ctx->cbc_aes_aio_done); - crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx)); + crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) + + crypto_skcipher_reqsize(ablkctx->sw_cipher)); return chcr_device_init(ctx); } @@ -1507,13 +1495,14 @@ static int chcr_rfc3686_init(struct crypto_skcipher *tfm) /*RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes)) * cannot be used as fallback in chcr_handle_cipher_response */ - ablkctx->sw_cipher = crypto_alloc_sync_skcipher("ctr(aes)", 0, + ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(ablkctx->sw_cipher)) { pr_err("failed to allocate fallback for %s\n", alg->base.cra_name); return PTR_ERR(ablkctx->sw_cipher); } - crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx)); + crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) + + crypto_skcipher_reqsize(ablkctx->sw_cipher)); return chcr_device_init(ctx); } @@ -1523,7 +1512,7 @@ static void chcr_exit_tfm(struct crypto_skcipher *tfm) struct chcr_context *ctx = crypto_skcipher_ctx(tfm); struct ablk_ctx *ablkctx = ABLK_CTX(ctx); - crypto_free_sync_skcipher(ablkctx->sw_cipher); + crypto_free_skcipher(ablkctx->sw_cipher); } static int get_alg_config(struct algo_param *params, @@ -1556,7 +1545,7 @@ static int get_alg_config(struct algo_param *params, params->result_size = SHA512_DIGEST_SIZE; break; default: - pr_err("chcr : ERROR, unsupported digest size\n"); + pr_err("ERROR, unsupported digest size\n"); return -EINVAL; } return 0; @@ -3571,7 +3560,7 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key, goto out; if (get_alg_config(¶m, max_authsize)) { - pr_err("chcr : Unsupported digest size\n"); + pr_err("Unsupported digest size\n"); goto out; } subtype = get_aead_subtype(authenc); @@ -3590,7 +3579,7 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key, } else if (keys.enckeylen == AES_KEYSIZE_256) { ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; } else { - pr_err("chcr : Unsupported cipher key\n"); + pr_err("Unsupported cipher key\n"); goto out; } @@ -3608,10 +3597,8 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key, } base_hash = chcr_alloc_shash(max_authsize); if (IS_ERR(base_hash)) { - pr_err("chcr : Base driver cannot be loaded\n"); - aeadctx->enckey_len = 0; - memzero_explicit(&keys, sizeof(keys)); - return -EINVAL; + pr_err("Base driver cannot be loaded\n"); + goto out; } { SHASH_DESC_ON_STACK(shash, base_hash); @@ -3626,7 +3613,7 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key, keys.authkeylen, o_ptr); if (err) { - pr_err("chcr : Base driver cannot be loaded\n"); + pr_err("Base driver cannot be loaded\n"); goto out; } keys.authkeylen = max_authsize; @@ -3711,7 +3698,7 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc, } else if (keys.enckeylen == AES_KEYSIZE_256) { ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; } else { - pr_err("chcr : Unsupported 
cipher key %d\n", keys.enckeylen); + pr_err("Unsupported cipher key %d\n", keys.enckeylen); goto out; } memcpy(aeadctx->key, keys.enckey, keys.enckeylen); @@ -3747,7 +3734,7 @@ static int chcr_aead_op(struct aead_request *req, cdev = a_ctx(tfm)->dev; if (!cdev) { - pr_err("chcr : %s : No crypto device.\n", __func__); + pr_err("%s : No crypto device.\n", __func__); return -ENXIO; } @@ -4445,6 +4432,7 @@ static int chcr_register_alg(void) driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE; driver_algs[i].alg.skcipher.base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_NEED_FALLBACK; driver_algs[i].alg.skcipher.base.cra_ctxsize = sizeof(struct chcr_context) + @@ -4456,7 +4444,8 @@ static int chcr_register_alg(void) break; case CRYPTO_ALG_TYPE_AEAD: driver_algs[i].alg.aead.base.cra_flags = - CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK; + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ALLOCATES_MEMORY; driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt; driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt; driver_algs[i].alg.aead.init = chcr_aead_cra_init; @@ -4476,7 +4465,8 @@ static int chcr_register_alg(void) a_hash->halg.statesize = SZ_AHASH_REQ_CTX; a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY; a_hash->halg.base.cra_module = THIS_MODULE; - a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC; + a_hash->halg.base.cra_flags = + CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY; a_hash->halg.base.cra_alignmask = 0; a_hash->halg.base.cra_exit = NULL; @@ -4497,8 +4487,7 @@ static int chcr_register_alg(void) break; } if (err) { - pr_err("chcr : %s : Algorithm registration failed\n", - name); + pr_err("%s : Algorithm registration failed\n", name); goto register_err; } else { driver_algs[i].is_registered = 1; diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h index 31e427e273f8..e89f9e0094b4 100644 --- a/drivers/crypto/chelsio/chcr_crypto.h +++ b/drivers/crypto/chelsio/chcr_crypto.h @@ -171,7 +171,7 @@ static inline struct chcr_context *h_ctx(struct crypto_ahash *tfm) } struct ablk_ctx { - struct crypto_sync_skcipher *sw_cipher; + struct crypto_skcipher *sw_cipher; __be32 key_ctx_hdr; unsigned int enckey_len; unsigned char ciph_mode; @@ -305,6 +305,7 @@ struct chcr_skcipher_req_ctx { u8 init_iv[CHCR_MAX_CRYPTO_IV_LEN]; u16 txqidx; u16 rxqidx; + struct skcipher_request fallback_req; // keep at the end }; struct chcr_alg_template { diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c index f200fae6f7cb..54093115eb95 100644 --- a/drivers/crypto/chelsio/chtls/chtls_cm.c +++ b/drivers/crypto/chelsio/chtls/chtls_cm.c @@ -102,7 +102,7 @@ static struct net_device *chtls_find_netdev(struct chtls_dev *cdev, case PF_INET: if (likely(!inet_sk(sk)->inet_rcv_saddr)) return ndev; - ndev = ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr); + ndev = __ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr, false); break; #if IS_ENABLED(CONFIG_IPV6) case PF_INET6: diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c index e1401d9cc756..2e9acae1cba3 100644 --- a/drivers/crypto/chelsio/chtls/chtls_io.c +++ b/drivers/crypto/chelsio/chtls/chtls_io.c @@ -1052,14 +1052,15 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) &record_type); if (err) goto out_err; + + /* Avoid appending tls handshake, alert to tls data */ + if (skb) + tx_skb_finalize(skb); } recordsz = size; csk->tlshws.txleft = recordsz; 
csk->tlshws.type = record_type; - - if (skb) - ULP_SKB_CB(skb)->ulp.tls.type = record_type; } if (!skb || (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) || diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index a3ee127a70e3..b135c74fb619 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -12,7 +12,6 @@ #include <linux/topology.h> #include "hpre.h" -#define HPRE_VF_NUM 63 #define HPRE_QUEUE_NUM_V2 1024 #define HPRE_QM_ABNML_INT_MASK 0x100004 #define HPRE_CTRL_CNT_CLR_CE_BIT BIT(0) @@ -46,9 +45,9 @@ #define HPRE_CORE_IS_SCHD_OFFSET 0x90 #define HPRE_RAS_CE_ENB 0x301410 -#define HPRE_HAC_RAS_CE_ENABLE 0x3f +#define HPRE_HAC_RAS_CE_ENABLE 0x1 #define HPRE_RAS_NFE_ENB 0x301414 -#define HPRE_HAC_RAS_NFE_ENABLE 0x3fffc0 +#define HPRE_HAC_RAS_NFE_ENABLE 0x3ffffe #define HPRE_RAS_FE_ENB 0x301418 #define HPRE_HAC_RAS_FE_ENABLE 0 @@ -83,6 +82,10 @@ #define HPRE_CORE_ECC_2BIT_ERR BIT(1) #define HPRE_OOO_ECC_2BIT_ERR BIT(5) +#define HPRE_QM_BME_FLR BIT(7) +#define HPRE_QM_PM_FLR BIT(11) +#define HPRE_QM_SRIOV_FLR BIT(12) + #define HPRE_VIA_MSI_DSM 1 #define HPRE_SQE_MASK_OFFSET 8 #define HPRE_SQE_MASK_LEN 24 @@ -231,6 +234,22 @@ static int hpre_cfg_by_dsm(struct hisi_qm *qm) return 0; } +/* + * For Hi1620, we shoul disable FLR triggered by hardware (BME/PM/SRIOV). + * Or it may stay in D3 state when we bind and unbind hpre quickly, + * as it does FLR triggered by hardware. + */ +static void disable_flr_of_bme(struct hisi_qm *qm) +{ + u32 val; + + val = readl(HPRE_ADDR(qm, QM_PEH_AXUSER_CFG)); + val &= ~(HPRE_QM_BME_FLR | HPRE_QM_SRIOV_FLR); + val |= HPRE_QM_PM_FLR; + writel(val, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG)); + writel(PEH_AXUSER_CFG_ENABLE, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG_ENABLE)); +} + static int hpre_set_user_domain_and_cache(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; @@ -242,10 +261,6 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm) writel(HPRE_QM_USR_CFG_MASK, HPRE_ADDR(qm, QM_AWUSER_M_CFG_ENABLE)); writel_relaxed(HPRE_QM_AXI_CFG_MASK, HPRE_ADDR(qm, QM_AXI_M_CFG)); - /* disable FLR triggered by BME(bus master enable) */ - writel(PEH_AXUSER_CFG, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG)); - writel(PEH_AXUSER_CFG_ENABLE, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG_ENABLE)); - /* HPRE need more time, we close this interrupt */ val = readl_relaxed(HPRE_ADDR(qm, HPRE_QM_ABNML_INT_MASK)); val |= BIT(HPRE_TIMEOUT_ABNML_BIT); @@ -264,7 +279,7 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm) writel(HPRE_BD_USR_MASK, HPRE_ADDR(qm, HPRE_BD_AWUSR_CFG)); writel(0x1, HPRE_ADDR(qm, HPRE_RDCHN_INI_CFG)); ret = readl_relaxed_poll_timeout(HPRE_ADDR(qm, HPRE_RDCHN_INI_ST), val, - val & BIT(0), + val & BIT(0), HPRE_REG_RD_INTVRL_US, HPRE_REG_RD_TMOUT_US); if (ret) { @@ -296,6 +311,8 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm) if (ret) dev_err(dev, "acpi_evaluate_dsm err.\n"); + disable_flr_of_bme(qm); + return ret; } @@ -372,7 +389,6 @@ static int hpre_current_qm_write(struct hpre_debugfs_file *file, u32 val) u32 num_vfs = qm->vfs_num; u32 vfq_num, tmp; - if (val > num_vfs) return -EINVAL; @@ -449,7 +465,7 @@ static int hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val) } static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf, - size_t count, loff_t *pos) + size_t count, loff_t *pos) { struct hpre_debugfs_file *file = filp->private_data; char tbuf[HPRE_DBGFS_VAL_MAX_LEN]; @@ -477,7 +493,7 @@ static ssize_t 
hpre_ctrl_debug_read(struct file *filp, char __user *buf, } static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf, - size_t count, loff_t *pos) + size_t count, loff_t *pos) { struct hpre_debugfs_file *file = filp->private_data; char tbuf[HPRE_DBGFS_VAL_MAX_LEN]; @@ -548,13 +564,15 @@ static int hpre_debugfs_atomic64_get(void *data, u64 *val) static int hpre_debugfs_atomic64_set(void *data, u64 val) { struct hpre_dfx *dfx_item = data; - struct hpre_dfx *hpre_dfx = dfx_item - HPRE_OVERTIME_THRHLD; + struct hpre_dfx *hpre_dfx = NULL; - if (val) + if (dfx_item->type == HPRE_OVERTIME_THRHLD) { + hpre_dfx = dfx_item - HPRE_OVERTIME_THRHLD; + atomic64_set(&hpre_dfx[HPRE_OVER_THRHLD_CNT].value, 0); + } else if (val) { return -EINVAL; + } - if (dfx_item->type == HPRE_OVERTIME_THRHLD) - atomic64_set(&hpre_dfx[HPRE_OVER_THRHLD_CNT].value, 0); atomic64_set(&dfx_item->value, val); return 0; @@ -563,15 +581,17 @@ static int hpre_debugfs_atomic64_set(void *data, u64 val) DEFINE_DEBUGFS_ATTRIBUTE(hpre_atomic64_ops, hpre_debugfs_atomic64_get, hpre_debugfs_atomic64_set, "%llu\n"); -static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir, +static int hpre_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir, enum hpre_ctrl_dbgfs_file type, int indx) { + struct hpre *hpre = container_of(qm, struct hpre, qm); + struct hpre_debug *dbg = &hpre->debug; struct dentry *file_dir; if (dir) file_dir = dir; else - file_dir = dbg->debug_root; + file_dir = qm->debug.debug_root; if (type >= HPRE_DEBUG_FILE_NUM) return -EINVAL; @@ -586,10 +606,8 @@ static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir, return 0; } -static int hpre_pf_comm_regs_debugfs_init(struct hpre_debug *debug) +static int hpre_pf_comm_regs_debugfs_init(struct hisi_qm *qm) { - struct hpre *hpre = container_of(debug, struct hpre, debug); - struct hisi_qm *qm = &hpre->qm; struct device *dev = &qm->pdev->dev; struct debugfs_regset32 *regset; @@ -601,14 +619,12 @@ static int hpre_pf_comm_regs_debugfs_init(struct hpre_debug *debug) regset->nregs = ARRAY_SIZE(hpre_com_dfx_regs); regset->base = qm->io_base; - debugfs_create_regset32("regs", 0444, debug->debug_root, regset); + debugfs_create_regset32("regs", 0444, qm->debug.debug_root, regset); return 0; } -static int hpre_cluster_debugfs_init(struct hpre_debug *debug) +static int hpre_cluster_debugfs_init(struct hisi_qm *qm) { - struct hpre *hpre = container_of(debug, struct hpre, debug); - struct hisi_qm *qm = &hpre->qm; struct device *dev = &qm->pdev->dev; char buf[HPRE_DBGFS_VAL_MAX_LEN]; struct debugfs_regset32 *regset; @@ -619,7 +635,7 @@ static int hpre_cluster_debugfs_init(struct hpre_debug *debug) ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i); if (ret < 0) return -EINVAL; - tmp_d = debugfs_create_dir(buf, debug->debug_root); + tmp_d = debugfs_create_dir(buf, qm->debug.debug_root); regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL); if (!regset) @@ -630,7 +646,7 @@ static int hpre_cluster_debugfs_init(struct hpre_debug *debug) regset->base = qm->io_base + hpre_cluster_offsets[i]; debugfs_create_regset32("regs", 0444, tmp_d, regset); - ret = hpre_create_debugfs_file(debug, tmp_d, HPRE_CLUSTER_CTRL, + ret = hpre_create_debugfs_file(qm, tmp_d, HPRE_CLUSTER_CTRL, i + HPRE_CLUSTER_CTRL); if (ret) return ret; @@ -639,32 +655,31 @@ static int hpre_cluster_debugfs_init(struct hpre_debug *debug) return 0; } -static int hpre_ctrl_debug_init(struct hpre_debug *debug) +static int hpre_ctrl_debug_init(struct hisi_qm 
*qm) { int ret; - ret = hpre_create_debugfs_file(debug, NULL, HPRE_CURRENT_QM, + ret = hpre_create_debugfs_file(qm, NULL, HPRE_CURRENT_QM, HPRE_CURRENT_QM); if (ret) return ret; - ret = hpre_create_debugfs_file(debug, NULL, HPRE_CLEAR_ENABLE, + ret = hpre_create_debugfs_file(qm, NULL, HPRE_CLEAR_ENABLE, HPRE_CLEAR_ENABLE); if (ret) return ret; - ret = hpre_pf_comm_regs_debugfs_init(debug); + ret = hpre_pf_comm_regs_debugfs_init(qm); if (ret) return ret; - return hpre_cluster_debugfs_init(debug); + return hpre_cluster_debugfs_init(qm); } -static void hpre_dfx_debug_init(struct hpre_debug *debug) +static void hpre_dfx_debug_init(struct hisi_qm *qm) { - struct hpre *hpre = container_of(debug, struct hpre, debug); + struct hpre *hpre = container_of(qm, struct hpre, qm); struct hpre_dfx *dfx = hpre->debug.dfx; - struct hisi_qm *qm = &hpre->qm; struct dentry *parent; int i; @@ -676,30 +691,27 @@ static void hpre_dfx_debug_init(struct hpre_debug *debug) } } -static int hpre_debugfs_init(struct hpre *hpre) +static int hpre_debugfs_init(struct hisi_qm *qm) { - struct hisi_qm *qm = &hpre->qm; struct device *dev = &qm->pdev->dev; - struct dentry *dir; int ret; - dir = debugfs_create_dir(dev_name(dev), hpre_debugfs_root); - qm->debug.debug_root = dir; + qm->debug.debug_root = debugfs_create_dir(dev_name(dev), + hpre_debugfs_root); + qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET; qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN; - ret = hisi_qm_debug_init(qm); if (ret) goto failed_to_create; if (qm->pdev->device == HPRE_PCI_DEVICE_ID) { - hpre->debug.debug_root = dir; - ret = hpre_ctrl_debug_init(&hpre->debug); + ret = hpre_ctrl_debug_init(qm); if (ret) goto failed_to_create; } - hpre_dfx_debug_init(&hpre->debug); + hpre_dfx_debug_init(qm); return 0; @@ -708,10 +720,8 @@ failed_to_create: return ret; } -static void hpre_debugfs_exit(struct hpre *hpre) +static void hpre_debugfs_exit(struct hisi_qm *qm) { - struct hisi_qm *qm = &hpre->qm; - debugfs_remove_recursive(qm->debug.debug_root); } @@ -732,6 +742,7 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) if (qm->fun_type == QM_HW_PF) { qm->qp_base = HPRE_PF_DEF_Q_BASE; qm->qp_num = pf_q_num; + qm->debug.curr_qm_qp_num = pf_q_num; qm->qm_list = &hpre_devices; } @@ -849,7 +860,7 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (ret) goto err_with_err_init; - ret = hpre_debugfs_init(hpre); + ret = hpre_debugfs_init(qm); if (ret) dev_warn(&pdev->dev, "init debugfs fail!\n"); @@ -874,6 +885,7 @@ err_with_crypto_register: err_with_qm_start: hisi_qm_del_from_list(qm, &hpre_devices); + hpre_debugfs_exit(qm); hisi_qm_stop(qm); err_with_err_init: @@ -905,7 +917,7 @@ static void hpre_remove(struct pci_dev *pdev) qm->debug.curr_qm_qp_num = 0; } - hpre_debugfs_exit(hpre); + hpre_debugfs_exit(qm); hisi_qm_stop(qm); hisi_qm_dev_err_uninit(qm); hisi_qm_uninit(qm); @@ -924,7 +936,8 @@ static struct pci_driver hpre_pci_driver = { .id_table = hpre_dev_ids, .probe = hpre_probe, .remove = hpre_remove, - .sriov_configure = hisi_qm_sriov_configure, + .sriov_configure = IS_ENABLED(CONFIG_PCI_IOV) ? 
+ hisi_qm_sriov_configure : NULL, .err_handler = &hpre_err_handler, }; diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 9bb263cec6c3..6527c53b073f 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -1064,19 +1064,10 @@ static ssize_t qm_cmd_read(struct file *filp, char __user *buffer, char buf[QM_DBG_READ_LEN]; int len; - if (*pos) - return 0; - - if (count < QM_DBG_READ_LEN) - return -ENOSPC; - - len = snprintf(buf, QM_DBG_READ_LEN, "%s\n", - "Please echo help to cmd to get help information"); + len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n", + "Please echo help to cmd to get help information"); - if (copy_to_user(buffer, buf, len)) - return -EFAULT; - - return (*pos = len); + return simple_read_from_buffer(buffer, count, pos, buf, len); } static void *qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size, @@ -1741,7 +1732,7 @@ void hisi_qm_release_qp(struct hisi_qp *qp) } EXPORT_SYMBOL_GPL(hisi_qm_release_qp); -static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid) +static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid) { struct hisi_qm *qm = qp->qm; struct device *dev = &qm->pdev->dev; @@ -1813,7 +1804,7 @@ static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg) struct hisi_qm *qm = qp->qm; struct device *dev = &qm->pdev->dev; int qp_id = qp->qp_id; - int pasid = arg; + u32 pasid = arg; int ret; if (!qm_qp_avail_state(qm, qp, QP_START)) @@ -2179,8 +2170,12 @@ static int qm_alloc_uacce(struct hisi_qm *qm) .flags = UACCE_DEV_SVA, .ops = &uacce_qm_ops, }; + int ret; - strncpy(interface.name, pdev->driver->name, sizeof(interface.name)); + ret = strscpy(interface.name, pdev->driver->name, + sizeof(interface.name)); + if (ret < 0) + return -ENAMETOOLONG; uacce = uacce_alloc(&pdev->dev, &interface); if (IS_ERR(uacce)) @@ -2691,24 +2686,12 @@ static ssize_t qm_status_read(struct file *filp, char __user *buffer, { struct hisi_qm *qm = filp->private_data; char buf[QM_DBG_READ_LEN]; - int val, cp_len, len; - - if (*pos) - return 0; - - if (count < QM_DBG_READ_LEN) - return -ENOSPC; + int val, len; val = atomic_read(&qm->status.flags); - len = snprintf(buf, QM_DBG_READ_LEN, "%s\n", qm_s[val]); - if (!len) - return -EFAULT; - - cp_len = copy_to_user(buffer, buf, len); - if (cp_len) - return -EFAULT; + len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n", qm_s[val]); - return (*pos = len); + return simple_read_from_buffer(buffer, count, pos, buf, len); } static const struct file_operations qm_status_fops = { diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h index 0a351de8d838..6c1d3c7d64ee 100644 --- a/drivers/crypto/hisilicon/qm.h +++ b/drivers/crypto/hisilicon/qm.h @@ -44,6 +44,7 @@ #define QM_AXI_M_CFG 0x1000ac #define AXI_M_CFG 0xffff #define QM_AXI_M_CFG_ENABLE 0x1000b0 +#define AM_CFG_SINGLE_PORT_MAX_TRANS 0x300014 #define AXI_M_CFG_ENABLE 0xffffffff #define QM_PEH_AXUSER_CFG 0x1000cc #define QM_PEH_AXUSER_CFG_ENABLE 0x1000d0 diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c index c27e7160d2df..8ca945ac297e 100644 --- a/drivers/crypto/hisilicon/sec/sec_algs.c +++ b/drivers/crypto/hisilicon/sec/sec_algs.c @@ -175,7 +175,8 @@ static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl, dma_addr_t *psec_sgl, struct scatterlist *sgl, int count, - struct sec_dev_info *info) + struct sec_dev_info *info, + gfp_t gfp) { struct sec_hw_sgl *sgl_current = NULL; struct sec_hw_sgl *sgl_next; @@ -190,7 +191,7 @@ static int 
sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl, sge_index = i % SEC_MAX_SGE_NUM; if (sge_index == 0) { sgl_next = dma_pool_zalloc(info->hw_sgl_pool, - GFP_KERNEL, &sgl_next_dma); + gfp, &sgl_next_dma); if (!sgl_next) { ret = -ENOMEM; goto err_free_hw_sgls; @@ -545,14 +546,14 @@ void sec_alg_callback(struct sec_bd_info *resp, void *shadow) } static int sec_alg_alloc_and_calc_split_sizes(int length, size_t **split_sizes, - int *steps) + int *steps, gfp_t gfp) { size_t *sizes; int i; /* Split into suitable sized blocks */ *steps = roundup(length, SEC_REQ_LIMIT) / SEC_REQ_LIMIT; - sizes = kcalloc(*steps, sizeof(*sizes), GFP_KERNEL); + sizes = kcalloc(*steps, sizeof(*sizes), gfp); if (!sizes) return -ENOMEM; @@ -568,7 +569,7 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes, int steps, struct scatterlist ***splits, int **splits_nents, int sgl_len_in, - struct device *dev) + struct device *dev, gfp_t gfp) { int ret, count; @@ -576,12 +577,12 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes, if (!count) return -EINVAL; - *splits = kcalloc(steps, sizeof(struct scatterlist *), GFP_KERNEL); + *splits = kcalloc(steps, sizeof(struct scatterlist *), gfp); if (!*splits) { ret = -ENOMEM; goto err_unmap_sg; } - *splits_nents = kcalloc(steps, sizeof(int), GFP_KERNEL); + *splits_nents = kcalloc(steps, sizeof(int), gfp); if (!*splits_nents) { ret = -ENOMEM; goto err_free_splits; @@ -589,7 +590,7 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes, /* output the scatter list before and after this */ ret = sg_split(sgl, count, 0, steps, split_sizes, - *splits, *splits_nents, GFP_KERNEL); + *splits, *splits_nents, gfp); if (ret) { ret = -ENOMEM; goto err_free_splits_nents; @@ -630,13 +631,13 @@ static struct sec_request_el int el_size, bool different_dest, struct scatterlist *sgl_in, int n_ents_in, struct scatterlist *sgl_out, int n_ents_out, - struct sec_dev_info *info) + struct sec_dev_info *info, gfp_t gfp) { struct sec_request_el *el; struct sec_bd_info *req; int ret; - el = kzalloc(sizeof(*el), GFP_KERNEL); + el = kzalloc(sizeof(*el), gfp); if (!el) return ERR_PTR(-ENOMEM); el->el_length = el_size; @@ -668,7 +669,7 @@ static struct sec_request_el el->sgl_in = sgl_in; ret = sec_alloc_and_fill_hw_sgl(&el->in, &el->dma_in, el->sgl_in, - n_ents_in, info); + n_ents_in, info, gfp); if (ret) goto err_free_el; @@ -679,7 +680,7 @@ static struct sec_request_el el->sgl_out = sgl_out; ret = sec_alloc_and_fill_hw_sgl(&el->out, &el->dma_out, el->sgl_out, - n_ents_out, info); + n_ents_out, info, gfp); if (ret) goto err_free_hw_sgl_in; @@ -720,6 +721,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, int *splits_out_nents = NULL; struct sec_request_el *el, *temp; bool split = skreq->src != skreq->dst; + gfp_t gfp = skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? 
GFP_KERNEL : GFP_ATOMIC; mutex_init(&sec_req->lock); sec_req->req_base = &skreq->base; @@ -728,13 +730,13 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, sec_req->len_in = sg_nents(skreq->src); ret = sec_alg_alloc_and_calc_split_sizes(skreq->cryptlen, &split_sizes, - &steps); + &steps, gfp); if (ret) return ret; sec_req->num_elements = steps; ret = sec_map_and_split_sg(skreq->src, split_sizes, steps, &splits_in, &splits_in_nents, sec_req->len_in, - info->dev); + info->dev, gfp); if (ret) goto err_free_split_sizes; @@ -742,7 +744,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, sec_req->len_out = sg_nents(skreq->dst); ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps, &splits_out, &splits_out_nents, - sec_req->len_out, info->dev); + sec_req->len_out, info->dev, gfp); if (ret) goto err_unmap_in_sg; } @@ -775,7 +777,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, splits_in[i], splits_in_nents[i], split ? splits_out[i] : NULL, split ? splits_out_nents[i] : 0, - info); + info, gfp); if (IS_ERR(el)) { ret = PTR_ERR(el); goto err_free_elements; @@ -932,7 +934,8 @@ static struct skcipher_alg sec_algs[] = { .cra_name = "ecb(aes)", .cra_driver_name = "hisi_sec_aes_ecb", .cra_priority = 4001, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx), .cra_alignmask = 0, @@ -951,7 +954,8 @@ static struct skcipher_alg sec_algs[] = { .cra_name = "cbc(aes)", .cra_driver_name = "hisi_sec_aes_cbc", .cra_priority = 4001, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx), .cra_alignmask = 0, @@ -970,7 +974,8 @@ static struct skcipher_alg sec_algs[] = { .cra_name = "ctr(aes)", .cra_driver_name = "hisi_sec_aes_ctr", .cra_priority = 4001, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx), .cra_alignmask = 0, @@ -989,7 +994,8 @@ static struct skcipher_alg sec_algs[] = { .cra_name = "xts(aes)", .cra_driver_name = "hisi_sec_aes_xts", .cra_priority = 4001, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx), .cra_alignmask = 0, @@ -1009,7 +1015,8 @@ static struct skcipher_alg sec_algs[] = { .cra_name = "ecb(des)", .cra_driver_name = "hisi_sec_des_ecb", .cra_priority = 4001, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx), .cra_alignmask = 0, @@ -1028,7 +1035,8 @@ static struct skcipher_alg sec_algs[] = { .cra_name = "cbc(des)", .cra_driver_name = "hisi_sec_des_cbc", .cra_priority = 4001, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx), .cra_alignmask = 0, @@ -1047,7 +1055,8 @@ static struct skcipher_alg sec_algs[] = { .cra_name = "cbc(des3_ede)", .cra_driver_name = "hisi_sec_3des_cbc", .cra_priority = 4001, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx), 
.cra_alignmask = 0, @@ -1066,7 +1075,8 @@ static struct skcipher_alg sec_algs[] = { .cra_name = "ecb(des3_ede)", .cra_driver_name = "hisi_sec_3des_ecb", .cra_priority = 4001, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx), .cra_alignmask = 0, diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h index 7b64aca704d6..037762b531e2 100644 --- a/drivers/crypto/hisilicon/sec2/sec.h +++ b/drivers/crypto/hisilicon/sec2/sec.h @@ -46,9 +46,11 @@ struct sec_req { struct sec_cipher_req c_req; struct sec_aead_req aead_req; + struct list_head backlog_head; int err_type; int req_id; + int flag; /* Status of the SEC request */ bool fake_busy; @@ -104,6 +106,7 @@ struct sec_qp_ctx { struct sec_alg_res res[QM_Q_DEPTH]; struct sec_ctx *ctx; struct mutex req_lock; + struct list_head backlog; struct hisi_acc_sgl_pool *c_in_pool; struct hisi_acc_sgl_pool *c_out_pool; atomic_t pending_reqs; @@ -161,6 +164,7 @@ struct sec_dfx { atomic64_t send_cnt; atomic64_t recv_cnt; atomic64_t send_busy_cnt; + atomic64_t recv_busy_cnt; atomic64_t err_bd_cnt; atomic64_t invalid_req_cnt; atomic64_t done_flag_cnt; diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index 64614a9bdf21..497969ae8b23 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -166,6 +166,7 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp) req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)]; if (unlikely(!req)) { atomic64_inc(&dfx->invalid_req_cnt); + atomic_inc(&qp->qp_status.used); return; } req->err_type = bd->type2.error_type; @@ -198,21 +199,30 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req) struct sec_qp_ctx *qp_ctx = req->qp_ctx; int ret; + if (ctx->fake_req_limit <= + atomic_read(&qp_ctx->qp->qp_status.used) && + !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)) + return -EBUSY; + mutex_lock(&qp_ctx->req_lock); ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe); + + if (ctx->fake_req_limit <= + atomic_read(&qp_ctx->qp->qp_status.used) && !ret) { + list_add_tail(&req->backlog_head, &qp_ctx->backlog); + atomic64_inc(&ctx->sec->debug.dfx.send_cnt); + atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt); + mutex_unlock(&qp_ctx->req_lock); + return -EBUSY; + } mutex_unlock(&qp_ctx->req_lock); - atomic64_inc(&ctx->sec->debug.dfx.send_cnt); if (unlikely(ret == -EBUSY)) return -ENOBUFS; - if (!ret) { - if (req->fake_busy) { - atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt); - ret = -EBUSY; - } else { - ret = -EINPROGRESS; - } + if (likely(!ret)) { + ret = -EINPROGRESS; + atomic64_inc(&ctx->sec->debug.dfx.send_cnt); } return ret; @@ -373,8 +383,8 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx, qp_ctx->ctx = ctx; mutex_init(&qp_ctx->req_lock); - atomic_set(&qp_ctx->pending_reqs, 0); idr_init(&qp_ctx->req_idr); + INIT_LIST_HEAD(&qp_ctx->backlog); qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH, SEC_SGL_SGE_NR); @@ -1048,21 +1058,49 @@ static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type) dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n"); } +static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx, + struct sec_qp_ctx *qp_ctx) +{ + struct sec_req *backlog_req = NULL; + + mutex_lock(&qp_ctx->req_lock); + if (ctx->fake_req_limit >= + atomic_read(&qp_ctx->qp->qp_status.used) && + !list_empty(&qp_ctx->backlog)) { + 
backlog_req = list_first_entry(&qp_ctx->backlog, + typeof(*backlog_req), backlog_head); + list_del(&backlog_req->backlog_head); + } + mutex_unlock(&qp_ctx->req_lock); + + return backlog_req; +} + static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req, int err) { struct skcipher_request *sk_req = req->c_req.sk_req; struct sec_qp_ctx *qp_ctx = req->qp_ctx; + struct skcipher_request *backlog_sk_req; + struct sec_req *backlog_req; - atomic_dec(&qp_ctx->pending_reqs); sec_free_req_id(req); /* IV output at encrypto of CBC mode */ if (!err && ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt) sec_update_iv(req, SEC_SKCIPHER); - if (req->fake_busy) - sk_req->base.complete(&sk_req->base, -EINPROGRESS); + while (1) { + backlog_req = sec_back_req_clear(ctx, qp_ctx); + if (!backlog_req) + break; + + backlog_sk_req = backlog_req->c_req.sk_req; + backlog_sk_req->base.complete(&backlog_sk_req->base, + -EINPROGRESS); + atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt); + } + sk_req->base.complete(&sk_req->base, err); } @@ -1133,10 +1171,10 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err) struct sec_cipher_req *c_req = &req->c_req; size_t authsize = crypto_aead_authsize(tfm); struct sec_qp_ctx *qp_ctx = req->qp_ctx; + struct aead_request *backlog_aead_req; + struct sec_req *backlog_req; size_t sz; - atomic_dec(&qp_ctx->pending_reqs); - if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt) sec_update_iv(req, SEC_AEAD); @@ -1157,17 +1195,22 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err) sec_free_req_id(req); - if (req->fake_busy) - a_req->base.complete(&a_req->base, -EINPROGRESS); + while (1) { + backlog_req = sec_back_req_clear(c, qp_ctx); + if (!backlog_req) + break; + + backlog_aead_req = backlog_req->aead_req.aead_req; + backlog_aead_req->base.complete(&backlog_aead_req->base, + -EINPROGRESS); + atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt); + } a_req->base.complete(&a_req->base, err); } static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req) { - struct sec_qp_ctx *qp_ctx = req->qp_ctx; - - atomic_dec(&qp_ctx->pending_reqs); sec_free_req_id(req); sec_free_queue_id(ctx, req); } @@ -1187,11 +1230,6 @@ static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req) return req->req_id; } - if (ctx->fake_req_limit <= atomic_inc_return(&qp_ctx->pending_reqs)) - req->fake_busy = true; - else - req->fake_busy = false; - return 0; } @@ -1213,7 +1251,8 @@ static int sec_process(struct sec_ctx *ctx, struct sec_req *req) sec_update_iv(req, ctx->alg_type); ret = ctx->req_op->bd_send(ctx, req); - if (unlikely(ret != -EBUSY && ret != -EINPROGRESS)) { + if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) || + (ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) { dev_err_ratelimited(SEC_CTX_DEV(ctx), "send sec request failed!\n"); goto err_send_req; } @@ -1407,6 +1446,7 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt) if (!sk_req->cryptlen) return 0; + req->flag = sk_req->base.flags; req->c_req.sk_req = sk_req; req->c_req.encrypt = encrypt; req->ctx = ctx; @@ -1435,7 +1475,7 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req) .cra_name = sec_cra_name,\ .cra_driver_name = "hisi_sec_"sec_cra_name,\ .cra_priority = SEC_PRIORITY,\ - .cra_flags = CRYPTO_ALG_ASYNC,\ + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,\ .cra_blocksize = blk_size,\ .cra_ctxsize = sizeof(struct sec_ctx),\ .cra_module = THIS_MODULE,\ @@ -1530,6 +1570,7 
@@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt) struct sec_ctx *ctx = crypto_aead_ctx(tfm); int ret; + req->flag = a_req->base.flags; req->aead_req.aead_req = a_req; req->c_req.encrypt = encrypt; req->ctx = ctx; @@ -1558,7 +1599,7 @@ static int sec_aead_decrypt(struct aead_request *a_req) .cra_name = sec_cra_name,\ .cra_driver_name = "hisi_sec_"sec_cra_name,\ .cra_priority = SEC_PRIORITY,\ - .cra_flags = CRYPTO_ALG_ASYNC,\ + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,\ .cra_blocksize = blk_size,\ .cra_ctxsize = sizeof(struct sec_ctx),\ .cra_module = THIS_MODULE,\ diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index a4cb58b54b25..2297425486cb 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -22,17 +22,15 @@ #define SEC_PF_PCI_DEVICE_ID 0xa255 #define SEC_VF_PCI_DEVICE_ID 0xa256 -#define SEC_XTS_MIV_ENABLE_REG 0x301384 -#define SEC_XTS_MIV_ENABLE_MSK 0x7FFFFFFF -#define SEC_XTS_MIV_DISABLE_MSK 0xFFFFFFFF -#define SEC_BD_ERR_CHK_EN1 0xfffff7fd -#define SEC_BD_ERR_CHK_EN2 0xffffbfff +#define SEC_BD_ERR_CHK_EN0 0xEFFFFFFF +#define SEC_BD_ERR_CHK_EN1 0x7ffff7fd +#define SEC_BD_ERR_CHK_EN3 0xffffbfff #define SEC_SQE_SIZE 128 #define SEC_SQ_SIZE (SEC_SQE_SIZE * QM_Q_DEPTH) -#define SEC_PF_DEF_Q_NUM 64 +#define SEC_PF_DEF_Q_NUM 256 #define SEC_PF_DEF_Q_BASE 0 -#define SEC_CTX_Q_NUM_DEF 24 +#define SEC_CTX_Q_NUM_DEF 2 #define SEC_CTX_Q_NUM_MAX 32 #define SEC_CTRL_CNT_CLR_CE 0x301120 @@ -47,17 +45,18 @@ #define SEC_ECC_ADDR(err) ((err) >> 0) #define SEC_CORE_INT_DISABLE 0x0 #define SEC_CORE_INT_ENABLE 0x1ff +#define SEC_CORE_INT_CLEAR 0x1ff +#define SEC_SAA_ENABLE 0x17f -#define SEC_RAS_CE_REG 0x50 -#define SEC_RAS_FE_REG 0x54 -#define SEC_RAS_NFE_REG 0x58 +#define SEC_RAS_CE_REG 0x301050 +#define SEC_RAS_FE_REG 0x301054 +#define SEC_RAS_NFE_REG 0x301058 #define SEC_RAS_CE_ENB_MSK 0x88 #define SEC_RAS_FE_ENB_MSK 0x0 #define SEC_RAS_NFE_ENB_MSK 0x177 #define SEC_RAS_DISABLE 0x0 #define SEC_MEM_START_INIT_REG 0x0100 #define SEC_MEM_INIT_DONE_REG 0x0104 -#define SEC_QM_ABNORMAL_INT_MASK 0x100004 #define SEC_CONTROL_REG 0x0200 #define SEC_TRNG_EN_SHIFT 8 @@ -68,8 +67,10 @@ #define SEC_INTERFACE_USER_CTRL0_REG 0x0220 #define SEC_INTERFACE_USER_CTRL1_REG 0x0224 +#define SEC_SAA_EN_REG 0x0270 +#define SEC_BD_ERR_CHK_EN_REG0 0x0380 #define SEC_BD_ERR_CHK_EN_REG1 0x0384 -#define SEC_BD_ERR_CHK_EN_REG2 0x038c +#define SEC_BD_ERR_CHK_EN_REG3 0x038c #define SEC_USER0_SMMU_NORMAL (BIT(23) | BIT(15)) #define SEC_USER1_SMMU_NORMAL (BIT(31) | BIT(23) | BIT(15) | BIT(7)) @@ -77,8 +78,8 @@ #define SEC_DELAY_10_US 10 #define SEC_POLL_TIMEOUT_US 1000 -#define SEC_VF_CNT_MASK 0xffffffc0 #define SEC_DBGFS_VAL_MAX_LEN 20 +#define SEC_SINGLE_PORT_MAX_TRANS 0x2060 #define SEC_SQE_MASK_OFFSET 64 #define SEC_SQE_MASK_LEN 48 @@ -122,6 +123,7 @@ static struct sec_dfx_item sec_dfx_labels[] = { {"send_cnt", offsetof(struct sec_dfx, send_cnt)}, {"recv_cnt", offsetof(struct sec_dfx, recv_cnt)}, {"send_busy_cnt", offsetof(struct sec_dfx, send_busy_cnt)}, + {"recv_busy_cnt", offsetof(struct sec_dfx, recv_busy_cnt)}, {"err_bd_cnt", offsetof(struct sec_dfx, err_bd_cnt)}, {"invalid_req_cnt", offsetof(struct sec_dfx, invalid_req_cnt)}, {"done_flag_cnt", offsetof(struct sec_dfx, done_flag_cnt)}, @@ -191,7 +193,7 @@ static const struct kernel_param_ops sec_ctx_q_num_ops = { }; static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF; module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444); 
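The sec_crypto.c hunks above replace the old fake_busy accounting with a real backlog list: once the queue is saturated, a request that set CRYPTO_TFM_REQ_MAY_BACKLOG is parked on qp_ctx->backlog and -EBUSY is returned, and when it is later pulled off the backlog its completion callback is first invoked with -EINPROGRESS; requests without the flag are refused rather than queued. A caller consumes exactly that contract through the generic wait helpers. This is ordinary crypto API usage, not code from this patch, and example_encrypt_sync() is an illustrative name:

    #include <linux/crypto.h>
    #include <crypto/skcipher.h>

    static int example_encrypt_sync(struct skcipher_request *req)
    {
            DECLARE_CRYPTO_WAIT(wait);

            /* Let the driver sleep and park the request on its backlog. */
            skcipher_request_set_callback(req,
                                          CRYPTO_TFM_REQ_MAY_SLEEP |
                                          CRYPTO_TFM_REQ_MAY_BACKLOG,
                                          crypto_req_done, &wait);

            /* crypto_wait_req() waits out both -EINPROGRESS and -EBUSY. */
            return crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
    }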
-MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (24 default, 2, 4, ..., 32)"); +MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (2 default, 2, 4, ..., 32)"); static const struct kernel_param_ops vfs_num_ops = { .set = vfs_num_set, @@ -280,7 +282,7 @@ static int sec_engine_init(struct hisi_qm *qm) reg, reg & 0x1, SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US); if (ret) { - dev_err(&qm->pdev->dev, "fail to init sec mem\n"); + pci_err(qm->pdev, "fail to init sec mem\n"); return ret; } @@ -296,25 +298,25 @@ static int sec_engine_init(struct hisi_qm *qm) reg |= SEC_USER1_SMMU_NORMAL; writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG)); + writel(SEC_SINGLE_PORT_MAX_TRANS, + qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS); + + writel(SEC_SAA_ENABLE, SEC_ADDR(qm, SEC_SAA_EN_REG)); + + /* Enable sm4 extra mode, as ctr/ecb */ + writel_relaxed(SEC_BD_ERR_CHK_EN0, + SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG0)); + /* Enable sm4 xts mode multiple iv */ writel_relaxed(SEC_BD_ERR_CHK_EN1, SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG1)); - writel_relaxed(SEC_BD_ERR_CHK_EN2, - SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG2)); - - /* enable clock gate control */ - reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG)); - reg |= SEC_CLK_GATE_ENABLE; - writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG)); + writel_relaxed(SEC_BD_ERR_CHK_EN3, + SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG3)); /* config endian */ reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG)); reg |= sec_get_endian(qm); writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG)); - /* Enable sm4 xts mode multiple iv */ - writel_relaxed(SEC_XTS_MIV_ENABLE_MSK, - qm->io_base + SEC_XTS_MIV_ENABLE_REG); - return 0; } @@ -346,10 +348,17 @@ static int sec_set_user_domain_and_cache(struct hisi_qm *qm) /* sec_debug_regs_clear() - clear the sec debug regs */ static void sec_debug_regs_clear(struct hisi_qm *qm) { + int i; + /* clear current_qm */ writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF); writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF); + /* clear sec dfx regs */ + writel(0x1, qm->io_base + SEC_CTRL_CNT_CLR_CE); + for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++) + readl(qm->io_base + sec_dfx_regs[i].offset); + /* clear rdclr_en */ writel(0x0, qm->io_base + SEC_CTRL_CNT_CLR_CE); @@ -362,14 +371,14 @@ static void sec_hw_error_enable(struct hisi_qm *qm) if (qm->ver == QM_HW_V1) { writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK); - dev_info(&qm->pdev->dev, "V1 not support hw error handle\n"); + pci_info(qm->pdev, "V1 not support hw error handle\n"); return; } - val = readl(qm->io_base + SEC_CONTROL_REG); + val = readl(SEC_ADDR(qm, SEC_CONTROL_REG)); /* clear SEC hw error source if having */ - writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_SOURCE); + writel(SEC_CORE_INT_CLEAR, qm->io_base + SEC_CORE_INT_SOURCE); /* enable SEC hw error interrupts */ writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK); @@ -382,14 +391,14 @@ static void sec_hw_error_enable(struct hisi_qm *qm) /* enable SEC block master OOO when m-bit error occur */ val = val | SEC_AXI_SHUTDOWN_ENABLE; - writel(val, qm->io_base + SEC_CONTROL_REG); + writel(val, SEC_ADDR(qm, SEC_CONTROL_REG)); } static void sec_hw_error_disable(struct hisi_qm *qm) { u32 val; - val = readl(qm->io_base + SEC_CONTROL_REG); + val = readl(SEC_ADDR(qm, SEC_CONTROL_REG)); /* disable RAS int */ writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG); @@ -402,7 +411,7 @@ static void sec_hw_error_disable(struct hisi_qm *qm) /* disable SEC block master OOO when m-bit error occur */ val = val & SEC_AXI_SHUTDOWN_DISABLE; - writel(val, qm->io_base + 
SEC_CONTROL_REG); + writel(val, SEC_ADDR(qm, SEC_CONTROL_REG)); } static u32 sec_current_qm_read(struct sec_debug_file *file) @@ -577,20 +586,20 @@ static int sec_debugfs_atomic64_set(void *data, u64 val) DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get, sec_debugfs_atomic64_set, "%lld\n"); -static int sec_core_debug_init(struct sec_dev *sec) +static int sec_core_debug_init(struct hisi_qm *qm) { - struct hisi_qm *qm = &sec->qm; + struct sec_dev *sec = container_of(qm, struct sec_dev, qm); struct device *dev = &qm->pdev->dev; struct sec_dfx *dfx = &sec->debug.dfx; struct debugfs_regset32 *regset; struct dentry *tmp_d; int i; - tmp_d = debugfs_create_dir("sec_dfx", sec->qm.debug.debug_root); + tmp_d = debugfs_create_dir("sec_dfx", qm->debug.debug_root); regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL); if (!regset) - return -ENOENT; + return -ENOMEM; regset->regs = sec_dfx_regs; regset->nregs = ARRAY_SIZE(sec_dfx_regs); @@ -609,44 +618,44 @@ static int sec_core_debug_init(struct sec_dev *sec) return 0; } -static int sec_debug_init(struct sec_dev *sec) +static int sec_debug_init(struct hisi_qm *qm) { + struct sec_dev *sec = container_of(qm, struct sec_dev, qm); int i; - for (i = SEC_CURRENT_QM; i < SEC_DEBUG_FILE_NUM; i++) { - spin_lock_init(&sec->debug.files[i].lock); - sec->debug.files[i].index = i; - sec->debug.files[i].qm = &sec->qm; - - debugfs_create_file(sec_dbg_file_name[i], 0600, - sec->qm.debug.debug_root, - sec->debug.files + i, - &sec_dbg_fops); + if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) { + for (i = SEC_CURRENT_QM; i < SEC_DEBUG_FILE_NUM; i++) { + spin_lock_init(&sec->debug.files[i].lock); + sec->debug.files[i].index = i; + sec->debug.files[i].qm = qm; + + debugfs_create_file(sec_dbg_file_name[i], 0600, + qm->debug.debug_root, + sec->debug.files + i, + &sec_dbg_fops); + } } - return sec_core_debug_init(sec); + return sec_core_debug_init(qm); } -static int sec_debugfs_init(struct sec_dev *sec) +static int sec_debugfs_init(struct hisi_qm *qm) { - struct hisi_qm *qm = &sec->qm; struct device *dev = &qm->pdev->dev; int ret; qm->debug.debug_root = debugfs_create_dir(dev_name(dev), sec_debugfs_root); - qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET; qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN; ret = hisi_qm_debug_init(qm); if (ret) goto failed_to_create; - if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) { - ret = sec_debug_init(sec); - if (ret) - goto failed_to_create; - } + ret = sec_debug_init(qm); + if (ret) + goto failed_to_create; + return 0; @@ -656,9 +665,9 @@ failed_to_create: return ret; } -static void sec_debugfs_exit(struct sec_dev *sec) +static void sec_debugfs_exit(struct hisi_qm *qm) { - debugfs_remove_recursive(sec->qm.debug.debug_root); + debugfs_remove_recursive(qm->debug.debug_root); } static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts) @@ -677,8 +686,6 @@ static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts) SEC_CORE_SRAM_ECC_ERR_INFO); dev_err(dev, "multi ecc sram num=0x%x\n", SEC_ECC_NUM(err_val)); - dev_err(dev, "multi ecc sram addr=0x%x\n", - SEC_ECC_ADDR(err_val)); } } errs++; @@ -868,7 +875,7 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_probe_uninit; } - ret = sec_debugfs_init(sec); + ret = sec_debugfs_init(qm); if (ret) pci_warn(pdev, "Failed to init debugfs!\n"); @@ -893,7 +900,7 @@ err_crypto_unregister: err_remove_from_list: hisi_qm_del_from_list(qm, &sec_devices); - sec_debugfs_exit(sec); + sec_debugfs_exit(qm); hisi_qm_stop(qm); err_probe_uninit: @@ -917,7 +924,7 @@ 
static void sec_remove(struct pci_dev *pdev) if (qm->fun_type == QM_HW_PF && qm->vfs_num) hisi_qm_sriov_disable(pdev); - sec_debugfs_exit(sec); + sec_debugfs_exit(qm); (void)hisi_qm_stop(qm); @@ -987,5 +994,6 @@ module_exit(sec_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>"); MODULE_AUTHOR("Longfang Liu <liulongfang@huawei.com>"); +MODULE_AUTHOR("Kai Ye <yekai13@huawei.com>"); MODULE_AUTHOR("Wei Zhang <zhangwei375@huawei.com>"); MODULE_DESCRIPTION("Driver for HiSilicon SEC accelerator"); diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h index f3ed4c0e5493..4484be13812b 100644 --- a/drivers/crypto/hisilicon/zip/zip.h +++ b/drivers/crypto/hisilicon/zip/zip.h @@ -76,7 +76,7 @@ struct hisi_zip_sqe { u32 rsvd1[4]; }; -int zip_create_qps(struct hisi_qp **qps, int ctx_num); +int zip_create_qps(struct hisi_qp **qps, int ctx_num, int node); int hisi_zip_register_to_crypto(void); void hisi_zip_unregister_from_crypto(void); #endif diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c index c73707c2e539..01fd6a78111d 100644 --- a/drivers/crypto/hisilicon/zip/zip_crypto.c +++ b/drivers/crypto/hisilicon/zip/zip_crypto.c @@ -158,13 +158,13 @@ static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *ctx) hisi_qm_release_qp(ctx->qp); } -static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type) +static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int node) { struct hisi_qp *qps[HZIP_CTX_Q_NUM] = { NULL }; struct hisi_zip *hisi_zip; int ret, i, j; - ret = zip_create_qps(qps, HZIP_CTX_Q_NUM); + ret = zip_create_qps(qps, HZIP_CTX_Q_NUM, node); if (ret) { pr_err("Can not create zip qps!\n"); return -ENODEV; @@ -379,7 +379,7 @@ static int hisi_zip_acomp_init(struct crypto_acomp *tfm) struct hisi_zip_ctx *ctx = crypto_tfm_ctx(&tfm->base); int ret; - ret = hisi_zip_ctx_init(ctx, COMP_NAME_TO_TYPE(alg_name)); + ret = hisi_zip_ctx_init(ctx, COMP_NAME_TO_TYPE(alg_name), tfm->base.node); if (ret) return ret; diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 2229a21ae7c8..e2845b2c963d 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -234,9 +234,10 @@ static const struct pci_device_id hisi_zip_dev_ids[] = { }; MODULE_DEVICE_TABLE(pci, hisi_zip_dev_ids); -int zip_create_qps(struct hisi_qp **qps, int qp_num) +int zip_create_qps(struct hisi_qp **qps, int qp_num, int node) { - int node = cpu_to_node(smp_processor_id()); + if (node == NUMA_NO_NODE) + node = cpu_to_node(smp_processor_id()); return hisi_qm_alloc_qps_node(&zip_devices, qp_num, 0, node, qps); } diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c index 0e25fc3087f3..87226b7c2795 100644 --- a/drivers/crypto/img-hash.c +++ b/drivers/crypto/img-hash.c @@ -330,7 +330,7 @@ static int img_hash_write_via_dma(struct img_hash_dev *hdev) static int img_hash_dma_init(struct img_hash_dev *hdev) { struct dma_slave_config dma_conf; - int err = -EINVAL; + int err; hdev->dma_lch = dma_request_chan(hdev->dev, "tx"); if (IS_ERR(hdev->dma_lch)) { diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index 2cb53fbae841..fa7398e68858 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -1135,11 +1135,12 @@ static irqreturn_t safexcel_irq_ring_thread(int irq, void *data) static int safexcel_request_ring_irq(void *pdev, int 
irqid, int is_pci_dev, + int ring_id, irq_handler_t handler, irq_handler_t threaded_handler, struct safexcel_ring_irq_data *ring_irq_priv) { - int ret, irq; + int ret, irq, cpu; struct device *dev; if (IS_ENABLED(CONFIG_PCI) && is_pci_dev) { @@ -1177,6 +1178,10 @@ static int safexcel_request_ring_irq(void *pdev, int irqid, return ret; } + /* Set affinity */ + cpu = cpumask_local_spread(ring_id, NUMA_NO_NODE); + irq_set_affinity_hint(irq, get_cpu_mask(cpu)); + return irq; } @@ -1611,6 +1616,7 @@ static int safexcel_probe_generic(void *pdev, irq = safexcel_request_ring_irq(pdev, EIP197_IRQ_NUMBER(i, is_pci_dev), is_pci_dev, + i, safexcel_irq_ring, safexcel_irq_ring_thread, ring_irq); @@ -1619,6 +1625,7 @@ static int safexcel_probe_generic(void *pdev, return irq; } + priv->ring[i].irq = irq; priv->ring[i].work_data.priv = priv; priv->ring[i].work_data.ring = i; INIT_WORK(&priv->ring[i].work_data.work, @@ -1756,8 +1763,10 @@ static int safexcel_remove(struct platform_device *pdev) clk_disable_unprepare(priv->reg_clk); clk_disable_unprepare(priv->clk); - for (i = 0; i < priv->config.rings; i++) + for (i = 0; i < priv->config.rings; i++) { + irq_set_affinity_hint(priv->ring[i].irq, NULL); destroy_workqueue(priv->ring[i].workqueue); + } return 0; } diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index 94016c505abb..7c5fe382d272 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h @@ -707,6 +707,9 @@ struct safexcel_ring { */ struct crypto_async_request *req; struct crypto_async_request *backlog; + + /* irq of this ring */ + int irq; }; /* EIP integration context flags */ diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index 0c5e80c3f6e3..1ac3253b7903 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -1300,6 +1300,7 @@ struct safexcel_alg_template safexcel_alg_ecb_aes = { .cra_driver_name = "safexcel-ecb-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1337,6 +1338,7 @@ struct safexcel_alg_template safexcel_alg_cbc_aes = { .cra_driver_name = "safexcel-cbc-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1374,6 +1376,7 @@ struct safexcel_alg_template safexcel_alg_cfb_aes = { .cra_driver_name = "safexcel-cfb-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1411,6 +1414,7 @@ struct safexcel_alg_template safexcel_alg_ofb_aes = { .cra_driver_name = "safexcel-ofb-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1485,6 +1489,7 @@ struct safexcel_alg_template safexcel_alg_ctr_aes = { .cra_driver_name = "safexcel-ctr-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), 
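The safexcel.c hunks above record each ring's interrupt number and give it an affinity hint, so the per-ring IRQs are spread over CPUs with cpumask_local_spread() and the hint is removed again on driver removal. A minimal sketch of that pattern, with example_set_ring_affinity()/example_clear_ring_affinity() as illustrative wrappers around what the probe and remove paths do:

    #include <linux/interrupt.h>
    #include <linux/cpumask.h>
    #include <linux/numa.h>

    static void example_set_ring_affinity(int irq, int ring_id)
    {
            /* Pick the ring_id-th online CPU, with no NUMA node preference. */
            int cpu = cpumask_local_spread(ring_id, NUMA_NO_NODE);

            irq_set_affinity_hint(irq, get_cpu_mask(cpu));
    }

    static void example_clear_ring_affinity(int irq)
    {
            /* Drop the hint again when the ring and its IRQ go away. */
            irq_set_affinity_hint(irq, NULL);
    }

The marvell/cesa changes further down apply the same spreading to that driver's per-engine interrupts.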
@@ -1545,6 +1550,7 @@ struct safexcel_alg_template safexcel_alg_cbc_des = { .cra_driver_name = "safexcel-cbc-des", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1582,6 +1588,7 @@ struct safexcel_alg_template safexcel_alg_ecb_des = { .cra_driver_name = "safexcel-ecb-des", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1642,6 +1649,7 @@ struct safexcel_alg_template safexcel_alg_cbc_des3_ede = { .cra_driver_name = "safexcel-cbc-des3_ede", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1679,6 +1687,7 @@ struct safexcel_alg_template safexcel_alg_ecb_des3_ede = { .cra_driver_name = "safexcel-ecb-des3_ede", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1751,6 +1760,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes = { .cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1786,6 +1796,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes = { .cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1821,6 +1832,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = { .cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1856,6 +1868,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes = { .cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1891,6 +1904,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes = { .cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1927,6 +1941,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des3_ede = { .cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-des3_ede", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = 
sizeof(struct safexcel_cipher_ctx), @@ -1963,6 +1978,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_des3_ede = { .cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-des3_ede", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1999,6 +2015,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_des3_ede = { .cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-des3_ede", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2035,6 +2052,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_des3_ede = { .cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-des3_ede", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2071,6 +2089,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des3_ede = { .cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-des3_ede", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2107,6 +2126,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des = { .cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-des", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2143,6 +2163,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_des = { .cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-des", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2179,6 +2200,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_des = { .cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-des", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2215,6 +2237,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_des = { .cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-des", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2251,6 +2274,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des = { .cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-des", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2285,6 +2309,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_ctr_aes = { .cra_driver_name = 
"safexcel-authenc-hmac-sha1-ctr-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2319,6 +2344,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_ctr_aes = { .cra_driver_name = "safexcel-authenc-hmac-sha256-ctr-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2353,6 +2379,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_ctr_aes = { .cra_driver_name = "safexcel-authenc-hmac-sha224-ctr-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2387,6 +2414,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_ctr_aes = { .cra_driver_name = "safexcel-authenc-hmac-sha512-ctr-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2421,6 +2449,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_ctr_aes = { .cra_driver_name = "safexcel-authenc-hmac-sha384-ctr-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2534,6 +2563,7 @@ struct safexcel_alg_template safexcel_alg_xts_aes = { .cra_driver_name = "safexcel-xts-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = XTS_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2646,6 +2676,7 @@ struct safexcel_alg_template safexcel_alg_gcm = { .cra_driver_name = "safexcel-gcm-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2769,6 +2800,7 @@ struct safexcel_alg_template safexcel_alg_ccm = { .cra_driver_name = "safexcel-ccm-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2832,6 +2864,7 @@ struct safexcel_alg_template safexcel_alg_chacha20 = { .cra_driver_name = "safexcel-chacha20", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2993,6 +3026,7 @@ struct safexcel_alg_template safexcel_alg_chachapoly = { /* +1 to put it above HW chacha + SW poly */ .cra_priority = SAFEXCEL_CRA_PRIORITY + 1, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = 1, @@ -3032,6 +3066,7 @@ struct safexcel_alg_template safexcel_alg_chachapoly_esp = { /* +1 to put it above HW chacha + SW poly */ .cra_priority = SAFEXCEL_CRA_PRIORITY + 1, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, 
.cra_blocksize = 1, @@ -3110,6 +3145,7 @@ struct safexcel_alg_template safexcel_alg_ecb_sm4 = { .cra_driver_name = "safexcel-ecb-sm4", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SM4_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -3147,6 +3183,7 @@ struct safexcel_alg_template safexcel_alg_cbc_sm4 = { .cra_driver_name = "safexcel-cbc-sm4", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SM4_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -3184,6 +3221,7 @@ struct safexcel_alg_template safexcel_alg_ofb_sm4 = { .cra_driver_name = "safexcel-ofb-sm4", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -3221,6 +3259,7 @@ struct safexcel_alg_template safexcel_alg_cfb_sm4 = { .cra_driver_name = "safexcel-cfb-sm4", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -3273,6 +3312,7 @@ struct safexcel_alg_template safexcel_alg_ctr_sm4 = { .cra_driver_name = "safexcel-ctr-sm4", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -3332,6 +3372,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_sm4 = { .cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-sm4", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SM4_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -3441,6 +3482,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sm3_cbc_sm4 = { .cra_driver_name = "safexcel-authenc-hmac-sm3-cbc-sm4", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SM4_BLOCK_SIZE, @@ -3476,6 +3518,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_ctr_sm4 = { .cra_driver_name = "safexcel-authenc-hmac-sha1-ctr-sm4", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -3510,6 +3553,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sm3_ctr_sm4 = { .cra_driver_name = "safexcel-authenc-hmac-sm3-ctr-sm4", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -3578,6 +3622,7 @@ struct safexcel_alg_template safexcel_alg_rfc4106_gcm = { .cra_driver_name = "safexcel-rfc4106-gcm-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -3622,6 +3667,7 @@ struct safexcel_alg_template safexcel_alg_rfc4543_gcm = { .cra_driver_name = "safexcel-rfc4543-gcm-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, 
.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -3713,6 +3759,7 @@ struct safexcel_alg_template safexcel_alg_rfc4309_ccm = { .cra_driver_name = "safexcel-rfc4309-ccm-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index 43962bc709c6..16a467969d8e 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c @@ -992,6 +992,7 @@ struct safexcel_alg_template safexcel_alg_sha1 = { .cra_driver_name = "safexcel-sha1", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1235,6 +1236,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha1 = { .cra_driver_name = "safexcel-hmac-sha1", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1291,6 +1293,7 @@ struct safexcel_alg_template safexcel_alg_sha256 = { .cra_driver_name = "safexcel-sha256", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1347,6 +1350,7 @@ struct safexcel_alg_template safexcel_alg_sha224 = { .cra_driver_name = "safexcel-sha224", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA224_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1418,6 +1422,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha224 = { .cra_driver_name = "safexcel-hmac-sha224", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA224_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1489,6 +1494,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha256 = { .cra_driver_name = "safexcel-hmac-sha256", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1545,6 +1551,7 @@ struct safexcel_alg_template safexcel_alg_sha512 = { .cra_driver_name = "safexcel-sha512", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA512_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1601,6 +1608,7 @@ struct safexcel_alg_template safexcel_alg_sha384 = { .cra_driver_name = "safexcel-sha384", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA384_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1672,6 +1680,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha512 = { .cra_driver_name = "safexcel-hmac-sha512", .cra_priority = 
SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA512_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1743,6 +1752,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha384 = { .cra_driver_name = "safexcel-hmac-sha384", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA384_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1799,6 +1809,7 @@ struct safexcel_alg_template safexcel_alg_md5 = { .cra_driver_name = "safexcel-md5", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = MD5_HMAC_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1871,6 +1882,7 @@ struct safexcel_alg_template safexcel_alg_hmac_md5 = { .cra_driver_name = "safexcel-hmac-md5", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = MD5_HMAC_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1952,6 +1964,7 @@ struct safexcel_alg_template safexcel_alg_crc32 = { .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_OPTIONAL_KEY | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -2041,6 +2054,7 @@ struct safexcel_alg_template safexcel_alg_cbcmac = { .cra_driver_name = "safexcel-cbcmac-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -2136,6 +2150,7 @@ struct safexcel_alg_template safexcel_alg_xcbcmac = { .cra_driver_name = "safexcel-xcbc-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -2232,6 +2247,7 @@ struct safexcel_alg_template safexcel_alg_cmac = { .cra_driver_name = "safexcel-cmac-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -2288,6 +2304,7 @@ struct safexcel_alg_template safexcel_alg_sm3 = { .cra_driver_name = "safexcel-sm3", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SM3_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -2359,6 +2376,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sm3 = { .cra_driver_name = "safexcel-hmac-sm3", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SM3_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index ad73fc946682..f478bb0a566a 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c @@ -1402,7 +1402,8 @@ static int __init ixp_module_init(void) /* block ciphers */ cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | - CRYPTO_ALG_ASYNC; + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY; if 
(!cra->setkey) cra->setkey = ablk_setkey; if (!cra->encrypt) @@ -1435,7 +1436,8 @@ static int __init ixp_module_init(void) /* authenc */ cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | - CRYPTO_ALG_ASYNC; + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY; cra->setkey = cra->setkey ?: aead_setkey; cra->setauthsize = aead_setauthsize; cra->encrypt = aead_encrypt; diff --git a/drivers/crypto/marvell/cesa/cesa.c b/drivers/crypto/marvell/cesa/cesa.c index 8a5f0b0bdf77..d63bca9718dc 100644 --- a/drivers/crypto/marvell/cesa/cesa.c +++ b/drivers/crypto/marvell/cesa/cesa.c @@ -438,7 +438,7 @@ static int mv_cesa_probe(struct platform_device *pdev) struct mv_cesa_dev *cesa; struct mv_cesa_engine *engines; struct resource *res; - int irq, ret, i; + int irq, ret, i, cpu; u32 sram_size; if (cesa_dev) { @@ -505,6 +505,8 @@ static int mv_cesa_probe(struct platform_device *pdev) goto err_cleanup; } + engine->irq = irq; + /* * Not all platforms can gate the CESA clocks: do not complain * if the clock does not exist. @@ -548,6 +550,10 @@ static int mv_cesa_probe(struct platform_device *pdev) if (ret) goto err_cleanup; + /* Set affinity */ + cpu = cpumask_local_spread(engine->id, NUMA_NO_NODE); + irq_set_affinity_hint(irq, get_cpu_mask(cpu)); + crypto_init_queue(&engine->queue, CESA_CRYPTO_DEFAULT_MAX_QLEN); atomic_set(&engine->load, 0); INIT_LIST_HEAD(&engine->complete_queue); @@ -570,6 +576,8 @@ err_cleanup: clk_disable_unprepare(cesa->engines[i].zclk); clk_disable_unprepare(cesa->engines[i].clk); mv_cesa_put_sram(pdev, i); + if (cesa->engines[i].irq > 0) + irq_set_affinity_hint(cesa->engines[i].irq, NULL); } return ret; @@ -586,6 +594,7 @@ static int mv_cesa_remove(struct platform_device *pdev) clk_disable_unprepare(cesa->engines[i].zclk); clk_disable_unprepare(cesa->engines[i].clk); mv_cesa_put_sram(pdev, i); + irq_set_affinity_hint(cesa->engines[i].irq, NULL); } return 0; diff --git a/drivers/crypto/marvell/cesa/cesa.h b/drivers/crypto/marvell/cesa/cesa.h index e8632d5f343f..0c9cbb681e49 100644 --- a/drivers/crypto/marvell/cesa/cesa.h +++ b/drivers/crypto/marvell/cesa/cesa.h @@ -457,6 +457,7 @@ struct mv_cesa_engine { atomic_t load; struct mv_cesa_tdma_chain chain; struct list_head complete_queue; + int irq; }; /** diff --git a/drivers/crypto/marvell/cesa/cipher.c b/drivers/crypto/marvell/cesa/cipher.c index f133c2ccb5ae..45b4d7a29833 100644 --- a/drivers/crypto/marvell/cesa/cipher.c +++ b/drivers/crypto/marvell/cesa/cipher.c @@ -508,7 +508,8 @@ struct skcipher_alg mv_cesa_ecb_des_alg = { .cra_name = "ecb(des)", .cra_driver_name = "mv-ecb-des", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct mv_cesa_des_ctx), .cra_alignmask = 0, @@ -558,7 +559,8 @@ struct skcipher_alg mv_cesa_cbc_des_alg = { .cra_name = "cbc(des)", .cra_driver_name = "mv-cbc-des", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct mv_cesa_des_ctx), .cra_alignmask = 0, @@ -616,7 +618,8 @@ struct skcipher_alg mv_cesa_ecb_des3_ede_alg = { .cra_name = "ecb(des3_ede)", .cra_driver_name = "mv-ecb-des3-ede", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | + 
CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx), .cra_alignmask = 0, @@ -669,7 +672,8 @@ struct skcipher_alg mv_cesa_cbc_des3_ede_alg = { .cra_name = "cbc(des3_ede)", .cra_driver_name = "mv-cbc-des3-ede", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx), .cra_alignmask = 0, @@ -741,7 +745,8 @@ struct skcipher_alg mv_cesa_ecb_aes_alg = { .cra_name = "ecb(aes)", .cra_driver_name = "mv-ecb-aes", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx), .cra_alignmask = 0, @@ -790,7 +795,8 @@ struct skcipher_alg mv_cesa_cbc_aes_alg = { .cra_name = "cbc(aes)", .cra_driver_name = "mv-cbc-aes", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx), .cra_alignmask = 0, diff --git a/drivers/crypto/marvell/cesa/hash.c b/drivers/crypto/marvell/cesa/hash.c index b971284332b6..bd0bd9ffd6e9 100644 --- a/drivers/crypto/marvell/cesa/hash.c +++ b/drivers/crypto/marvell/cesa/hash.c @@ -921,6 +921,7 @@ struct ahash_alg mv_md5_alg = { .cra_driver_name = "mv-md5", .cra_priority = 300, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = MD5_HMAC_BLOCK_SIZE, .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx), @@ -991,6 +992,7 @@ struct ahash_alg mv_sha1_alg = { .cra_driver_name = "mv-sha1", .cra_priority = 300, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx), @@ -1064,6 +1066,7 @@ struct ahash_alg mv_sha256_alg = { .cra_driver_name = "mv-sha256", .cra_priority = 300, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx), @@ -1298,6 +1301,7 @@ struct ahash_alg mv_ahmac_md5_alg = { .cra_driver_name = "mv-hmac-md5", .cra_priority = 300, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = MD5_HMAC_BLOCK_SIZE, .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx), @@ -1368,6 +1372,7 @@ struct ahash_alg mv_ahmac_sha1_alg = { .cra_driver_name = "mv-hmac-sha1", .cra_priority = 300, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx), @@ -1438,6 +1443,7 @@ struct ahash_alg mv_ahmac_sha256_alg = { .cra_driver_name = "mv-hmac-sha256", .cra_priority = 300, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx), diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c index fec8f3b9b112..cc103b1bc224 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c +++ 
b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c @@ -878,11 +878,11 @@ static int copy_ucode_to_dma_mem(struct device *dev, /* Byte swap 64-bit */ for (i = 0; i < (ucode->size / 8); i++) - ((u64 *)ucode->align_va)[i] = + ((__be64 *)ucode->align_va)[i] = cpu_to_be64(((u64 *)ucode->align_va)[i]); /* Ucode needs 16-bit swap */ for (i = 0; i < (ucode->size / 2); i++) - ((u16 *)ucode->align_va)[i] = + ((__be16 *)ucode->align_va)[i] = cpu_to_be16(((u16 *)ucode->align_va)[i]); return 0; } @@ -1463,8 +1463,8 @@ int otx_cpt_try_create_default_eng_grps(struct pci_dev *pdev, struct otx_cpt_eng_grps *eng_grps, int pf_type) { - struct tar_ucode_info_t *tar_info[OTX_CPT_MAX_ETYPES_PER_GRP] = { 0 }; - struct otx_cpt_engines engs[OTX_CPT_MAX_ETYPES_PER_GRP] = { {0} }; + struct tar_ucode_info_t *tar_info[OTX_CPT_MAX_ETYPES_PER_GRP] = {}; + struct otx_cpt_engines engs[OTX_CPT_MAX_ETYPES_PER_GRP] = {}; struct tar_arch_info_t *tar_arch = NULL; char *tar_filename; int i, ret = 0; diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h index 14f02b60d0c2..8620ac87a447 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h +++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h @@ -74,7 +74,7 @@ struct otx_cpt_ucode_ver_num { struct otx_cpt_ucode_hdr { struct otx_cpt_ucode_ver_num ver_num; u8 ver_str[OTX_CPT_UCODE_VER_STR_SZ]; - u32 code_length; + __be32 code_length; u32 padding[3]; }; diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c index 1e0a1d70ebd3..90bb31329d4b 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c +++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c @@ -239,7 +239,6 @@ static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc, struct otx_cpt_fc_ctx *fctx = &rctx->fctx; int ivsize = crypto_skcipher_ivsize(stfm); u32 start = req->cryptlen - ivsize; - u64 *ctrl_flags = NULL; gfp_t flags; flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
@@ -280,8 +279,7 @@ static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc, memcpy(fctx->enc.encr_iv, req->iv, crypto_skcipher_ivsize(stfm)); - ctrl_flags = (u64 *)&fctx->enc.enc_ctrl.flags; - *ctrl_flags = cpu_to_be64(*ctrl_flags); + fctx->enc.enc_ctrl.flags = cpu_to_be64(fctx->enc.enc_ctrl.cflags); /* * Storing Packet Data Information in offset @@ -692,20 +690,17 @@ static struct otx_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg) static inline void swap_data32(void *buf, u32 len) { - u32 *store = (u32 *) buf; - int i = 0; - - for (i = 0 ; i < len/sizeof(u32); i++, store++) - *store = cpu_to_be32(*store); + cpu_to_be32_array(buf, buf, len / 4); } static inline void swap_data64(void *buf, u32 len) { - u64 *store = (u64 *) buf; + __be64 *dst = buf; + u64 *src = buf; int i = 0; - for (i = 0 ; i < len/sizeof(u64); i++, store++) - *store = cpu_to_be64(*store); + for (i = 0 ; i < len / 8; i++, src++, dst++) + *dst = cpu_to_be64p(src); } static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad) @@ -1012,7 +1007,7 @@ static inline u32 create_aead_ctx_hdr(struct aead_request *req, u32 enc, /* Unknown cipher type */ return -EINVAL; } - rctx->ctrl_word.flags = cpu_to_be64(rctx->ctrl_word.flags); + rctx->ctrl_word.flags = cpu_to_be64(rctx->ctrl_word.cflags); req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER; req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ; @@ -1032,7 +1027,7 @@ static inline u32 create_aead_ctx_hdr(struct aead_request *req, u32 enc, fctx->enc.enc_ctrl.e.aes_key = ctx->key_type; fctx->enc.enc_ctrl.e.mac_type = ctx->mac_type; fctx->enc.enc_ctrl.e.mac_len = mac_len; - fctx->enc.enc_ctrl.flags = cpu_to_be64(fctx->enc.enc_ctrl.flags); + fctx->enc.enc_ctrl.flags = cpu_to_be64(fctx->enc.enc_ctrl.cflags); /* * Storing Packet Data Information in offset @@ -1306,7 +1301,7 @@ static int otx_cpt_aead_null_decrypt(struct aead_request *req) static struct skcipher_alg otx_cpt_skciphers[] = { { .base.cra_name = "xts(aes)", .base.cra_driver_name = "cpt_xts_aes", - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx), .base.cra_alignmask = 7, @@ -1323,7 +1318,7 @@ static struct skcipher_alg otx_cpt_skciphers[] = { { }, { .base.cra_name = "cbc(aes)", .base.cra_driver_name = "cpt_cbc_aes", - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx), .base.cra_alignmask = 7, @@ -1340,7 +1335,7 @@ static struct skcipher_alg otx_cpt_skciphers[] = { { }, { .base.cra_name = "ecb(aes)", .base.cra_driver_name = "cpt_ecb_aes", - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx), .base.cra_alignmask = 7, @@ -1357,7 +1352,7 @@ static struct skcipher_alg otx_cpt_skciphers[] = { { }, { .base.cra_name = "cfb(aes)", .base.cra_driver_name = "cpt_cfb_aes", - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx), .base.cra_alignmask = 7, @@ -1374,7 +1369,7 @@ static struct skcipher_alg otx_cpt_skciphers[] = { { }, { .base.cra_name = "cbc(des3_ede)", .base.cra_driver_name = "cpt_cbc_des3_ede", - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = 
CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct otx_cpt_des3_ctx), .base.cra_alignmask = 7, @@ -1391,7 +1386,7 @@ static struct skcipher_alg otx_cpt_skciphers[] = { { }, { .base.cra_name = "ecb(des3_ede)", .base.cra_driver_name = "cpt_ecb_des3_ede", - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct otx_cpt_des3_ctx), .base.cra_alignmask = 7, @@ -1412,7 +1407,7 @@ static struct aead_alg otx_cpt_aeads[] = { { .cra_name = "authenc(hmac(sha1),cbc(aes))", .cra_driver_name = "cpt_hmac_sha1_cbc_aes", .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx), .cra_priority = 4001, .cra_alignmask = 0, @@ -1431,7 +1426,7 @@ static struct aead_alg otx_cpt_aeads[] = { { .cra_name = "authenc(hmac(sha256),cbc(aes))", .cra_driver_name = "cpt_hmac_sha256_cbc_aes", .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx), .cra_priority = 4001, .cra_alignmask = 0, @@ -1450,7 +1445,7 @@ static struct aead_alg otx_cpt_aeads[] = { { .cra_name = "authenc(hmac(sha384),cbc(aes))", .cra_driver_name = "cpt_hmac_sha384_cbc_aes", .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx), .cra_priority = 4001, .cra_alignmask = 0, @@ -1469,7 +1464,7 @@ static struct aead_alg otx_cpt_aeads[] = { { .cra_name = "authenc(hmac(sha512),cbc(aes))", .cra_driver_name = "cpt_hmac_sha512_cbc_aes", .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx), .cra_priority = 4001, .cra_alignmask = 0, @@ -1488,7 +1483,7 @@ static struct aead_alg otx_cpt_aeads[] = { { .cra_name = "authenc(hmac(sha1),ecb(cipher_null))", .cra_driver_name = "cpt_hmac_sha1_ecb_null", .cra_blocksize = 1, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx), .cra_priority = 4001, .cra_alignmask = 0, @@ -1507,7 +1502,7 @@ static struct aead_alg otx_cpt_aeads[] = { { .cra_name = "authenc(hmac(sha256),ecb(cipher_null))", .cra_driver_name = "cpt_hmac_sha256_ecb_null", .cra_blocksize = 1, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx), .cra_priority = 4001, .cra_alignmask = 0, @@ -1526,7 +1521,7 @@ static struct aead_alg otx_cpt_aeads[] = { { .cra_name = "authenc(hmac(sha384),ecb(cipher_null))", .cra_driver_name = "cpt_hmac_sha384_ecb_null", .cra_blocksize = 1, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx), .cra_priority = 4001, .cra_alignmask = 0, @@ -1545,7 +1540,7 @@ static struct aead_alg otx_cpt_aeads[] = { { .cra_name = "authenc(hmac(sha512),ecb(cipher_null))", .cra_driver_name = "cpt_hmac_sha512_ecb_null", .cra_blocksize = 1, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx), .cra_priority = 4001, .cra_alignmask = 0, 
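The hunks above (safexcel, ixp4xx, marvell cesa, n2 and octeontx) all OR CRYPTO_ALG_ALLOCATES_MEMORY into cra_flags, declaring that the driver may allocate memory while servicing a request so that callers which must avoid allocation in the request path can steer clear of these implementations. A minimal sketch of the resulting registration pattern follows; the driver name and context struct are hypothetical, only the flag macros and skcipher_alg fields are taken from the hunks above.

    #include <linux/module.h>
    #include <linux/crypto.h>
    #include <crypto/skcipher.h>
    #include <crypto/aes.h>

    /* Hypothetical per-tfm context, for illustration only. */
    struct demo_cipher_ctx {
            u8 key[AES_MAX_KEY_SIZE];
            unsigned int keylen;
    };

    static struct skcipher_alg demo_cbc_aes_alg = {
            .base.cra_name          = "cbc(aes)",
            .base.cra_driver_name   = "demo-cbc-aes",
            .base.cra_priority      = 300,
            /*
             * ASYNC: completion is reported via a callback;
             * KERN_DRIVER_ONLY: hardware-only implementation;
             * ALLOCATES_MEMORY: requests may trigger allocations, so
             * allocation-averse users will not select this algorithm.
             */
            .base.cra_flags         = CRYPTO_ALG_ASYNC |
                                      CRYPTO_ALG_KERN_DRIVER_ONLY |
                                      CRYPTO_ALG_ALLOCATES_MEMORY,
            .base.cra_blocksize     = AES_BLOCK_SIZE,
            .base.cra_ctxsize       = sizeof(struct demo_cipher_ctx),
            .base.cra_module        = THIS_MODULE,
            .min_keysize            = AES_MIN_KEY_SIZE,
            .max_keysize            = AES_MAX_KEY_SIZE,
            .ivsize                 = AES_BLOCK_SIZE,
            /* .setkey, .encrypt, .decrypt, .init, .exit point at driver code. */
    };

    /* Registration would then use the usual pair:
     * crypto_register_skcipher(&demo_cbc_aes_alg) on probe and
     * crypto_unregister_skcipher(&demo_cbc_aes_alg) on remove. */
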
@@ -1564,7 +1559,7 @@ static struct aead_alg otx_cpt_aeads[] = { { .cra_name = "rfc4106(gcm(aes))", .cra_driver_name = "cpt_rfc4106_gcm_aes", .cra_blocksize = 1, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx), .cra_priority = 4001, .cra_alignmask = 0, diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.h b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.h index 67cc0025f5d5..4181b5c5c356 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.h +++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.h @@ -66,7 +66,8 @@ enum otx_cpt_aes_key_len { }; union otx_cpt_encr_ctrl { - u64 flags; + __be64 flags; + u64 cflags; struct { #if defined(__BIG_ENDIAN_BITFIELD) u64 enc_cipher:4; @@ -138,7 +139,8 @@ struct otx_cpt_des3_ctx { }; union otx_cpt_offset_ctrl_word { - u64 flags; + __be64 flags; + u64 cflags; struct { #if defined(__BIG_ENDIAN_BITFIELD) u64 reserved:32; diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c index 239195cccf93..cbc3d7869ebe 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c +++ b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c @@ -202,11 +202,10 @@ static inline int setup_sgio_list(struct pci_dev *pdev, info->dlen = dlen; info->in_buffer = (u8 *)info + info_len; - ((u16 *)info->in_buffer)[0] = req->outcnt; - ((u16 *)info->in_buffer)[1] = req->incnt; + ((__be16 *)info->in_buffer)[0] = cpu_to_be16(req->outcnt); + ((__be16 *)info->in_buffer)[1] = cpu_to_be16(req->incnt); ((u16 *)info->in_buffer)[2] = 0; ((u16 *)info->in_buffer)[3] = 0; - *(u64 *)info->in_buffer = cpu_to_be64p((u64 *)info->in_buffer); /* Setup gather (input) components */ if (setup_sgio_components(pdev, req->in, req->incnt, @@ -367,8 +366,6 @@ static int process_request(struct pci_dev *pdev, struct otx_cpt_req_info *req, iq_cmd.cmd.s.param2 = cpu_to_be16(cpt_req->param2); iq_cmd.cmd.s.dlen = cpu_to_be16(cpt_req->dlen); - /* 64-bit swap for microcode data reads, not needed for addresses*/ - iq_cmd.cmd.u64 = cpu_to_be64(iq_cmd.cmd.u64); iq_cmd.dptr = info->dptr_baddr; iq_cmd.rptr = info->rptr_baddr; iq_cmd.cptr.u64 = 0; @@ -436,7 +433,7 @@ static int cpt_process_ccode(struct pci_dev *pdev, u8 ccode = cpt_status->s.compcode; union otx_cpt_error_code ecode; - ecode.u = be64_to_cpu(*((u64 *) cpt_info->out_buffer)); + ecode.u = be64_to_cpup((__be64 *)cpt_info->out_buffer); switch (ccode) { case CPT_COMP_E_FAULT: dev_err(&pdev->dev, diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.h b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.h index a4c9ff730b13..d912fe0c532d 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.h +++ b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.h @@ -92,10 +92,10 @@ union otx_cpt_ctrl_info { union otx_cpt_iq_cmd_word0 { u64 u64; struct { - u16 opcode; - u16 param1; - u16 param2; - u16 dlen; + __be16 opcode; + __be16 param1; + __be16 param2; + __be16 dlen; } s; }; @@ -123,16 +123,16 @@ struct otx_cpt_sglist_component { union { u64 len; struct { - u16 len0; - u16 len1; - u16 len2; - u16 len3; + __be16 len0; + __be16 len1; + __be16 len2; + __be16 len3; } s; } u; - u64 ptr0; - u64 ptr1; - u64 ptr2; - u64 ptr3; + __be64 ptr0; + __be64 ptr1; + __be64 ptr2; + __be64 ptr3; }; struct otx_cpt_pending_entry { diff --git a/drivers/crypto/mediatek/mtk-aes.c b/drivers/crypto/mediatek/mtk-aes.c index 78d660d963e2..4ad3571ab6af 100644 --- a/drivers/crypto/mediatek/mtk-aes.c +++ 
b/drivers/crypto/mediatek/mtk-aes.c @@ -137,8 +137,6 @@ struct mtk_aes_gcm_ctx { u32 authsize; size_t textlen; - - struct crypto_skcipher *ctr; }; struct mtk_aes_drv { @@ -996,17 +994,8 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key, u32 keylen) { struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead); - struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx); - struct crypto_skcipher *ctr = gctx->ctr; - struct { - u32 hash[4]; - u8 iv[8]; - - struct crypto_wait wait; - - struct scatterlist sg[1]; - struct skcipher_request req; - } *data; + u8 hash[AES_BLOCK_SIZE] __aligned(4) = {}; + struct crypto_aes_ctx aes_ctx; int err; switch (keylen) { @@ -1026,39 +1015,18 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key, ctx->keylen = SIZE_IN_WORDS(keylen); - /* Same as crypto_gcm_setkey() from crypto/gcm.c */ - crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK); - crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) & - CRYPTO_TFM_REQ_MASK); - err = crypto_skcipher_setkey(ctr, key, keylen); + err = aes_expandkey(&aes_ctx, key, keylen); if (err) return err; - data = kzalloc(sizeof(*data) + crypto_skcipher_reqsize(ctr), - GFP_KERNEL); - if (!data) - return -ENOMEM; - - crypto_init_wait(&data->wait); - sg_init_one(data->sg, &data->hash, AES_BLOCK_SIZE); - skcipher_request_set_tfm(&data->req, ctr); - skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP | - CRYPTO_TFM_REQ_MAY_BACKLOG, - crypto_req_done, &data->wait); - skcipher_request_set_crypt(&data->req, data->sg, data->sg, - AES_BLOCK_SIZE, data->iv); - - err = crypto_wait_req(crypto_skcipher_encrypt(&data->req), - &data->wait); - if (err) - goto out; + aes_encrypt(&aes_ctx, hash, hash); + memzero_explicit(&aes_ctx, sizeof(aes_ctx)); mtk_aes_write_state_le(ctx->key, (const u32 *)key, keylen); - mtk_aes_write_state_be(ctx->key + ctx->keylen, data->hash, + mtk_aes_write_state_be(ctx->key + ctx->keylen, (const u32 *)hash, AES_BLOCK_SIZE); -out: - kzfree(data); - return err; + + return 0; } static int mtk_aes_gcm_setauthsize(struct crypto_aead *aead, @@ -1095,32 +1063,17 @@ static int mtk_aes_gcm_init(struct crypto_aead *aead) { struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead); - ctx->ctr = crypto_alloc_skcipher("ctr(aes)", 0, - CRYPTO_ALG_ASYNC); - if (IS_ERR(ctx->ctr)) { - pr_err("Error allocating ctr(aes)\n"); - return PTR_ERR(ctx->ctr); - } - crypto_aead_set_reqsize(aead, sizeof(struct mtk_aes_reqctx)); ctx->base.start = mtk_aes_gcm_start; return 0; } -static void mtk_aes_gcm_exit(struct crypto_aead *aead) -{ - struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead); - - crypto_free_skcipher(ctx->ctr); -} - static struct aead_alg aes_gcm_alg = { .setkey = mtk_aes_gcm_setkey, .setauthsize = mtk_aes_gcm_setauthsize, .encrypt = mtk_aes_gcm_encrypt, .decrypt = mtk_aes_gcm_decrypt, .init = mtk_aes_gcm_init, - .exit = mtk_aes_gcm_exit, .ivsize = GCM_AES_IV_SIZE, .maxauthsize = AES_BLOCK_SIZE, diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c index d84530293036..909a7eb748e3 100644 --- a/drivers/crypto/mxs-dcp.c +++ b/drivers/crypto/mxs-dcp.c @@ -97,7 +97,7 @@ struct dcp_async_ctx { unsigned int hot:1; /* Crypto-specific context */ - struct crypto_sync_skcipher *fallback; + struct crypto_skcipher *fallback; unsigned int key_len; uint8_t key[AES_KEYSIZE_128]; }; @@ -105,6 +105,7 @@ struct dcp_async_ctx { struct dcp_aes_req_ctx { unsigned int enc:1; unsigned int ecb:1; + struct skcipher_request fallback_req; // keep at the end }; struct dcp_sha_req_ctx { @@ -426,21 +427,20 
@@ static int dcp_chan_thread_aes(void *data) static int mxs_dcp_block_fallback(struct skcipher_request *req, int enc) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req); struct dcp_async_ctx *ctx = crypto_skcipher_ctx(tfm); - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); int ret; - skcipher_request_set_sync_tfm(subreq, ctx->fallback); - skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL); - skcipher_request_set_crypt(subreq, req->src, req->dst, + skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback); + skcipher_request_set_callback(&rctx->fallback_req, req->base.flags, + req->base.complete, req->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst, req->cryptlen, req->iv); if (enc) - ret = crypto_skcipher_encrypt(subreq); + ret = crypto_skcipher_encrypt(&rctx->fallback_req); else - ret = crypto_skcipher_decrypt(subreq); - - skcipher_request_zero(subreq); + ret = crypto_skcipher_decrypt(&rctx->fallback_req); return ret; } @@ -510,24 +510,25 @@ static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, * but is supported by in-kernel software implementation, we use * software fallback. */ - crypto_sync_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK); - crypto_sync_skcipher_set_flags(actx->fallback, + crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(actx->fallback, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); - return crypto_sync_skcipher_setkey(actx->fallback, key, len); + return crypto_skcipher_setkey(actx->fallback, key, len); } static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm) { const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm)); struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm); - struct crypto_sync_skcipher *blk; + struct crypto_skcipher *blk; - blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); + blk = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(blk)) return PTR_ERR(blk); actx->fallback = blk; - crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx)); + crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx) + + crypto_skcipher_reqsize(blk)); return 0; } @@ -535,7 +536,7 @@ static void mxs_dcp_aes_fallback_exit_tfm(struct crypto_skcipher *tfm) { struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm); - crypto_free_sync_skcipher(actx->fallback); + crypto_free_skcipher(actx->fallback); } /* diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index 6a828bbecea4..d8aec5153b21 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c @@ -1382,7 +1382,8 @@ static int __n2_register_one_skcipher(const struct n2_skcipher_tmpl *tmpl) snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name); snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name); alg->base.cra_priority = N2_CRA_PRIORITY; - alg->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC; + alg->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY; alg->base.cra_blocksize = tmpl->block_size; p->enc_type = tmpl->enc_type; alg->base.cra_ctxsize = sizeof(struct n2_skcipher_context); diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index b5aff20c5900..4fd14d90cc40 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -139,7 +139,7 @@ int omap_aes_write_ctrl(struct omap_aes_dev *dd) for (i = 0; i < 
key32; i++) { omap_aes_write(dd, AES_REG_KEY(dd, i), - __le32_to_cpu(dd->ctx->key[i])); + (__force u32)cpu_to_le32(dd->ctx->key[i])); } if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->iv) @@ -363,7 +363,7 @@ int omap_aes_crypt_dma_start(struct omap_aes_dev *dd) { int err; - pr_debug("total: %d\n", dd->total); + pr_debug("total: %zu\n", dd->total); if (!dd->pio_only) { err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len, @@ -409,7 +409,7 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err) int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) { - pr_debug("total: %d\n", dd->total); + pr_debug("total: %zu\n", dd->total); omap_aes_dma_stop(dd); @@ -548,20 +548,18 @@ static int omap_aes_crypt(struct skcipher_request *req, unsigned long mode) !!(mode & FLAGS_CBC)); if (req->cryptlen < aes_fallback_sz) { - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); - - skcipher_request_set_sync_tfm(subreq, ctx->fallback); - skcipher_request_set_callback(subreq, req->base.flags, NULL, - NULL); - skcipher_request_set_crypt(subreq, req->src, req->dst, - req->cryptlen, req->iv); + skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback); + skcipher_request_set_callback(&rctx->fallback_req, + req->base.flags, + req->base.complete, + req->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, req->src, + req->dst, req->cryptlen, req->iv); if (mode & FLAGS_ENCRYPT) - ret = crypto_skcipher_encrypt(subreq); + ret = crypto_skcipher_encrypt(&rctx->fallback_req); else - ret = crypto_skcipher_decrypt(subreq); - - skcipher_request_zero(subreq); + ret = crypto_skcipher_decrypt(&rctx->fallback_req); return ret; } dd = omap_aes_find_dev(rctx); @@ -590,11 +588,11 @@ static int omap_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, memcpy(ctx->key, key, keylen); ctx->keylen = keylen; - crypto_sync_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK); - crypto_sync_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags & + crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); - ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen); + ret = crypto_skcipher_setkey(ctx->fallback, key, keylen); if (!ret) return 0; @@ -640,15 +638,16 @@ static int omap_aes_init_tfm(struct crypto_skcipher *tfm) { const char *name = crypto_tfm_alg_name(&tfm->base); struct omap_aes_ctx *ctx = crypto_skcipher_ctx(tfm); - struct crypto_sync_skcipher *blk; + struct crypto_skcipher *blk; - blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); + blk = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(blk)) return PTR_ERR(blk); ctx->fallback = blk; - crypto_skcipher_set_reqsize(tfm, sizeof(struct omap_aes_reqctx)); + crypto_skcipher_set_reqsize(tfm, sizeof(struct omap_aes_reqctx) + + crypto_skcipher_reqsize(blk)); ctx->enginectx.op.prepare_request = omap_aes_prepare_req; ctx->enginectx.op.unprepare_request = NULL; @@ -662,7 +661,7 @@ static void omap_aes_exit_tfm(struct crypto_skcipher *tfm) struct omap_aes_ctx *ctx = crypto_skcipher_ctx(tfm); if (ctx->fallback) - crypto_free_sync_skcipher(ctx->fallback); + crypto_free_skcipher(ctx->fallback); ctx->fallback = NULL; } diff --git a/drivers/crypto/omap-aes.h b/drivers/crypto/omap-aes.h index 2d111bf906e1..23d073e87bb8 100644 --- a/drivers/crypto/omap-aes.h +++ b/drivers/crypto/omap-aes.h @@ -97,7 +97,7 @@ struct omap_aes_ctx { int keylen; u32 key[AES_KEYSIZE_256 / sizeof(u32)]; u8 nonce[4]; - struct crypto_sync_skcipher 
*fallback; + struct crypto_skcipher *fallback; }; struct omap_aes_gcm_ctx { @@ -110,6 +110,7 @@ struct omap_aes_reqctx { unsigned long mode; u8 iv[AES_BLOCK_SIZE]; u32 auth_tag[AES_BLOCK_SIZE / sizeof(u32)]; + struct skcipher_request fallback_req; // keep at the end }; #define OMAP_AES_QUEUE_LENGTH 1 diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c index 8eda43319204..c9d38bcfd1c7 100644 --- a/drivers/crypto/omap-des.c +++ b/drivers/crypto/omap-des.c @@ -87,7 +87,7 @@ struct omap_des_ctx { struct omap_des_dev *dd; int keylen; - u32 key[(3 * DES_KEY_SIZE) / sizeof(u32)]; + __le32 key[(3 * DES_KEY_SIZE) / sizeof(u32)]; unsigned long flags; }; @@ -461,7 +461,7 @@ static int omap_des_crypt_dma_start(struct omap_des_dev *dd) crypto_skcipher_reqtfm(dd->req)); int err; - pr_debug("total: %d\n", dd->total); + pr_debug("total: %zd\n", dd->total); if (!dd->pio_only) { err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len, @@ -504,7 +504,7 @@ static void omap_des_finish_req(struct omap_des_dev *dd, int err) static int omap_des_crypt_dma_stop(struct omap_des_dev *dd) { - pr_debug("total: %d\n", dd->total); + pr_debug("total: %zd\n", dd->total); omap_des_dma_stop(dd); diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 82691a057d2a..954d703f2981 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -357,10 +357,10 @@ static void omap_sham_copy_ready_hash(struct ahash_request *req) if (big_endian) for (i = 0; i < d; i++) - hash[i] = be32_to_cpu(in[i]); + hash[i] = be32_to_cpup((__be32 *)in + i); else for (i = 0; i < d; i++) - hash[i] = le32_to_cpu(in[i]); + hash[i] = le32_to_cpup((__le32 *)in + i); } static int omap_sham_hw_init(struct omap_sham_dev *dd) @@ -522,7 +522,7 @@ static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, size_t length, int mlen; struct sg_mapping_iter mi; - dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n", + dev_dbg(dd->dev, "xmit_cpu: digcnt: %zd, length: %zd, final: %d\n", ctx->digcnt, length, final); dd->pdata->write_ctrl(dd, length, final, 0); @@ -588,7 +588,7 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, size_t length, struct dma_slave_config cfg; int ret; - dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n", + dev_dbg(dd->dev, "xmit_dma: digcnt: %zd, length: %zd, final: %d\n", ctx->digcnt, length, final); if (!dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE)) { @@ -871,7 +871,7 @@ static int omap_sham_prepare_request(struct ahash_request *req, bool update) nbytes += req->nbytes - rctx->offset; dev_dbg(rctx->dd->dev, - "%s: nbytes=%d, bs=%d, total=%d, offset=%d, bufcnt=%d\n", + "%s: nbytes=%d, bs=%d, total=%d, offset=%d, bufcnt=%zd\n", __func__, nbytes, bs, rctx->total, rctx->offset, rctx->bufcnt); @@ -932,7 +932,7 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd) return 0; } -struct omap_sham_dev *omap_sham_find_dev(struct omap_sham_reqctx *ctx) +static struct omap_sham_dev *omap_sham_find_dev(struct omap_sham_reqctx *ctx) { struct omap_sham_dev *dd; @@ -1023,7 +1023,7 @@ static int omap_sham_update_req(struct omap_sham_dev *dd) bool final = (ctx->flags & BIT(FLAGS_FINUP)) && !(dd->flags & BIT(FLAGS_HUGE)); - dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, final: %d", + dev_dbg(dd->dev, "update_req: total: %u, digcnt: %zd, final: %d", ctx->total, ctx->digcnt, final); if (ctx->total < get_block_size(ctx) || @@ -1036,7 +1036,7 @@ static int omap_sham_update_req(struct omap_sham_dev *dd) err = omap_sham_xmit_dma(dd, ctx->total, final); /* wait 
for dma completion before can take more data */ - dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt); + dev_dbg(dd->dev, "update: err: %d, digcnt: %zd\n", err, ctx->digcnt); return err; } @@ -1097,7 +1097,7 @@ static int omap_sham_finish(struct ahash_request *req) err = omap_sham_finish_hmac(req); } - dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt); + dev_dbg(dd->dev, "digcnt: %zd, bufcnt: %zd\n", ctx->digcnt, ctx->bufcnt); return err; } diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c index 7384e91c8b32..dac6eb37fff9 100644 --- a/drivers/crypto/picoxcell_crypto.c +++ b/drivers/crypto/picoxcell_crypto.c @@ -86,6 +86,7 @@ struct spacc_req { dma_addr_t src_addr, dst_addr; struct spacc_ddt *src_ddt, *dst_ddt; void (*complete)(struct spacc_req *req); + struct skcipher_request fallback_req; // keep at the end }; struct spacc_aead { @@ -158,7 +159,7 @@ struct spacc_ablk_ctx { * The fallback cipher. If the operation can't be done in hardware, * fallback to a software version. */ - struct crypto_sync_skcipher *sw_cipher; + struct crypto_skcipher *sw_cipher; }; /* AEAD cipher context. */ @@ -792,13 +793,13 @@ static int spacc_aes_setkey(struct crypto_skcipher *cipher, const u8 *key, * Set the fallback transform to use the same request flags as * the hardware transform. */ - crypto_sync_skcipher_clear_flags(ctx->sw_cipher, + crypto_skcipher_clear_flags(ctx->sw_cipher, CRYPTO_TFM_REQ_MASK); - crypto_sync_skcipher_set_flags(ctx->sw_cipher, + crypto_skcipher_set_flags(ctx->sw_cipher, cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK); - err = crypto_sync_skcipher_setkey(ctx->sw_cipher, key, len); + err = crypto_skcipher_setkey(ctx->sw_cipher, key, len); if (err) goto sw_setkey_failed; } @@ -900,7 +901,7 @@ static int spacc_ablk_do_fallback(struct skcipher_request *req, struct crypto_tfm *old_tfm = crypto_skcipher_tfm(crypto_skcipher_reqtfm(req)); struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm); - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->sw_cipher); + struct spacc_req *dev_req = skcipher_request_ctx(req); int err; /* @@ -908,13 +909,13 @@ static int spacc_ablk_do_fallback(struct skcipher_request *req, * the ciphering has completed, put the old transform back into the * request. */ - skcipher_request_set_sync_tfm(subreq, ctx->sw_cipher); - skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL); - skcipher_request_set_crypt(subreq, req->src, req->dst, + skcipher_request_set_tfm(&dev_req->fallback_req, ctx->sw_cipher); + skcipher_request_set_callback(&dev_req->fallback_req, req->base.flags, + req->base.complete, req->base.data); + skcipher_request_set_crypt(&dev_req->fallback_req, req->src, req->dst, req->cryptlen, req->iv); - err = is_encrypt ? crypto_skcipher_encrypt(subreq) : - crypto_skcipher_decrypt(subreq); - skcipher_request_zero(subreq); + err = is_encrypt ? 
crypto_skcipher_encrypt(&dev_req->fallback_req) : + crypto_skcipher_decrypt(&dev_req->fallback_req); return err; } @@ -1007,19 +1008,24 @@ static int spacc_ablk_init_tfm(struct crypto_skcipher *tfm) ctx->generic.flags = spacc_alg->type; ctx->generic.engine = engine; if (alg->base.cra_flags & CRYPTO_ALG_NEED_FALLBACK) { - ctx->sw_cipher = crypto_alloc_sync_skcipher( - alg->base.cra_name, 0, CRYPTO_ALG_NEED_FALLBACK); + ctx->sw_cipher = crypto_alloc_skcipher(alg->base.cra_name, 0, + CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(ctx->sw_cipher)) { dev_warn(engine->dev, "failed to allocate fallback for %s\n", alg->base.cra_name); return PTR_ERR(ctx->sw_cipher); } + crypto_skcipher_set_reqsize(tfm, sizeof(struct spacc_req) + + crypto_skcipher_reqsize(ctx->sw_cipher)); + } else { + /* take the size without the fallback skcipher_request at the end */ + crypto_skcipher_set_reqsize(tfm, offsetof(struct spacc_req, + fallback_req)); } + ctx->generic.key_offs = spacc_alg->key_offs; ctx->generic.iv_offs = spacc_alg->iv_offs; - crypto_skcipher_set_reqsize(tfm, sizeof(struct spacc_req)); - return 0; } @@ -1027,7 +1033,7 @@ static void spacc_ablk_exit_tfm(struct crypto_skcipher *tfm) { struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(tfm); - crypto_free_sync_skcipher(ctx->sw_cipher); + crypto_free_skcipher(ctx->sw_cipher); } static int spacc_ablk_encrypt(struct skcipher_request *req) @@ -1226,6 +1232,7 @@ static struct spacc_alg ipsec_engine_algs[] = { .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY, .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx), @@ -1251,6 +1258,7 @@ static struct spacc_alg ipsec_engine_algs[] = { .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY, .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx), @@ -1274,7 +1282,8 @@ static struct spacc_alg ipsec_engine_algs[] = { .base.cra_driver_name = "cbc-des-picoxcell", .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY, .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | - CRYPTO_ALG_ASYNC, + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = DES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx), .base.cra_module = THIS_MODULE, @@ -1298,7 +1307,8 @@ static struct spacc_alg ipsec_engine_algs[] = { .base.cra_driver_name = "ecb-des-picoxcell", .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY, .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | - CRYPTO_ALG_ASYNC, + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = DES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx), .base.cra_module = THIS_MODULE, @@ -1321,6 +1331,7 @@ static struct spacc_alg ipsec_engine_algs[] = { .base.cra_driver_name = "cbc-des3-ede-picoxcell", .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY, .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx), @@ -1345,6 +1356,7 @@ static struct spacc_alg ipsec_engine_algs[] = { .base.cra_driver_name = "ecb-des3-ede-picoxcell", .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY, .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, .base.cra_ctxsize = 
sizeof(struct spacc_ablk_ctx), @@ -1376,6 +1388,7 @@ static struct spacc_aead ipsec_engine_aeads[] = { "cbc-aes-picoxcell", .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, @@ -1406,6 +1419,7 @@ static struct spacc_aead ipsec_engine_aeads[] = { "cbc-aes-picoxcell", .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, @@ -1436,6 +1450,7 @@ static struct spacc_aead ipsec_engine_aeads[] = { "cbc-aes-picoxcell", .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, @@ -1466,6 +1481,7 @@ static struct spacc_aead ipsec_engine_aeads[] = { "cbc-3des-picoxcell", .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, @@ -1497,6 +1513,7 @@ static struct spacc_aead ipsec_engine_aeads[] = { "cbc-3des-picoxcell", .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, @@ -1527,6 +1544,7 @@ static struct spacc_aead ipsec_engine_aeads[] = { "cbc-3des-picoxcell", .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, @@ -1556,6 +1574,7 @@ static struct spacc_alg l2_engine_algs[] = { .base.cra_driver_name = "f8-kasumi-picoxcell", .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY, .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .base.cra_blocksize = 8, .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx), diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c index 6bc68bc00d76..aee494d3da52 100644 --- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c +++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <adf_accel_devices.h> #include <adf_common_drv.h> #include <adf_pf2vf_msg.h> diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h index afc9a0a86747..8b5dd2c94ebf 100644 --- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h +++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef ADF_C3XXX_HW_DATA_H_ #define ADF_C3XXX_HW_DATA_H_ diff --git a/drivers/crypto/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/qat/qat_c3xxx/adf_drv.c index d937cc7248a5..020d099409e5 100644 --- a/drivers/crypto/qat/qat_c3xxx/adf_drv.c +++ b/drivers/crypto/qat/qat_c3xxx/adf_drv.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c index d2d0ae445fd8..d2fedbd7113c 100644 --- a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c +++ b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2015 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2015 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2015 - 2020 Intel Corporation */ #include <adf_accel_devices.h> #include <adf_pf2vf_msg.h> #include <adf_common_drv.h> diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h index 934f216acf39..7945a9cd1c60 100644 --- a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h +++ b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2015 Intel Corporation. 
- This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2015 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2015 - 2020 Intel Corporation */ #ifndef ADF_C3XXXVF_HW_DATA_H_ #define ADF_C3XXXVF_HW_DATA_H_ diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c index 1dc5ac859f7b..11039fe55f61 100644 --- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c +++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c index 618cec360b39..844ad5ed33fc 100644 --- a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c +++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <adf_accel_devices.h> #include <adf_common_drv.h> #include <adf_pf2vf_msg.h> diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h index 17a8a32d5c63..88504d2bf30d 100644 --- a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h +++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef ADF_C62X_HW_DATA_H_ #define ADF_C62X_HW_DATA_H_ diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c index 2bc06c89d2fe..4ba9c14383af 100644 --- a/drivers/crypto/qat/qat_c62x/adf_drv.c +++ b/drivers/crypto/qat/qat_c62x/adf_drv.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> diff --git a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c index 38e4bc04f407..29fd3f1091ab 100644 --- a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c +++ b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2015 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. 
- - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2015 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2015 - 2020 Intel Corporation */ #include <adf_accel_devices.h> #include <adf_pf2vf_msg.h> #include <adf_common_drv.h> diff --git a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h index a28d83e77422..a6c04cf7a43c 100644 --- a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h +++ b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2015 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2015 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. 
- * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2015 - 2020 Intel Corporation */ #ifndef ADF_C62XVF_HW_DATA_H_ #define ADF_C62XVF_HW_DATA_H_ diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c index a68358b31292..b8b021d54bb5 100644 --- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c +++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h index 33f0a6251e38..c1db8c26afb6 100644 --- a/drivers/crypto/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef ADF_ACCEL_DEVICES_H_ #define ADF_ACCEL_DEVICES_H_ #include <linux/interrupt.h> @@ -103,8 +59,8 @@ struct adf_accel_pci { struct pci_dev *pci_dev; struct adf_accel_msix msix_entries; struct adf_bar pci_bars[ADF_PCI_MAX_BARS]; - uint8_t revid; - uint8_t sku; + u8 revid; + u8 sku; } __packed; enum dev_state { @@ -144,7 +100,7 @@ static inline const char *get_sku_info(enum dev_sku_info info) struct adf_hw_device_class { const char *name; const enum adf_device_type type; - uint32_t instances; + u32 instances; } __packed; struct adf_cfg_device_data; @@ -154,15 +110,15 @@ struct adf_etr_ring_data; struct adf_hw_device_data { struct adf_hw_device_class *dev_class; - uint32_t (*get_accel_mask)(uint32_t fuse); - uint32_t (*get_ae_mask)(uint32_t fuse); - uint32_t (*get_sram_bar_id)(struct adf_hw_device_data *self); - uint32_t (*get_misc_bar_id)(struct adf_hw_device_data *self); - uint32_t (*get_etr_bar_id)(struct adf_hw_device_data *self); - uint32_t (*get_num_aes)(struct adf_hw_device_data *self); - uint32_t (*get_num_accels)(struct adf_hw_device_data *self); - uint32_t (*get_pf2vf_offset)(uint32_t i); - uint32_t (*get_vintmsk_offset)(uint32_t i); + u32 (*get_accel_mask)(u32 fuse); + u32 (*get_ae_mask)(u32 fuse); + u32 (*get_sram_bar_id)(struct adf_hw_device_data *self); + u32 (*get_misc_bar_id)(struct adf_hw_device_data *self); + u32 (*get_etr_bar_id)(struct adf_hw_device_data *self); + u32 (*get_num_aes)(struct adf_hw_device_data *self); + u32 (*get_num_accels)(struct adf_hw_device_data *self); + u32 (*get_pf2vf_offset)(u32 i); + u32 (*get_vintmsk_offset)(u32 i); enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self); int (*alloc_irq)(struct adf_accel_dev *accel_dev); void (*free_irq)(struct adf_accel_dev *accel_dev); @@ -173,25 +129,25 @@ struct adf_hw_device_data { int (*init_arb)(struct adf_accel_dev *accel_dev); void (*exit_arb)(struct adf_accel_dev *accel_dev); void (*get_arb_mapping)(struct adf_accel_dev *accel_dev, - const uint32_t **cfg); + const u32 **cfg); void (*disable_iov)(struct adf_accel_dev *accel_dev); void (*enable_ints)(struct adf_accel_dev *accel_dev); int (*enable_vf2pf_comms)(struct adf_accel_dev *accel_dev); void (*reset_device)(struct adf_accel_dev *accel_dev); const char *fw_name; const char *fw_mmp_name; - uint32_t fuses; - uint32_t accel_capabilities_mask; - uint32_t instance_id; - uint16_t accel_mask; - uint16_t ae_mask; - uint16_t tx_rings_mask; - uint8_t tx_rx_gap; - uint8_t num_banks; - uint8_t num_accel; - uint8_t num_logical_accel; - uint8_t num_engines; - uint8_t min_iov_compat_ver; + u32 fuses; + u32 accel_capabilities_mask; + u32 instance_id; + u16 accel_mask; + u16 ae_mask; + u16 tx_rings_mask; + u8 tx_rx_gap; + u8 num_banks; + u8 num_accel; + u8 num_logical_accel; + u8 num_engines; + u8 min_iov_compat_ver; } __packed; /* CSR write macro */ @@ -248,8 +204,8 @@ struct adf_accel_dev { struct tasklet_struct pf2vf_bh_tasklet; struct mutex vf2pf_lock; /* protect CSR access */ struct completion iov_msg_completion; - uint8_t compatible; - uint8_t pf_version; + u8 compatible; + u8 pf_version; } vf; }; bool is_vf; diff --git a/drivers/crypto/qat/qat_common/adf_accel_engine.c b/drivers/crypto/qat/qat_common/adf_accel_engine.c index a42fc42704be..c8ad85b882be 100644 --- a/drivers/crypto/qat/qat_common/adf_accel_engine.c +++ b/drivers/crypto/qat/qat_common/adf_accel_engine.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 
license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/firmware.h> #include <linux/pci.h> #include "adf_cfg.h" @@ -118,7 +74,7 @@ int adf_ae_start(struct adf_accel_dev *accel_dev) { struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; struct adf_hw_device_data *hw_data = accel_dev->hw_device; - uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev); + u32 ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev); if (!hw_data->fw_name) return 0; @@ -139,7 +95,7 @@ int adf_ae_stop(struct adf_accel_dev *accel_dev) { struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; struct adf_hw_device_data *hw_data = accel_dev->hw_device; - uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev); + u32 ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev); if (!hw_data->fw_name) return 0; diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c index d28cba34773e..1c8ca151a963 100644 --- a/drivers/crypto/qat/qat_common/adf_admin.c +++ b/drivers/crypto/qat/qat_common/adf_admin.c @@ -1,53 +1,9 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. 
- This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/types.h> #include <linux/mutex.h> #include <linux/slab.h> -#include <linux/delay.h> +#include <linux/iopoll.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include "adf_accel_devices.h" @@ -60,6 +16,9 @@ #define ADF_DH895XCC_MAILBOX_BASE_OFFSET 0x20970 #define ADF_DH895XCC_MAILBOX_STRIDE 0x1000 #define ADF_ADMINMSG_LEN 32 +#define ADF_CONST_TABLE_SIZE 1024 +#define ADF_ADMIN_POLL_DELAY_US 20 +#define ADF_ADMIN_POLL_TIMEOUT_US (5 * USEC_PER_SEC) static const u8 const_tab[1024] __aligned(1024) = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -154,11 +113,13 @@ struct adf_admin_comms { static int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, u32 ae, void *in, void *out) { + int ret; + u32 status; struct adf_admin_comms *admin = accel_dev->admin; int offset = ae * ADF_ADMINMSG_LEN * 2; void __iomem *mailbox = admin->mailbox_addr; int mb_offset = ae * ADF_DH895XCC_MAILBOX_STRIDE; - int times, received; + struct icp_qat_fw_init_admin_req *request = in; mutex_lock(&admin->lock); @@ -169,46 +130,71 @@ static int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, u32 ae, memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN); ADF_CSR_WR(mailbox, mb_offset, 1); - received = 0; - for (times = 0; times < 50; times++) { - msleep(20); - if (ADF_CSR_RD(mailbox, mb_offset) == 0) { - received = 1; - break; - } - } - if (received) + + ret = readl_poll_timeout(mailbox + mb_offset, status, + status == 0, ADF_ADMIN_POLL_DELAY_US, + ADF_ADMIN_POLL_TIMEOUT_US); + if (ret < 0) { + /* Response timeout */ + dev_err(&GET_DEV(accel_dev), + "Failed to send admin msg %d to accelerator %d\n", + request->cmd_id, ae); + } else { + /* Response received from admin message, we can now + * make response data available in "out" parameter. + */ memcpy(out, admin->virt_addr + offset + ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN); - else - dev_err(&GET_DEV(accel_dev), - "Failed to send admin msg to accelerator\n"); + } mutex_unlock(&admin->lock); - return received ? 
0 : -EFAULT; + return ret; +} + +static int adf_send_admin(struct adf_accel_dev *accel_dev, + struct icp_qat_fw_init_admin_req *req, + struct icp_qat_fw_init_admin_resp *resp, + const unsigned long ae_mask) +{ + u32 ae; + + for_each_set_bit(ae, &ae_mask, ICP_QAT_HW_AE_DELIMITER) + if (adf_put_admin_msg_sync(accel_dev, ae, req, resp) || + resp->status) + return -EFAULT; + + return 0; } -static int adf_send_admin_cmd(struct adf_accel_dev *accel_dev, int cmd) +static int adf_init_me(struct adf_accel_dev *accel_dev) { + struct icp_qat_fw_init_admin_req req; + struct icp_qat_fw_init_admin_resp resp; struct adf_hw_device_data *hw_device = accel_dev->hw_device; + u32 ae_mask = hw_device->ae_mask; + + memset(&req, 0, sizeof(req)); + memset(&resp, 0, sizeof(resp)); + req.cmd_id = ICP_QAT_FW_INIT_ME; + + return adf_send_admin(accel_dev, &req, &resp, ae_mask); +} + +static int adf_set_fw_constants(struct adf_accel_dev *accel_dev) +{ struct icp_qat_fw_init_admin_req req; struct icp_qat_fw_init_admin_resp resp; - int i; + struct adf_hw_device_data *hw_device = accel_dev->hw_device; + u32 ae_mask = hw_device->ae_mask; - memset(&req, 0, sizeof(struct icp_qat_fw_init_admin_req)); - req.init_admin_cmd_id = cmd; + memset(&req, 0, sizeof(req)); + memset(&resp, 0, sizeof(resp)); + req.cmd_id = ICP_QAT_FW_CONSTANTS_CFG; - if (cmd == ICP_QAT_FW_CONSTANTS_CFG) { - req.init_cfg_sz = 1024; - req.init_cfg_ptr = accel_dev->admin->const_tbl_addr; - } - for (i = 0; i < hw_device->get_num_aes(hw_device); i++) { - memset(&resp, 0, sizeof(struct icp_qat_fw_init_admin_resp)); - if (adf_put_admin_msg_sync(accel_dev, i, &req, &resp) || - resp.init_resp_hdr.status) - return -EFAULT; - } - return 0; + req.init_cfg_sz = ADF_CONST_TABLE_SIZE; + req.init_cfg_ptr = accel_dev->admin->const_tbl_addr; + + return adf_send_admin(accel_dev, &req, &resp, ae_mask); } /** @@ -221,11 +207,13 @@ static int adf_send_admin_cmd(struct adf_accel_dev *accel_dev, int cmd) */ int adf_send_admin_init(struct adf_accel_dev *accel_dev) { - int ret = adf_send_admin_cmd(accel_dev, ICP_QAT_FW_INIT_ME); + int ret; + ret = adf_init_me(accel_dev); if (ret) return ret; - return adf_send_admin_cmd(accel_dev, ICP_QAT_FW_CONSTANTS_CFG); + + return adf_set_fw_constants(accel_dev); } EXPORT_SYMBOL_GPL(adf_send_admin_init); diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c index f5e960d23a7a..32102e27e559 100644 --- a/drivers/crypto/qat/qat_common/adf_aer.c +++ b/drivers/crypto/qat/qat_common/adf_aer.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/aer.h> @@ -86,7 +42,7 @@ void adf_reset_sbr(struct adf_accel_dev *accel_dev) { struct pci_dev *pdev = accel_to_pci_dev(accel_dev); struct pci_dev *parent = pdev->bus->self; - uint16_t bridge_ctl = 0; + u16 bridge_ctl = 0; if (!parent) parent = pdev; diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c index 5c7fdb0fc53d..ac462796cefc 100644 --- a/drivers/crypto/qat/qat_common/adf_cfg.c +++ b/drivers/crypto/qat/qat_common/adf_cfg.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/mutex.h> #include <linux/slab.h> #include <linux/list.h> diff --git a/drivers/crypto/qat/qat_common/adf_cfg.h b/drivers/crypto/qat/qat_common/adf_cfg.h index 6a9c6f6b5ec9..376cde61a60e 100644 --- a/drivers/crypto/qat/qat_common/adf_cfg.h +++ b/drivers/crypto/qat/qat_common/adf_cfg.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef ADF_CFG_H_ #define ADF_CFG_H_ diff --git a/drivers/crypto/qat/qat_common/adf_cfg_common.h b/drivers/crypto/qat/qat_common/adf_cfg_common.h index 1211261de7c2..1ef46ccfba47 100644 --- a/drivers/crypto/qat/qat_common/adf_cfg_common.h +++ b/drivers/crypto/qat/qat_common/adf_cfg_common.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef ADF_CFG_COMMON_H_ #define ADF_CFG_COMMON_H_ @@ -81,16 +37,16 @@ enum adf_device_type { struct adf_dev_status_info { enum adf_device_type type; - u32 accel_id; - u32 instance_id; - uint8_t num_ae; - uint8_t num_accel; - uint8_t num_logical_accel; - uint8_t banks_per_accel; - uint8_t state; - uint8_t bus; - uint8_t dev; - uint8_t fun; + __u32 accel_id; + __u32 instance_id; + __u8 num_ae; + __u8 num_accel; + __u8 num_logical_accel; + __u8 banks_per_accel; + __u8 state; + __u8 bus; + __u8 dev; + __u8 fun; char name[MAX_DEVICE_NAME_SIZE]; }; @@ -101,6 +57,6 @@ struct adf_dev_status_info { struct adf_user_cfg_ctl_data) #define IOCTL_START_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 2, \ struct adf_user_cfg_ctl_data) -#define IOCTL_STATUS_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 3, uint32_t) -#define IOCTL_GET_NUM_DEVICES _IOW(ADF_CTL_IOC_MAGIC, 4, int32_t) +#define IOCTL_STATUS_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 3, __u32) +#define IOCTL_GET_NUM_DEVICES _IOW(ADF_CTL_IOC_MAGIC, 4, __s32) #endif diff --git a/drivers/crypto/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/qat/qat_common/adf_cfg_strings.h index 7632ed0f25c5..314790f5b0af 100644 --- a/drivers/crypto/qat/qat_common/adf_cfg_strings.h +++ b/drivers/crypto/qat/qat_common/adf_cfg_strings.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef ADF_CFG_STRINGS_H_ #define ADF_CFG_STRINGS_H_ diff --git a/drivers/crypto/qat/qat_common/adf_cfg_user.h b/drivers/crypto/qat/qat_common/adf_cfg_user.h index b5484bfa6996..421f4fb8b4dd 100644 --- a/drivers/crypto/qat/qat_common/adf_cfg_user.h +++ b/drivers/crypto/qat/qat_common/adf_cfg_user.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef ADF_CFG_USER_H_ #define ADF_CFG_USER_H_ @@ -55,7 +11,7 @@ struct adf_user_cfg_key_val { char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; union { struct adf_user_cfg_key_val *next; - uint64_t padding3; + __u64 padding3; }; enum adf_cfg_val_type type; } __packed; @@ -64,19 +20,19 @@ struct adf_user_cfg_section { char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES]; union { struct adf_user_cfg_key_val *params; - uint64_t padding1; + __u64 padding1; }; union { struct adf_user_cfg_section *next; - uint64_t padding3; + __u64 padding3; }; } __packed; struct adf_user_cfg_ctl_data { union { struct adf_user_cfg_section *config_section; - uint64_t padding; + __u64 padding; }; - uint8_t device_id; + __u8 device_id; } __packed; #endif diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h index d78f8d5c89c3..ebfcb4ea618d 100644 --- a/drivers/crypto/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/qat/qat_common/adf_common_drv.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef ADF_DRV_H #define ADF_DRV_H @@ -123,11 +79,11 @@ int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev, void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev, struct adf_accel_dev *pf); struct list_head *adf_devmgr_get_head(void); -struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id); +struct adf_accel_dev *adf_devmgr_get_dev_by_id(u32 id); struct adf_accel_dev *adf_devmgr_get_first(void); struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev); -int adf_devmgr_verify_id(uint32_t id); -void adf_devmgr_get_num_dev(uint32_t *num); +int adf_devmgr_verify_id(u32 id); +void adf_devmgr_get_num_dev(u32 *num); int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev); int adf_dev_started(struct adf_accel_dev *accel_dev); int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev); @@ -198,7 +154,7 @@ void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int ctx_mask, unsigned int upc); void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int uaddr, - unsigned int words_num, uint64_t *uword); + unsigned int words_num, u64 *uword); void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int uword_addr, unsigned int words_num, unsigned int *data); @@ -233,9 +189,9 @@ int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle, int adf_sriov_configure(struct pci_dev *pdev, int numvfs); void adf_disable_sriov(struct adf_accel_dev *accel_dev); void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, - uint32_t vf_mask); + u32 vf_mask); void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, - uint32_t vf_mask); + u32 vf_mask); void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev); void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev); diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c index ef0e482ee04f..71d0c44aacca 100644 --- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c +++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. 
- * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/module.h> #include <linux/mutex.h> #include <linux/slab.h> @@ -270,7 +226,7 @@ static int adf_ctl_is_device_in_use(int id) return 0; } -static void adf_ctl_stop_devices(uint32_t id) +static void adf_ctl_stop_devices(u32 id) { struct adf_accel_dev *accel_dev; @@ -374,7 +330,7 @@ out: static int adf_ctl_ioctl_get_num_devices(struct file *fp, unsigned int cmd, unsigned long arg) { - uint32_t num_devices = 0; + u32 num_devices = 0; adf_devmgr_get_num_dev(&num_devices); if (copy_to_user((void __user *)arg, &num_devices, sizeof(num_devices))) diff --git a/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/qat/qat_common/adf_dev_mgr.c index 2d06409bd3c4..72753af056b3 100644 --- a/drivers/crypto/qat/qat_common/adf_dev_mgr.c +++ b/drivers/crypto/qat/qat_common/adf_dev_mgr.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. 
- - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/mutex.h> #include <linux/list.h> #include "adf_cfg.h" @@ -52,7 +8,7 @@ static LIST_HEAD(accel_table); static LIST_HEAD(vfs_table); static DEFINE_MUTEX(table_lock); -static uint32_t num_devices; +static u32 num_devices; static u8 id_map[ADF_MAX_DEVICES]; struct vf_id_map { @@ -355,7 +311,7 @@ struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev) } EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev); -struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id) +struct adf_accel_dev *adf_devmgr_get_dev_by_id(u32 id) { struct list_head *itr; int real_id; @@ -380,7 +336,7 @@ unlock: return NULL; } -int adf_devmgr_verify_id(uint32_t id) +int adf_devmgr_verify_id(u32 id) { if (id == ADF_CFG_ALL_DEVICES) return 0; @@ -407,7 +363,7 @@ static int adf_get_num_dettached_vfs(void) return vfs; } -void adf_devmgr_get_num_dev(uint32_t *num) +void adf_devmgr_get_num_dev(u32 *num) { *num = num_devices - adf_get_num_dettached_vfs(); } diff --git a/drivers/crypto/qat/qat_common/adf_hw_arbiter.c b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c index d7dd18d9bef8..d4162783f970 100644 --- a/drivers/crypto/qat/qat_common/adf_hw_arbiter.c +++ b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. 
- - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "adf_transport_internal.h" diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c index 26556c713049..42029153408e 100644 --- a/drivers/crypto/qat/qat_common/adf_init.c +++ b/drivers/crypto/qat/qat_common/adf_init.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/mutex.h> #include <linux/list.h> #include <linux/bitops.h> diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c index cd1cdf5305bc..36136f7db509 100644 --- a/drivers/crypto/qat/qat_common/adf_isr.c +++ b/drivers/crypto/qat/qat_common/adf_isr.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c index b3875fdf6cd7..519fd5acf713 100644 --- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c +++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c @@ -1,50 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2015 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. 
- - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2015 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ - +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2015 - 2020 Intel Corporation */ #include <linux/delay.h> #include "adf_accel_devices.h" #include "adf_common_drv.h" diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h index 5acd531a11ff..0690c031bfce 100644 --- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h +++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2015 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2015 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. 
- * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2015 - 2020 Intel Corporation */ #ifndef ADF_PF2VF_MSG_H #define ADF_PF2VF_MSG_H diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c index b36d8653b1ba..8827aa139f96 100644 --- a/drivers/crypto/qat/qat_common/adf_sriov.c +++ b/drivers/crypto/qat/qat_common/adf_sriov.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2015 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2015 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2015 - 2020 Intel Corporation */ #include <linux/workqueue.h> #include <linux/pci.h> #include <linux/device.h> diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c index 2136cbe4bf6c..2ad774017200 100644 --- a/drivers/crypto/qat/qat_common/adf_transport.c +++ b/drivers/crypto/qat/qat_common/adf_transport.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/delay.h> #include "adf_accel_devices.h" #include "adf_transport_internal.h" @@ -51,22 +7,22 @@ #include "adf_cfg.h" #include "adf_common_drv.h" -static inline uint32_t adf_modulo(uint32_t data, uint32_t shift) +static inline u32 adf_modulo(u32 data, u32 shift) { - uint32_t div = data >> shift; - uint32_t mult = div << shift; + u32 div = data >> shift; + u32 mult = div << shift; return data - mult; } -static inline int adf_check_ring_alignment(uint64_t addr, uint64_t size) +static inline int adf_check_ring_alignment(u64 addr, u64 size) { if (((size - 1) & addr) != 0) return -EFAULT; return 0; } -static int adf_verify_ring_size(uint32_t msg_size, uint32_t msg_num) +static int adf_verify_ring_size(u32 msg_size, u32 msg_num) { int i = ADF_MIN_RING_SIZE; @@ -77,7 +33,7 @@ static int adf_verify_ring_size(uint32_t msg_size, uint32_t msg_num) return ADF_DEFAULT_RING_SIZE; } -static int adf_reserve_ring(struct adf_etr_bank_data *bank, uint32_t ring) +static int adf_reserve_ring(struct adf_etr_bank_data *bank, u32 ring) { spin_lock(&bank->lock); if (bank->ring_mask & (1 << ring)) { @@ -89,14 +45,14 @@ static int adf_reserve_ring(struct adf_etr_bank_data *bank, uint32_t ring) return 0; } -static void adf_unreserve_ring(struct adf_etr_bank_data *bank, uint32_t ring) +static void adf_unreserve_ring(struct adf_etr_bank_data *bank, u32 ring) { spin_lock(&bank->lock); bank->ring_mask &= ~(1 << ring); spin_unlock(&bank->lock); } -static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring) +static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, u32 ring) { spin_lock_bh(&bank->lock); bank->irq_mask |= (1 << ring); @@ -106,7 +62,7 @@ static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring) bank->irq_coalesc_timer); } -static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring) +static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, u32 ring) { spin_lock_bh(&bank->lock); bank->irq_mask &= ~(1 << ring); @@ -114,7 +70,7 @@ static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring) WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask); } -int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg) +int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg) { if (atomic_add_return(1, ring->inflights) > ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size)) { @@ -136,18 +92,18 @@ int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg) static int adf_handle_response(struct adf_etr_ring_data *ring) { - uint32_t msg_counter = 0; - uint32_t *msg = (uint32_t *)((uintptr_t)ring->base_addr + ring->head); + u32 msg_counter = 0; + u32 *msg = (u32 *)((uintptr_t)ring->base_addr + ring->head); while (*msg != ADF_RING_EMPTY_SIG) { - ring->callback((uint32_t *)msg); + ring->callback((u32 *)msg); atomic_dec(ring->inflights); *msg = ADF_RING_EMPTY_SIG; ring->head = adf_modulo(ring->head + ADF_MSG_SIZE_TO_BYTES(ring->msg_size), ADF_RING_SIZE_MODULO(ring->ring_size)); msg_counter++; - msg = (uint32_t *)((uintptr_t)ring->base_addr + ring->head); + msg = (u32 *)((uintptr_t)ring->base_addr + ring->head); } if (msg_counter > 0) WRITE_CSR_RING_HEAD(ring->bank->csr_addr, @@ -158,7 +114,7 @@ static int adf_handle_response(struct adf_etr_ring_data *ring) static void adf_configure_tx_ring(struct adf_etr_ring_data *ring) { - uint32_t ring_config = 
BUILD_RING_CONFIG(ring->ring_size); + u32 ring_config = BUILD_RING_CONFIG(ring->ring_size); WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number, ring->ring_number, ring_config); @@ -166,7 +122,7 @@ static void adf_configure_tx_ring(struct adf_etr_ring_data *ring) static void adf_configure_rx_ring(struct adf_etr_ring_data *ring) { - uint32_t ring_config = + u32 ring_config = BUILD_RESP_RING_CONFIG(ring->ring_size, ADF_RING_NEAR_WATERMARK_512, ADF_RING_NEAR_WATERMARK_0); @@ -180,8 +136,8 @@ static int adf_init_ring(struct adf_etr_ring_data *ring) struct adf_etr_bank_data *bank = ring->bank; struct adf_accel_dev *accel_dev = bank->accel_dev; struct adf_hw_device_data *hw_data = accel_dev->hw_device; - uint64_t ring_base; - uint32_t ring_size_bytes = + u64 ring_base; + u32 ring_size_bytes = ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size); ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes); @@ -215,7 +171,7 @@ static int adf_init_ring(struct adf_etr_ring_data *ring) static void adf_cleanup_ring(struct adf_etr_ring_data *ring) { - uint32_t ring_size_bytes = + u32 ring_size_bytes = ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size); ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes); @@ -228,8 +184,8 @@ static void adf_cleanup_ring(struct adf_etr_ring_data *ring) } int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section, - uint32_t bank_num, uint32_t num_msgs, - uint32_t msg_size, const char *ring_name, + u32 bank_num, u32 num_msgs, + u32 msg_size, const char *ring_name, adf_callback_fn callback, int poll_mode, struct adf_etr_ring_data **ring_ptr) { @@ -237,7 +193,7 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section, struct adf_etr_bank_data *bank; struct adf_etr_ring_data *ring; char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; - uint32_t ring_num; + u32 ring_num; int ret; if (bank_num >= GET_MAX_BANKS(accel_dev)) { @@ -330,7 +286,7 @@ void adf_remove_ring(struct adf_etr_ring_data *ring) static void adf_ring_response_handler(struct adf_etr_bank_data *bank) { - uint32_t empty_rings, i; + u32 empty_rings, i; empty_rings = READ_CSR_E_STAT(bank->csr_addr, bank->bank_number); empty_rings = ~empty_rings & bank->irq_mask; @@ -353,7 +309,7 @@ void adf_response_handler(uintptr_t bank_addr) static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev, const char *section, const char *format, - uint32_t key, uint32_t *value) + u32 key, u32 *value) { char key_buf[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; char val_buf[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; @@ -370,7 +326,7 @@ static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev, static void adf_get_coalesc_timer(struct adf_etr_bank_data *bank, const char *section, - uint32_t bank_num_in_accel) + u32 bank_num_in_accel) { if (adf_get_cfg_int(bank->accel_dev, section, ADF_ETRMGR_COALESCE_TIMER_FORMAT, @@ -384,12 +340,12 @@ static void adf_get_coalesc_timer(struct adf_etr_bank_data *bank, static int adf_init_bank(struct adf_accel_dev *accel_dev, struct adf_etr_bank_data *bank, - uint32_t bank_num, void __iomem *csr_addr) + u32 bank_num, void __iomem *csr_addr) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct adf_etr_ring_data *ring; struct adf_etr_ring_data *tx_ring; - uint32_t i, coalesc_enabled = 0; + u32 i, coalesc_enabled = 0; memset(bank, 0, sizeof(*bank)); bank->bank_number = bank_num; @@ -461,8 +417,8 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev) struct adf_etr_data *etr_data; struct adf_hw_device_data *hw_data = accel_dev->hw_device; void __iomem *csr_addr; - 
uint32_t size; - uint32_t num_banks = 0; + u32 size; + u32 num_banks = 0; int i, ret; etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL, @@ -508,7 +464,7 @@ EXPORT_SYMBOL_GPL(adf_init_etr_data); static void cleanup_bank(struct adf_etr_bank_data *bank) { - uint32_t i; + u32 i; for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) { struct adf_accel_dev *accel_dev = bank->accel_dev; @@ -528,7 +484,7 @@ static void cleanup_bank(struct adf_etr_bank_data *bank) static void adf_cleanup_etr_handles(struct adf_accel_dev *accel_dev) { struct adf_etr_data *etr_data = accel_dev->transport; - uint32_t i, num_banks = GET_MAX_BANKS(accel_dev); + u32 i, num_banks = GET_MAX_BANKS(accel_dev); for (i = 0; i < num_banks; i++) cleanup_bank(&etr_data->banks[i]); diff --git a/drivers/crypto/qat/qat_common/adf_transport.h b/drivers/crypto/qat/qat_common/adf_transport.h index 386485bd9c95..2c95f1697c76 100644 --- a/drivers/crypto/qat/qat_common/adf_transport.h +++ b/drivers/crypto/qat/qat_common/adf_transport.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef ADF_TRANSPORT_H #define ADF_TRANSPORT_H @@ -54,10 +10,10 @@ struct adf_etr_ring_data; typedef void (*adf_callback_fn)(void *resp_msg); int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section, - uint32_t bank_num, uint32_t num_mgs, uint32_t msg_size, + u32 bank_num, u32 num_mgs, u32 msg_size, const char *ring_name, adf_callback_fn callback, int poll_mode, struct adf_etr_ring_data **ring_ptr); -int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg); +int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg); void adf_remove_ring(struct adf_etr_ring_data *ring); #endif diff --git a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h index 80e02a2a0a09..950d1988556c 100644 --- a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h +++ b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef ADF_TRANSPORT_ACCESS_MACROS_H #define ADF_TRANSPORT_ACCESS_MACROS_H @@ -132,9 +88,9 @@ ADF_RING_CSR_RING_CONFIG + (ring << 2), value) #define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \ do { \ - uint32_t l_base = 0, u_base = 0; \ - l_base = (uint32_t)(value & 0xFFFFFFFF); \ - u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \ + u32 l_base = 0, u_base = 0; \ + l_base = (u32)(value & 0xFFFFFFFF); \ + u_base = (u32)((value & 0xFFFFFFFF00000000ULL) >> 32); \ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ ADF_RING_CSR_RING_LBASE + (ring << 2), l_base); \ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ diff --git a/drivers/crypto/qat/qat_common/adf_transport_debug.c b/drivers/crypto/qat/qat_common/adf_transport_debug.c index e794e9d97b2c..2a2eccbf56ec 100644 --- a/drivers/crypto/qat/qat_common/adf_transport_debug.c +++ b/drivers/crypto/qat/qat_common/adf_transport_debug.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/mutex.h> #include <linux/slab.h> #include <linux/seq_file.h> diff --git a/drivers/crypto/qat/qat_common/adf_transport_internal.h b/drivers/crypto/qat/qat_common/adf_transport_internal.h index bb883368ac01..c7faf4e2d302 100644 --- a/drivers/crypto/qat/qat_common/adf_transport_internal.h +++ b/drivers/crypto/qat/qat_common/adf_transport_internal.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef ADF_TRANSPORT_INTRN_H #define ADF_TRANSPORT_INTRN_H @@ -59,32 +15,31 @@ struct adf_etr_ring_debug_entry { struct adf_etr_ring_data { void *base_addr; atomic_t *inflights; - spinlock_t lock; /* protects ring data struct */ adf_callback_fn callback; struct adf_etr_bank_data *bank; dma_addr_t dma_addr; - uint16_t head; - uint16_t tail; - uint8_t ring_number; - uint8_t ring_size; - uint8_t msg_size; - uint8_t reserved; struct adf_etr_ring_debug_entry *ring_debug; -} __packed; + spinlock_t lock; /* protects ring data struct */ + u16 head; + u16 tail; + u8 ring_number; + u8 ring_size; + u8 msg_size; +}; struct adf_etr_bank_data { struct adf_etr_ring_data rings[ADF_ETR_MAX_RINGS_PER_BANK]; struct tasklet_struct resp_handler; void __iomem *csr_addr; - struct adf_accel_dev *accel_dev; - uint32_t irq_coalesc_timer; - uint16_t ring_mask; - uint16_t irq_mask; + u32 irq_coalesc_timer; + u32 bank_number; + u16 ring_mask; + u16 irq_mask; spinlock_t lock; /* protects bank data struct */ + struct adf_accel_dev *accel_dev; struct dentry *bank_debug_dir; struct dentry *bank_debug_cfg; - uint32_t bank_number; -} __packed; +}; struct adf_etr_data { struct adf_etr_bank_data *banks; diff --git a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c index cd5f37dffe8a..2c98fb63f7b7 100644 --- a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c +++ b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2015 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2015 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2015 - 2020 Intel Corporation */ #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "adf_pf2vf_msg.h" diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c index 4a73fc70f7a9..c4a44dc6af3e 100644 --- a/drivers/crypto/qat/qat_common/adf_vf_isr.c +++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw.h b/drivers/crypto/qat/qat_common/icp_qat_fw.h index 46747f01b1d1..6dc09d270082 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_fw.h +++ b/drivers/crypto/qat/qat_common/icp_qat_fw.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef _ICP_QAT_FW_H_ #define _ICP_QAT_FW_H_ #include <linux/types.h> @@ -89,41 +45,41 @@ enum icp_qat_fw_comn_request_id { struct icp_qat_fw_comn_req_hdr_cd_pars { union { struct { - uint64_t content_desc_addr; - uint16_t content_desc_resrvd1; - uint8_t content_desc_params_sz; - uint8_t content_desc_hdr_resrvd2; - uint32_t content_desc_resrvd3; + __u64 content_desc_addr; + __u16 content_desc_resrvd1; + __u8 content_desc_params_sz; + __u8 content_desc_hdr_resrvd2; + __u32 content_desc_resrvd3; } s; struct { - uint32_t serv_specif_fields[4]; + __u32 serv_specif_fields[4]; } s1; } u; }; struct icp_qat_fw_comn_req_mid { - uint64_t opaque_data; - uint64_t src_data_addr; - uint64_t dest_data_addr; - uint32_t src_length; - uint32_t dst_length; + __u64 opaque_data; + __u64 src_data_addr; + __u64 dest_data_addr; + __u32 src_length; + __u32 dst_length; }; struct icp_qat_fw_comn_req_cd_ctrl { - uint32_t content_desc_ctrl_lw[ICP_QAT_FW_NUM_LONGWORDS_5]; + __u32 content_desc_ctrl_lw[ICP_QAT_FW_NUM_LONGWORDS_5]; }; struct icp_qat_fw_comn_req_hdr { - uint8_t resrvd1; - uint8_t service_cmd_id; - uint8_t service_type; - uint8_t hdr_flags; - uint16_t serv_specif_flags; - uint16_t comn_req_flags; + __u8 resrvd1; + __u8 service_cmd_id; + __u8 service_type; + __u8 hdr_flags; + __u16 serv_specif_flags; + __u16 comn_req_flags; }; struct icp_qat_fw_comn_req_rqpars { - uint32_t serv_specif_rqpars_lw[ICP_QAT_FW_NUM_LONGWORDS_13]; + __u32 serv_specif_rqpars_lw[ICP_QAT_FW_NUM_LONGWORDS_13]; }; struct icp_qat_fw_comn_req { @@ -135,24 +91,24 @@ struct icp_qat_fw_comn_req { }; struct icp_qat_fw_comn_error { - uint8_t xlat_err_code; - uint8_t cmp_err_code; + __u8 xlat_err_code; + __u8 cmp_err_code; }; struct icp_qat_fw_comn_resp_hdr { - uint8_t resrvd1; - uint8_t service_id; - uint8_t response_type; - uint8_t hdr_flags; + __u8 resrvd1; + __u8 service_id; + __u8 response_type; + __u8 hdr_flags; struct icp_qat_fw_comn_error comn_error; - uint8_t comn_status; - uint8_t cmd_id; + __u8 comn_status; + __u8 cmd_id; }; struct icp_qat_fw_comn_resp { struct icp_qat_fw_comn_resp_hdr comn_hdr; - uint64_t opaque_data; - uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4]; + __u64 opaque_data; + __u32 resrvd[ICP_QAT_FW_NUM_LONGWORDS_4]; }; #define ICP_QAT_FW_COMN_REQ_FLAG_SET 1 diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h index 72a59faa9005..d4d188cd7ed0 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h +++ b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. 
- Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef _ICP_QAT_FW_INIT_ADMIN_H_ #define _ICP_QAT_FW_INIT_ADMIN_H_ @@ -67,50 +23,75 @@ enum icp_qat_fw_init_admin_resp_status { }; struct icp_qat_fw_init_admin_req { - uint16_t init_cfg_sz; - uint8_t resrvd1; - uint8_t init_admin_cmd_id; - uint32_t resrvd2; - uint64_t opaque_data; - uint64_t init_cfg_ptr; - uint64_t resrvd3; -}; - -struct icp_qat_fw_init_admin_resp_hdr { - uint8_t flags; - uint8_t resrvd1; - uint8_t status; - uint8_t init_admin_cmd_id; -}; + __u16 init_cfg_sz; + __u8 resrvd1; + __u8 cmd_id; + __u32 resrvd2; + __u64 opaque_data; + __u64 init_cfg_ptr; -struct icp_qat_fw_init_admin_resp_pars { union { - uint32_t resrvd1[ICP_QAT_FW_NUM_LONGWORDS_4]; struct { - uint32_t version_patch_num; - uint8_t context_id; - uint8_t ae_id; - uint16_t resrvd1; - uint64_t resrvd2; - } s1; - struct { - uint64_t req_rec_count; - uint64_t resp_sent_count; - } s2; - } u; + __u16 ibuf_size_in_kb; + __u16 resrvd3; + }; + __u32 idle_filter; + }; + + __u32 resrvd4; }; struct icp_qat_fw_init_admin_resp { - struct icp_qat_fw_init_admin_resp_hdr init_resp_hdr; + __u8 flags; + __u8 resrvd1; + __u8 status; + __u8 cmd_id; union { - uint32_t resrvd2; + __u32 resrvd2; + struct { + __u16 version_minor_num; + __u16 version_major_num; + }; + }; + __u64 opaque_data; + union { + __u32 resrvd3[ICP_QAT_FW_NUM_LONGWORDS_4]; + struct { + __u32 version_patch_num; + __u8 context_id; + __u8 ae_id; + __u16 resrvd4; + __u64 resrvd5; + }; + struct { + __u64 req_rec_count; + __u64 resp_sent_count; + }; + struct { + __u16 compression_algos; + __u16 checksum_algos; + __u32 deflate_capabilities; + __u32 resrvd6; + __u32 lzs_capabilities; + }; + struct { + __u32 cipher_algos; + __u32 hash_algos; + __u16 keygen_algos; + __u16 other; + __u16 public_key_algos; + __u16 prime_algos; + }; + struct { + __u64 timestamp; + __u64 resrvd7; + }; struct { - uint16_t version_minor_num; - uint16_t version_major_num; - } s; - } u; - uint64_t opaque_data; - struct 
icp_qat_fw_init_admin_resp_pars init_resp_pars; + __u32 successful_count; + __u32 unsuccessful_count; + __u64 resrvd8; + }; + }; }; #define ICP_QAT_FW_COMN_HEARTBEAT_OK 0 diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_la.h b/drivers/crypto/qat/qat_common/icp_qat_fw_la.h index c8d26697e8ea..6757ec09d81f 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_fw_la.h +++ b/drivers/crypto/qat/qat_common/icp_qat_fw_la.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef _ICP_QAT_FW_LA_H_ #define _ICP_QAT_FW_LA_H_ #include "icp_qat_fw.h" @@ -226,14 +182,14 @@ struct icp_qat_fw_la_bulk_req { struct icp_qat_fw_cipher_req_hdr_cd_pars { union { struct { - uint64_t content_desc_addr; - uint16_t content_desc_resrvd1; - uint8_t content_desc_params_sz; - uint8_t content_desc_hdr_resrvd2; - uint32_t content_desc_resrvd3; + __u64 content_desc_addr; + __u16 content_desc_resrvd1; + __u8 content_desc_params_sz; + __u8 content_desc_hdr_resrvd2; + __u32 content_desc_resrvd3; } s; struct { - uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4]; + __u32 cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4]; } s1; } u; }; @@ -241,70 +197,70 @@ struct icp_qat_fw_cipher_req_hdr_cd_pars { struct icp_qat_fw_cipher_auth_req_hdr_cd_pars { union { struct { - uint64_t content_desc_addr; - uint16_t content_desc_resrvd1; - uint8_t content_desc_params_sz; - uint8_t content_desc_hdr_resrvd2; - uint32_t content_desc_resrvd3; + __u64 content_desc_addr; + __u16 content_desc_resrvd1; + __u8 content_desc_params_sz; + __u8 content_desc_hdr_resrvd2; + __u32 content_desc_resrvd3; } s; struct { - uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4]; + __u32 cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4]; } sl; } u; }; struct icp_qat_fw_cipher_cd_ctrl_hdr { - uint8_t cipher_state_sz; - uint8_t cipher_key_sz; - uint8_t cipher_cfg_offset; - uint8_t next_curr_id; - uint8_t cipher_padding_sz; - uint8_t resrvd1; - uint16_t resrvd2; - uint32_t resrvd3[ICP_QAT_FW_NUM_LONGWORDS_3]; + __u8 cipher_state_sz; + __u8 cipher_key_sz; + __u8 cipher_cfg_offset; + __u8 next_curr_id; + __u8 cipher_padding_sz; + __u8 resrvd1; + __u16 resrvd2; + __u32 resrvd3[ICP_QAT_FW_NUM_LONGWORDS_3]; }; struct icp_qat_fw_auth_cd_ctrl_hdr { - uint32_t resrvd1; - uint8_t resrvd2; - uint8_t hash_flags; - uint8_t hash_cfg_offset; - uint8_t next_curr_id; - uint8_t resrvd3; - uint8_t outer_prefix_sz; - uint8_t final_sz; - uint8_t inner_res_sz; - uint8_t resrvd4; - uint8_t inner_state1_sz; - uint8_t inner_state2_offset; - uint8_t inner_state2_sz; - uint8_t outer_config_offset; - uint8_t outer_state1_sz; - uint8_t outer_res_sz; - uint8_t outer_prefix_offset; + __u32 resrvd1; + __u8 resrvd2; + __u8 hash_flags; + __u8 hash_cfg_offset; + __u8 next_curr_id; + __u8 resrvd3; + __u8 outer_prefix_sz; + __u8 final_sz; + __u8 inner_res_sz; + __u8 resrvd4; + __u8 inner_state1_sz; + __u8 inner_state2_offset; + __u8 inner_state2_sz; + __u8 outer_config_offset; + __u8 outer_state1_sz; + __u8 outer_res_sz; + __u8 outer_prefix_offset; }; struct icp_qat_fw_cipher_auth_cd_ctrl_hdr { - uint8_t cipher_state_sz; - uint8_t cipher_key_sz; - uint8_t cipher_cfg_offset; - uint8_t next_curr_id_cipher; - uint8_t cipher_padding_sz; - uint8_t hash_flags; - uint8_t hash_cfg_offset; - uint8_t next_curr_id_auth; - uint8_t resrvd1; - uint8_t outer_prefix_sz; - uint8_t final_sz; - uint8_t inner_res_sz; - uint8_t resrvd2; - uint8_t inner_state1_sz; - uint8_t inner_state2_offset; - uint8_t inner_state2_sz; - uint8_t outer_config_offset; - uint8_t outer_state1_sz; - uint8_t outer_res_sz; - uint8_t outer_prefix_offset; + __u8 cipher_state_sz; + __u8 cipher_key_sz; + __u8 cipher_cfg_offset; + __u8 next_curr_id_cipher; + __u8 cipher_padding_sz; + __u8 hash_flags; + __u8 hash_cfg_offset; + __u8 next_curr_id_auth; + __u8 resrvd1; + __u8 outer_prefix_sz; + __u8 final_sz; + __u8 inner_res_sz; + __u8 resrvd2; + __u8 inner_state1_sz; + __u8 inner_state2_offset; + __u8 
inner_state2_sz; + __u8 outer_config_offset; + __u8 outer_state1_sz; + __u8 outer_res_sz; + __u8 outer_prefix_offset; }; #define ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED 1 @@ -315,48 +271,48 @@ struct icp_qat_fw_cipher_auth_cd_ctrl_hdr { #define ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET (0) struct icp_qat_fw_la_cipher_req_params { - uint32_t cipher_offset; - uint32_t cipher_length; + __u32 cipher_offset; + __u32 cipher_length; union { - uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4]; + __u32 cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4]; struct { - uint64_t cipher_IV_ptr; - uint64_t resrvd1; + __u64 cipher_IV_ptr; + __u64 resrvd1; } s; } u; }; struct icp_qat_fw_la_auth_req_params { - uint32_t auth_off; - uint32_t auth_len; + __u32 auth_off; + __u32 auth_len; union { - uint64_t auth_partial_st_prefix; - uint64_t aad_adr; + __u64 auth_partial_st_prefix; + __u64 aad_adr; } u1; - uint64_t auth_res_addr; + __u64 auth_res_addr; union { - uint8_t inner_prefix_sz; - uint8_t aad_sz; + __u8 inner_prefix_sz; + __u8 aad_sz; } u2; - uint8_t resrvd1; - uint8_t hash_state_sz; - uint8_t auth_res_sz; + __u8 resrvd1; + __u8 hash_state_sz; + __u8 auth_res_sz; } __packed; struct icp_qat_fw_la_auth_req_params_resrvd_flds { - uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_6]; + __u32 resrvd[ICP_QAT_FW_NUM_LONGWORDS_6]; union { - uint8_t inner_prefix_sz; - uint8_t aad_sz; + __u8 inner_prefix_sz; + __u8 aad_sz; } u2; - uint8_t resrvd1; - uint16_t resrvd2; + __u8 resrvd1; + __u16 resrvd2; }; struct icp_qat_fw_la_resp { struct icp_qat_fw_comn_resp_hdr comn_resp; - uint64_t opaque_data; - uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4]; + __u64 opaque_data; + __u32 resrvd[ICP_QAT_FW_NUM_LONGWORDS_4]; }; #define ICP_QAT_FW_CIPHER_NEXT_ID_GET(cd_ctrl_hdr_t) \ diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h b/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h index 2ffef3e4fd68..3e8e291cd122 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h +++ b/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. 
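The conversions above replace the userspace-style uint*_t names with the kernel's fixed-width types: __u8/__u16/__u32/__u64 in the firmware interface headers, plain u8/u16/u32/u64 in the driver code later in the series. A minimal sketch of a descriptor laid out in this style, where example_fw_desc and its fields are purely illustrative and not part of this driver:

#include <linux/types.h>

/* Hypothetical descriptor, shown only to illustrate the __u* style used in
 * the converted firmware interface headers. */
struct example_fw_desc {
	__u64 src_addr;		/* DMA address of the source buffer */
	__u32 length;		/* payload length in bytes */
	__u16 flags;		/* request flags */
	__u8  ring_id;		/* destination ring */
	__u8  resrvd;		/* keeps the layout 8-byte aligned */
};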
- - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef __ICP_QAT_FW_LOADER_HANDLE_H__ #define __ICP_QAT_FW_LOADER_HANDLE_H__ #include "icp_qat_uclo.h" diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h b/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h index 0d7a9b51ce9f..9dddae0009fc 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h +++ b/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h @@ -1,100 +1,56 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef _ICP_QAT_FW_PKE_ #define _ICP_QAT_FW_PKE_ #include "icp_qat_fw.h" struct icp_qat_fw_req_hdr_pke_cd_pars { - u64 content_desc_addr; - u32 content_desc_resrvd; - u32 func_id; + __u64 content_desc_addr; + __u32 content_desc_resrvd; + __u32 func_id; }; struct icp_qat_fw_req_pke_mid { - u64 opaque; - u64 src_data_addr; - u64 dest_data_addr; + __u64 opaque; + __u64 src_data_addr; + __u64 dest_data_addr; }; struct icp_qat_fw_req_pke_hdr { - u8 resrvd1; - u8 resrvd2; - u8 service_type; - u8 hdr_flags; - u16 comn_req_flags; - u16 resrvd4; + __u8 resrvd1; + __u8 resrvd2; + __u8 service_type; + __u8 hdr_flags; + __u16 comn_req_flags; + __u16 resrvd4; struct icp_qat_fw_req_hdr_pke_cd_pars cd_pars; }; struct icp_qat_fw_pke_request { struct icp_qat_fw_req_pke_hdr pke_hdr; struct icp_qat_fw_req_pke_mid pke_mid; - u8 output_param_count; - u8 input_param_count; - u16 resrvd1; - u32 resrvd2; - u64 next_req_adr; + __u8 output_param_count; + __u8 input_param_count; + __u16 resrvd1; + __u32 resrvd2; + __u64 next_req_adr; }; struct icp_qat_fw_resp_pke_hdr { - u8 resrvd1; - u8 resrvd2; - u8 response_type; - u8 hdr_flags; - u16 comn_resp_flags; - u16 resrvd4; + __u8 resrvd1; + __u8 resrvd2; + __u8 response_type; + __u8 hdr_flags; + __u16 comn_resp_flags; + __u16 resrvd4; }; struct icp_qat_fw_pke_resp { struct icp_qat_fw_resp_pke_hdr pke_resp_hdr; - u64 opaque; - u64 src_data_addr; - u64 dest_data_addr; + __u64 opaque; + __u64 src_data_addr; + __u64 dest_data_addr; }; #define ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS 7 diff --git a/drivers/crypto/qat/qat_common/icp_qat_hal.h b/drivers/crypto/qat/qat_common/icp_qat_hal.h index 7187917533d0..c0e9fc0c93dd 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_hal.h +++ b/drivers/crypto/qat/qat_common/icp_qat_hal.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. 
- - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef __ICP_QAT_HAL_H #define __ICP_QAT_HAL_H #include "icp_qat_fw_loader_handle.h" diff --git a/drivers/crypto/qat/qat_common/icp_qat_hw.h b/drivers/crypto/qat/qat_common/icp_qat_hw.h index 121d5e6e46ca..c4b6ef1506ab 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_hw.h +++ b/drivers/crypto/qat/qat_common/icp_qat_hw.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef _ICP_QAT_HW_H_ #define _ICP_QAT_HW_H_ @@ -105,8 +61,8 @@ enum icp_qat_hw_auth_mode { }; struct icp_qat_hw_auth_config { - uint32_t config; - uint32_t reserved; + __u32 config; + __u32 reserved; }; #define QAT_AUTH_MODE_BITPOS 4 @@ -131,7 +87,7 @@ struct icp_qat_hw_auth_config { struct icp_qat_hw_auth_counter { __be32 counter; - uint32_t reserved; + __u32 reserved; }; #define QAT_AUTH_COUNT_MASK 0xFFFFFFFF @@ -191,9 +147,9 @@ struct icp_qat_hw_auth_setup { struct icp_qat_hw_auth_sha512 { struct icp_qat_hw_auth_setup inner_setup; - uint8_t state1[ICP_QAT_HW_SHA512_STATE1_SZ]; + __u8 state1[ICP_QAT_HW_SHA512_STATE1_SZ]; struct icp_qat_hw_auth_setup outer_setup; - uint8_t state2[ICP_QAT_HW_SHA512_STATE2_SZ]; + __u8 state2[ICP_QAT_HW_SHA512_STATE2_SZ]; }; struct icp_qat_hw_auth_algo_blk { @@ -227,8 +183,8 @@ enum icp_qat_hw_cipher_mode { }; struct icp_qat_hw_cipher_config { - uint32_t val; - uint32_t reserved; + __u32 val; + __u32 reserved; }; enum icp_qat_hw_cipher_dir { @@ -296,7 +252,7 @@ enum icp_qat_hw_cipher_convert { struct icp_qat_hw_cipher_aes256_f8 { struct icp_qat_hw_cipher_config cipher_config; - uint8_t key[ICP_QAT_HW_AES_256_F8_KEY_SZ]; + __u8 key[ICP_QAT_HW_AES_256_F8_KEY_SZ]; }; struct icp_qat_hw_cipher_algo_blk { diff --git a/drivers/crypto/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/qat/qat_common/icp_qat_uclo.h index 5d1ee7e53492..8fe1ec344fa2 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_uclo.h +++ b/drivers/crypto/qat/qat_common/icp_qat_uclo.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef __ICP_QAT_UCLO_H__ #define __ICP_QAT_UCLO_H__ @@ -176,7 +132,7 @@ struct icp_qat_uof_encap_obj { struct icp_qat_uclo_encap_uwblock { unsigned int start_addr; unsigned int words_num; - uint64_t micro_words; + u64 micro_words; }; struct icp_qat_uclo_encap_page { @@ -215,7 +171,7 @@ struct icp_qat_uclo_objhdr { struct icp_qat_uof_strtable { unsigned int table_len; unsigned int reserved; - uint64_t strings; + u64 strings; }; struct icp_qat_uclo_objhandle { @@ -235,7 +191,7 @@ struct icp_qat_uclo_objhandle { unsigned int ae_num; unsigned int ustore_phy_size; void *obj_buf; - uint64_t *uword_buf; + u64 *uword_buf; }; struct icp_qat_uof_uword_block { diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index e14d3dd291f0..72753b84dc95 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/module.h> #include <linux/slab.h> #include <linux/crypto.h> @@ -55,6 +11,7 @@ #include <crypto/hmac.h> #include <crypto/algapi.h> #include <crypto/authenc.h> +#include <crypto/xts.h> #include <linux/dma-mapping.h> #include "adf_accel_devices.h" #include "adf_transport.h" @@ -78,15 +35,15 @@ static DEFINE_MUTEX(algs_lock); static unsigned int active_devs; struct qat_alg_buf { - uint32_t len; - uint32_t resrvd; - uint64_t addr; + u32 len; + u32 resrvd; + u64 addr; } __packed; struct qat_alg_buf_list { - uint64_t resrvd; - uint32_t num_bufs; - uint32_t num_mapped_bufs; + u64 resrvd; + u32 num_bufs; + u32 num_mapped_bufs; struct qat_alg_buf bufers[]; } __packed __aligned(64); @@ -131,7 +88,8 @@ struct qat_alg_skcipher_ctx { struct icp_qat_fw_la_bulk_req enc_fw_req; struct icp_qat_fw_la_bulk_req dec_fw_req; struct qat_crypto_instance *inst; - struct crypto_skcipher *tfm; + struct crypto_skcipher *ftfm; + bool fallback; }; static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg) @@ -151,7 +109,7 @@ static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg) static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash, struct qat_alg_aead_ctx *ctx, - const uint8_t *auth_key, + const u8 *auth_key, unsigned int auth_keylen) { SHASH_DESC_ON_STACK(shash, ctx->hash_tfm); @@ -467,7 +425,7 @@ static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm, static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx, struct icp_qat_fw_la_bulk_req *req, struct icp_qat_hw_cipher_algo_blk *cd, - const uint8_t *key, unsigned int keylen) + const u8 *key, unsigned int keylen) { struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars; struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr; @@ -487,7 +445,7 @@ static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx, } static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx, - int alg, const uint8_t *key, + int alg, const u8 *key, unsigned int keylen, int mode) { struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd; @@ -500,7 +458,7 @@ static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx, } static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx, - int alg, const uint8_t *key, + int alg, const u8 *key, unsigned int keylen, int mode) { struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd; @@ -578,7 +536,7 @@ error: } static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx, - const uint8_t *key, + const u8 *key, unsigned int keylen, int mode) { @@ -592,7 +550,7 @@ static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx, return 0; } -static int qat_alg_aead_rekey(struct crypto_aead *tfm, const uint8_t *key, +static int qat_alg_aead_rekey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) { struct qat_alg_aead_ctx *ctx = 
crypto_aead_ctx(tfm); @@ -606,7 +564,7 @@ static int qat_alg_aead_rekey(struct crypto_aead *tfm, const uint8_t *key, ICP_QAT_HW_CIPHER_CBC_MODE); } -static int qat_alg_aead_newkey(struct crypto_aead *tfm, const uint8_t *key, +static int qat_alg_aead_newkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) { struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm); @@ -658,7 +616,7 @@ out_free_inst: return ret; } -static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key, +static int qat_alg_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) { struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm); @@ -820,7 +778,7 @@ static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp, struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx; struct qat_crypto_instance *inst = ctx->inst; struct aead_request *areq = qat_req->aead_req; - uint8_t stat_filed = qat_resp->comn_resp.comn_status; + u8 stat_filed = qat_resp->comn_resp.comn_status; int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed); qat_alg_free_bufl(inst, qat_req); @@ -835,7 +793,7 @@ static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp, struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx; struct qat_crypto_instance *inst = ctx->inst; struct skcipher_request *sreq = qat_req->skcipher_req; - uint8_t stat_filed = qat_resp->comn_resp.comn_status; + u8 stat_filed = qat_resp->comn_resp.comn_status; struct device *dev = &GET_DEV(ctx->inst->accel_dev); int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed); @@ -880,18 +838,18 @@ static int qat_alg_aead_dec(struct aead_request *areq) qat_req->aead_ctx = ctx; qat_req->aead_req = areq; qat_req->cb = qat_aead_alg_callback; - qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req; + qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req; qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; cipher_param = (void *)&qat_req->req.serv_specif_rqpars; cipher_param->cipher_length = areq->cryptlen - digst_size; cipher_param->cipher_offset = areq->assoclen; memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE); - auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param)); + auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param)); auth_param->auth_off = 0; auth_param->auth_len = areq->assoclen + cipher_param->cipher_length; do { - ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); + ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg); } while (ret == -EAGAIN && ctr++ < 10); if (ret == -EAGAIN) { @@ -910,7 +868,7 @@ static int qat_alg_aead_enc(struct aead_request *areq) struct icp_qat_fw_la_cipher_req_params *cipher_param; struct icp_qat_fw_la_auth_req_params *auth_param; struct icp_qat_fw_la_bulk_req *msg; - uint8_t *iv = areq->iv; + u8 *iv = areq->iv; int ret, ctr = 0; ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req); @@ -922,11 +880,11 @@ static int qat_alg_aead_enc(struct aead_request *areq) qat_req->aead_ctx = ctx; qat_req->aead_req = areq; qat_req->cb = qat_aead_alg_callback; - qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req; + qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req; qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; cipher_param = (void *)&qat_req->req.serv_specif_rqpars; - auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param)); + 
auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param)); memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE); cipher_param->cipher_length = areq->cryptlen; @@ -936,7 +894,7 @@ static int qat_alg_aead_enc(struct aead_request *areq) auth_param->auth_len = areq->assoclen + areq->cryptlen; do { - ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); + ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg); } while (ret == -EAGAIN && ctr++ < 10); if (ret == -EAGAIN) { @@ -1038,6 +996,25 @@ static int qat_alg_skcipher_ctr_setkey(struct crypto_skcipher *tfm, static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { + struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); + int ret; + + ret = xts_verify_key(tfm, key, keylen); + if (ret) + return ret; + + if (keylen >> 1 == AES_KEYSIZE_192) { + ret = crypto_skcipher_setkey(ctx->ftfm, key, keylen); + if (ret) + return ret; + + ctx->fallback = true; + + return 0; + } + + ctx->fallback = false; + return qat_alg_skcipher_setkey(tfm, key, keylen, ICP_QAT_HW_CIPHER_XTS_MODE); } @@ -1073,7 +1050,7 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req) qat_req->skcipher_ctx = ctx; qat_req->skcipher_req = req; qat_req->cb = qat_skcipher_alg_callback; - qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req; + qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req; qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; cipher_param = (void *)&qat_req->req.serv_specif_rqpars; @@ -1082,7 +1059,7 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req) cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr; memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE); do { - ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); + ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg); } while (ret == -EAGAIN && ctr++ < 10); if (ret == -EAGAIN) { @@ -1102,6 +1079,24 @@ static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req) return qat_alg_skcipher_encrypt(req); } +static int qat_alg_skcipher_xts_encrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req); + struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm); + struct skcipher_request *nreq = skcipher_request_ctx(req); + + if (req->cryptlen < XTS_BLOCK_SIZE) + return -EINVAL; + + if (ctx->fallback) { + memcpy(nreq, req, sizeof(*req)); + skcipher_request_set_tfm(nreq, ctx->ftfm); + return crypto_skcipher_encrypt(nreq); + } + + return qat_alg_skcipher_encrypt(req); +} + static int qat_alg_skcipher_decrypt(struct skcipher_request *req) { struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req); @@ -1133,7 +1128,7 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req) qat_req->skcipher_ctx = ctx; qat_req->skcipher_req = req; qat_req->cb = qat_skcipher_alg_callback; - qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req; + qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req; qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; cipher_param = (void *)&qat_req->req.serv_specif_rqpars; @@ -1142,7 +1137,7 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req) cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr; memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE); do { - ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); + ret = adf_send_message(ctx->inst->sym_tx, (u32 
*)msg); } while (ret == -EAGAIN && ctr++ < 10); if (ret == -EAGAIN) { @@ -1161,6 +1156,25 @@ static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req) return qat_alg_skcipher_decrypt(req); } + +static int qat_alg_skcipher_xts_decrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req); + struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm); + struct skcipher_request *nreq = skcipher_request_ctx(req); + + if (req->cryptlen < XTS_BLOCK_SIZE) + return -EINVAL; + + if (ctx->fallback) { + memcpy(nreq, req, sizeof(*req)); + skcipher_request_set_tfm(nreq, ctx->ftfm); + return crypto_skcipher_decrypt(nreq); + } + + return qat_alg_skcipher_decrypt(req); +} + static int qat_alg_aead_init(struct crypto_aead *tfm, enum icp_qat_hw_auth_algo hash, const char *hash_name) @@ -1217,10 +1231,25 @@ static void qat_alg_aead_exit(struct crypto_aead *tfm) static int qat_alg_skcipher_init_tfm(struct crypto_skcipher *tfm) { + crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request)); + return 0; +} + +static int qat_alg_skcipher_init_xts_tfm(struct crypto_skcipher *tfm) +{ struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); + int reqsize; + + ctx->ftfm = crypto_alloc_skcipher("xts(aes)", 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->ftfm)) + return PTR_ERR(ctx->ftfm); + + reqsize = max(sizeof(struct qat_crypto_request), + sizeof(struct skcipher_request) + + crypto_skcipher_reqsize(ctx->ftfm)); + crypto_skcipher_set_reqsize(tfm, reqsize); - crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request)); - ctx->tfm = tfm; return 0; } @@ -1251,13 +1280,22 @@ static void qat_alg_skcipher_exit_tfm(struct crypto_skcipher *tfm) qat_crypto_put_instance(inst); } +static void qat_alg_skcipher_exit_xts_tfm(struct crypto_skcipher *tfm) +{ + struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); + + if (ctx->ftfm) + crypto_free_skcipher(ctx->ftfm); + + qat_alg_skcipher_exit_tfm(tfm); +} static struct aead_alg qat_aeads[] = { { .base = { .cra_name = "authenc(hmac(sha1),cbc(aes))", .cra_driver_name = "qat_aes_cbc_hmac_sha1", .cra_priority = 4001, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct qat_alg_aead_ctx), .cra_module = THIS_MODULE, @@ -1274,7 +1312,7 @@ static struct aead_alg qat_aeads[] = { { .cra_name = "authenc(hmac(sha256),cbc(aes))", .cra_driver_name = "qat_aes_cbc_hmac_sha256", .cra_priority = 4001, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct qat_alg_aead_ctx), .cra_module = THIS_MODULE, @@ -1291,7 +1329,7 @@ static struct aead_alg qat_aeads[] = { { .cra_name = "authenc(hmac(sha512),cbc(aes))", .cra_driver_name = "qat_aes_cbc_hmac_sha512", .cra_priority = 4001, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct qat_alg_aead_ctx), .cra_module = THIS_MODULE, @@ -1309,7 +1347,7 @@ static struct skcipher_alg qat_skciphers[] = { { .base.cra_name = "cbc(aes)", .base.cra_driver_name = "qat_aes_cbc", .base.cra_priority = 4001, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx), .base.cra_alignmask = 0, @@ -1327,7 +1365,7 @@ static struct skcipher_alg 
qat_skciphers[] = { { .base.cra_name = "ctr(aes)", .base.cra_driver_name = "qat_aes_ctr", .base.cra_priority = 4001, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx), .base.cra_alignmask = 0, @@ -1345,17 +1383,18 @@ static struct skcipher_alg qat_skciphers[] = { { .base.cra_name = "xts(aes)", .base.cra_driver_name = "qat_aes_xts", .base.cra_priority = 4001, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx), .base.cra_alignmask = 0, .base.cra_module = THIS_MODULE, - .init = qat_alg_skcipher_init_tfm, - .exit = qat_alg_skcipher_exit_tfm, + .init = qat_alg_skcipher_init_xts_tfm, + .exit = qat_alg_skcipher_exit_xts_tfm, .setkey = qat_alg_skcipher_xts_setkey, - .decrypt = qat_alg_skcipher_blk_decrypt, - .encrypt = qat_alg_skcipher_blk_encrypt, + .decrypt = qat_alg_skcipher_xts_decrypt, + .encrypt = qat_alg_skcipher_xts_encrypt, .min_keysize = 2 * AES_MIN_KEY_SIZE, .max_keysize = 2 * AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c index 692a7aaee749..846569ec9066 100644 --- a/drivers/crypto/qat/qat_common/qat_asym_algs.c +++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c @@ -1,50 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
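The xts(aes) changes in qat_algs.c above add a software fallback for the one key size the accelerator does not handle. A minimal sketch of that pattern, assuming illustrative names (example_xts_ctx, example_xts_setkey) rather than the driver's exact symbols:

#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <crypto/aes.h>
#include <linux/types.h>

struct example_xts_ctx {
	struct crypto_skcipher *ftfm;	/* software fallback tfm */
	bool fallback;			/* route requests to ftfm? */
};

static int example_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct example_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ret = xts_verify_key(tfm, key, keylen);	/* basic XTS key checks */
	if (ret)
		return ret;

	/* Each XTS half-key is 192 bits: hand the whole key to the fallback. */
	if (keylen / 2 == AES_KEYSIZE_192) {
		ctx->fallback = true;
		return crypto_skcipher_setkey(ctx->ftfm, key, keylen);
	}

	ctx->fallback = false;
	/* ... hardware session setup as in the hunks above ... */
	return 0;
}

At request time the new qat_alg_skcipher_xts_encrypt()/decrypt() helpers copy the request, retarget it at ctx->ftfm with skcipher_request_set_tfm() and hand it to crypto_skcipher_encrypt()/crypto_skcipher_decrypt(); the fallback tfm itself is allocated in the init_xts_tfm hook with the CRYPTO_ALG_NEED_FALLBACK mask, so the driver cannot end up selecting itself as its own fallback.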
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ - +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/module.h> #include <crypto/internal/rsa.h> #include <crypto/internal/akcipher.h> @@ -384,12 +339,12 @@ static int qat_dh_compute_value(struct kpp_request *req) msg->pke_mid.src_data_addr = qat_req->phy_in; msg->pke_mid.dest_data_addr = qat_req->phy_out; - msg->pke_mid.opaque = (uint64_t)(__force long)qat_req; + msg->pke_mid.opaque = (u64)(__force long)qat_req; msg->input_param_count = n_input_params; msg->output_param_count = 1; do { - ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg); + ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg); } while (ret == -EBUSY && ctr++ < 100); if (!ret) @@ -779,11 +734,11 @@ static int qat_rsa_enc(struct akcipher_request *req) msg->pke_mid.src_data_addr = qat_req->phy_in; msg->pke_mid.dest_data_addr = qat_req->phy_out; - msg->pke_mid.opaque = (uint64_t)(__force long)qat_req; + msg->pke_mid.opaque = (u64)(__force long)qat_req; msg->input_param_count = 3; msg->output_param_count = 1; do { - ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg); + ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg); } while (ret == -EBUSY && ctr++ < 100); if (!ret) @@ -927,7 +882,7 @@ static int qat_rsa_dec(struct akcipher_request *req) msg->pke_mid.src_data_addr = qat_req->phy_in; msg->pke_mid.dest_data_addr = qat_req->phy_out; - msg->pke_mid.opaque = (uint64_t)(__force long)qat_req; + msg->pke_mid.opaque = (u64)(__force long)qat_req; if (ctx->crt_mode) msg->input_param_count = 6; else @@ -935,7 +890,7 @@ static int qat_rsa_dec(struct akcipher_request *req) msg->output_param_count = 1; do { - ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg); + ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg); } while (ret == -EBUSY && ctr++ < 100); if (!ret) diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c index fb504cee0305..ab621b7dbd20 100644 --- a/drivers/crypto/qat/qat_common/qat_crypto.c +++ b/drivers/crypto/qat/qat_common/qat_crypto.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. 
- Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/module.h> #include <linux/slab.h> #include "adf_accel_devices.h" diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h index 300bb919a33a..12682d1e9f5f 100644 --- a/drivers/crypto/qat/qat_common/qat_crypto.h +++ b/drivers/crypto/qat/qat_common/qat_crypto.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. 
- - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef _QAT_CRYPTO_INSTANCE_H_ #define _QAT_CRYPTO_INSTANCE_H_ diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c index ff149e176f64..fa467e0f8285 100644 --- a/drivers/crypto/qat/qat_common/qat_hal.c +++ b/drivers/crypto/qat/qat_common/qat_hal.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/slab.h> #include <linux/delay.h> @@ -78,13 +34,13 @@ #define AE(handle, ae) handle->hal_handle->aes[ae] -static const uint64_t inst_4b[] = { +static const u64 inst_4b[] = { 0x0F0400C0000ull, 0x0F4400C0000ull, 0x0F040000300ull, 0x0F440000300ull, 0x0FC066C0000ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0A021000000ull }; -static const uint64_t inst[] = { +static const u64 inst[] = { 0x0F0000C0000ull, 0x0F000000380ull, 0x0D805000011ull, 0x0FC082C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0A0643C0000ull, 0x0BAC0000301ull, 0x0D802000101ull, 0x0F0000C0001ull, @@ -546,7 +502,7 @@ static void qat_hal_disable_ctx(struct icp_qat_fw_loader_handle *handle, qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx); } -static uint64_t qat_hal_parity_64bit(uint64_t word) +static u64 qat_hal_parity_64bit(u64 word) { word ^= word >> 1; word ^= word >> 2; @@ -557,9 +513,9 @@ static uint64_t qat_hal_parity_64bit(uint64_t word) return word & 1; } -static uint64_t qat_hal_set_uword_ecc(uint64_t uword) +static u64 qat_hal_set_uword_ecc(u64 uword) { - uint64_t bit0_mask = 0xff800007fffULL, bit1_mask = 0x1f801ff801fULL, + u64 bit0_mask = 0xff800007fffULL, bit1_mask = 0x1f801ff801fULL, bit2_mask = 0xe387e0781e1ULL, bit3_mask = 0x7cb8e388e22ULL, bit4_mask = 0xaf5b2c93244ULL, bit5_mask = 0xf56d5525488ULL, bit6_mask = 0xdaf69a46910ULL; @@ -578,7 +534,7 @@ static uint64_t qat_hal_set_uword_ecc(uint64_t uword) void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int uaddr, - unsigned int words_num, uint64_t *uword) + unsigned int words_num, u64 *uword) { unsigned int ustore_addr; unsigned int i; @@ -588,7 +544,7 @@ void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle, qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); for (i = 0; i < words_num; i++) { unsigned int uwrd_lo, uwrd_hi; - uint64_t tmp; + u64 tmp; tmp = qat_hal_set_uword_ecc(uword[i]); uwrd_lo = (unsigned int)(tmp & 0xffffffff); @@ -644,7 +600,7 @@ static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle) csr_val |= CE_NN_MODE; qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val); qat_hal_wr_uwords(handle, ae, 0, ARRAY_SIZE(inst), - (uint64_t *)inst); + (u64 *)inst); qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT, handle->hal_handle->upc_mask & INIT_PC_VALUE); @@ -821,7 +777,7 @@ void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle, static void qat_hal_get_uwords(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int uaddr, - unsigned int words_num, uint64_t *uword) + unsigned int words_num, u64 *uword) { unsigned int i, uwrd_lo, uwrd_hi; unsigned int ustore_addr, misc_control; @@ -871,11 +827,11 @@ void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle, #define MAX_EXEC_INST 100 static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned char ctx, - uint64_t *micro_inst, unsigned int inst_num, + u64 *micro_inst, unsigned int inst_num, int code_off, unsigned int max_cycle, unsigned int *endpc) { - uint64_t savuwords[MAX_EXEC_INST]; + u64 savuwords[MAX_EXEC_INST]; unsigned int ind_lm_addr0, ind_lm_addr1; unsigned int ind_lm_addr_byte0, ind_lm_addr_byte1; unsigned int ind_cnt_sig; @@ -972,7 +928,7 @@ static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle, unsigned int ctxarb_cntl, ustore_addr, ctx_enables; unsigned short 
reg_addr; int status = 0; - uint64_t insts, savuword; + u64 insts, savuword; reg_addr = qat_hal_get_reg_addr(reg_type, reg_num); if (reg_addr == BAD_REGADDR) { @@ -984,7 +940,7 @@ static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle, insts = 0xA070000000ull | (reg_addr & 0x3ff); break; default: - insts = (uint64_t)0xA030000000ull | ((reg_addr & 0x3ff) << 10); + insts = (u64)0xA030000000ull | ((reg_addr & 0x3ff) << 10); break; } savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS); @@ -1030,7 +986,7 @@ static int qat_hal_wr_rel_reg(struct icp_qat_fw_loader_handle *handle, unsigned short reg_num, unsigned int data) { unsigned short src_hiaddr, src_lowaddr, dest_addr, data16hi, data16lo; - uint64_t insts[] = { + u64 insts[] = { 0x0F440000000ull, 0x0F040000000ull, 0x0F0000C0300ull, @@ -1076,13 +1032,13 @@ int qat_hal_get_ins_num(void) return ARRAY_SIZE(inst_4b); } -static int qat_hal_concat_micro_code(uint64_t *micro_inst, +static int qat_hal_concat_micro_code(u64 *micro_inst, unsigned int inst_num, unsigned int size, unsigned int addr, unsigned int *value) { int i; unsigned int cur_value; - const uint64_t *inst_arr; + const u64 *inst_arr; int fixup_offset; int usize = 0; int orig_num; @@ -1107,7 +1063,7 @@ static int qat_hal_concat_micro_code(uint64_t *micro_inst, static int qat_hal_exec_micro_init_lm(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned char ctx, - int *pfirst_exec, uint64_t *micro_inst, + int *pfirst_exec, u64 *micro_inst, unsigned int inst_num) { int stat = 0; @@ -1140,7 +1096,7 @@ int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle, struct icp_qat_uof_batch_init *lm_init_header) { struct icp_qat_uof_batch_init *plm_init; - uint64_t *micro_inst_arry; + u64 *micro_inst_arry; int micro_inst_num; int alloc_inst_size; int first_exec = 1; @@ -1150,7 +1106,7 @@ int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle, alloc_inst_size = lm_init_header->size; if ((unsigned int)alloc_inst_size > handle->hal_handle->max_ustore) alloc_inst_size = handle->hal_handle->max_ustore; - micro_inst_arry = kmalloc_array(alloc_inst_size, sizeof(uint64_t), + micro_inst_arry = kmalloc_array(alloc_inst_size, sizeof(u64), GFP_KERNEL); if (!micro_inst_arry) return -ENOMEM; @@ -1229,7 +1185,7 @@ static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle, data16low; unsigned short reg_mask; int status = 0; - uint64_t micro_inst[] = { + u64 micro_inst[] = { 0x0F440000000ull, 0x0F040000000ull, 0x0A000000000ull, diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c index 6bd8f6a2a24f..bff759e2f811 100644 --- a/drivers/crypto/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/qat/qat_common/qat_uclo.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. 
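The qat_hal.c hunks above mostly rename types, but the parity helper they pass through is a compact trick: a 64-bit word is folded down to a single parity bit with halving XOR shifts. A standalone illustration, assuming the fold continues through the remaining shift widths (only the first two appear in the context lines); parity64 is a hypothetical name, not the driver's function:

#include <stdint.h>
#include <stdio.h>

/* Parity of a 64-bit word: XOR-fold halves until one bit remains. */
static uint64_t parity64(uint64_t word)
{
	word ^= word >> 1;
	word ^= word >> 2;
	word ^= word >> 4;
	word ^= word >> 8;
	word ^= word >> 16;
	word ^= word >> 32;
	return word & 1;	/* 1 when the number of set bits is odd */
}

int main(void)
{
	/* 0x0F0400C0000 (one of the microcode words above) has 7 bits set. */
	printf("%llu\n", (unsigned long long)parity64(0x0F0400C0000ULL)); /* 1 */
	return 0;
}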
- Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/slab.h> #include <linux/ctype.h> #include <linux/kernel.h> @@ -332,13 +288,18 @@ static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle } return 0; out_err: + /* Do not free the list head unless we allocated it. 
*/ + tail_old = tail_old->next; + if (flag) { + kfree(*init_tab_base); + *init_tab_base = NULL; + } + while (tail_old) { mem_init = tail_old->next; kfree(tail_old); tail_old = mem_init; } - if (flag) - kfree(*init_tab_base); return -ENOMEM; } @@ -411,16 +372,16 @@ static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle, unsigned int ustore_size; unsigned int patt_pos; struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; - uint64_t *fill_data; + u64 *fill_data; uof_image = image->img_ptr; - fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(uint64_t), + fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(u64), GFP_KERNEL); if (!fill_data) return -ENOMEM; for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++) memcpy(&fill_data[i], &uof_image->fill_pattern, - sizeof(uint64_t)); + sizeof(u64)); page = image->page; for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { @@ -981,7 +942,7 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle) pr_err("QAT: UOF incompatible\n"); return -EINVAL; } - obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(uint64_t), + obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(u64), GFP_KERNEL); if (!obj_handle->uword_buf) return -ENOMEM; @@ -1185,7 +1146,7 @@ static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle, return 0; } -#define ADD_ADDR(high, low) ((((uint64_t)high) << 32) + low) +#define ADD_ADDR(high, low) ((((u64)high) << 32) + low) #define BITS_IN_DWORD 32 static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle, @@ -1514,10 +1475,10 @@ void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle) static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle, struct icp_qat_uclo_encap_page *encap_page, - uint64_t *uword, unsigned int addr_p, - unsigned int raddr, uint64_t fill) + u64 *uword, unsigned int addr_p, + unsigned int raddr, u64 fill) { - uint64_t uwrd = 0; + u64 uwrd = 0; unsigned int i; if (!encap_page) { @@ -1547,12 +1508,12 @@ static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle, { unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen; struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; - uint64_t fill_pat; + u64 fill_pat; /* load the page starting at appropriate ustore address */ /* get fill-pattern from an image -- they are all the same */ memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern, - sizeof(uint64_t)); + sizeof(u64)); uw_physical_addr = encap_page->beg_addr_p; uw_relative_addr = 0; words_num = encap_page->micro_words_num; diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c index 1dfcab317bed..b975c263446d 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c +++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c @@ -1,62 +1,18 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. 
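The reworked out_err path in qat_uclo_create_batch_init_list above tightens the cleanup so only nodes appended by this call are released, the list head is freed only when this call allocated it, and the caller's pointer is cleared in that case. A sketch of the pattern with illustrative types and names (init_node, example_cleanup), not the driver's structures:

#include <linux/slab.h>
#include <linux/types.h>

struct init_node {
	struct init_node *next;
	/* ... payload ... */
};

/*
 * Step past the saved tail first, free the head only if this call allocated
 * it, then release just the nodes that were appended after tail_old.
 */
static void example_cleanup(struct init_node **head,
			    struct init_node *tail_old, bool allocated_head)
{
	struct init_node *next;

	tail_old = tail_old->next;	/* first node appended by this call */
	if (allocated_head) {
		kfree(*head);
		*head = NULL;
	}
	while (tail_old) {
		next = tail_old->next;
		kfree(tail_old);
		tail_old = next;
	}
}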
- - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <adf_accel_devices.h> #include <adf_pf2vf_msg.h> #include <adf_common_drv.h> #include "adf_dh895xcc_hw_data.h" /* Worker thread to service arbiter mappings based on dev SKUs */ -static const uint32_t thrd_to_arb_map_sku4[] = { +static const u32 thrd_to_arb_map_sku4[] = { 0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666, 0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }; -static const uint32_t thrd_to_arb_map_sku6[] = { +static const u32 thrd_to_arb_map_sku6[] = { 0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666, 0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222 @@ -68,20 +24,20 @@ static struct adf_hw_device_class dh895xcc_class = { .instances = 0 }; -static uint32_t get_accel_mask(uint32_t fuse) +static u32 get_accel_mask(u32 fuse) { return (~fuse) >> ADF_DH895XCC_ACCELERATORS_REG_OFFSET & ADF_DH895XCC_ACCELERATORS_MASK; } -static uint32_t get_ae_mask(uint32_t fuse) +static u32 get_ae_mask(u32 fuse) { return (~fuse) & ADF_DH895XCC_ACCELENGINES_MASK; } -static uint32_t get_num_accels(struct adf_hw_device_data *self) +static u32 get_num_accels(struct adf_hw_device_data *self) { - uint32_t i, ctr = 0; + u32 i, ctr = 0; if (!self || !self->accel_mask) return 0; @@ -93,9 +49,9 @@ static uint32_t get_num_accels(struct adf_hw_device_data *self) return ctr; } -static uint32_t get_num_aes(struct adf_hw_device_data *self) +static u32 get_num_aes(struct adf_hw_device_data *self) { - uint32_t i, ctr = 0; + u32 i, ctr = 0; if (!self || !self->ae_mask) return 0; @@ -107,17 +63,17 @@ static uint32_t get_num_aes(struct adf_hw_device_data *self) return ctr; } -static uint32_t get_misc_bar_id(struct adf_hw_device_data *self) +static u32 get_misc_bar_id(struct adf_hw_device_data *self) { return ADF_DH895XCC_PMISC_BAR; } -static uint32_t get_etr_bar_id(struct 
adf_hw_device_data *self) +static u32 get_etr_bar_id(struct adf_hw_device_data *self) { return ADF_DH895XCC_ETR_BAR; } -static uint32_t get_sram_bar_id(struct adf_hw_device_data *self) +static u32 get_sram_bar_id(struct adf_hw_device_data *self) { return ADF_DH895XCC_SRAM_BAR; } @@ -161,12 +117,12 @@ static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev, } } -static uint32_t get_pf2vf_offset(uint32_t i) +static u32 get_pf2vf_offset(u32 i) { return ADF_DH895XCC_PF2VF_OFFSET(i); } -static uint32_t get_vintmsk_offset(uint32_t i) +static u32 get_vintmsk_offset(u32 i) { return ADF_DH895XCC_VINTMSK_OFFSET(i); } diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h index 092f7353ed23..082a04466dca 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h +++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef ADF_DH895x_HW_DATA_H_ #define ADF_DH895x_HW_DATA_H_ diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c index b11bf8c0e683..4e877b75822b 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c +++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c index a3b4dd8099a7..5246f0524ca3 100644 --- a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2015 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. 
- - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2015 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2015 - 2020 Intel Corporation */ #include <adf_accel_devices.h> #include <adf_pf2vf_msg.h> #include <adf_common_drv.h> diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h index 6ddc19bd4410..2bfcc67f8f39 100644 --- a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2015 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2015 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2015 - 2020 Intel Corporation */ #ifndef ADF_DH895XVF_HW_DATA_H_ #define ADF_DH895XVF_HW_DATA_H_ diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c index 1b762eefc6c1..7d6e1db272c2 100644 --- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h index 7770660bc853..cffa9fc628ff 100644 --- a/drivers/crypto/qce/cipher.h +++ b/drivers/crypto/qce/cipher.h @@ -14,7 +14,7 @@ struct qce_cipher_ctx { u8 enc_key[QCE_MAX_KEY_SIZE]; unsigned int enc_keylen; - struct crypto_sync_skcipher *fallback; + struct crypto_skcipher *fallback; }; /** @@ -43,6 +43,7 @@ struct qce_cipher_reqctx { struct sg_table src_tbl; struct scatterlist *src_sg; unsigned int cryptlen; + struct skcipher_request fallback_req; // keep at the end }; static inline struct qce_alg_template *to_cipher_tmpl(struct crypto_skcipher *tfm) diff --git a/drivers/crypto/qce/common.h b/drivers/crypto/qce/common.h index 9f989cba0f1b..85ba16418a04 100644 --- a/drivers/crypto/qce/common.h +++ b/drivers/crypto/qce/common.h @@ -87,6 +87,8 @@ struct qce_alg_template { struct ahash_alg ahash; } alg; struct qce_device *qce; + const u8 *hash_zero; + const u32 digest_size; }; void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len); diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c index 1ab62e7d5f3c..c230843e2ffb 100644 --- a/drivers/crypto/qce/sha.c +++ b/drivers/crypto/qce/sha.c @@ -203,10 +203,18 @@ static int qce_import_common(struct ahash_request *req, u64 in_count, static int qce_ahash_import(struct ahash_request *req, const void *in) { - struct qce_sha_reqctx *rctx = ahash_request_ctx(req); - unsigned long flags = rctx->flags; - bool hmac = IS_SHA_HMAC(flags); - int ret = -EINVAL; + struct qce_sha_reqctx *rctx; + unsigned long flags; + bool hmac; + int ret; + + ret = qce_ahash_init(req); + if (ret) + return ret; + + rctx = ahash_request_ctx(req); + flags = rctx->flags; + hmac = IS_SHA_HMAC(flags); if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) { const struct sha1_state *state = in; @@ -284,8 +292,6 @@ static int qce_ahash_update(struct ahash_request *req) if (!sg_last) return -EINVAL; - sg_mark_end(sg_last); - if (rctx->buflen) { sg_init_table(rctx->sg, 2); sg_set_buf(rctx->sg, rctx->tmpbuf, rctx->buflen); @@ -305,8 +311,12 @@ static int qce_ahash_final(struct ahash_request *req) struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm); struct qce_device *qce = tmpl->qce; - if (!rctx->buflen) + if (!rctx->buflen) { + if (tmpl->hash_zero) + memcpy(req->result, tmpl->hash_zero, + tmpl->alg.ahash.halg.digestsize); return 0; + } rctx->last_blk = true; @@ -338,6 +348,13 @@ static int qce_ahash_digest(struct ahash_request *req) rctx->first_blk = true; rctx->last_blk = true; + if (!rctx->nbytes_orig) { + if (tmpl->hash_zero) + memcpy(req->result, tmpl->hash_zero, + tmpl->alg.ahash.halg.digestsize); + return 0; + } + return qce->async_req_enqueue(tmpl->qce, &req->base); } @@ -490,6 +507,11 @@ static int qce_ahash_register_one(const struct qce_ahash_def *def, alg->halg.digestsize = 
def->digestsize; alg->halg.statesize = def->statesize; + if (IS_SHA1(def->flags)) + tmpl->hash_zero = sha1_zero_message_hash; + else if (IS_SHA256(def->flags)) + tmpl->hash_zero = sha256_zero_message_hash; + base = &alg->halg.base; base->cra_blocksize = def->blocksize; base->cra_priority = 300; diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c index 9412433f3b21..5630c5addd28 100644 --- a/drivers/crypto/qce/skcipher.c +++ b/drivers/crypto/qce/skcipher.c @@ -178,7 +178,7 @@ static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key, break; } - ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen); + ret = crypto_skcipher_setkey(ctx->fallback, key, keylen); if (!ret) ctx->enc_keylen = keylen; return ret; @@ -235,16 +235,15 @@ static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt) req->cryptlen <= aes_sw_max_len) || (IS_XTS(rctx->flags) && req->cryptlen > QCE_SECTOR_SIZE && req->cryptlen % QCE_SECTOR_SIZE))) { - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); - - skcipher_request_set_sync_tfm(subreq, ctx->fallback); - skcipher_request_set_callback(subreq, req->base.flags, - NULL, NULL); - skcipher_request_set_crypt(subreq, req->src, req->dst, - req->cryptlen, req->iv); - ret = encrypt ? crypto_skcipher_encrypt(subreq) : - crypto_skcipher_decrypt(subreq); - skcipher_request_zero(subreq); + skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback); + skcipher_request_set_callback(&rctx->fallback_req, + req->base.flags, + req->base.complete, + req->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, req->src, + req->dst, req->cryptlen, req->iv); + ret = encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) : + crypto_skcipher_decrypt(&rctx->fallback_req); return ret; } @@ -263,10 +262,9 @@ static int qce_skcipher_decrypt(struct skcipher_request *req) static int qce_skcipher_init(struct crypto_skcipher *tfm) { - struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); - - memset(ctx, 0, sizeof(*ctx)); - crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx)); + /* take the size without the fallback skcipher_request at the end */ + crypto_skcipher_set_reqsize(tfm, offsetof(struct qce_cipher_reqctx, + fallback_req)); return 0; } @@ -274,17 +272,21 @@ static int qce_skcipher_init_fallback(struct crypto_skcipher *tfm) { struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); - qce_skcipher_init(tfm); - ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base), - 0, CRYPTO_ALG_NEED_FALLBACK); - return PTR_ERR_OR_ZERO(ctx->fallback); + ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base), + 0, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->fallback)) + return PTR_ERR(ctx->fallback); + + crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx) + + crypto_skcipher_reqsize(ctx->fallback)); + return 0; } static void qce_skcipher_exit(struct crypto_skcipher *tfm) { struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); - crypto_free_sync_skcipher(ctx->fallback); + crypto_free_skcipher(ctx->fallback); } struct qce_skcipher_def { @@ -404,6 +406,7 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def, alg->base.cra_priority = 300; alg->base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY; alg->base.cra_ctxsize = sizeof(struct qce_cipher_ctx); alg->base.cra_alignmask = 0; diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c new file mode 100644 index 000000000000..5bc099052bd2 --- /dev/null +++ 
b/drivers/crypto/sa2ul.c @@ -0,0 +1,2420 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * K3 SA2UL crypto accelerator driver + * + * Copyright (C) 2018-2020 Texas Instruments Incorporated - http://www.ti.com + * + * Authors: Keerthy + * Vitaly Andrianov + * Tero Kristo + */ +#include <linux/clk.h> +#include <linux/dmaengine.h> +#include <linux/dmapool.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> + +#include <crypto/aes.h> +#include <crypto/authenc.h> +#include <crypto/des.h> +#include <crypto/internal/aead.h> +#include <crypto/internal/hash.h> +#include <crypto/internal/skcipher.h> +#include <crypto/scatterwalk.h> +#include <crypto/sha.h> + +#include "sa2ul.h" + +/* Byte offset for key in encryption security context */ +#define SC_ENC_KEY_OFFSET (1 + 27 + 4) +/* Byte offset for Aux-1 in encryption security context */ +#define SC_ENC_AUX1_OFFSET (1 + 27 + 4 + 32) + +#define SA_CMDL_UPD_ENC 0x0001 +#define SA_CMDL_UPD_AUTH 0x0002 +#define SA_CMDL_UPD_ENC_IV 0x0004 +#define SA_CMDL_UPD_AUTH_IV 0x0008 +#define SA_CMDL_UPD_AUX_KEY 0x0010 + +#define SA_AUTH_SUBKEY_LEN 16 +#define SA_CMDL_PAYLOAD_LENGTH_MASK 0xFFFF +#define SA_CMDL_SOP_BYPASS_LEN_MASK 0xFF000000 + +#define MODE_CONTROL_BYTES 27 +#define SA_HASH_PROCESSING 0 +#define SA_CRYPTO_PROCESSING 0 +#define SA_UPLOAD_HASH_TO_TLR BIT(6) + +#define SA_SW0_FLAGS_MASK 0xF0000 +#define SA_SW0_CMDL_INFO_MASK 0x1F00000 +#define SA_SW0_CMDL_PRESENT BIT(4) +#define SA_SW0_ENG_ID_MASK 0x3E000000 +#define SA_SW0_DEST_INFO_PRESENT BIT(30) +#define SA_SW2_EGRESS_LENGTH 0xFF000000 +#define SA_BASIC_HASH 0x10 + +#define SHA256_DIGEST_WORDS 8 +/* Make 32-bit word from 4 bytes */ +#define SA_MK_U32(b0, b1, b2, b3) (((b0) << 24) | ((b1) << 16) | \ + ((b2) << 8) | (b3)) + +/* size of SCCTL structure in bytes */ +#define SA_SCCTL_SZ 16 + +/* Max Authentication tag size */ +#define SA_MAX_AUTH_TAG_SZ 64 + +#define PRIV_ID 0x1 +#define PRIV 0x1 + +static struct device *sa_k3_dev; + +/** + * struct sa_cmdl_cfg - Command label configuration descriptor + * @aalg: authentication algorithm ID + * @enc_eng_id: Encryption Engine ID supported by the SA hardware + * @auth_eng_id: Authentication Engine ID + * @iv_size: Initialization Vector size + * @akey: Authentication key + * @akey_len: Authentication key length + * @enc: True, if this is an encode request + */ +struct sa_cmdl_cfg { + int aalg; + u8 enc_eng_id; + u8 auth_eng_id; + u8 iv_size; + const u8 *akey; + u16 akey_len; + bool enc; +}; + +/** + * struct algo_data - Crypto algorithm specific data + * @enc_eng: Encryption engine info structure + * @auth_eng: Authentication engine info structure + * @auth_ctrl: Authentication control word + * @hash_size: Size of digest + * @iv_idx: iv index in psdata + * @iv_out_size: iv out size + * @ealg_id: Encryption Algorithm ID + * @aalg_id: Authentication algorithm ID + * @mci_enc: Mode Control Instruction for Encryption algorithm + * @mci_dec: Mode Control Instruction for Decryption + * @inv_key: Whether the encryption algorithm demands key inversion + * @ctx: Pointer to the algorithm context + * @keyed_mac: Whether the authentication algorithm has key + * @prep_iopad: Function pointer to generate intermediate ipad/opad + */ +struct algo_data { + struct sa_eng_info enc_eng; + struct sa_eng_info auth_eng; + u8 auth_ctrl; + u8 hash_size; + u8 iv_idx; + u8 iv_out_size; + u8 ealg_id; + u8 aalg_id; + u8 *mci_enc; + u8 *mci_dec; + bool inv_key; + struct sa_tfm_ctx *ctx; + bool keyed_mac; + void 
(*prep_iopad)(struct algo_data *algo, const u8 *key, + u16 key_sz, __be32 *ipad, __be32 *opad); +}; + +/** + * struct sa_alg_tmpl: A generic template encompassing crypto/aead algorithms + * @type: Type of the crypto algorithm. + * @alg: Union of crypto algorithm definitions. + * @registered: Flag indicating if the crypto algorithm is already registered + */ +struct sa_alg_tmpl { + u32 type; /* CRYPTO_ALG_TYPE from <linux/crypto.h> */ + union { + struct skcipher_alg skcipher; + struct ahash_alg ahash; + struct aead_alg aead; + } alg; + bool registered; +}; + +/** + * struct sa_rx_data: RX Packet miscellaneous data place holder + * @req: crypto request data pointer + * @ddev: pointer to the DMA device + * @tx_in: dma_async_tx_descriptor pointer for rx channel + * @split_src_sg: Set if the src sg is split and needs to be freed up + * @split_dst_sg: Set if the dst sg is split and needs to be freed up + * @enc: Flag indicating either encryption or decryption + * @enc_iv_size: Initialisation vector size + * @iv_idx: Initialisation vector index + * @rx_sg: Static scatterlist entry for overriding RX data + * @tx_sg: Static scatterlist entry for overriding TX data + * @src: Source data pointer + * @dst: Destination data pointer + */ +struct sa_rx_data { + void *req; + struct device *ddev; + struct dma_async_tx_descriptor *tx_in; + struct scatterlist *split_src_sg; + struct scatterlist *split_dst_sg; + u8 enc; + u8 enc_iv_size; + u8 iv_idx; + struct scatterlist rx_sg; + struct scatterlist tx_sg; + struct scatterlist *src; + struct scatterlist *dst; +}; + +/** + * struct sa_req: SA request definition + * @dev: device for the request + * @size: total data to the xmitted via DMA + * @enc_offset: offset of cipher data + * @enc_size: data to be passed to cipher engine + * @enc_iv: cipher IV + * @auth_offset: offset of the authentication data + * @auth_size: size of the authentication data + * @auth_iv: authentication IV + * @type: algorithm type for the request + * @cmdl: command label pointer + * @base: pointer to the base request + * @ctx: pointer to the algorithm context data + * @enc: true if this is an encode request + * @src: source data + * @dst: destination data + * @callback: DMA callback for the request + * @mdata_size: metadata size passed to DMA + */ +struct sa_req { + struct device *dev; + u16 size; + u8 enc_offset; + u16 enc_size; + u8 *enc_iv; + u8 auth_offset; + u16 auth_size; + u8 *auth_iv; + u32 type; + u32 *cmdl; + struct crypto_async_request *base; + struct sa_tfm_ctx *ctx; + bool enc; + struct scatterlist *src; + struct scatterlist *dst; + dma_async_tx_callback callback; + u16 mdata_size; +}; + +/* + * Mode Control Instructions for various Key lengths 128, 192, 256 + * For CBC (Cipher Block Chaining) mode for encryption + */ +static u8 mci_cbc_enc_array[3][MODE_CONTROL_BYTES] = { + { 0x61, 0x00, 0x00, 0x18, 0x88, 0x0a, 0xaa, 0x4b, 0x7e, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + { 0x61, 0x00, 0x00, 0x18, 0x88, 0x4a, 0xaa, 0x4b, 0x7e, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + { 0x61, 0x00, 0x00, 0x18, 0x88, 0x8a, 0xaa, 0x4b, 0x7e, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, +}; + +/* + * Mode Control Instructions for various Key lengths 128, 192, 256 + * For CBC (Cipher Block Chaining) mode for decryption + */ +static u8 mci_cbc_dec_array[3][MODE_CONTROL_BYTES] = { + { 
0x71, 0x00, 0x00, 0x80, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + { 0x71, 0x00, 0x00, 0x84, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + { 0x71, 0x00, 0x00, 0x88, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, +}; + +/* + * Mode Control Instructions for various Key lengths 128, 192, 256 + * For CBC (Cipher Block Chaining) mode for encryption + */ +static u8 mci_cbc_enc_no_iv_array[3][MODE_CONTROL_BYTES] = { + { 0x21, 0x00, 0x00, 0x18, 0x88, 0x0a, 0xaa, 0x4b, 0x7e, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + { 0x21, 0x00, 0x00, 0x18, 0x88, 0x4a, 0xaa, 0x4b, 0x7e, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + { 0x21, 0x00, 0x00, 0x18, 0x88, 0x8a, 0xaa, 0x4b, 0x7e, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, +}; + +/* + * Mode Control Instructions for various Key lengths 128, 192, 256 + * For CBC (Cipher Block Chaining) mode for decryption + */ +static u8 mci_cbc_dec_no_iv_array[3][MODE_CONTROL_BYTES] = { + { 0x31, 0x00, 0x00, 0x80, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + { 0x31, 0x00, 0x00, 0x84, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + { 0x31, 0x00, 0x00, 0x88, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, +}; + +/* + * Mode Control Instructions for various Key lengths 128, 192, 256 + * For ECB (Electronic Code Book) mode for encryption + */ +static u8 mci_ecb_enc_array[3][27] = { + { 0x21, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + { 0x21, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + { 0x21, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, +}; + +/* + * Mode Control Instructions for various Key lengths 128, 192, 256 + * For ECB (Electronic Code Book) mode for decryption + */ +static u8 mci_ecb_dec_array[3][27] = { + { 0x31, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + { 0x31, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + { 0x31, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, +}; + +/* + * Mode Control Instructions for DES algorithm + * For CBC (Cipher Block Chaining) mode and ECB mode + * encryption and for decryption respectively + */ +static u8 mci_cbc_3des_enc_array[MODE_CONTROL_BYTES] = 
{ + 0x60, 0x00, 0x00, 0x18, 0x88, 0x52, 0xaa, 0x4b, 0x7e, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, +}; + +static u8 mci_cbc_3des_dec_array[MODE_CONTROL_BYTES] = { + 0x70, 0x00, 0x00, 0x85, 0x0a, 0xca, 0x98, 0xf4, 0x40, 0xc0, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, +}; + +static u8 mci_ecb_3des_enc_array[MODE_CONTROL_BYTES] = { + 0x20, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, +}; + +static u8 mci_ecb_3des_dec_array[MODE_CONTROL_BYTES] = { + 0x30, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, +}; + +/* + * Perform 16 byte or 128 bit swizzling + * The SA2UL Expects the security context to + * be in little Endian and the bus width is 128 bits or 16 bytes + * Hence swap 16 bytes at a time from higher to lower address + */ +static void sa_swiz_128(u8 *in, u16 len) +{ + u8 data[16]; + int i, j; + + for (i = 0; i < len; i += 16) { + memcpy(data, &in[i], 16); + for (j = 0; j < 16; j++) + in[i + j] = data[15 - j]; + } +} + +/* Prepare the ipad and opad from key as per SHA algorithm step 1*/ +static void prepare_kiopad(u8 *k_ipad, u8 *k_opad, const u8 *key, u16 key_sz) +{ + int i; + + for (i = 0; i < key_sz; i++) { + k_ipad[i] = key[i] ^ 0x36; + k_opad[i] = key[i] ^ 0x5c; + } + + /* Instead of XOR with 0 */ + for (; i < SHA1_BLOCK_SIZE; i++) { + k_ipad[i] = 0x36; + k_opad[i] = 0x5c; + } +} + +static void sa_export_shash(struct shash_desc *hash, int block_size, + int digest_size, __be32 *out) +{ + union { + struct sha1_state sha1; + struct sha256_state sha256; + struct sha512_state sha512; + } sha; + void *state; + u32 *result; + int i; + + switch (digest_size) { + case SHA1_DIGEST_SIZE: + state = &sha.sha1; + result = sha.sha1.state; + break; + case SHA256_DIGEST_SIZE: + state = &sha.sha256; + result = sha.sha256.state; + break; + default: + dev_err(sa_k3_dev, "%s: bad digest_size=%d\n", __func__, + digest_size); + return; + } + + crypto_shash_export(hash, state); + + for (i = 0; i < digest_size >> 2; i++) + out[i] = cpu_to_be32(result[i]); +} + +static void sa_prepare_iopads(struct algo_data *data, const u8 *key, + u16 key_sz, __be32 *ipad, __be32 *opad) +{ + SHASH_DESC_ON_STACK(shash, data->ctx->shash); + int block_size = crypto_shash_blocksize(data->ctx->shash); + int digest_size = crypto_shash_digestsize(data->ctx->shash); + u8 k_ipad[SHA1_BLOCK_SIZE]; + u8 k_opad[SHA1_BLOCK_SIZE]; + + shash->tfm = data->ctx->shash; + + prepare_kiopad(k_ipad, k_opad, key, key_sz); + + memzero_explicit(ipad, block_size); + memzero_explicit(opad, block_size); + + crypto_shash_init(shash); + crypto_shash_update(shash, k_ipad, block_size); + sa_export_shash(shash, block_size, digest_size, ipad); + + crypto_shash_init(shash); + crypto_shash_update(shash, k_opad, block_size); + + sa_export_shash(shash, block_size, digest_size, opad); +} + +/* Derive the inverse key used in AES-CBC decryption operation */ +static inline int sa_aes_inv_key(u8 *inv_key, const u8 *key, u16 key_sz) +{ + struct crypto_aes_ctx ctx; + int key_pos; + + if (aes_expandkey(&ctx, key, key_sz)) { + dev_err(sa_k3_dev, "%s: bad key len(%d)\n", __func__, key_sz); + return -EINVAL; + } + + /* work around to get the right inverse for AES_KEYSIZE_192 size keys */ + if (key_sz == 
AES_KEYSIZE_192) { + ctx.key_enc[52] = ctx.key_enc[51] ^ ctx.key_enc[46]; + ctx.key_enc[53] = ctx.key_enc[52] ^ ctx.key_enc[47]; + } + + /* Based on the crypto_aes_expand_key logic */ + switch (key_sz) { + case AES_KEYSIZE_128: + case AES_KEYSIZE_192: + key_pos = key_sz + 24; + break; + + case AES_KEYSIZE_256: + key_pos = key_sz + 24 - 4; + break; + + default: + dev_err(sa_k3_dev, "%s: bad key len(%d)\n", __func__, key_sz); + return -EINVAL; + } + + memcpy(inv_key, &ctx.key_enc[key_pos], key_sz); + return 0; +} + +/* Set Security context for the encryption engine */ +static int sa_set_sc_enc(struct algo_data *ad, const u8 *key, u16 key_sz, + u8 enc, u8 *sc_buf) +{ + const u8 *mci = NULL; + + /* Set Encryption mode selector to crypto processing */ + sc_buf[0] = SA_CRYPTO_PROCESSING; + + if (enc) + mci = ad->mci_enc; + else + mci = ad->mci_dec; + /* Set the mode control instructions in security context */ + if (mci) + memcpy(&sc_buf[1], mci, MODE_CONTROL_BYTES); + + /* For AES-CBC decryption get the inverse key */ + if (ad->inv_key && !enc) { + if (sa_aes_inv_key(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz)) + return -EINVAL; + /* For all other cases: key is used */ + } else { + memcpy(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz); + } + + return 0; +} + +/* Set Security context for the authentication engine */ +static void sa_set_sc_auth(struct algo_data *ad, const u8 *key, u16 key_sz, + u8 *sc_buf) +{ + __be32 ipad[64], opad[64]; + + /* Set Authentication mode selector to hash processing */ + sc_buf[0] = SA_HASH_PROCESSING; + /* Auth SW ctrl word: bit[6]=1 (upload computed hash to TLR section) */ + sc_buf[1] = SA_UPLOAD_HASH_TO_TLR; + sc_buf[1] |= ad->auth_ctrl; + + /* Copy the keys or ipad/opad */ + if (ad->keyed_mac) { + ad->prep_iopad(ad, key, key_sz, ipad, opad); + + /* Copy ipad to AuthKey */ + memcpy(&sc_buf[32], ipad, ad->hash_size); + /* Copy opad to Aux-1 */ + memcpy(&sc_buf[64], opad, ad->hash_size); + } else { + /* basic hash */ + sc_buf[1] |= SA_BASIC_HASH; + } +} + +static inline void sa_copy_iv(__be32 *out, const u8 *iv, bool size16) +{ + int j; + + for (j = 0; j < ((size16) ? 
4 : 2); j++) { + *out = cpu_to_be32(*((u32 *)iv)); + iv += 4; + out++; + } +} + +/* Format general command label */ +static int sa_format_cmdl_gen(struct sa_cmdl_cfg *cfg, u8 *cmdl, + struct sa_cmdl_upd_info *upd_info) +{ + u8 enc_offset = 0, auth_offset = 0, total = 0; + u8 enc_next_eng = SA_ENG_ID_OUTPORT2; + u8 auth_next_eng = SA_ENG_ID_OUTPORT2; + u32 *word_ptr = (u32 *)cmdl; + int i; + + /* Clear the command label */ + memzero_explicit(cmdl, (SA_MAX_CMDL_WORDS * sizeof(u32))); + + /* Initialize the command update structure */ + memzero_explicit(upd_info, sizeof(*upd_info)); + + if (cfg->enc_eng_id && cfg->auth_eng_id) { + if (cfg->enc) { + auth_offset = SA_CMDL_HEADER_SIZE_BYTES; + enc_next_eng = cfg->auth_eng_id; + + if (cfg->iv_size) + auth_offset += cfg->iv_size; + } else { + enc_offset = SA_CMDL_HEADER_SIZE_BYTES; + auth_next_eng = cfg->enc_eng_id; + } + } + + if (cfg->enc_eng_id) { + upd_info->flags |= SA_CMDL_UPD_ENC; + upd_info->enc_size.index = enc_offset >> 2; + upd_info->enc_offset.index = upd_info->enc_size.index + 1; + /* Encryption command label */ + cmdl[enc_offset + SA_CMDL_OFFSET_NESC] = enc_next_eng; + + /* Encryption modes requiring IV */ + if (cfg->iv_size) { + upd_info->flags |= SA_CMDL_UPD_ENC_IV; + upd_info->enc_iv.index = + (enc_offset + SA_CMDL_HEADER_SIZE_BYTES) >> 2; + upd_info->enc_iv.size = cfg->iv_size; + + cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] = + SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size; + + cmdl[enc_offset + SA_CMDL_OFFSET_OPTION_CTRL1] = + (SA_CTX_ENC_AUX2_OFFSET | (cfg->iv_size >> 3)); + total += SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size; + } else { + cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] = + SA_CMDL_HEADER_SIZE_BYTES; + total += SA_CMDL_HEADER_SIZE_BYTES; + } + } + + if (cfg->auth_eng_id) { + upd_info->flags |= SA_CMDL_UPD_AUTH; + upd_info->auth_size.index = auth_offset >> 2; + upd_info->auth_offset.index = upd_info->auth_size.index + 1; + cmdl[auth_offset + SA_CMDL_OFFSET_NESC] = auth_next_eng; + cmdl[auth_offset + SA_CMDL_OFFSET_LABEL_LEN] = + SA_CMDL_HEADER_SIZE_BYTES; + total += SA_CMDL_HEADER_SIZE_BYTES; + } + + total = roundup(total, 8); + + for (i = 0; i < total / 4; i++) + word_ptr[i] = swab32(word_ptr[i]); + + return total; +} + +/* Update Command label */ +static inline void sa_update_cmdl(struct sa_req *req, u32 *cmdl, + struct sa_cmdl_upd_info *upd_info) +{ + int i = 0, j; + + if (likely(upd_info->flags & SA_CMDL_UPD_ENC)) { + cmdl[upd_info->enc_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK; + cmdl[upd_info->enc_size.index] |= req->enc_size; + cmdl[upd_info->enc_offset.index] &= + ~SA_CMDL_SOP_BYPASS_LEN_MASK; + cmdl[upd_info->enc_offset.index] |= + ((u32)req->enc_offset << + __ffs(SA_CMDL_SOP_BYPASS_LEN_MASK)); + + if (likely(upd_info->flags & SA_CMDL_UPD_ENC_IV)) { + __be32 *data = (__be32 *)&cmdl[upd_info->enc_iv.index]; + u32 *enc_iv = (u32 *)req->enc_iv; + + for (j = 0; i < upd_info->enc_iv.size; i += 4, j++) { + data[j] = cpu_to_be32(*enc_iv); + enc_iv++; + } + } + } + + if (likely(upd_info->flags & SA_CMDL_UPD_AUTH)) { + cmdl[upd_info->auth_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK; + cmdl[upd_info->auth_size.index] |= req->auth_size; + cmdl[upd_info->auth_offset.index] &= + ~SA_CMDL_SOP_BYPASS_LEN_MASK; + cmdl[upd_info->auth_offset.index] |= + ((u32)req->auth_offset << + __ffs(SA_CMDL_SOP_BYPASS_LEN_MASK)); + if (upd_info->flags & SA_CMDL_UPD_AUTH_IV) { + sa_copy_iv((void *)&cmdl[upd_info->auth_iv.index], + req->auth_iv, + (upd_info->auth_iv.size > 8)); + } + if (upd_info->flags & SA_CMDL_UPD_AUX_KEY) { + int offset = 
(req->auth_size & 0xF) ? 4 : 0; + + memcpy(&cmdl[upd_info->aux_key_info.index], + &upd_info->aux_key[offset], 16); + } + } +} + +/* Format SWINFO words to be sent to SA */ +static +void sa_set_swinfo(u8 eng_id, u16 sc_id, dma_addr_t sc_phys, + u8 cmdl_present, u8 cmdl_offset, u8 flags, + u8 hash_size, u32 *swinfo) +{ + swinfo[0] = sc_id; + swinfo[0] |= (flags << __ffs(SA_SW0_FLAGS_MASK)); + if (likely(cmdl_present)) + swinfo[0] |= ((cmdl_offset | SA_SW0_CMDL_PRESENT) << + __ffs(SA_SW0_CMDL_INFO_MASK)); + swinfo[0] |= (eng_id << __ffs(SA_SW0_ENG_ID_MASK)); + + swinfo[0] |= SA_SW0_DEST_INFO_PRESENT; + swinfo[1] = (u32)(sc_phys & 0xFFFFFFFFULL); + swinfo[2] = (u32)((sc_phys & 0xFFFFFFFF00000000ULL) >> 32); + swinfo[2] |= (hash_size << __ffs(SA_SW2_EGRESS_LENGTH)); +} + +/* Dump the security context */ +static void sa_dump_sc(u8 *buf, dma_addr_t dma_addr) +{ +#ifdef DEBUG + dev_info(sa_k3_dev, "Security context dump:: 0x%pad\n", &dma_addr); + print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET, + 16, 1, buf, SA_CTX_MAX_SZ, false); +#endif +} + +static +int sa_init_sc(struct sa_ctx_info *ctx, const u8 *enc_key, + u16 enc_key_sz, const u8 *auth_key, u16 auth_key_sz, + struct algo_data *ad, u8 enc, u32 *swinfo) +{ + int enc_sc_offset = 0; + int auth_sc_offset = 0; + u8 *sc_buf = ctx->sc; + u16 sc_id = ctx->sc_id; + u8 first_engine = 0; + + memzero_explicit(sc_buf, SA_CTX_MAX_SZ); + + if (ad->auth_eng.eng_id) { + if (enc) + first_engine = ad->enc_eng.eng_id; + else + first_engine = ad->auth_eng.eng_id; + + enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ; + auth_sc_offset = enc_sc_offset + ad->enc_eng.sc_size; + sc_buf[1] = SA_SCCTL_FE_AUTH_ENC; + if (!ad->hash_size) + return -EINVAL; + ad->hash_size = roundup(ad->hash_size, 8); + + } else if (ad->enc_eng.eng_id && !ad->auth_eng.eng_id) { + enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ; + first_engine = ad->enc_eng.eng_id; + sc_buf[1] = SA_SCCTL_FE_ENC; + ad->hash_size = ad->iv_out_size; + } + + /* SCCTL Owner info: 0=host, 1=CP_ACE */ + sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0; + memcpy(&sc_buf[2], &sc_id, 2); + sc_buf[4] = 0x0; + sc_buf[5] = PRIV_ID; + sc_buf[6] = PRIV; + sc_buf[7] = 0x0; + + /* Prepare context for encryption engine */ + if (ad->enc_eng.sc_size) { + if (sa_set_sc_enc(ad, enc_key, enc_key_sz, enc, + &sc_buf[enc_sc_offset])) + return -EINVAL; + } + + /* Prepare context for authentication engine */ + if (ad->auth_eng.sc_size) + sa_set_sc_auth(ad, auth_key, auth_key_sz, + &sc_buf[auth_sc_offset]); + + /* Set the ownership of context to CP_ACE */ + sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0x80; + + /* swizzle the security context */ + sa_swiz_128(sc_buf, SA_CTX_MAX_SZ); + + sa_set_swinfo(first_engine, ctx->sc_id, ctx->sc_phys, 1, 0, + SA_SW_INFO_FLAG_EVICT, ad->hash_size, swinfo); + + sa_dump_sc(sc_buf, ctx->sc_phys); + + return 0; +} + +/* Free the per direction context memory */ +static void sa_free_ctx_info(struct sa_ctx_info *ctx, + struct sa_crypto_data *data) +{ + unsigned long bn; + + bn = ctx->sc_id - data->sc_id_start; + spin_lock(&data->scid_lock); + __clear_bit(bn, data->ctx_bm); + data->sc_id--; + spin_unlock(&data->scid_lock); + + if (ctx->sc) { + dma_pool_free(data->sc_pool, ctx->sc, ctx->sc_phys); + ctx->sc = NULL; + } +} + +static int sa_init_ctx_info(struct sa_ctx_info *ctx, + struct sa_crypto_data *data) +{ + unsigned long bn; + int err; + + spin_lock(&data->scid_lock); + bn = find_first_zero_bit(data->ctx_bm, SA_MAX_NUM_CTX); + __set_bit(bn, data->ctx_bm); + data->sc_id++; + spin_unlock(&data->scid_lock); + + ctx->sc_id = (u16)(data->sc_id_start 
+ bn); + + ctx->sc = dma_pool_alloc(data->sc_pool, GFP_KERNEL, &ctx->sc_phys); + if (!ctx->sc) { + dev_err(&data->pdev->dev, "Failed to allocate SC memory\n"); + err = -ENOMEM; + goto scid_rollback; + } + + return 0; + +scid_rollback: + spin_lock(&data->scid_lock); + __clear_bit(bn, data->ctx_bm); + data->sc_id--; + spin_unlock(&data->scid_lock); + + return err; +} + +static void sa_cipher_cra_exit(struct crypto_skcipher *tfm) +{ + struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); + struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev); + + dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n", + __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys, + ctx->dec.sc_id, &ctx->dec.sc_phys); + + sa_free_ctx_info(&ctx->enc, data); + sa_free_ctx_info(&ctx->dec, data); + + crypto_free_sync_skcipher(ctx->fallback.skcipher); +} + +static int sa_cipher_cra_init(struct crypto_skcipher *tfm) +{ + struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); + struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev); + const char *name = crypto_tfm_alg_name(&tfm->base); + int ret; + + memzero_explicit(ctx, sizeof(*ctx)); + ctx->dev_data = data; + + ret = sa_init_ctx_info(&ctx->enc, data); + if (ret) + return ret; + ret = sa_init_ctx_info(&ctx->dec, data); + if (ret) { + sa_free_ctx_info(&ctx->enc, data); + return ret; + } + + ctx->fallback.skcipher = + crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); + + if (IS_ERR(ctx->fallback.skcipher)) { + dev_err(sa_k3_dev, "Error allocating fallback algo %s\n", name); + return PTR_ERR(ctx->fallback.skcipher); + } + + dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n", + __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys, + ctx->dec.sc_id, &ctx->dec.sc_phys); + return 0; +} + +static int sa_cipher_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen, struct algo_data *ad) +{ + struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); + int cmdl_len; + struct sa_cmdl_cfg cfg; + int ret; + + if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && + keylen != AES_KEYSIZE_256) + return -EINVAL; + + ad->enc_eng.eng_id = SA_ENG_ID_EM1; + ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ; + + memzero_explicit(&cfg, sizeof(cfg)); + cfg.enc_eng_id = ad->enc_eng.eng_id; + cfg.iv_size = crypto_skcipher_ivsize(tfm); + + crypto_sync_skcipher_clear_flags(ctx->fallback.skcipher, + CRYPTO_TFM_REQ_MASK); + crypto_sync_skcipher_set_flags(ctx->fallback.skcipher, + tfm->base.crt_flags & + CRYPTO_TFM_REQ_MASK); + ret = crypto_sync_skcipher_setkey(ctx->fallback.skcipher, key, keylen); + if (ret) + return ret; + + /* Setup Encryption Security Context & Command label template */ + if (sa_init_sc(&ctx->enc, key, keylen, NULL, 0, ad, 1, + &ctx->enc.epib[1])) + goto badkey; + + cmdl_len = sa_format_cmdl_gen(&cfg, + (u8 *)ctx->enc.cmdl, + &ctx->enc.cmdl_upd_info); + if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32))) + goto badkey; + + ctx->enc.cmdl_size = cmdl_len; + + /* Setup Decryption Security Context & Command label template */ + if (sa_init_sc(&ctx->dec, key, keylen, NULL, 0, ad, 0, + &ctx->dec.epib[1])) + goto badkey; + + cfg.enc_eng_id = ad->enc_eng.eng_id; + cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl, + &ctx->dec.cmdl_upd_info); + + if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32))) + goto badkey; + + ctx->dec.cmdl_size = cmdl_len; + ctx->iv_idx = ad->iv_idx; + + return 0; + +badkey: + dev_err(sa_k3_dev, "%s: badkey\n", __func__); + return -EINVAL; +} + +static int sa_aes_cbc_setkey(struct 
crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct algo_data ad = { 0 }; + /* Convert the key size (16/24/32) to the key size index (0/1/2) */ + int key_idx = (keylen >> 3) - 2; + + if (key_idx >= 3) + return -EINVAL; + + ad.mci_enc = mci_cbc_enc_array[key_idx]; + ad.mci_dec = mci_cbc_dec_array[key_idx]; + ad.inv_key = true; + ad.ealg_id = SA_EALG_ID_AES_CBC; + ad.iv_idx = 4; + ad.iv_out_size = 16; + + return sa_cipher_setkey(tfm, key, keylen, &ad); +} + +static int sa_aes_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct algo_data ad = { 0 }; + /* Convert the key size (16/24/32) to the key size index (0/1/2) */ + int key_idx = (keylen >> 3) - 2; + + if (key_idx >= 3) + return -EINVAL; + + ad.mci_enc = mci_ecb_enc_array[key_idx]; + ad.mci_dec = mci_ecb_dec_array[key_idx]; + ad.inv_key = true; + ad.ealg_id = SA_EALG_ID_AES_ECB; + + return sa_cipher_setkey(tfm, key, keylen, &ad); +} + +static int sa_3des_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct algo_data ad = { 0 }; + + ad.mci_enc = mci_cbc_3des_enc_array; + ad.mci_dec = mci_cbc_3des_dec_array; + ad.ealg_id = SA_EALG_ID_3DES_CBC; + ad.iv_idx = 6; + ad.iv_out_size = 8; + + return sa_cipher_setkey(tfm, key, keylen, &ad); +} + +static int sa_3des_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct algo_data ad = { 0 }; + + ad.mci_enc = mci_ecb_3des_enc_array; + ad.mci_dec = mci_ecb_3des_dec_array; + + return sa_cipher_setkey(tfm, key, keylen, &ad); +} + +static void sa_aes_dma_in_callback(void *data) +{ + struct sa_rx_data *rxd = (struct sa_rx_data *)data; + struct skcipher_request *req; + int sglen; + u32 *result; + __be32 *mdptr; + size_t ml, pl; + int i; + enum dma_data_direction dir_src; + bool diff_dst; + + req = container_of(rxd->req, struct skcipher_request, base); + sglen = sg_nents_for_len(req->src, req->cryptlen); + + diff_dst = (req->src != req->dst) ? true : false; + dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL; + + if (req->iv) { + mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, + &ml); + result = (u32 *)req->iv; + + for (i = 0; i < (rxd->enc_iv_size / 4); i++) + result[i] = be32_to_cpu(mdptr[i + rxd->iv_idx]); + } + + dma_unmap_sg(rxd->ddev, req->src, sglen, dir_src); + kfree(rxd->split_src_sg); + + if (diff_dst) { + sglen = sg_nents_for_len(req->dst, req->cryptlen); + + dma_unmap_sg(rxd->ddev, req->dst, sglen, + DMA_FROM_DEVICE); + kfree(rxd->split_dst_sg); + } + + kfree(rxd); + + skcipher_request_complete(req, 0); +} + +static void +sa_prepare_tx_desc(u32 *mdptr, u32 pslen, u32 *psdata, u32 epiblen, u32 *epib) +{ + u32 *out, *in; + int i; + + for (out = mdptr, in = epib, i = 0; i < epiblen / sizeof(u32); i++) + *out++ = *in++; + + mdptr[4] = (0xFFFF << 16); + for (out = &mdptr[5], in = psdata, i = 0; + i < pslen / sizeof(u32); i++) + *out++ = *in++; +} + +static int sa_run(struct sa_req *req) +{ + struct sa_rx_data *rxd; + gfp_t gfp_flags; + u32 cmdl[SA_MAX_CMDL_WORDS]; + struct sa_crypto_data *pdata = dev_get_drvdata(sa_k3_dev); + struct device *ddev; + struct dma_chan *dma_rx; + int sg_nents, src_nents, dst_nents; + int mapped_src_nents, mapped_dst_nents; + struct scatterlist *src, *dst; + size_t pl, ml, split_size; + struct sa_ctx_info *sa_ctx = req->enc ? 
&req->ctx->enc : &req->ctx->dec; + int ret; + struct dma_async_tx_descriptor *tx_out; + u32 *mdptr; + bool diff_dst; + enum dma_data_direction dir_src; + + gfp_flags = req->base->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? + GFP_KERNEL : GFP_ATOMIC; + + rxd = kzalloc(sizeof(*rxd), gfp_flags); + if (!rxd) + return -ENOMEM; + + if (req->src != req->dst) { + diff_dst = true; + dir_src = DMA_TO_DEVICE; + } else { + diff_dst = false; + dir_src = DMA_BIDIRECTIONAL; + } + + /* + * SA2UL has an interesting feature where the receive DMA channel + * is selected based on the data passed to the engine. Within the + * transition range, there is also a space where it is impossible + * to determine where the data will end up, and this should be + * avoided. This will be handled by the SW fallback mechanism by + * the individual algorithm implementations. + */ + if (req->size >= 256) + dma_rx = pdata->dma_rx2; + else + dma_rx = pdata->dma_rx1; + + ddev = dma_rx->device->dev; + + memcpy(cmdl, sa_ctx->cmdl, sa_ctx->cmdl_size); + + sa_update_cmdl(req, cmdl, &sa_ctx->cmdl_upd_info); + + if (req->type != CRYPTO_ALG_TYPE_AHASH) { + if (req->enc) + req->type |= + (SA_REQ_SUBTYPE_ENC << SA_REQ_SUBTYPE_SHIFT); + else + req->type |= + (SA_REQ_SUBTYPE_DEC << SA_REQ_SUBTYPE_SHIFT); + } + + cmdl[sa_ctx->cmdl_size / sizeof(u32)] = req->type; + + /* + * Map the packets, first we check if the data fits into a single + * sg entry and use that if possible. If it does not fit, we check + * if we need to do sg_split to align the scatterlist data on the + * actual data size being processed by the crypto engine. + */ + src = req->src; + sg_nents = sg_nents_for_len(src, req->size); + + split_size = req->size; + + if (sg_nents == 1 && split_size <= req->src->length) { + src = &rxd->rx_sg; + sg_init_table(src, 1); + sg_set_page(src, sg_page(req->src), split_size, + req->src->offset); + src_nents = 1; + dma_map_sg(ddev, src, sg_nents, dir_src); + } else { + mapped_src_nents = dma_map_sg(ddev, req->src, sg_nents, + dir_src); + ret = sg_split(req->src, mapped_src_nents, 0, 1, &split_size, + &src, &src_nents, gfp_flags); + if (ret) { + src_nents = sg_nents; + src = req->src; + } else { + rxd->split_src_sg = src; + } + } + + if (!diff_dst) { + dst_nents = src_nents; + dst = src; + } else { + dst_nents = sg_nents_for_len(req->dst, req->size); + + if (dst_nents == 1 && split_size <= req->dst->length) { + dst = &rxd->tx_sg; + sg_init_table(dst, 1); + sg_set_page(dst, sg_page(req->dst), split_size, + req->dst->offset); + dst_nents = 1; + dma_map_sg(ddev, dst, dst_nents, DMA_FROM_DEVICE); + } else { + mapped_dst_nents = dma_map_sg(ddev, req->dst, dst_nents, + DMA_FROM_DEVICE); + ret = sg_split(req->dst, mapped_dst_nents, 0, 1, + &split_size, &dst, &dst_nents, + gfp_flags); + if (ret) { + dst_nents = dst_nents; + dst = req->dst; + } else { + rxd->split_dst_sg = dst; + } + } + } + + if (unlikely(src_nents != sg_nents)) { + dev_warn_ratelimited(sa_k3_dev, "failed to map tx pkt\n"); + ret = -EIO; + goto err_cleanup; + } + + rxd->tx_in = dmaengine_prep_slave_sg(dma_rx, dst, dst_nents, + DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!rxd->tx_in) { + dev_err(pdata->dev, "IN prep_slave_sg() failed\n"); + ret = -EINVAL; + goto err_cleanup; + } + + rxd->req = (void *)req->base; + rxd->enc = req->enc; + rxd->ddev = ddev; + rxd->src = src; + rxd->dst = dst; + rxd->iv_idx = req->ctx->iv_idx; + rxd->enc_iv_size = sa_ctx->cmdl_upd_info.enc_iv.size; + rxd->tx_in->callback = req->callback; + rxd->tx_in->callback_param = rxd; + + tx_out = 
dmaengine_prep_slave_sg(pdata->dma_tx, src, + src_nents, DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + + if (!tx_out) { + dev_err(pdata->dev, "OUT prep_slave_sg() failed\n"); + ret = -EINVAL; + goto err_cleanup; + } + + /* + * Prepare metadata for DMA engine. This essentially describes the + * crypto algorithm to be used, data sizes, different keys etc. + */ + mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(tx_out, &pl, &ml); + + sa_prepare_tx_desc(mdptr, (sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS * + sizeof(u32))), cmdl, sizeof(sa_ctx->epib), + sa_ctx->epib); + + ml = sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS * sizeof(u32)); + dmaengine_desc_set_metadata_len(tx_out, req->mdata_size); + + dmaengine_submit(tx_out); + dmaengine_submit(rxd->tx_in); + + dma_async_issue_pending(dma_rx); + dma_async_issue_pending(pdata->dma_tx); + + return -EINPROGRESS; + +err_cleanup: + dma_unmap_sg(ddev, req->src, sg_nents, DMA_TO_DEVICE); + kfree(rxd->split_src_sg); + + if (req->src != req->dst) { + dst_nents = sg_nents_for_len(req->dst, req->size); + dma_unmap_sg(ddev, req->dst, dst_nents, DMA_FROM_DEVICE); + kfree(rxd->split_dst_sg); + } + + kfree(rxd); + + return ret; +} + +static int sa_cipher_run(struct skcipher_request *req, u8 *iv, int enc) +{ + struct sa_tfm_ctx *ctx = + crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); + struct crypto_alg *alg = req->base.tfm->__crt_alg; + struct sa_req sa_req = { 0 }; + int ret; + + if (!req->cryptlen) + return 0; + + if (req->cryptlen % alg->cra_blocksize) + return -EINVAL; + + /* Use SW fallback if the data size is not supported */ + if (req->cryptlen > SA_MAX_DATA_SZ || + (req->cryptlen >= SA_UNSAFE_DATA_SZ_MIN && + req->cryptlen <= SA_UNSAFE_DATA_SZ_MAX)) { + SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback.skcipher); + + skcipher_request_set_sync_tfm(subreq, ctx->fallback.skcipher); + skcipher_request_set_callback(subreq, req->base.flags, + NULL, NULL); + skcipher_request_set_crypt(subreq, req->src, req->dst, + req->cryptlen, req->iv); + if (enc) + ret = crypto_skcipher_encrypt(subreq); + else + ret = crypto_skcipher_decrypt(subreq); + + skcipher_request_zero(subreq); + return ret; + } + + sa_req.size = req->cryptlen; + sa_req.enc_size = req->cryptlen; + sa_req.src = req->src; + sa_req.dst = req->dst; + sa_req.enc_iv = iv; + sa_req.type = CRYPTO_ALG_TYPE_SKCIPHER; + sa_req.enc = enc; + sa_req.callback = sa_aes_dma_in_callback; + sa_req.mdata_size = 44; + sa_req.base = &req->base; + sa_req.ctx = ctx; + + return sa_run(&sa_req); +} + +static int sa_encrypt(struct skcipher_request *req) +{ + return sa_cipher_run(req, req->iv, 1); +} + +static int sa_decrypt(struct skcipher_request *req) +{ + return sa_cipher_run(req, req->iv, 0); +} + +static void sa_sha_dma_in_callback(void *data) +{ + struct sa_rx_data *rxd = (struct sa_rx_data *)data; + struct ahash_request *req; + struct crypto_ahash *tfm; + unsigned int authsize; + int i, sg_nents; + size_t ml, pl; + u32 *result; + __be32 *mdptr; + + req = container_of(rxd->req, struct ahash_request, base); + tfm = crypto_ahash_reqtfm(req); + authsize = crypto_ahash_digestsize(tfm); + + mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml); + result = (u32 *)req->result; + + for (i = 0; i < (authsize / 4); i++) + result[i] = be32_to_cpu(mdptr[i + 4]); + + sg_nents = sg_nents_for_len(req->src, req->nbytes); + dma_unmap_sg(rxd->ddev, req->src, sg_nents, DMA_FROM_DEVICE); + + kfree(rxd->split_src_sg); + + kfree(rxd); + + ahash_request_complete(req, 0); +} + +static int zero_message_process(struct 
ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + int sa_digest_size = crypto_ahash_digestsize(tfm); + + switch (sa_digest_size) { + case SHA1_DIGEST_SIZE: + memcpy(req->result, sha1_zero_message_hash, sa_digest_size); + break; + case SHA256_DIGEST_SIZE: + memcpy(req->result, sha256_zero_message_hash, sa_digest_size); + break; + case SHA512_DIGEST_SIZE: + memcpy(req->result, sha512_zero_message_hash, sa_digest_size); + break; + default: + return -EINVAL; + } + + return 0; +} + +static int sa_sha_run(struct ahash_request *req) +{ + struct sa_tfm_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); + struct sa_sha_req_ctx *rctx = ahash_request_ctx(req); + struct sa_req sa_req = { 0 }; + size_t auth_len; + + auth_len = req->nbytes; + + if (!auth_len) + return zero_message_process(req); + + if (auth_len > SA_MAX_DATA_SZ || + (auth_len >= SA_UNSAFE_DATA_SZ_MIN && + auth_len <= SA_UNSAFE_DATA_SZ_MAX)) { + struct ahash_request *subreq = &rctx->fallback_req; + int ret = 0; + + ahash_request_set_tfm(subreq, ctx->fallback.ahash); + subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + + crypto_ahash_init(subreq); + + subreq->nbytes = auth_len; + subreq->src = req->src; + subreq->result = req->result; + + ret |= crypto_ahash_update(subreq); + + subreq->nbytes = 0; + + ret |= crypto_ahash_final(subreq); + + return ret; + } + + sa_req.size = auth_len; + sa_req.auth_size = auth_len; + sa_req.src = req->src; + sa_req.dst = req->src; + sa_req.enc = true; + sa_req.type = CRYPTO_ALG_TYPE_AHASH; + sa_req.callback = sa_sha_dma_in_callback; + sa_req.mdata_size = 28; + sa_req.ctx = ctx; + sa_req.base = &req->base; + + return sa_run(&sa_req); +} + +static int sa_sha_setup(struct sa_tfm_ctx *ctx, struct algo_data *ad) +{ + int bs = crypto_shash_blocksize(ctx->shash); + int cmdl_len; + struct sa_cmdl_cfg cfg; + + ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ; + ad->auth_eng.eng_id = SA_ENG_ID_AM1; + ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ; + + memset(ctx->authkey, 0, bs); + memset(&cfg, 0, sizeof(cfg)); + cfg.aalg = ad->aalg_id; + cfg.enc_eng_id = ad->enc_eng.eng_id; + cfg.auth_eng_id = ad->auth_eng.eng_id; + cfg.iv_size = 0; + cfg.akey = NULL; + cfg.akey_len = 0; + + /* Setup Encryption Security Context & Command label template */ + if (sa_init_sc(&ctx->enc, NULL, 0, NULL, 0, ad, 0, + &ctx->enc.epib[1])) + goto badkey; + + cmdl_len = sa_format_cmdl_gen(&cfg, + (u8 *)ctx->enc.cmdl, + &ctx->enc.cmdl_upd_info); + if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32))) + goto badkey; + + ctx->enc.cmdl_size = cmdl_len; + + return 0; + +badkey: + dev_err(sa_k3_dev, "%s: badkey\n", __func__); + return -EINVAL; +} + +static int sa_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base) +{ + struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm); + struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev); + int ret; + + memset(ctx, 0, sizeof(*ctx)); + ctx->dev_data = data; + ret = sa_init_ctx_info(&ctx->enc, data); + if (ret) + return ret; + + if (alg_base) { + ctx->shash = crypto_alloc_shash(alg_base, 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->shash)) { + dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n", + alg_base); + return PTR_ERR(ctx->shash); + } + /* for fallback */ + ctx->fallback.ahash = + crypto_alloc_ahash(alg_base, 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->fallback.ahash)) { + dev_err(ctx->dev_data->dev, + "Could not load fallback driver\n"); + return PTR_ERR(ctx->fallback.ahash); + } + } + + dev_dbg(sa_k3_dev, "%s(0x%p) 
sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n", + __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys, + ctx->dec.sc_id, &ctx->dec.sc_phys); + + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + sizeof(struct sa_sha_req_ctx) + + crypto_ahash_reqsize(ctx->fallback.ahash)); + + return 0; +} + +static int sa_sha_digest(struct ahash_request *req) +{ + return sa_sha_run(req); +} + +static int sa_sha_init(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct sa_sha_req_ctx *rctx = ahash_request_ctx(req); + struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm); + + dev_dbg(sa_k3_dev, "init: digest size: %d, rctx=%llx\n", + crypto_ahash_digestsize(tfm), (u64)rctx); + + ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash); + rctx->fallback_req.base.flags = + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + + return crypto_ahash_init(&rctx->fallback_req); +} + +static int sa_sha_update(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct sa_sha_req_ctx *rctx = ahash_request_ctx(req); + struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash); + rctx->fallback_req.base.flags = + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + rctx->fallback_req.nbytes = req->nbytes; + rctx->fallback_req.src = req->src; + + return crypto_ahash_update(&rctx->fallback_req); +} + +static int sa_sha_final(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct sa_sha_req_ctx *rctx = ahash_request_ctx(req); + struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash); + rctx->fallback_req.base.flags = + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + rctx->fallback_req.result = req->result; + + return crypto_ahash_final(&rctx->fallback_req); +} + +static int sa_sha_finup(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct sa_sha_req_ctx *rctx = ahash_request_ctx(req); + struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash); + rctx->fallback_req.base.flags = + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + + rctx->fallback_req.nbytes = req->nbytes; + rctx->fallback_req.src = req->src; + rctx->fallback_req.result = req->result; + + return crypto_ahash_finup(&rctx->fallback_req); +} + +static int sa_sha_import(struct ahash_request *req, const void *in) +{ + struct sa_sha_req_ctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash); + rctx->fallback_req.base.flags = req->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP; + + return crypto_ahash_import(&rctx->fallback_req, in); +} + +static int sa_sha_export(struct ahash_request *req, void *out) +{ + struct sa_sha_req_ctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm); + struct ahash_request *subreq = &rctx->fallback_req; + + ahash_request_set_tfm(subreq, ctx->fallback.ahash); + subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + + return crypto_ahash_export(subreq, out); +} + +static int sa_sha1_cra_init(struct crypto_tfm *tfm) +{ + struct algo_data ad = { 0 }; + struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm); + + sa_sha_cra_init_alg(tfm, "sha1"); + + ad.aalg_id = SA_AALG_ID_SHA1; + ad.hash_size = SHA1_DIGEST_SIZE; + 
ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA1; + + sa_sha_setup(ctx, &ad); + + return 0; +} + +static int sa_sha256_cra_init(struct crypto_tfm *tfm) +{ + struct algo_data ad = { 0 }; + struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm); + + sa_sha_cra_init_alg(tfm, "sha256"); + + ad.aalg_id = SA_AALG_ID_SHA2_256; + ad.hash_size = SHA256_DIGEST_SIZE; + ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA256; + + sa_sha_setup(ctx, &ad); + + return 0; +} + +static int sa_sha512_cra_init(struct crypto_tfm *tfm) +{ + struct algo_data ad = { 0 }; + struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm); + + sa_sha_cra_init_alg(tfm, "sha512"); + + ad.aalg_id = SA_AALG_ID_SHA2_512; + ad.hash_size = SHA512_DIGEST_SIZE; + ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA512; + + sa_sha_setup(ctx, &ad); + + return 0; +} + +static void sa_sha_cra_exit(struct crypto_tfm *tfm) +{ + struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm); + struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev); + + dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n", + __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys, + ctx->dec.sc_id, &ctx->dec.sc_phys); + + if (crypto_tfm_alg_type(tfm) == CRYPTO_ALG_TYPE_AHASH) + sa_free_ctx_info(&ctx->enc, data); + + crypto_free_shash(ctx->shash); + crypto_free_ahash(ctx->fallback.ahash); +} + +static void sa_aead_dma_in_callback(void *data) +{ + struct sa_rx_data *rxd = (struct sa_rx_data *)data; + struct aead_request *req; + struct crypto_aead *tfm; + unsigned int start; + unsigned int authsize; + u8 auth_tag[SA_MAX_AUTH_TAG_SZ]; + size_t pl, ml; + int i, sglen; + int err = 0; + u16 auth_len; + u32 *mdptr; + bool diff_dst; + enum dma_data_direction dir_src; + + req = container_of(rxd->req, struct aead_request, base); + tfm = crypto_aead_reqtfm(req); + start = req->assoclen + req->cryptlen; + authsize = crypto_aead_authsize(tfm); + + diff_dst = (req->src != req->dst) ? true : false; + dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL; + + mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml); + for (i = 0; i < (authsize / 4); i++) + mdptr[i + 4] = swab32(mdptr[i + 4]); + + auth_len = req->assoclen + req->cryptlen; + if (!rxd->enc) + auth_len -= authsize; + + sglen = sg_nents_for_len(rxd->src, auth_len); + dma_unmap_sg(rxd->ddev, rxd->src, sglen, dir_src); + kfree(rxd->split_src_sg); + + if (diff_dst) { + sglen = sg_nents_for_len(rxd->dst, auth_len); + dma_unmap_sg(rxd->ddev, rxd->dst, sglen, DMA_FROM_DEVICE); + kfree(rxd->split_dst_sg); + } + + if (rxd->enc) { + scatterwalk_map_and_copy(&mdptr[4], req->dst, start, authsize, + 1); + } else { + start -= authsize; + scatterwalk_map_and_copy(auth_tag, req->src, start, authsize, + 0); + + err = memcmp(&mdptr[4], auth_tag, authsize) ? 
-EBADMSG : 0; + } + + kfree(rxd); + + aead_request_complete(req, err); +} + +static int sa_cra_init_aead(struct crypto_aead *tfm, const char *hash, + const char *fallback) +{ + struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm); + struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev); + int ret; + + memzero_explicit(ctx, sizeof(*ctx)); + + ctx->shash = crypto_alloc_shash(hash, 0, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->shash)) { + dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n", hash); + return PTR_ERR(ctx->shash); + } + + ctx->fallback.aead = crypto_alloc_aead(fallback, 0, + CRYPTO_ALG_NEED_FALLBACK); + + if (IS_ERR(ctx->fallback.aead)) { + dev_err(sa_k3_dev, "fallback driver %s couldn't be loaded\n", + fallback); + return PTR_ERR(ctx->fallback.aead); + } + + crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) + + crypto_aead_reqsize(ctx->fallback.aead)); + + ret = sa_init_ctx_info(&ctx->enc, data); + if (ret) + return ret; + + ret = sa_init_ctx_info(&ctx->dec, data); + if (ret) { + sa_free_ctx_info(&ctx->enc, data); + return ret; + } + + dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n", + __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys, + ctx->dec.sc_id, &ctx->dec.sc_phys); + + return ret; +} + +static int sa_cra_init_aead_sha1(struct crypto_aead *tfm) +{ + return sa_cra_init_aead(tfm, "sha1", + "authenc(hmac(sha1-ce),cbc(aes-ce))"); +} + +static int sa_cra_init_aead_sha256(struct crypto_aead *tfm) +{ + return sa_cra_init_aead(tfm, "sha256", + "authenc(hmac(sha256-ce),cbc(aes-ce))"); +} + +static void sa_exit_tfm_aead(struct crypto_aead *tfm) +{ + struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm); + struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev); + + crypto_free_shash(ctx->shash); + crypto_free_aead(ctx->fallback.aead); + + sa_free_ctx_info(&ctx->enc, data); + sa_free_ctx_info(&ctx->dec, data); +} + +/* AEAD algorithm configuration interface function */ +static int sa_aead_setkey(struct crypto_aead *authenc, + const u8 *key, unsigned int keylen, + struct algo_data *ad) +{ + struct sa_tfm_ctx *ctx = crypto_aead_ctx(authenc); + struct crypto_authenc_keys keys; + int cmdl_len; + struct sa_cmdl_cfg cfg; + int key_idx; + + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) + return -EINVAL; + + /* Convert the key size (16/24/32) to the key size index (0/1/2) */ + key_idx = (keys.enckeylen >> 3) - 2; + if (key_idx >= 3) + return -EINVAL; + + ad->ctx = ctx; + ad->enc_eng.eng_id = SA_ENG_ID_EM1; + ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ; + ad->auth_eng.eng_id = SA_ENG_ID_AM1; + ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ; + ad->mci_enc = mci_cbc_enc_no_iv_array[key_idx]; + ad->mci_dec = mci_cbc_dec_no_iv_array[key_idx]; + ad->inv_key = true; + ad->keyed_mac = true; + ad->ealg_id = SA_EALG_ID_AES_CBC; + ad->prep_iopad = sa_prepare_iopads; + + memset(&cfg, 0, sizeof(cfg)); + cfg.enc = true; + cfg.aalg = ad->aalg_id; + cfg.enc_eng_id = ad->enc_eng.eng_id; + cfg.auth_eng_id = ad->auth_eng.eng_id; + cfg.iv_size = crypto_aead_ivsize(authenc); + cfg.akey = keys.authkey; + cfg.akey_len = keys.authkeylen; + + /* Setup Encryption Security Context & Command label template */ + if (sa_init_sc(&ctx->enc, keys.enckey, keys.enckeylen, + keys.authkey, keys.authkeylen, + ad, 1, &ctx->enc.epib[1])) + return -EINVAL; + + cmdl_len = sa_format_cmdl_gen(&cfg, + (u8 *)ctx->enc.cmdl, + &ctx->enc.cmdl_upd_info); + if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32))) + return -EINVAL; + + ctx->enc.cmdl_size = cmdl_len; + + /* Setup Decryption Security 
Context & Command label template */ + if (sa_init_sc(&ctx->dec, keys.enckey, keys.enckeylen, + keys.authkey, keys.authkeylen, + ad, 0, &ctx->dec.epib[1])) + return -EINVAL; + + cfg.enc = false; + cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl, + &ctx->dec.cmdl_upd_info); + + if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32))) + return -EINVAL; + + ctx->dec.cmdl_size = cmdl_len; + + crypto_aead_clear_flags(ctx->fallback.aead, CRYPTO_TFM_REQ_MASK); + crypto_aead_set_flags(ctx->fallback.aead, + crypto_aead_get_flags(authenc) & + CRYPTO_TFM_REQ_MASK); + crypto_aead_setkey(ctx->fallback.aead, key, keylen); + + return 0; +} + +static int sa_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize) +{ + struct sa_tfm_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm)); + + return crypto_aead_setauthsize(ctx->fallback.aead, authsize); +} + +static int sa_aead_cbc_sha1_setkey(struct crypto_aead *authenc, + const u8 *key, unsigned int keylen) +{ + struct algo_data ad = { 0 }; + + ad.ealg_id = SA_EALG_ID_AES_CBC; + ad.aalg_id = SA_AALG_ID_HMAC_SHA1; + ad.hash_size = SHA1_DIGEST_SIZE; + ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA1; + + return sa_aead_setkey(authenc, key, keylen, &ad); +} + +static int sa_aead_cbc_sha256_setkey(struct crypto_aead *authenc, + const u8 *key, unsigned int keylen) +{ + struct algo_data ad = { 0 }; + + ad.ealg_id = SA_EALG_ID_AES_CBC; + ad.aalg_id = SA_AALG_ID_HMAC_SHA2_256; + ad.hash_size = SHA256_DIGEST_SIZE; + ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA256; + + return sa_aead_setkey(authenc, key, keylen, &ad); +} + +static int sa_aead_run(struct aead_request *req, u8 *iv, int enc) +{ + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm); + struct sa_req sa_req = { 0 }; + size_t auth_size, enc_size; + + enc_size = req->cryptlen; + auth_size = req->assoclen + req->cryptlen; + + if (!enc) { + enc_size -= crypto_aead_authsize(tfm); + auth_size -= crypto_aead_authsize(tfm); + } + + if (auth_size > SA_MAX_DATA_SZ || + (auth_size >= SA_UNSAFE_DATA_SZ_MIN && + auth_size <= SA_UNSAFE_DATA_SZ_MAX)) { + struct aead_request *subreq = aead_request_ctx(req); + int ret; + + aead_request_set_tfm(subreq, ctx->fallback.aead); + aead_request_set_callback(subreq, req->base.flags, + req->base.complete, req->base.data); + aead_request_set_crypt(subreq, req->src, req->dst, + req->cryptlen, req->iv); + aead_request_set_ad(subreq, req->assoclen); + + ret = enc ? 
crypto_aead_encrypt(subreq) : + crypto_aead_decrypt(subreq); + return ret; + } + + sa_req.enc_offset = req->assoclen; + sa_req.enc_size = enc_size; + sa_req.auth_size = auth_size; + sa_req.size = auth_size; + sa_req.enc_iv = iv; + sa_req.type = CRYPTO_ALG_TYPE_AEAD; + sa_req.enc = enc; + sa_req.callback = sa_aead_dma_in_callback; + sa_req.mdata_size = 52; + sa_req.base = &req->base; + sa_req.ctx = ctx; + sa_req.src = req->src; + sa_req.dst = req->dst; + + return sa_run(&sa_req); +} + +/* AEAD algorithm encrypt interface function */ +static int sa_aead_encrypt(struct aead_request *req) +{ + return sa_aead_run(req, req->iv, 1); +} + +/* AEAD algorithm decrypt interface function */ +static int sa_aead_decrypt(struct aead_request *req) +{ + return sa_aead_run(req, req->iv, 0); +} + +static struct sa_alg_tmpl sa_algs[] = { + { + .type = CRYPTO_ALG_TYPE_SKCIPHER, + .alg.skcipher = { + .base.cra_name = "cbc(aes)", + .base.cra_driver_name = "cbc-aes-sa2ul", + .base.cra_priority = 30000, + .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct sa_tfm_ctx), + .base.cra_module = THIS_MODULE, + .init = sa_cipher_cra_init, + .exit = sa_cipher_cra_exit, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = sa_aes_cbc_setkey, + .encrypt = sa_encrypt, + .decrypt = sa_decrypt, + } + }, + { + .type = CRYPTO_ALG_TYPE_SKCIPHER, + .alg.skcipher = { + .base.cra_name = "ecb(aes)", + .base.cra_driver_name = "ecb-aes-sa2ul", + .base.cra_priority = 30000, + .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct sa_tfm_ctx), + .base.cra_module = THIS_MODULE, + .init = sa_cipher_cra_init, + .exit = sa_cipher_cra_exit, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = sa_aes_ecb_setkey, + .encrypt = sa_encrypt, + .decrypt = sa_decrypt, + } + }, + { + .type = CRYPTO_ALG_TYPE_SKCIPHER, + .alg.skcipher = { + .base.cra_name = "cbc(des3_ede)", + .base.cra_driver_name = "cbc-des3-sa2ul", + .base.cra_priority = 30000, + .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .base.cra_blocksize = DES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct sa_tfm_ctx), + .base.cra_module = THIS_MODULE, + .init = sa_cipher_cra_init, + .exit = sa_cipher_cra_exit, + .min_keysize = 3 * DES_KEY_SIZE, + .max_keysize = 3 * DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + .setkey = sa_3des_cbc_setkey, + .encrypt = sa_encrypt, + .decrypt = sa_decrypt, + } + }, + { + .type = CRYPTO_ALG_TYPE_SKCIPHER, + .alg.skcipher = { + .base.cra_name = "ecb(des3_ede)", + .base.cra_driver_name = "ecb-des3-sa2ul", + .base.cra_priority = 30000, + .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .base.cra_blocksize = DES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct sa_tfm_ctx), + .base.cra_module = THIS_MODULE, + .init = sa_cipher_cra_init, + .exit = sa_cipher_cra_exit, + .min_keysize = 3 * DES_KEY_SIZE, + .max_keysize = 3 * DES_KEY_SIZE, + .setkey = sa_3des_ecb_setkey, + .encrypt = sa_encrypt, + .decrypt = sa_decrypt, + } + }, + { + .type = CRYPTO_ALG_TYPE_AHASH, + .alg.ahash = { + .halg.base = { + .cra_name = "sha1", + .cra_driver_name = 
"sha1-sa2ul", + .cra_priority = 400, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sa_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_init = sa_sha1_cra_init, + .cra_exit = sa_sha_cra_exit, + }, + .halg.digestsize = SHA1_DIGEST_SIZE, + .halg.statesize = sizeof(struct sa_sha_req_ctx) + + sizeof(struct sha1_state), + .init = sa_sha_init, + .update = sa_sha_update, + .final = sa_sha_final, + .finup = sa_sha_finup, + .digest = sa_sha_digest, + .export = sa_sha_export, + .import = sa_sha_import, + }, + }, + { + .type = CRYPTO_ALG_TYPE_AHASH, + .alg.ahash = { + .halg.base = { + .cra_name = "sha256", + .cra_driver_name = "sha256-sa2ul", + .cra_priority = 400, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sa_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_init = sa_sha256_cra_init, + .cra_exit = sa_sha_cra_exit, + }, + .halg.digestsize = SHA256_DIGEST_SIZE, + .halg.statesize = sizeof(struct sa_sha_req_ctx) + + sizeof(struct sha256_state), + .init = sa_sha_init, + .update = sa_sha_update, + .final = sa_sha_final, + .finup = sa_sha_finup, + .digest = sa_sha_digest, + .export = sa_sha_export, + .import = sa_sha_import, + }, + }, + { + .type = CRYPTO_ALG_TYPE_AHASH, + .alg.ahash = { + .halg.base = { + .cra_name = "sha512", + .cra_driver_name = "sha512-sa2ul", + .cra_priority = 400, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA512_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sa_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_init = sa_sha512_cra_init, + .cra_exit = sa_sha_cra_exit, + }, + .halg.digestsize = SHA512_DIGEST_SIZE, + .halg.statesize = sizeof(struct sa_sha_req_ctx) + + sizeof(struct sha512_state), + .init = sa_sha_init, + .update = sa_sha_update, + .final = sa_sha_final, + .finup = sa_sha_finup, + .digest = sa_sha_digest, + .export = sa_sha_export, + .import = sa_sha_import, + }, + }, + { + .type = CRYPTO_ALG_TYPE_AEAD, + .alg.aead = { + .base = { + .cra_name = "authenc(hmac(sha1),cbc(aes))", + .cra_driver_name = + "authenc(hmac(sha1),cbc(aes))-sa2ul", + .cra_blocksize = AES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_ctxsize = sizeof(struct sa_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_priority = 3000, + }, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + + .init = sa_cra_init_aead_sha1, + .exit = sa_exit_tfm_aead, + .setkey = sa_aead_cbc_sha1_setkey, + .setauthsize = sa_aead_setauthsize, + .encrypt = sa_aead_encrypt, + .decrypt = sa_aead_decrypt, + }, + }, + { + .type = CRYPTO_ALG_TYPE_AEAD, + .alg.aead = { + .base = { + .cra_name = "authenc(hmac(sha256),cbc(aes))", + .cra_driver_name = + "authenc(hmac(sha256),cbc(aes))-sa2ul", + .cra_blocksize = AES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_ctxsize = sizeof(struct sa_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_alignmask = 0, + .cra_priority = 3000, + }, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + + .init = sa_cra_init_aead_sha256, + .exit = sa_exit_tfm_aead, + .setkey = sa_aead_cbc_sha256_setkey, + .setauthsize = sa_aead_setauthsize, + 
.encrypt = sa_aead_encrypt, + .decrypt = sa_aead_decrypt, + }, + }, +}; + +/* Register the algorithms in crypto framework */ +static void sa_register_algos(const struct device *dev) +{ + char *alg_name; + u32 type; + int i, err; + + for (i = 0; i < ARRAY_SIZE(sa_algs); i++) { + type = sa_algs[i].type; + if (type == CRYPTO_ALG_TYPE_SKCIPHER) { + alg_name = sa_algs[i].alg.skcipher.base.cra_name; + err = crypto_register_skcipher(&sa_algs[i].alg.skcipher); + } else if (type == CRYPTO_ALG_TYPE_AHASH) { + alg_name = sa_algs[i].alg.ahash.halg.base.cra_name; + err = crypto_register_ahash(&sa_algs[i].alg.ahash); + } else if (type == CRYPTO_ALG_TYPE_AEAD) { + alg_name = sa_algs[i].alg.aead.base.cra_name; + err = crypto_register_aead(&sa_algs[i].alg.aead); + } else { + dev_err(dev, + "un-supported crypto algorithm (%d)", + sa_algs[i].type); + continue; + } + + if (err) + dev_err(dev, "Failed to register '%s'\n", alg_name); + else + sa_algs[i].registered = true; + } +} + +/* Unregister the algorithms in crypto framework */ +static void sa_unregister_algos(const struct device *dev) +{ + u32 type; + int i; + + for (i = 0; i < ARRAY_SIZE(sa_algs); i++) { + type = sa_algs[i].type; + if (!sa_algs[i].registered) + continue; + if (type == CRYPTO_ALG_TYPE_SKCIPHER) + crypto_unregister_skcipher(&sa_algs[i].alg.skcipher); + else if (type == CRYPTO_ALG_TYPE_AHASH) + crypto_unregister_ahash(&sa_algs[i].alg.ahash); + else if (type == CRYPTO_ALG_TYPE_AEAD) + crypto_unregister_aead(&sa_algs[i].alg.aead); + + sa_algs[i].registered = false; + } +} + +static int sa_init_mem(struct sa_crypto_data *dev_data) +{ + struct device *dev = &dev_data->pdev->dev; + /* Setup dma pool for security context buffers */ + dev_data->sc_pool = dma_pool_create("keystone-sc", dev, + SA_CTX_MAX_SZ, 64, 0); + if (!dev_data->sc_pool) { + dev_err(dev, "Failed to create dma pool"); + return -ENOMEM; + } + + return 0; +} + +static int sa_dma_init(struct sa_crypto_data *dd) +{ + int ret; + struct dma_slave_config cfg; + + dd->dma_rx1 = NULL; + dd->dma_tx = NULL; + dd->dma_rx2 = NULL; + + ret = dma_coerce_mask_and_coherent(dd->dev, DMA_BIT_MASK(48)); + if (ret) + return ret; + + dd->dma_rx1 = dma_request_chan(dd->dev, "rx1"); + if (IS_ERR(dd->dma_rx1)) { + if (PTR_ERR(dd->dma_rx1) != -EPROBE_DEFER) + dev_err(dd->dev, "Unable to request rx1 DMA channel\n"); + return PTR_ERR(dd->dma_rx1); + } + + dd->dma_rx2 = dma_request_chan(dd->dev, "rx2"); + if (IS_ERR(dd->dma_rx2)) { + dma_release_channel(dd->dma_rx1); + if (PTR_ERR(dd->dma_rx2) != -EPROBE_DEFER) + dev_err(dd->dev, "Unable to request rx2 DMA channel\n"); + return PTR_ERR(dd->dma_rx2); + } + + dd->dma_tx = dma_request_chan(dd->dev, "tx"); + if (IS_ERR(dd->dma_tx)) { + if (PTR_ERR(dd->dma_tx) != -EPROBE_DEFER) + dev_err(dd->dev, "Unable to request tx DMA channel\n"); + ret = PTR_ERR(dd->dma_tx); + goto err_dma_tx; + } + + memzero_explicit(&cfg, sizeof(cfg)); + + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.src_maxburst = 4; + cfg.dst_maxburst = 4; + + ret = dmaengine_slave_config(dd->dma_rx1, &cfg); + if (ret) { + dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n", + ret); + return ret; + } + + ret = dmaengine_slave_config(dd->dma_rx2, &cfg); + if (ret) { + dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n", + ret); + return ret; + } + + ret = dmaengine_slave_config(dd->dma_tx, &cfg); + if (ret) { + dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n", + ret); + return ret; + } + + return 0; + +err_dma_tx: + 
dma_release_channel(dd->dma_rx1); + dma_release_channel(dd->dma_rx2); + + return ret; +} + +static int sa_link_child(struct device *dev, void *data) +{ + struct device *parent = data; + + device_link_add(dev, parent, DL_FLAG_AUTOPROBE_CONSUMER); + + return 0; +} + +static int sa_ul_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; + struct resource *res; + static void __iomem *saul_base; + struct sa_crypto_data *dev_data; + u32 val; + int ret; + + dev_data = devm_kzalloc(dev, sizeof(*dev_data), GFP_KERNEL); + if (!dev_data) + return -ENOMEM; + + sa_k3_dev = dev; + dev_data->dev = dev; + dev_data->pdev = pdev; + platform_set_drvdata(pdev, dev_data); + dev_set_drvdata(sa_k3_dev, dev_data); + + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret) { + dev_err(&pdev->dev, "%s: failed to get sync: %d\n", __func__, + ret); + return ret; + } + + sa_init_mem(dev_data); + ret = sa_dma_init(dev_data); + if (ret) + goto disable_pm_runtime; + + spin_lock_init(&dev_data->scid_lock); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + saul_base = devm_ioremap_resource(dev, res); + + dev_data->base = saul_base; + val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN | + SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN | + SA_EEC_TRNG_EN; + + writel_relaxed(val, saul_base + SA_ENGINE_ENABLE_CONTROL); + + sa_register_algos(dev); + + ret = of_platform_populate(node, NULL, NULL, &pdev->dev); + if (ret) + goto release_dma; + + device_for_each_child(&pdev->dev, &pdev->dev, sa_link_child); + + return 0; + +release_dma: + sa_unregister_algos(&pdev->dev); + + dma_release_channel(dev_data->dma_rx2); + dma_release_channel(dev_data->dma_rx1); + dma_release_channel(dev_data->dma_tx); + + dma_pool_destroy(dev_data->sc_pool); + +disable_pm_runtime: + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + return ret; +} + +static int sa_ul_remove(struct platform_device *pdev) +{ + struct sa_crypto_data *dev_data = platform_get_drvdata(pdev); + + sa_unregister_algos(&pdev->dev); + + dma_release_channel(dev_data->dma_rx2); + dma_release_channel(dev_data->dma_rx1); + dma_release_channel(dev_data->dma_tx); + + dma_pool_destroy(dev_data->sc_pool); + + platform_set_drvdata(pdev, NULL); + + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + return 0; +} + +static const struct of_device_id of_match[] = { + {.compatible = "ti,j721e-sa2ul",}, + {.compatible = "ti,am654-sa2ul",}, + {}, +}; +MODULE_DEVICE_TABLE(of, of_match); + +static struct platform_driver sa_ul_driver = { + .probe = sa_ul_probe, + .remove = sa_ul_remove, + .driver = { + .name = "saul-crypto", + .of_match_table = of_match, + }, +}; +module_platform_driver(sa_ul_driver); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/crypto/sa2ul.h b/drivers/crypto/sa2ul.h new file mode 100644 index 000000000000..7f7e3fe60d11 --- /dev/null +++ b/drivers/crypto/sa2ul.h @@ -0,0 +1,403 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * K3 SA2UL crypto accelerator driver + * + * Copyright (C) 2018-2020 Texas Instruments Incorporated - http://www.ti.com + * + * Authors: Keerthy + * Vitaly Andrianov + * Tero Kristo + */ + +#ifndef _K3_SA2UL_ +#define _K3_SA2UL_ + +#include <linux/interrupt.h> +#include <linux/skbuff.h> +#include <linux/hw_random.h> +#include <crypto/aes.h> + +#define SA_ENGINE_ENABLE_CONTROL 0x1000 + +struct sa_tfm_ctx; +/* + * SA_ENGINE_ENABLE_CONTROL register bits + */ +#define SA_EEC_ENCSS_EN 0x00000001 +#define SA_EEC_AUTHSS_EN 
0x00000002 +#define SA_EEC_TRNG_EN 0x00000008 +#define SA_EEC_PKA_EN 0x00000010 +#define SA_EEC_CTXCACH_EN 0x00000080 +#define SA_EEC_CPPI_PORT_IN_EN 0x00000200 +#define SA_EEC_CPPI_PORT_OUT_EN 0x00000800 + +/* + * Encoding used to identify the typo of crypto operation + * performed on the packet when the packet is returned + * by SA + */ +#define SA_REQ_SUBTYPE_ENC 0x0001 +#define SA_REQ_SUBTYPE_DEC 0x0002 +#define SA_REQ_SUBTYPE_SHIFT 16 +#define SA_REQ_SUBTYPE_MASK 0xffff + +/* Number of 32 bit words in EPIB */ +#define SA_DMA_NUM_EPIB_WORDS 4 + +/* Number of 32 bit words in PS data */ +#define SA_DMA_NUM_PS_WORDS 16 +#define NKEY_SZ 3 +#define MCI_SZ 27 + +/* + * Maximum number of simultaeneous security contexts + * supported by the driver + */ +#define SA_MAX_NUM_CTX 512 + +/* + * Assumption: CTX size is multiple of 32 + */ +#define SA_CTX_SIZE_TO_DMA_SIZE(ctx_sz) \ + ((ctx_sz) ? ((ctx_sz) / 32 - 1) : 0) + +#define SA_CTX_ENC_KEY_OFFSET 32 +#define SA_CTX_ENC_AUX1_OFFSET 64 +#define SA_CTX_ENC_AUX2_OFFSET 96 +#define SA_CTX_ENC_AUX3_OFFSET 112 +#define SA_CTX_ENC_AUX4_OFFSET 128 + +/* Next Engine Select code in CP_ACE */ +#define SA_ENG_ID_EM1 2 /* Enc/Dec engine with AES/DEC core */ +#define SA_ENG_ID_EM2 3 /* Encryption/Decryption enginefor pass 2 */ +#define SA_ENG_ID_AM1 4 /* Auth. engine with SHA1/MD5/SHA2 core */ +#define SA_ENG_ID_AM2 5 /* Authentication engine for pass 2 */ +#define SA_ENG_ID_OUTPORT2 20 /* Egress module 2 */ + +/* + * Command Label Definitions + */ +#define SA_CMDL_OFFSET_NESC 0 /* Next Engine Select Code */ +#define SA_CMDL_OFFSET_LABEL_LEN 1 /* Engine Command Label Length */ +/* 16-bit Length of Data to be processed */ +#define SA_CMDL_OFFSET_DATA_LEN 2 +#define SA_CMDL_OFFSET_DATA_OFFSET 4 /* Stat Data Offset */ +#define SA_CMDL_OFFSET_OPTION_CTRL1 5 /* Option Control Byte 1 */ +#define SA_CMDL_OFFSET_OPTION_CTRL2 6 /* Option Control Byte 2 */ +#define SA_CMDL_OFFSET_OPTION_CTRL3 7 /* Option Control Byte 3 */ +#define SA_CMDL_OFFSET_OPTION_BYTE 8 + +#define SA_CMDL_HEADER_SIZE_BYTES 8 + +#define SA_CMDL_OPTION_BYTES_MAX_SIZE 72 +#define SA_CMDL_MAX_SIZE_BYTES (SA_CMDL_HEADER_SIZE_BYTES + \ + SA_CMDL_OPTION_BYTES_MAX_SIZE) + +/* SWINFO word-0 flags */ +#define SA_SW_INFO_FLAG_EVICT 0x0001 +#define SA_SW_INFO_FLAG_TEAR 0x0002 +#define SA_SW_INFO_FLAG_NOPD 0x0004 + +/* + * This type represents the various packet types to be processed + * by the PHP engine in SA. + * It is used to identify the corresponding PHP processing function. 
+ */ +#define SA_CTX_PE_PKT_TYPE_3GPP_AIR 0 /* 3GPP Air Cipher */ +#define SA_CTX_PE_PKT_TYPE_SRTP 1 /* SRTP */ +#define SA_CTX_PE_PKT_TYPE_IPSEC_AH 2 /* IPSec Authentication Header */ +/* IPSec Encapsulating Security Payload */ +#define SA_CTX_PE_PKT_TYPE_IPSEC_ESP 3 +/* Indicates that it is in data mode, It may not be used by PHP */ +#define SA_CTX_PE_PKT_TYPE_NONE 4 +#define SA_CTX_ENC_TYPE1_SZ 64 /* Encryption SC with Key only */ +#define SA_CTX_ENC_TYPE2_SZ 96 /* Encryption SC with Key and Aux1 */ + +#define SA_CTX_AUTH_TYPE1_SZ 64 /* Auth SC with Key only */ +#define SA_CTX_AUTH_TYPE2_SZ 96 /* Auth SC with Key and Aux1 */ +/* Size of security context for PHP engine */ +#define SA_CTX_PHP_PE_CTX_SZ 64 + +#define SA_CTX_MAX_SZ (64 + SA_CTX_ENC_TYPE2_SZ + SA_CTX_AUTH_TYPE2_SZ) + +/* + * Encoding of F/E control in SCCTL + * Bit 0-1: Fetch PHP Bytes + * Bit 2-3: Fetch Encryption/Air Ciphering Bytes + * Bit 4-5: Fetch Authentication Bytes or Encr pass 2 + * Bit 6-7: Evict PHP Bytes + * + * where 00 = 0 bytes + * 01 = 64 bytes + * 10 = 96 bytes + * 11 = 128 bytes + */ +#define SA_CTX_DMA_SIZE_0 0 +#define SA_CTX_DMA_SIZE_64 1 +#define SA_CTX_DMA_SIZE_96 2 +#define SA_CTX_DMA_SIZE_128 3 + +/* + * Byte offset of the owner word in SCCTL + * in the security context + */ +#define SA_CTX_SCCTL_OWNER_OFFSET 0 + +#define SA_CTX_ENC_KEY_OFFSET 32 +#define SA_CTX_ENC_AUX1_OFFSET 64 +#define SA_CTX_ENC_AUX2_OFFSET 96 +#define SA_CTX_ENC_AUX3_OFFSET 112 +#define SA_CTX_ENC_AUX4_OFFSET 128 + +#define SA_SCCTL_FE_AUTH_ENC 0x65 +#define SA_SCCTL_FE_ENC 0x8D + +#define SA_ALIGN_MASK (sizeof(u32) - 1) +#define SA_ALIGNED __aligned(32) + +#define SA_AUTH_SW_CTRL_MD5 1 +#define SA_AUTH_SW_CTRL_SHA1 2 +#define SA_AUTH_SW_CTRL_SHA224 3 +#define SA_AUTH_SW_CTRL_SHA256 4 +#define SA_AUTH_SW_CTRL_SHA384 5 +#define SA_AUTH_SW_CTRL_SHA512 6 + +/* SA2UL can only handle maximum data size of 64KB */ +#define SA_MAX_DATA_SZ U16_MAX + +/* + * SA2UL can provide unpredictable results with packet sizes that fall + * the following range, so avoid using it. 
+ */ +#define SA_UNSAFE_DATA_SZ_MIN 240 +#define SA_UNSAFE_DATA_SZ_MAX 256 + +/** + * struct sa_crypto_data - Crypto driver instance data + * @base: Base address of the register space + * @pdev: Platform device pointer + * @sc_pool: security context pool + * @dev: Device pointer + * @scid_lock: secure context ID lock + * @sc_id_start: starting index for SC ID + * @sc_id_end: Ending index for SC ID + * @sc_id: Security Context ID + * @ctx_bm: Bitmap to keep track of Security context ID's + * @ctx: SA tfm context pointer + * @dma_rx1: Pointer to DMA rx channel for sizes < 256 Bytes + * @dma_rx2: Pointer to DMA rx channel for sizes > 256 Bytes + * @dma_tx: Pointer to DMA TX channel + */ +struct sa_crypto_data { + void __iomem *base; + struct platform_device *pdev; + struct dma_pool *sc_pool; + struct device *dev; + spinlock_t scid_lock; /* lock for SC-ID allocation */ + /* Security context data */ + u16 sc_id_start; + u16 sc_id_end; + u16 sc_id; + unsigned long ctx_bm[DIV_ROUND_UP(SA_MAX_NUM_CTX, + BITS_PER_LONG)]; + struct sa_tfm_ctx *ctx; + struct dma_chan *dma_rx1; + struct dma_chan *dma_rx2; + struct dma_chan *dma_tx; +}; + +/** + * struct sa_cmdl_param_info: Command label parameters info + * @index: Index of the parameter in the command label format + * @offset: the offset of the parameter + * @size: Size of the parameter + */ +struct sa_cmdl_param_info { + u16 index; + u16 offset; + u16 size; +}; + +/* Maximum length of Auxiliary data in 32bit words */ +#define SA_MAX_AUX_DATA_WORDS 8 + +/** + * struct sa_cmdl_upd_info: Command label updation info + * @flags: flags in command label + * @submode: Encryption submodes + * @enc_size: Size of first pass encryption size + * @enc_size2: Size of second pass encryption size + * @enc_offset: Encryption payload offset in the packet + * @enc_iv: Encryption initialization vector for pass2 + * @enc_iv2: Encryption initialization vector for pass2 + * @aad: Associated data + * @payload: Payload info + * @auth_size: Authentication size for pass 1 + * @auth_size2: Authentication size for pass 2 + * @auth_offset: Authentication payload offset + * @auth_iv: Authentication initialization vector + * @aux_key_info: Authentication aux key information + * @aux_key: Aux key for authentication + */ +struct sa_cmdl_upd_info { + u16 flags; + u16 submode; + struct sa_cmdl_param_info enc_size; + struct sa_cmdl_param_info enc_size2; + struct sa_cmdl_param_info enc_offset; + struct sa_cmdl_param_info enc_iv; + struct sa_cmdl_param_info enc_iv2; + struct sa_cmdl_param_info aad; + struct sa_cmdl_param_info payload; + struct sa_cmdl_param_info auth_size; + struct sa_cmdl_param_info auth_size2; + struct sa_cmdl_param_info auth_offset; + struct sa_cmdl_param_info auth_iv; + struct sa_cmdl_param_info aux_key_info; + u32 aux_key[SA_MAX_AUX_DATA_WORDS]; +}; + +/* + * Number of 32bit words appended after the command label + * in PSDATA to identify the crypto request context. 
+ * word-0: Request type + * word-1: pointer to request + */ +#define SA_PSDATA_CTX_WORDS 4 + +/* Maximum size of Command label in 32 words */ +#define SA_MAX_CMDL_WORDS (SA_DMA_NUM_PS_WORDS - SA_PSDATA_CTX_WORDS) + +/** + * struct sa_ctx_info: SA context information + * @sc: Pointer to security context + * @sc_phys: Security context physical address that is passed on to SA2UL + * @sc_id: Security context ID + * @cmdl_size: Command label size + * @cmdl: Command label for a particular iteration + * @cmdl_upd_info: structure holding command label updation info + * @epib: Extended protocol information block words + */ +struct sa_ctx_info { + u8 *sc; + dma_addr_t sc_phys; + u16 sc_id; + u16 cmdl_size; + u32 cmdl[SA_MAX_CMDL_WORDS]; + struct sa_cmdl_upd_info cmdl_upd_info; + /* Store Auxiliary data such as K2/K3 subkeys in AES-XCBC */ + u32 epib[SA_DMA_NUM_EPIB_WORDS]; +}; + +/** + * struct sa_tfm_ctx: TFM context structure + * @dev_data: struct sa_crypto_data pointer + * @enc: struct sa_ctx_info for encryption + * @dec: struct sa_ctx_info for decryption + * @keylen: encrption/decryption keylength + * @iv_idx: Initialization vector index + * @key: encryption key + * @fallback: SW fallback algorithm + */ +struct sa_tfm_ctx { + struct sa_crypto_data *dev_data; + struct sa_ctx_info enc; + struct sa_ctx_info dec; + struct sa_ctx_info auth; + int keylen; + int iv_idx; + u32 key[AES_KEYSIZE_256 / sizeof(u32)]; + u8 authkey[SHA512_BLOCK_SIZE]; + struct crypto_shash *shash; + /* for fallback */ + union { + struct crypto_sync_skcipher *skcipher; + struct crypto_ahash *ahash; + struct crypto_aead *aead; + } fallback; +}; + +/** + * struct sa_sha_req_ctx: Structure used for sha request + * @dev_data: struct sa_crypto_data pointer + * @cmdl: Complete command label with psdata and epib included + * @fallback_req: SW fallback request container + */ +struct sa_sha_req_ctx { + struct sa_crypto_data *dev_data; + u32 cmdl[SA_MAX_CMDL_WORDS + SA_PSDATA_CTX_WORDS]; + struct ahash_request fallback_req; +}; + +enum sa_submode { + SA_MODE_GEN = 0, + SA_MODE_CCM, + SA_MODE_GCM, + SA_MODE_GMAC +}; + +/* Encryption algorithms */ +enum sa_ealg_id { + SA_EALG_ID_NONE = 0, /* No encryption */ + SA_EALG_ID_NULL, /* NULL encryption */ + SA_EALG_ID_AES_CTR, /* AES Counter mode */ + SA_EALG_ID_AES_F8, /* AES F8 mode */ + SA_EALG_ID_AES_CBC, /* AES CBC mode */ + SA_EALG_ID_DES_CBC, /* DES CBC mode */ + SA_EALG_ID_3DES_CBC, /* 3DES CBC mode */ + SA_EALG_ID_CCM, /* Counter with CBC-MAC mode */ + SA_EALG_ID_GCM, /* Galois Counter mode */ + SA_EALG_ID_AES_ECB, + SA_EALG_ID_LAST +}; + +/* Authentication algorithms */ +enum sa_aalg_id { + SA_AALG_ID_NONE = 0, /* No Authentication */ + SA_AALG_ID_NULL = SA_EALG_ID_LAST, /* NULL Authentication */ + SA_AALG_ID_MD5, /* MD5 mode */ + SA_AALG_ID_SHA1, /* SHA1 mode */ + SA_AALG_ID_SHA2_224, /* 224-bit SHA2 mode */ + SA_AALG_ID_SHA2_256, /* 256-bit SHA2 mode */ + SA_AALG_ID_SHA2_512, /* 512-bit SHA2 mode */ + SA_AALG_ID_HMAC_MD5, /* HMAC with MD5 mode */ + SA_AALG_ID_HMAC_SHA1, /* HMAC with SHA1 mode */ + SA_AALG_ID_HMAC_SHA2_224, /* HMAC with 224-bit SHA2 mode */ + SA_AALG_ID_HMAC_SHA2_256, /* HMAC with 256-bit SHA2 mode */ + SA_AALG_ID_GMAC, /* Galois Message Auth. Code mode */ + SA_AALG_ID_CMAC, /* Cipher-based Mes. Auth. 
Code mode */ + SA_AALG_ID_CBC_MAC, /* Cipher Block Chaining */ + SA_AALG_ID_AES_XCBC /* AES Extended Cipher Block Chaining */ +}; + +/* + * Mode control engine algorithms used to index the + * mode control instruction tables + */ +enum sa_eng_algo_id { + SA_ENG_ALGO_ECB = 0, + SA_ENG_ALGO_CBC, + SA_ENG_ALGO_CFB, + SA_ENG_ALGO_OFB, + SA_ENG_ALGO_CTR, + SA_ENG_ALGO_F8, + SA_ENG_ALGO_F8F9, + SA_ENG_ALGO_GCM, + SA_ENG_ALGO_GMAC, + SA_ENG_ALGO_CCM, + SA_ENG_ALGO_CMAC, + SA_ENG_ALGO_CBCMAC, + SA_NUM_ENG_ALGOS +}; + +/** + * struct sa_eng_info: Security accelerator engine info + * @eng_id: Engine ID + * @sc_size: security context size + */ +struct sa_eng_info { + u8 eng_id; + u16 sc_size; +}; + +#endif /* _K3_SA2UL_ */ diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index 466e30bd529c..0c8cb23ae708 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c @@ -146,11 +146,12 @@ struct sahara_ctx { /* AES-specific context */ int keylen; u8 key[AES_KEYSIZE_128]; - struct crypto_sync_skcipher *fallback; + struct crypto_skcipher *fallback; }; struct sahara_aes_reqctx { unsigned long mode; + struct skcipher_request fallback_req; // keep at the end }; /* @@ -617,10 +618,10 @@ static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, /* * The requested key size is not supported by HW, do a fallback. */ - crypto_sync_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK); - crypto_sync_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags & + crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); - return crypto_sync_skcipher_setkey(ctx->fallback, key, keylen); + return crypto_skcipher_setkey(ctx->fallback, key, keylen); } static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode) @@ -651,21 +652,19 @@ static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode) static int sahara_aes_ecb_encrypt(struct skcipher_request *req) { + struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req); struct sahara_ctx *ctx = crypto_skcipher_ctx( crypto_skcipher_reqtfm(req)); - int err; if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); - - skcipher_request_set_sync_tfm(subreq, ctx->fallback); - skcipher_request_set_callback(subreq, req->base.flags, - NULL, NULL); - skcipher_request_set_crypt(subreq, req->src, req->dst, - req->cryptlen, req->iv); - err = crypto_skcipher_encrypt(subreq); - skcipher_request_zero(subreq); - return err; + skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback); + skcipher_request_set_callback(&rctx->fallback_req, + req->base.flags, + req->base.complete, + req->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, req->src, + req->dst, req->cryptlen, req->iv); + return crypto_skcipher_encrypt(&rctx->fallback_req); } return sahara_aes_crypt(req, FLAGS_ENCRYPT); @@ -673,21 +672,19 @@ static int sahara_aes_ecb_encrypt(struct skcipher_request *req) static int sahara_aes_ecb_decrypt(struct skcipher_request *req) { + struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req); struct sahara_ctx *ctx = crypto_skcipher_ctx( crypto_skcipher_reqtfm(req)); - int err; if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); - - skcipher_request_set_sync_tfm(subreq, ctx->fallback); - skcipher_request_set_callback(subreq, req->base.flags, - NULL, NULL); - skcipher_request_set_crypt(subreq, req->src, req->dst, - 
req->cryptlen, req->iv); - err = crypto_skcipher_decrypt(subreq); - skcipher_request_zero(subreq); - return err; + skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback); + skcipher_request_set_callback(&rctx->fallback_req, + req->base.flags, + req->base.complete, + req->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, req->src, + req->dst, req->cryptlen, req->iv); + return crypto_skcipher_decrypt(&rctx->fallback_req); } return sahara_aes_crypt(req, 0); @@ -695,21 +692,19 @@ static int sahara_aes_ecb_decrypt(struct skcipher_request *req) static int sahara_aes_cbc_encrypt(struct skcipher_request *req) { + struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req); struct sahara_ctx *ctx = crypto_skcipher_ctx( crypto_skcipher_reqtfm(req)); - int err; if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); - - skcipher_request_set_sync_tfm(subreq, ctx->fallback); - skcipher_request_set_callback(subreq, req->base.flags, - NULL, NULL); - skcipher_request_set_crypt(subreq, req->src, req->dst, - req->cryptlen, req->iv); - err = crypto_skcipher_encrypt(subreq); - skcipher_request_zero(subreq); - return err; + skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback); + skcipher_request_set_callback(&rctx->fallback_req, + req->base.flags, + req->base.complete, + req->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, req->src, + req->dst, req->cryptlen, req->iv); + return crypto_skcipher_encrypt(&rctx->fallback_req); } return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC); @@ -717,21 +712,19 @@ static int sahara_aes_cbc_encrypt(struct skcipher_request *req) static int sahara_aes_cbc_decrypt(struct skcipher_request *req) { + struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req); struct sahara_ctx *ctx = crypto_skcipher_ctx( crypto_skcipher_reqtfm(req)); - int err; if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); - - skcipher_request_set_sync_tfm(subreq, ctx->fallback); - skcipher_request_set_callback(subreq, req->base.flags, - NULL, NULL); - skcipher_request_set_crypt(subreq, req->src, req->dst, - req->cryptlen, req->iv); - err = crypto_skcipher_decrypt(subreq); - skcipher_request_zero(subreq); - return err; + skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback); + skcipher_request_set_callback(&rctx->fallback_req, + req->base.flags, + req->base.complete, + req->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, req->src, + req->dst, req->cryptlen, req->iv); + return crypto_skcipher_decrypt(&rctx->fallback_req); } return sahara_aes_crypt(req, FLAGS_CBC); @@ -742,14 +735,15 @@ static int sahara_aes_init_tfm(struct crypto_skcipher *tfm) const char *name = crypto_tfm_alg_name(&tfm->base); struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm); - ctx->fallback = crypto_alloc_sync_skcipher(name, 0, + ctx->fallback = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(ctx->fallback)) { pr_err("Error allocating fallback algo %s\n", name); return PTR_ERR(ctx->fallback); } - crypto_skcipher_set_reqsize(tfm, sizeof(struct sahara_aes_reqctx)); + crypto_skcipher_set_reqsize(tfm, sizeof(struct sahara_aes_reqctx) + + crypto_skcipher_reqsize(ctx->fallback)); return 0; } @@ -758,7 +752,7 @@ static void sahara_aes_exit_tfm(struct crypto_skcipher *tfm) { struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm); - crypto_free_sync_skcipher(ctx->fallback); + crypto_free_skcipher(ctx->fallback); } static u32 sahara_sha_init_hdr(struct sahara_dev *dev, 
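The sahara.c hunks above replace the on-stack synchronous fallback (SYNC_SKCIPHER_REQUEST_ON_STACK) with a fallback skcipher request embedded at the end of the driver's per-request context, sized via crypto_skcipher_reqsize(). A minimal, self-contained sketch of that idiom follows; the fb_* names are illustrative only and not part of the patch:

#include <linux/err.h>
#include <crypto/skcipher.h>

struct fb_ctx {
	struct crypto_skcipher *fallback;
};

struct fb_reqctx {
	struct skcipher_request fallback_req;	/* keep at the end */
};

static int fb_init_tfm(struct crypto_skcipher *tfm)
{
	struct fb_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base),
					      0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback))
		return PTR_ERR(ctx->fallback);

	/* Reserve room for our context plus the fallback's own request. */
	crypto_skcipher_set_reqsize(tfm, sizeof(struct fb_reqctx) +
				    crypto_skcipher_reqsize(ctx->fallback));
	return 0;
}

static int fb_encrypt(struct skcipher_request *req)
{
	struct fb_reqctx *rctx = skcipher_request_ctx(req);
	struct fb_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));

	/* Forward the request, preserving the caller's flags and completion. */
	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
				      req->base.complete, req->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
				   req->cryptlen, req->iv);
	return crypto_skcipher_encrypt(&rctx->fallback_req);
}

Unlike the on-stack variant, the forwarded request may complete asynchronously, which is why the caller's callback and flags are passed through unchanged rather than replaced with NULL.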
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 9c6db7f698c4..7c547352a862 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -2264,7 +2264,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_driver_name = "authenc-hmac-sha1-" "cbc-aes-talitos", .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, }, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, @@ -2285,7 +2286,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_driver_name = "authenc-hmac-sha1-" "cbc-aes-talitos-hsna", .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, }, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, @@ -2306,7 +2308,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_driver_name = "authenc-hmac-sha1-" "cbc-3des-talitos", .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, }, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, @@ -2330,7 +2333,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_driver_name = "authenc-hmac-sha1-" "cbc-3des-talitos-hsna", .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, }, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, @@ -2352,7 +2356,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_driver_name = "authenc-hmac-sha224-" "cbc-aes-talitos", .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, }, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, @@ -2373,7 +2378,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_driver_name = "authenc-hmac-sha224-" "cbc-aes-talitos-hsna", .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, }, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, @@ -2394,7 +2400,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_driver_name = "authenc-hmac-sha224-" "cbc-3des-talitos", .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, }, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, @@ -2418,7 +2425,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_driver_name = "authenc-hmac-sha224-" "cbc-3des-talitos-hsna", .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, }, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, @@ -2440,7 +2448,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_driver_name = "authenc-hmac-sha256-" "cbc-aes-talitos", .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, }, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, @@ -2461,7 +2470,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_driver_name = "authenc-hmac-sha256-" "cbc-aes-talitos-hsna", .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, }, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, @@ -2482,7 +2492,8 @@ 
static struct talitos_alg_template driver_algs[] = { .cra_driver_name = "authenc-hmac-sha256-" "cbc-3des-talitos", .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, }, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, @@ -2506,7 +2517,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_driver_name = "authenc-hmac-sha256-" "cbc-3des-talitos-hsna", .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, }, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, @@ -2528,7 +2540,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_driver_name = "authenc-hmac-sha384-" "cbc-aes-talitos", .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, }, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, @@ -2549,7 +2562,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_driver_name = "authenc-hmac-sha384-" "cbc-3des-talitos", .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, }, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, @@ -2571,7 +2585,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_driver_name = "authenc-hmac-sha512-" "cbc-aes-talitos", .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, }, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, @@ -2592,7 +2607,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_driver_name = "authenc-hmac-sha512-" "cbc-3des-talitos", .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, }, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, @@ -2614,7 +2630,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_driver_name = "authenc-hmac-md5-" "cbc-aes-talitos", .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, }, .ivsize = AES_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, @@ -2635,7 +2652,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_driver_name = "authenc-hmac-md5-" "cbc-aes-talitos-hsna", .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, }, .ivsize = AES_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, @@ -2655,7 +2673,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_driver_name = "authenc-hmac-md5-" "cbc-3des-talitos", .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, }, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, @@ -2678,7 +2697,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_driver_name = "authenc-hmac-md5-" "cbc-3des-talitos-hsna", .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, }, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, @@ -2699,7 +2719,8 @@ static struct talitos_alg_template driver_algs[] = { .base.cra_name = "ecb(aes)", .base.cra_driver_name = "ecb-aes-talitos", .base.cra_blocksize = AES_BLOCK_SIZE, - .base.cra_flags = 
CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .setkey = skcipher_aes_setkey, @@ -2712,7 +2733,8 @@ static struct talitos_alg_template driver_algs[] = { .base.cra_name = "cbc(aes)", .base.cra_driver_name = "cbc-aes-talitos", .base.cra_blocksize = AES_BLOCK_SIZE, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, @@ -2727,7 +2749,8 @@ static struct talitos_alg_template driver_algs[] = { .base.cra_name = "ctr(aes)", .base.cra_driver_name = "ctr-aes-talitos", .base.cra_blocksize = 1, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, @@ -2742,7 +2765,8 @@ static struct talitos_alg_template driver_algs[] = { .base.cra_name = "ecb(des)", .base.cra_driver_name = "ecb-des-talitos", .base.cra_blocksize = DES_BLOCK_SIZE, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .setkey = skcipher_des_setkey, @@ -2755,7 +2779,8 @@ static struct talitos_alg_template driver_algs[] = { .base.cra_name = "cbc(des)", .base.cra_driver_name = "cbc-des-talitos", .base.cra_blocksize = DES_BLOCK_SIZE, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .ivsize = DES_BLOCK_SIZE, @@ -2770,7 +2795,8 @@ static struct talitos_alg_template driver_algs[] = { .base.cra_name = "ecb(des3_ede)", .base.cra_driver_name = "ecb-3des-talitos", .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .setkey = skcipher_des3_setkey, @@ -2784,7 +2810,8 @@ static struct talitos_alg_template driver_algs[] = { .base.cra_name = "cbc(des3_ede)", .base.cra_driver_name = "cbc-3des-talitos", .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .ivsize = DES3_EDE_BLOCK_SIZE, @@ -2804,7 +2831,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_name = "md5", .cra_driver_name = "md5-talitos", .cra_blocksize = MD5_HMAC_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, } }, .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | @@ -2819,7 +2847,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_name = "sha1", .cra_driver_name = "sha1-talitos", .cra_blocksize = SHA1_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, } }, .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | @@ -2834,7 +2863,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_name = "sha224", .cra_driver_name = "sha224-talitos", .cra_blocksize = SHA224_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, } }, .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | @@ -2849,7 +2879,8 @@ static struct 
talitos_alg_template driver_algs[] = { .cra_name = "sha256", .cra_driver_name = "sha256-talitos", .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, } }, .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | @@ -2864,7 +2895,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_name = "sha384", .cra_driver_name = "sha384-talitos", .cra_blocksize = SHA384_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, } }, .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | @@ -2879,7 +2911,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_name = "sha512", .cra_driver_name = "sha512-talitos", .cra_blocksize = SHA512_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, } }, .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | @@ -2894,7 +2927,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_name = "hmac(md5)", .cra_driver_name = "hmac-md5-talitos", .cra_blocksize = MD5_HMAC_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, } }, .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | @@ -2909,7 +2943,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_name = "hmac(sha1)", .cra_driver_name = "hmac-sha1-talitos", .cra_blocksize = SHA1_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, } }, .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | @@ -2924,7 +2959,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_name = "hmac(sha224)", .cra_driver_name = "hmac-sha224-talitos", .cra_blocksize = SHA224_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, } }, .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | @@ -2939,7 +2975,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_name = "hmac(sha256)", .cra_driver_name = "hmac-sha256-talitos", .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, } }, .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | @@ -2954,7 +2991,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_name = "hmac(sha384)", .cra_driver_name = "hmac-sha384-talitos", .cra_blocksize = SHA384_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, } }, .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | @@ -2969,7 +3007,8 @@ static struct talitos_alg_template driver_algs[] = { .cra_name = "hmac(sha512)", .cra_driver_name = "hmac-sha512-talitos", .cra_blocksize = SHA512_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, } }, .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c index c24f2db8d5e8..a5ee8c2fb4e0 100644 --- a/drivers/crypto/ux500/hash/hash_core.c +++ b/drivers/crypto/ux500/hash/hash_core.c @@ -545,7 +545,7 @@ static bool hash_dma_valid_data(struct scatterlist *sg, int datasize) * * Initialize structures. 
*/ -static int hash_init(struct ahash_request *req) +static int ux500_hash_init(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct hash_ctx *ctx = crypto_ahash_ctx(tfm); @@ -1359,7 +1359,7 @@ static int ahash_sha1_init(struct ahash_request *req) ctx->config.oper_mode = HASH_OPER_MODE_HASH; ctx->digestsize = SHA1_DIGEST_SIZE; - return hash_init(req); + return ux500_hash_init(req); } static int ahash_sha256_init(struct ahash_request *req) @@ -1372,7 +1372,7 @@ static int ahash_sha256_init(struct ahash_request *req) ctx->config.oper_mode = HASH_OPER_MODE_HASH; ctx->digestsize = SHA256_DIGEST_SIZE; - return hash_init(req); + return ux500_hash_init(req); } static int ahash_sha1_digest(struct ahash_request *req) @@ -1425,7 +1425,7 @@ static int hmac_sha1_init(struct ahash_request *req) ctx->config.oper_mode = HASH_OPER_MODE_HMAC; ctx->digestsize = SHA1_DIGEST_SIZE; - return hash_init(req); + return ux500_hash_init(req); } static int hmac_sha256_init(struct ahash_request *req) @@ -1438,7 +1438,7 @@ static int hmac_sha256_init(struct ahash_request *req) ctx->config.oper_mode = HASH_OPER_MODE_HMAC; ctx->digestsize = SHA256_DIGEST_SIZE; - return hash_init(req); + return ux500_hash_init(req); } static int hmac_sha1_digest(struct ahash_request *req) @@ -1515,7 +1515,7 @@ static struct hash_algo_template hash_algs[] = { .conf.algorithm = HASH_ALGO_SHA1, .conf.oper_mode = HASH_OPER_MODE_HASH, .hash = { - .init = hash_init, + .init = ux500_hash_init, .update = ahash_update, .final = ahash_final, .digest = ahash_sha1_digest, @@ -1538,7 +1538,7 @@ static struct hash_algo_template hash_algs[] = { .conf.algorithm = HASH_ALGO_SHA256, .conf.oper_mode = HASH_OPER_MODE_HASH, .hash = { - .init = hash_init, + .init = ux500_hash_init, .update = ahash_update, .final = ahash_final, .digest = ahash_sha256_digest, @@ -1561,7 +1561,7 @@ static struct hash_algo_template hash_algs[] = { .conf.algorithm = HASH_ALGO_SHA1, .conf.oper_mode = HASH_OPER_MODE_HMAC, .hash = { - .init = hash_init, + .init = ux500_hash_init, .update = ahash_update, .final = ahash_final, .digest = hmac_sha1_digest, @@ -1585,7 +1585,7 @@ static struct hash_algo_template hash_algs[] = { .conf.algorithm = HASH_ALGO_SHA256, .conf.oper_mode = HASH_OPER_MODE_HMAC, .hash = { - .init = hash_init, + .init = ux500_hash_init, .update = ahash_update, .final = ahash_final, .digest = hmac_sha256_digest, diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c index cb8a6ea2a4bc..b2601958282e 100644 --- a/drivers/crypto/virtio/virtio_crypto_algs.c +++ b/drivers/crypto/virtio/virtio_crypto_algs.c @@ -597,7 +597,8 @@ static struct virtio_crypto_algo virtio_crypto_algs[] = { { .base.cra_name = "cbc(aes)", .base.cra_driver_name = "virtio_crypto_aes_cbc", .base.cra_priority = 150, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct virtio_crypto_skcipher_ctx), .base.cra_module = THIS_MODULE, diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c index c8a962c62663..77e744eaedd0 100644 --- a/drivers/crypto/virtio/virtio_crypto_core.c +++ b/drivers/crypto/virtio/virtio_crypto_core.c @@ -498,11 +498,11 @@ free_vqs: } #endif -static unsigned int features[] = { +static const unsigned int features[] = { /* none */ }; -static struct virtio_device_id id_table[] = { +static const struct virtio_device_id id_table[] = { { 
VIRTIO_ID_CRYPTO, VIRTIO_DEV_ANY_ID }, { 0 }, }; diff --git a/drivers/crypto/xilinx/zynqmp-aes-gcm.c b/drivers/crypto/xilinx/zynqmp-aes-gcm.c index cd11558893cd..27079354dbe9 100644 --- a/drivers/crypto/xilinx/zynqmp-aes-gcm.c +++ b/drivers/crypto/xilinx/zynqmp-aes-gcm.c @@ -364,6 +364,7 @@ static struct zynqmp_aead_drv_ctx aes_drv_ctx = { .cra_priority = 200, .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = ZYNQMP_AES_BLK_SIZE, diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c index c2f1d4e6630b..feb7fe6f2da7 100644 --- a/drivers/firmware/efi/efi-pstore.c +++ b/drivers/firmware/efi/efi-pstore.c @@ -356,10 +356,7 @@ static struct pstore_info efi_pstore_info = { static __init int efivars_pstore_init(void) { - if (!efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES)) - return 0; - - if (!efivars_kobject()) + if (!efivars_kobject() || !efivar_supports_writes()) return 0; if (efivars_pstore_disable) diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 5114cae4ec97..fdd1db025dbf 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -176,11 +176,13 @@ static struct efivar_operations generic_ops; static int generic_ops_register(void) { generic_ops.get_variable = efi.get_variable; - generic_ops.set_variable = efi.set_variable; - generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking; generic_ops.get_next_variable = efi.get_next_variable; generic_ops.query_variable_store = efi_query_variable_store; + if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) { + generic_ops.set_variable = efi.set_variable; + generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking; + } return efivars_register(&generic_efivars, &generic_ops, efi_kobj); } @@ -382,7 +384,8 @@ static int __init efisubsys_init(void) return -ENOMEM; } - if (efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES)) { + if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE | + EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) { efivar_ssdt_load(); error = generic_ops_register(); if (error) @@ -416,7 +419,8 @@ static int __init efisubsys_init(void) err_remove_group: sysfs_remove_group(efi_kobj, &efi_subsys_attr_group); err_unregister: - if (efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES)) + if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE | + EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) generic_ops_unregister(); err_put: kobject_put(efi_kobj); diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c index 26528a46d99e..dcea137142b3 100644 --- a/drivers/firmware/efi/efivars.c +++ b/drivers/firmware/efi/efivars.c @@ -680,11 +680,8 @@ int efivars_sysfs_init(void) struct kobject *parent_kobj = efivars_kobject(); int error = 0; - if (!efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES)) - return -ENODEV; - /* No efivars has been registered yet */ - if (!parent_kobj) + if (!parent_kobj || !efivar_supports_writes()) return 0; printk(KERN_INFO "EFI Variables Facility v%s %s\n", EFIVARS_VERSION, diff --git a/drivers/firmware/efi/embedded-firmware.c b/drivers/firmware/efi/embedded-firmware.c index a1b199de9006..e97a9c9d010c 100644 --- a/drivers/firmware/efi/embedded-firmware.c +++ b/drivers/firmware/efi/embedded-firmware.c @@ -37,9 +37,8 @@ static const struct dmi_system_id * const embedded_fw_table[] = { static int __init efi_check_md_for_embedded_firmware( efi_memory_desc_t 
*md, const struct efi_embedded_fw_desc *desc) { - struct sha256_state sctx; struct efi_embedded_fw *fw; - u8 sha256[32]; + u8 hash[32]; u64 i, size; u8 *map; @@ -54,10 +53,8 @@ static int __init efi_check_md_for_embedded_firmware( if (memcmp(map + i, desc->prefix, EFI_EMBEDDED_FW_PREFIX_LEN)) continue; - sha256_init(&sctx); - sha256_update(&sctx, map + i, desc->length); - sha256_final(&sctx, sha256); - if (memcmp(sha256, desc->sha256, 32) == 0) + sha256(map + i, desc->length, hash); + if (memcmp(hash, desc->sha256, 32) == 0) break; } if ((i + desc->length) > size) { diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index 4cce372edaf4..75daaf20374e 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -6,8 +6,7 @@ # enabled, even if doing so doesn't break the build. # cflags-$(CONFIG_X86_32) := -march=i386 -cflags-$(CONFIG_X86_64) := -mcmodel=small \ - $(call cc-option,-maccumulate-outgoing-args) +cflags-$(CONFIG_X86_64) := -mcmodel=small cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ \ -fPIC -fno-strict-aliasing -mno-red-zone \ -mno-mmx -mno-sse -fshort-wchar \ diff --git a/drivers/firmware/efi/libstub/alignedmem.c b/drivers/firmware/efi/libstub/alignedmem.c index cc89c4d6196f..1de9878ddd3a 100644 --- a/drivers/firmware/efi/libstub/alignedmem.c +++ b/drivers/firmware/efi/libstub/alignedmem.c @@ -44,7 +44,7 @@ efi_status_t efi_allocate_pages_aligned(unsigned long size, unsigned long *addr, *addr = ALIGN((unsigned long)alloc_addr, align); if (slack > 0) { - int l = (alloc_addr % align) / EFI_PAGE_SIZE; + int l = (alloc_addr & (align - 1)) / EFI_PAGE_SIZE; if (l) { efi_bs_call(free_pages, alloc_addr, slack - l + 1); diff --git a/drivers/firmware/efi/libstub/efi-stub.c b/drivers/firmware/efi/libstub/efi-stub.c index 3318ec3f8e5b..a5a405d8ab44 100644 --- a/drivers/firmware/efi/libstub/efi-stub.c +++ b/drivers/firmware/efi/libstub/efi-stub.c @@ -122,23 +122,6 @@ static unsigned long get_dram_base(void) } /* - * This function handles the architcture specific differences between arm and - * arm64 regarding where the kernel image must be loaded and any memory that - * must be reserved. On failure it is required to free all - * all allocations it has made. - */ -efi_status_t handle_kernel_image(unsigned long *image_addr, - unsigned long *image_size, - unsigned long *reserve_addr, - unsigned long *reserve_size, - unsigned long dram_base, - efi_loaded_image_t *image); - -asmlinkage void __noreturn efi_enter_kernel(unsigned long entrypoint, - unsigned long fdt_addr, - unsigned long fdt_size); - -/* * EFI entry point for the arm/arm64 EFI stubs. This is the entrypoint * that is described in the PE/COFF header. Most of the code is the same * for both archictectures, with the arch-specific code provided in the diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h index 2c9d42264c29..85050f5a1b28 100644 --- a/drivers/firmware/efi/libstub/efistub.h +++ b/drivers/firmware/efi/libstub/efistub.h @@ -776,6 +776,22 @@ efi_status_t efi_load_initrd(efi_loaded_image_t *image, unsigned long *load_size, unsigned long soft_limit, unsigned long hard_limit); +/* + * This function handles the architcture specific differences between arm and + * arm64 regarding where the kernel image must be loaded and any memory that + * must be reserved. On failure it is required to free all + * all allocations it has made. 
+ */ +efi_status_t handle_kernel_image(unsigned long *image_addr, + unsigned long *image_size, + unsigned long *reserve_addr, + unsigned long *reserve_size, + unsigned long dram_base, + efi_loaded_image_t *image); + +asmlinkage void __noreturn efi_enter_kernel(unsigned long entrypoint, + unsigned long fdt_addr, + unsigned long fdt_size); void efi_handle_post_ebs_state(void); diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c index 5a48d996ed71..3672539cb96e 100644 --- a/drivers/firmware/efi/libstub/x86-stub.c +++ b/drivers/firmware/efi/libstub/x86-stub.c @@ -8,6 +8,7 @@ #include <linux/efi.h> #include <linux/pci.h> +#include <linux/stddef.h> #include <asm/efi.h> #include <asm/e820/types.h> @@ -361,8 +362,6 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, int options_size = 0; efi_status_t status; char *cmdline_ptr; - unsigned long ramdisk_addr; - unsigned long ramdisk_size; efi_system_table = sys_table_arg; @@ -390,8 +389,9 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, hdr = &boot_params->hdr; - /* Copy the second sector to boot_params */ - memcpy(&hdr->jump, image_base + 512, 512); + /* Copy the setup header from the second sector to boot_params */ + memcpy(&hdr->jump, image_base + 512, + sizeof(struct setup_header) - offsetof(struct setup_header, jump)); /* * Fill out some of the header fields ourselves because the diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c index 5f2a4d162795..973eef234b36 100644 --- a/drivers/firmware/efi/vars.c +++ b/drivers/firmware/efi/vars.c @@ -1229,3 +1229,9 @@ out: return rv; } EXPORT_SYMBOL_GPL(efivars_unregister); + +int efivar_supports_writes(void) +{ + return __efivars && __efivars->ops->set_variable; +} +EXPORT_SYMBOL_GPL(efivar_supports_writes); diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c index 039e0f91dba8..6945c3c96637 100644 --- a/drivers/firmware/qemu_fw_cfg.c +++ b/drivers/firmware/qemu_fw_cfg.c @@ -605,8 +605,10 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f) /* register entry under "/sys/firmware/qemu_fw_cfg/by_key/" */ err = kobject_init_and_add(&entry->kobj, &fw_cfg_sysfs_entry_ktype, fw_cfg_sel_ko, "%d", entry->select); - if (err) - goto err_register; + if (err) { + kobject_put(&entry->kobj); + return err; + } /* add raw binary content access */ err = sysfs_create_bin_file(&entry->kobj, &fw_cfg_sysfs_attr_raw); @@ -622,7 +624,6 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f) err_add_raw: kobject_del(&entry->kobj); -err_register: kfree(entry); return err; } diff --git a/drivers/fpga/dfl-afu-main.c b/drivers/fpga/dfl-afu-main.c index b0c31789a909..3fa2c5992173 100644 --- a/drivers/fpga/dfl-afu-main.c +++ b/drivers/fpga/dfl-afu-main.c @@ -83,7 +83,8 @@ int __afu_port_disable(struct platform_device *pdev) * on this port and minimum soft reset pulse width has elapsed. * Driver polls port_soft_reset_ack to determine if reset done by HW. 
*/ - if (readq_poll_timeout(base + PORT_HDR_CTRL, v, v & PORT_CTRL_SFTRST, + if (readq_poll_timeout(base + PORT_HDR_CTRL, v, + v & PORT_CTRL_SFTRST_ACK, RST_POLL_INVL, RST_POLL_TIMEOUT)) { dev_err(&pdev->dev, "timeout, fail to reset device\n"); return -ETIMEDOUT; diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c index 538755062ab7..a78c409bf2c4 100644 --- a/drivers/fpga/dfl-pci.c +++ b/drivers/fpga/dfl-pci.c @@ -227,7 +227,6 @@ static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs) { struct cci_drvdata *drvdata = pci_get_drvdata(pcidev); struct dfl_fpga_cdev *cdev = drvdata->cdev; - int ret = 0; if (!num_vfs) { /* @@ -239,6 +238,8 @@ static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs) dfl_fpga_cdev_config_ports_pf(cdev); } else { + int ret; + /* * before enable SRIOV, put released ports into VF access mode * first of all. diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index d7e17e34fee1..21292098bc02 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -692,9 +692,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file return n ? -EFAULT : 0; } case AMDGPU_INFO_DEV_INFO: { - struct drm_amdgpu_info_device dev_info = {}; + struct drm_amdgpu_info_device dev_info; uint64_t vm_size; + memset(&dev_info, 0, sizeof(dev_info)); dev_info.device_id = dev->pdev->device; dev_info.chip_rev = adev->rev_id; dev_info.external_rev = adev->external_rev_id; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 86ffa0c2880f..710edc70e37e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -8717,20 +8717,38 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, * the same resource. If we have a new DC context as part of * the DM atomic state from validation we need to free it and * retain the existing one instead. + * + * Furthermore, since the DM atomic state only contains the DC + * context and can safely be annulled, we can free the state + * and clear the associated private object now to free + * some memory and avoid a possible use-after-free later. */ - struct dm_atomic_state *new_dm_state, *old_dm_state; - new_dm_state = dm_atomic_get_new_state(state); - old_dm_state = dm_atomic_get_old_state(state); + for (i = 0; i < state->num_private_objs; i++) { + struct drm_private_obj *obj = state->private_objs[i].ptr; - if (new_dm_state && old_dm_state) { - if (new_dm_state->context) - dc_release_state(new_dm_state->context); + if (obj->funcs == adev->dm.atomic_obj.funcs) { + int j = state->num_private_objs-1; - new_dm_state->context = old_dm_state->context; + dm_atomic_destroy_state(obj, + state->private_objs[i].state); + + /* If i is not at the end of the array then the + * last element needs to be moved to where i was + * before the array can safely be truncated. 
+ */ + if (i != j) + state->private_objs[i] = + state->private_objs[j]; - if (old_dm_state->context) - dc_retain_state(old_dm_state->context); + state->private_objs[j].ptr = NULL; + state->private_objs[j].state = NULL; + state->private_objs[j].old_state = NULL; + state->private_objs[j].new_state = NULL; + + state->num_private_objs = j; + break; + } } } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c index 3da71a088b92..0ecc18b55ffb 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c @@ -644,9 +644,6 @@ static int vegam_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, /* sclk is bigger than max sclk in the dependence table */ *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; - vddci = phm_find_closest_vddci(&(data->vddci_voltage_table), - (dep_table->entries[i - 1].vddc - - (uint16_t)VDDC_VDDCI_DELTA)); if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control) *voltage |= (data->vbios_boot_state.vddci_bootup_value * @@ -654,8 +651,13 @@ static int vegam_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, else if (dep_table->entries[i - 1].vddci) *voltage |= (dep_table->entries[i - 1].vddci * VOLTAGE_SCALE) << VDDC_SHIFT; - else + else { + vddci = phm_find_closest_vddci(&(data->vddci_voltage_table), + (dep_table->entries[i - 1].vddc - + (uint16_t)VDDC_VDDCI_DELTA)); + *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; + } if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE; diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c index 05d8373888e8..079f46f5cdb6 100644 --- a/drivers/gpu/drm/bochs/bochs_kms.c +++ b/drivers/gpu/drm/bochs/bochs_kms.c @@ -146,6 +146,7 @@ int bochs_kms_init(struct bochs_device *bochs) bochs->dev->mode_config.preferred_depth = 24; bochs->dev->mode_config.prefer_shadow = 0; bochs->dev->mode_config.prefer_shadow_fbdev = 1; + bochs->dev->mode_config.fbdev_use_iomem = true; bochs->dev->mode_config.quirk_addfb_prefer_host_byte_order = true; bochs->dev->mode_config.funcs = &bochs_mode_funcs; diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index 87b58c1acff4..648eb23d0784 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -1224,6 +1224,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id) adv7511->bridge.funcs = &adv7511_bridge_funcs; adv7511->bridge.of_node = dev->of_node; + adv7511->bridge.type = DRM_MODE_CONNECTOR_HDMIA; drm_bridge_add(&adv7511->bridge); diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c index b14d725bf609..c7bc194bbce3 100644 --- a/drivers/gpu/drm/bridge/nwl-dsi.c +++ b/drivers/gpu/drm/bridge/nwl-dsi.c @@ -917,11 +917,6 @@ static int nwl_dsi_bridge_attach(struct drm_bridge *bridge, struct drm_panel *panel; int ret; - if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) { - DRM_ERROR("Fix bridge driver to make connector optional!"); - return -EINVAL; - } - ret = drm_of_find_panel_or_bridge(dsi->dev->of_node, 1, 0, &panel, &panel_bridge); if (ret) diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 5609e164805f..89cfd68ef400 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -399,7 +399,11 @@ static void drm_fb_helper_dirty_blit_real(struct drm_fb_helper 
*fb_helper, unsigned int y; for (y = clip->y1; y < clip->y2; y++) { - memcpy(dst, src, len); + if (!fb_helper->dev->mode_config.fbdev_use_iomem) + memcpy(dst, src, len); + else + memcpy_toio((void __iomem *)dst, src, len); + src += fb->pitches[0]; dst += fb->pitches[0]; } diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 7bf628e13023..ee2058ad482c 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -871,9 +871,6 @@ err: * @file_priv: drm file-private structure * * Open an object using the global name, returning a handle and the size. - * - * This handle (of course) holds a reference to the object, so the object - * will not go away until the handle is deleted. */ int drm_gem_open_ioctl(struct drm_device *dev, void *data, @@ -898,14 +895,15 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data, /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */ ret = drm_gem_handle_create_tail(file_priv, obj, &handle); - drm_gem_object_put_unlocked(obj); if (ret) - return ret; + goto err; args->handle = handle; args->size = obj->size; - return 0; +err: + drm_gem_object_put_unlocked(obj); + return ret; } /** diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c index bb27c82757f1..bf7888ad9ad4 100644 --- a/drivers/gpu/drm/drm_mipi_dbi.c +++ b/drivers/gpu/drm/drm_mipi_dbi.c @@ -923,7 +923,7 @@ static int mipi_dbi_spi1_transfer(struct mipi_dbi *dbi, int dc, } } - tr.len = chunk; + tr.len = chunk * 2; len -= chunk; ret = spi_sync(spi, &m); diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c index b50b44e76279..8fc3f67e3e76 100644 --- a/drivers/gpu/drm/drm_of.c +++ b/drivers/gpu/drm/drm_of.c @@ -322,10 +322,8 @@ static int drm_of_lvds_get_remote_pixels_type( * configurations by passing the endpoints explicitly to * drm_of_lvds_get_dual_link_pixel_order(). */ - if (!current_pt || pixels_type != current_pt) { - of_node_put(remote_port); + if (!current_pt || pixels_type != current_pt) return -EINVAL; - } } return pixels_type; diff --git a/drivers/gpu/drm/lima/lima_pp.c b/drivers/gpu/drm/lima/lima_pp.c index 33f01383409c..a5c95bed08c0 100644 --- a/drivers/gpu/drm/lima/lima_pp.c +++ b/drivers/gpu/drm/lima/lima_pp.c @@ -271,6 +271,8 @@ void lima_pp_fini(struct lima_ip *ip) int lima_pp_bcast_resume(struct lima_ip *ip) { + /* PP has been reset by individual PP resume */ + ip->data.async_reset = false; return 0; } diff --git a/drivers/gpu/drm/mcde/mcde_display.c b/drivers/gpu/drm/mcde/mcde_display.c index 08802e5177f6..4d2290f88edb 100644 --- a/drivers/gpu/drm/mcde/mcde_display.c +++ b/drivers/gpu/drm/mcde/mcde_display.c @@ -1060,9 +1060,14 @@ static void mcde_display_update(struct drm_simple_display_pipe *pipe, */ if (fb) { mcde_set_extsrc(mcde, drm_fb_cma_get_gem_addr(fb, pstate, 0)); - if (!mcde->video_mode) - /* Send a single frame using software sync */ - mcde_display_send_one_frame(mcde); + if (!mcde->video_mode) { + /* + * Send a single frame using software sync if the flow + * is not active yet. 
+ */ + if (mcde->flow_active == 0) + mcde_display_send_one_frame(mcde); + } dev_info_once(mcde->dev, "sent first display update\n"); } else { /* diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 519f99868e35..800b7757252e 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -2073,7 +2073,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) */ if (core->assign_windows) { core->func->wndw.owner(core); - core->func->update(core, interlock, false); + nv50_disp_atomic_commit_core(state, interlock); core->assign_windows = false; interlock[NV50_DISP_INTERLOCK_CORE] = 0; } @@ -2506,7 +2506,7 @@ nv50_display_create(struct drm_device *dev) if (disp->disp->object.oclass >= TU102_DISP) nouveau_display(dev)->format_modifiers = wndwc57e_modifiers; else - if (disp->disp->object.oclass >= GF110_DISP) + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI) nouveau_display(dev)->format_modifiers = disp90xx_modifiers; else nouveau_display(dev)->format_modifiers = disp50xx_modifiers; diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 496c4621cc78..07373bbc2acf 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -191,6 +191,7 @@ nouveau_decode_mod(struct nouveau_drm *drm, uint32_t *tile_mode, uint8_t *kind) { + struct nouveau_display *disp = nouveau_display(drm->dev); BUG_ON(!tile_mode || !kind); if (modifier == DRM_FORMAT_MOD_LINEAR) { @@ -202,6 +203,12 @@ nouveau_decode_mod(struct nouveau_drm *drm, * Extract the block height and kind from the corresponding * modifier fields. See drm_fourcc.h for details. */ + + if ((modifier & (0xffull << 12)) == 0ull) { + /* Legacy modifier. Translate to this dev's 'kind.' 
*/ + modifier |= disp->format_modifiers[0] & (0xffull << 12); + } + *tile_mode = (uint32_t)(modifier & 0xF); *kind = (uint8_t)((modifier >> 12) & 0xFF); @@ -227,6 +234,16 @@ nouveau_framebuffer_get_layout(struct drm_framebuffer *fb, } } +static const u64 legacy_modifiers[] = { + DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0), + DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1), + DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2), + DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3), + DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4), + DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5), + DRM_FORMAT_MOD_INVALID +}; + static int nouveau_validate_decode_mod(struct nouveau_drm *drm, uint64_t modifier, @@ -247,8 +264,14 @@ nouveau_validate_decode_mod(struct nouveau_drm *drm, (disp->format_modifiers[mod] != modifier); mod++); - if (disp->format_modifiers[mod] == DRM_FORMAT_MOD_INVALID) - return -EINVAL; + if (disp->format_modifiers[mod] == DRM_FORMAT_MOD_INVALID) { + for (mod = 0; + (legacy_modifiers[mod] != DRM_FORMAT_MOD_INVALID) && + (legacy_modifiers[mod] != modifier); + mod++); + if (legacy_modifiers[mod] == DRM_FORMAT_MOD_INVALID) + return -EINVAL; + } nouveau_decode_mod(drm, modifier, tile_mode, kind); diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 3d11b84d4cf9..d5c23d1c20d8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -315,7 +315,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper, struct drm_framebuffer *fb; struct nouveau_channel *chan; struct nouveau_bo *nvbo; - struct drm_mode_fb_cmd2 mode_cmd; + struct drm_mode_fb_cmd2 mode_cmd = {}; int ret; mode_cmd.width = sizes->surface_width; @@ -590,6 +590,7 @@ fini: drm_fb_helper_fini(&fbcon->helper); free: kfree(fbcon); + drm->fbcon = NULL; return ret; } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c index dcf08249374a..dffcac249211 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c @@ -117,15 +117,6 @@ nvkm_outp_acquire_hda(struct nvkm_outp *outp, enum nvkm_ior_type type, { struct nvkm_ior *ior; - /* First preference is to reuse the OR that is currently armed - * on HW, if any, in order to prevent unnecessary switching. - */ - list_for_each_entry(ior, &outp->disp->ior, head) { - if (!ior->identity && !!ior->func->hda.hpd == hda && - !ior->asy.outp && ior->arm.outp == outp) - return nvkm_outp_acquire_ior(outp, user, ior); - } - /* Failing that, a completely unused OR is the next best thing. */ list_for_each_entry(ior, &outp->disp->ior, head) { if (!ior->identity && !!ior->func->hda.hpd == hda && @@ -173,6 +164,27 @@ nvkm_outp_acquire(struct nvkm_outp *outp, u8 user, bool hda) return nvkm_outp_acquire_ior(outp, user, ior); } + /* First preference is to reuse the OR that is currently armed + * on HW, if any, in order to prevent unnecessary switching. + */ + list_for_each_entry(ior, &outp->disp->ior, head) { + if (!ior->identity && !ior->asy.outp && ior->arm.outp == outp) { + /*XXX: For various complicated reasons, we can't outright switch + * the boot-time OR on the first modeset without some fairly + * invasive changes. + * + * The systems that were fixed by modifying the OR selection + * code to account for HDA support shouldn't regress here as + * the HDA-enabled ORs match the relevant output's pad macro + * index, and the firmware seems to select an OR this way. + * + * This warning is to make it obvious if that proves wrong. 
+ */ + WARN_ON(hda && !ior->func->hda.hpd); + return nvkm_outp_acquire_ior(outp, user, ior); + } + } + /* If we don't need HDA, first try to acquire an OR that doesn't * support it to leave free the ones that do. */ diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c index 46fe1805c588..2649469070aa 100644 --- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c +++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c @@ -615,9 +615,9 @@ static const struct panel_desc boe_tv101wum_nl6_desc = { static const struct drm_display_mode auo_kd101n80_45na_default_mode = { .clock = 157000, .hdisplay = 1200, - .hsync_start = 1200 + 80, - .hsync_end = 1200 + 80 + 24, - .htotal = 1200 + 80 + 24 + 36, + .hsync_start = 1200 + 60, + .hsync_end = 1200 + 60 + 24, + .htotal = 1200 + 60 + 24 + 56, .vdisplay = 1920, .vsync_start = 1920 + 16, .vsync_end = 1920 + 16 + 4, diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 5178f87d6574..4aeb960ccf15 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -1250,7 +1250,21 @@ static const struct panel_desc boe_nv133fhm_n61 = { .height = 165, }, .delay = { - .hpd_absent_delay = 200, + /* + * When power is first given to the panel there's a short + * spike on the HPD line. It was explained that this spike + * was until the TCON data download was complete. On + * one system this was measured at 8 ms. We'll put 15 ms + * in the prepare delay just to be safe and take it away + * from the hpd_absent_delay (which would otherwise be 200 ms) + * to handle this. That means: + * - If HPD isn't hooked up you still have 200 ms delay. + * - If HPD is hooked up we won't try to look at it for the + * first 15 ms. + */ + .prepare = 15, + .hpd_absent_delay = 185, + .unprepare = 500, }, .bus_format = MEDIA_BUS_FMT_RGB888_1X24, diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c index 557cbe5ab35f..2f2c9f0a1071 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c @@ -260,7 +260,7 @@ sun4i_hdmi_connector_detect(struct drm_connector *connector, bool force) unsigned long reg; reg = readl(hdmi->base + SUN4I_HDMI_HPD_REG); - if (reg & SUN4I_HDMI_HPD_HIGH) { + if (!(reg & SUN4I_HDMI_HPD_HIGH)) { cec_phys_addr_invalidate(hdmi->cec_adap); return connector_status_disconnected; } diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c index 4b72398af505..e4b7f2a951ad 100644 --- a/drivers/i2c/busses/i2c-cadence.c +++ b/drivers/i2c/busses/i2c-cadence.c @@ -421,20 +421,21 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr) /* Read data if receive data valid is set */ while (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) & CDNS_I2C_SR_RXDV) { - /* - * Clear hold bit that was set for FIFO control if - * RX data left is less than FIFO depth, unless - * repeated start is selected. - */ - if ((id->recv_count < CDNS_I2C_FIFO_DEPTH) && - !id->bus_hold_flag) - cdns_i2c_clear_bus_hold(id); - if (id->recv_count > 0) { *(id->p_recv_buf)++ = cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET); id->recv_count--; id->curr_recv_count--; + + /* + * Clear hold bit that was set for FIFO control + * if RX data left is less than or equal to + * FIFO DEPTH unless repeated start is selected + */ + if (id->recv_count <= CDNS_I2C_FIFO_DEPTH && + !id->bus_hold_flag) + cdns_i2c_clear_bus_hold(id); + } else { dev_err(id->adap.dev.parent, "xfer_size reg rollover. 
xfer aborted!\n"); @@ -594,10 +595,8 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id) * Check for the message size against FIFO depth and set the * 'hold bus' bit if it is greater than FIFO depth. */ - if ((id->recv_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag) + if (id->recv_count > CDNS_I2C_FIFO_DEPTH) ctrl_reg |= CDNS_I2C_CR_HOLD; - else - ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD; cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); @@ -654,11 +653,8 @@ static void cdns_i2c_msend(struct cdns_i2c *id) * Check for the message size against FIFO depth and set the * 'hold bus' bit if it is greater than FIFO depth. */ - if ((id->send_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag) + if (id->send_count > CDNS_I2C_FIFO_DEPTH) ctrl_reg |= CDNS_I2C_CR_HOLD; - else - ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD; - cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); /* Clear the interrupts in interrupt status register. */ diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c index 18d1e4fd4cf3..7f130829bf01 100644 --- a/drivers/i2c/busses/i2c-qcom-geni.c +++ b/drivers/i2c/busses/i2c-qcom-geni.c @@ -367,7 +367,6 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg, geni_se_select_mode(se, GENI_SE_FIFO); writel_relaxed(len, se->base + SE_I2C_RX_TRANS_LEN); - geni_se_setup_m_cmd(se, I2C_READ, m_param); if (dma_buf && geni_se_rx_dma_prep(se, dma_buf, len, &rx_dma)) { geni_se_select_mode(se, GENI_SE_FIFO); @@ -375,6 +374,8 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg, dma_buf = NULL; } + geni_se_setup_m_cmd(se, I2C_READ, m_param); + time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT); if (!time_left) geni_i2c_abort_xfer(gi2c); @@ -408,7 +409,6 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg, geni_se_select_mode(se, GENI_SE_FIFO); writel_relaxed(len, se->base + SE_I2C_TX_TRANS_LEN); - geni_se_setup_m_cmd(se, I2C_WRITE, m_param); if (dma_buf && geni_se_tx_dma_prep(se, dma_buf, len, &tx_dma)) { geni_se_select_mode(se, GENI_SE_FIFO); @@ -416,6 +416,8 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg, dma_buf = NULL; } + geni_se_setup_m_cmd(se, I2C_WRITE, m_param); + if (!dma_buf) /* Get FIFO IRQ */ writel_relaxed(1, se->base + SE_GENI_TX_WATERMARK_REG); diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index a45c4bf1ec01..2e3e1bb75013 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c @@ -868,6 +868,7 @@ static int rcar_unreg_slave(struct i2c_client *slave) /* disable irqs and ensure none is running before clearing ptr */ rcar_i2c_write(priv, ICSIER, 0); rcar_i2c_write(priv, ICSCR, 0); + rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */ synchronize_irq(priv->irq); priv->slave = NULL; @@ -969,6 +970,8 @@ static int rcar_i2c_probe(struct platform_device *pdev) if (ret < 0) goto out_pm_put; + rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */ + if (priv->devtype == I2C_RCAR_GEN3) { priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); if (!IS_ERR(priv->rstc)) { diff --git a/drivers/i2c/i2c-core-slave.c b/drivers/i2c/i2c-core-slave.c index 5427f047faf0..1589179d5eb9 100644 --- a/drivers/i2c/i2c-core-slave.c +++ b/drivers/i2c/i2c-core-slave.c @@ -18,10 +18,8 @@ int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb) { int ret; - if (!client || !slave_cb) { - WARN(1, "insufficient data\n"); + if 
(WARN(IS_ERR_OR_NULL(client) || !slave_cb, "insufficient data\n")) return -EINVAL; - } if (!(client->flags & I2C_CLIENT_SLAVE)) dev_warn(&client->dev, "%s: client slave flag not set. You might see address collisions\n", @@ -60,6 +58,9 @@ int i2c_slave_unregister(struct i2c_client *client) { int ret; + if (IS_ERR_OR_NULL(client)) + return -EINVAL; + if (!client->adapter->algo->unreg_slave) { dev_err(&client->dev, "%s: not supported by adapter\n", __func__); return -EOPNOTSUPP; diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 0d1377232933..dc0558b23158 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -3676,10 +3676,12 @@ static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv, return ret; } cm_id_priv->id.state = IB_CM_IDLE; + spin_lock_irq(&cm.lock); if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) { rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table); RB_CLEAR_NODE(&cm_id_priv->sidr_id_node); } + spin_unlock_irq(&cm.lock); return 0; } diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c index 655795bfa0ee..513825e424bf 100644 --- a/drivers/infiniband/core/cq.c +++ b/drivers/infiniband/core/cq.c @@ -72,6 +72,15 @@ static void rdma_dim_init(struct ib_cq *cq) INIT_WORK(&dim->work, ib_cq_rdma_dim_work); } +static void rdma_dim_destroy(struct ib_cq *cq) +{ + if (!cq->dim) + return; + + cancel_work_sync(&cq->dim->work); + kfree(cq->dim); +} + static int __poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc) { int rc; @@ -266,6 +275,7 @@ struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private, return cq; out_destroy_cq: + rdma_dim_destroy(cq); rdma_restrack_del(&cq->res); cq->device->ops.destroy_cq(cq, udata); out_free_wc: @@ -331,12 +341,10 @@ void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata) WARN_ON_ONCE(1); } + rdma_dim_destroy(cq); trace_cq_free(cq); rdma_restrack_del(&cq->res); cq->device->ops.destroy_cq(cq, udata); - if (cq->dim) - cancel_work_sync(&cq->dim->work); - kfree(cq->dim); kfree(cq->wc); kfree(cq); } diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c index 3027cd2fb247..6d3ed7c6e19e 100644 --- a/drivers/infiniband/core/rdma_core.c +++ b/drivers/infiniband/core/rdma_core.c @@ -649,9 +649,6 @@ void rdma_alloc_commit_uobject(struct ib_uobject *uobj, { struct ib_uverbs_file *ufile = attrs->ufile; - /* alloc_commit consumes the uobj kref */ - uobj->uapi_object->type_class->alloc_commit(uobj); - /* kref is held so long as the uobj is on the uobj list. 
*/ uverbs_uobject_get(uobj); spin_lock_irq(&ufile->uobjects_lock); @@ -661,6 +658,9 @@ void rdma_alloc_commit_uobject(struct ib_uobject *uobj, /* matches atomic_set(-1) in alloc_uobj */ atomic_set(&uobj->usecnt, 0); + /* alloc_commit consumes the uobj kref */ + uobj->uapi_object->type_class->alloc_commit(uobj); + /* Matches the down_read in rdma_alloc_begin_uobject */ up_read(&ufile->hw_destroy_rwsem); } diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index 5b87eee8ccc8..d03dacaef788 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -1084,6 +1084,8 @@ static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf, size_t in_size; int ret; + if (in_len < offsetofend(typeof(cmd), reserved)) + return -EINVAL; in_size = min_t(size_t, in_len, sizeof(cmd)); if (copy_from_user(&cmd, inbuf, in_size)) return -EFAULT; @@ -1141,6 +1143,8 @@ static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf, size_t in_size; int ret; + if (in_len < offsetofend(typeof(cmd), reserved)) + return -EINVAL; in_size = min_t(size_t, in_len, sizeof(cmd)); if (copy_from_user(&cmd, inbuf, in_size)) return -EFAULT; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index dd01a51816cc..0618ced45bf8 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -3954,6 +3954,15 @@ static int config_qp_sq_buf(struct hns_roce_dev *hr_dev, return 0; } +static inline enum ib_mtu get_mtu(struct ib_qp *ibqp, + const struct ib_qp_attr *attr) +{ + if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD) + return IB_MTU_4096; + + return attr->path_mtu; +} + static int modify_qp_init_to_rtr(struct ib_qp *ibqp, const struct ib_qp_attr *attr, int attr_mask, struct hns_roce_v2_qp_context *context, @@ -3965,6 +3974,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, struct ib_device *ibdev = &hr_dev->ib_dev; dma_addr_t trrl_ba; dma_addr_t irrl_ba; + enum ib_mtu mtu; u8 port_num; u64 *mtts; u8 *dmac; @@ -4062,23 +4072,23 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M, V2_QPC_BYTE_52_DMAC_S, 0); - /* mtu*(2^LP_PKTN_INI) should not bigger than 1 message length 64kb */ + mtu = get_mtu(ibqp, attr); + + if (attr_mask & IB_QP_PATH_MTU) { + roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M, + V2_QPC_BYTE_24_MTU_S, mtu); + roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M, + V2_QPC_BYTE_24_MTU_S, 0); + } + +#define MAX_LP_MSG_LEN 65536 + /* MTU*(2^LP_PKTN_INI) shouldn't be bigger than 64kb */ roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M, V2_QPC_BYTE_56_LP_PKTN_INI_S, - ilog2(hr_dev->caps.max_sq_inline / IB_MTU_4096)); + ilog2(MAX_LP_MSG_LEN / ib_mtu_enum_to_int(mtu))); roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M, V2_QPC_BYTE_56_LP_PKTN_INI_S, 0); - if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD) - roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M, - V2_QPC_BYTE_24_MTU_S, IB_MTU_4096); - else if (attr_mask & IB_QP_PATH_MTU) - roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M, - V2_QPC_BYTE_24_MTU_S, attr->path_mtu); - - roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M, - V2_QPC_BYTE_24_MTU_S, 0); - roce_set_bit(qpc_mask->byte_108_rx_reqepsn, V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0); roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M, 
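The hns_roce_hw_v2.c hunk above stops deriving LP_PKTN_INI from a hard-coded IB_MTU_4096 and instead uses the QP's negotiated path MTU, keeping mtu * 2^LP_PKTN_INI within the 64 KB message limit stated in the new comment. The stand-alone sketch below is not part of the patch series; it only illustrates that arithmetic. ilog2_u32() and ib_mtu_to_bytes() are local stand-ins assumed here in place of the kernel's ilog2() and ib_mtu_enum_to_int() so the example builds in user space.

#include <stdio.h>

#define MAX_LP_MSG_LEN 65536	/* mtu * 2^LP_PKTN_INI must stay <= 64 KB */

/* integer log2, stand-in for the kernel's ilog2() */
static unsigned int ilog2_u32(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

/* IB_MTU_256..IB_MTU_4096 are encoded as 1..5; stand-in for ib_mtu_enum_to_int() */
static unsigned int ib_mtu_to_bytes(unsigned int mtu_enum)
{
	return 128U << mtu_enum;
}

int main(void)
{
	unsigned int mtu_enum;

	for (mtu_enum = 1; mtu_enum <= 5; mtu_enum++) {
		unsigned int mtu = ib_mtu_to_bytes(mtu_enum);
		unsigned int lp_pktn_ini = ilog2_u32(MAX_LP_MSG_LEN / mtu);

		printf("mtu %u bytes -> LP_PKTN_INI %u (max payload %u bytes)\n",
		       mtu, lp_pktn_ini, mtu << lp_pktn_ini);
	}
	return 0;
}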
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index 0e71ebee9e52..6b226a5eb7db 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -120,7 +120,7 @@ static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr, mr->pbl_hop_num = is_fast ? 1 : hr_dev->caps.pbl_hop_num; buf_attr.page_shift = is_fast ? PAGE_SHIFT : - hr_dev->caps.pbl_buf_pg_sz + HNS_HW_PAGE_SHIFT; + hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT; buf_attr.region[0].size = length; buf_attr.region[0].hopnum = mr->pbl_hop_num; buf_attr.region_count = 1; diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index 7d2ec9ee5097..77dca1e05bba 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -601,6 +601,23 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr) */ synchronize_srcu(&dev->odp_srcu); + /* + * All work on the prefetch list must be completed, xa_erase() prevented + * new work from being created. + */ + wait_event(imr->q_deferred_work, !atomic_read(&imr->num_deferred_work)); + + /* + * At this point it is forbidden for any other thread to enter + * pagefault_mr() on this imr. It is already forbidden to call + * pagefault_mr() on an implicit child. Due to this additions to + * implicit_children are prevented. + */ + + /* + * Block destroy_unused_implicit_child_mr() from incrementing + * num_deferred_work. + */ xa_lock(&imr->implicit_children); xa_for_each (&imr->implicit_children, idx, mtt) { __xa_erase(&imr->implicit_children, idx); @@ -609,9 +626,8 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr) xa_unlock(&imr->implicit_children); /* - * num_deferred_work can only be incremented inside the odp_srcu, or - * under xa_lock while the child is in the xarray. Thus at this point - * it is only decreasing, and all work holding it is now on the wq. + * Wait for any concurrent destroy_unused_implicit_child_mr() to + * complete. 
*/ wait_event(imr->q_deferred_work, !atomic_read(&imr->num_deferred_work)); @@ -1781,9 +1797,7 @@ static bool init_prefetch_work(struct ib_pd *pd, work->frags[i].mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey); if (!work->frags[i].mr) { - work->num_sge = i - 1; - if (i) - destroy_prefetch_work(work); + work->num_sge = i; return false; } @@ -1849,6 +1863,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd, srcu_key = srcu_read_lock(&dev->odp_srcu); if (!init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge)) { srcu_read_unlock(&dev->odp_srcu, srcu_key); + destroy_prefetch_work(work); return -EINVAL; } queue_work(system_unbound_wq, &work->work); diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index e050eade97a1..1225b8d77510 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -1766,15 +1766,14 @@ err: } static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev, + struct mlx5_ib_qp *qp, struct ib_qp_init_attr *init_attr, - struct mlx5_ib_create_qp *ucmd, void *qpc) { int scqe_sz; bool allow_scat_cqe = false; - if (ucmd) - allow_scat_cqe = ucmd->flags & MLX5_QP_FLAG_ALLOW_SCATTER_CQE; + allow_scat_cqe = qp->flags_en & MLX5_QP_FLAG_ALLOW_SCATTER_CQE; if (!allow_scat_cqe && init_attr->sq_sig_type != IB_SIGNAL_ALL_WR) return; @@ -1853,8 +1852,6 @@ static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, u32 *in; int err; - mutex_init(&qp->mutex); - if (attr->sq_sig_type == IB_SIGNAL_ALL_WR) qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE; @@ -1938,7 +1935,6 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, u32 *in; int err; - mutex_init(&qp->mutex); spin_lock_init(&qp->sq.lock); spin_lock_init(&qp->rq.lock); @@ -2012,7 +2008,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, } if ((qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) && (qp->type == MLX5_IB_QPT_DCI || qp->type == IB_QPT_RC)) - configure_requester_scat_cqe(dev, init_attr, ucmd, qpc); + configure_requester_scat_cqe(dev, qp, init_attr, qpc); if (qp->rq.wqe_cnt) { MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4); @@ -2129,7 +2125,6 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, u32 *in; int err; - mutex_init(&qp->mutex); spin_lock_init(&qp->sq.lock); spin_lock_init(&qp->rq.lock); @@ -2543,13 +2538,18 @@ static void process_vendor_flag(struct mlx5_ib_dev *dev, int *flags, int flag, return; } - if (flag == MLX5_QP_FLAG_SCATTER_CQE) { + switch (flag) { + case MLX5_QP_FLAG_SCATTER_CQE: + case MLX5_QP_FLAG_ALLOW_SCATTER_CQE: /* - * We don't return error if this flag was provided, - * and mlx5 doesn't have right capability. - */ - *flags &= ~MLX5_QP_FLAG_SCATTER_CQE; + * We don't return error if these flags were provided, + * and mlx5 doesn't have right capability. 
+ */ + *flags &= ~(MLX5_QP_FLAG_SCATTER_CQE | + MLX5_QP_FLAG_ALLOW_SCATTER_CQE); return; + default: + break; } mlx5_ib_dbg(dev, "Vendor create QP flag 0x%X is not supported\n", flag); } @@ -2589,6 +2589,8 @@ static int process_vendor_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SIGNATURE, true, qp); process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SCATTER_CQE, MLX5_CAP_GEN(mdev, sctr_data_cqe), qp); + process_vendor_flag(dev, &flags, MLX5_QP_FLAG_ALLOW_SCATTER_CQE, + MLX5_CAP_GEN(mdev, sctr_data_cqe), qp); if (qp->type == IB_QPT_RAW_PACKET) { cond = MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) || @@ -2963,6 +2965,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr, goto free_ucmd; } + mutex_init(&qp->mutex); qp->type = type; if (udata) { err = process_vendor_flags(dev, qp, params.ucmd, attr); diff --git a/drivers/infiniband/hw/mlx5/srq_cmd.c b/drivers/infiniband/hw/mlx5/srq_cmd.c index 6f5eadc4d183..37aaacebd3f2 100644 --- a/drivers/infiniband/hw/mlx5/srq_cmd.c +++ b/drivers/infiniband/hw/mlx5/srq_cmd.c @@ -83,11 +83,11 @@ struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn) struct mlx5_srq_table *table = &dev->srq_table; struct mlx5_core_srq *srq; - xa_lock(&table->array); + xa_lock_irq(&table->array); srq = xa_load(&table->array, srqn); if (srq) refcount_inc(&srq->common.refcount); - xa_unlock(&table->array); + xa_unlock_irq(&table->array); return srq; } diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index 7db35dd6ad74..332a8ba94b81 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -901,8 +901,6 @@ static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, qp->s_tail_ack_queue = 0; qp->s_acked_ack_queue = 0; qp->s_num_rd_atomic = 0; - if (qp->r_rq.kwq) - qp->r_rq.kwq->count = qp->r_rq.size; qp->r_sge.num_sge = 0; atomic_set(&qp->s_reserved_used, 0); } @@ -2367,31 +2365,6 @@ bad_lkey: } /** - * get_count - count numbers of request work queue entries - * in circular buffer - * @rq: data structure for request queue entry - * @tail: tail indices of the circular buffer - * @head: head indices of the circular buffer - * - * Return - total number of entries in the circular buffer - */ -static u32 get_count(struct rvt_rq *rq, u32 tail, u32 head) -{ - u32 count; - - count = head; - - if (count >= rq->size) - count = 0; - if (count < tail) - count += rq->size - tail; - else - count -= tail; - - return count; -} - -/** * get_rvt_head - get head indices of the circular buffer * @rq: data structure for request queue entry * @ip: the QP @@ -2465,7 +2438,7 @@ int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only) if (kwq->count < RVT_RWQ_COUNT_THRESHOLD) { head = get_rvt_head(rq, ip); - kwq->count = get_count(rq, tail, head); + kwq->count = rvt_get_rq_count(rq, head, tail); } if (unlikely(kwq->count == 0)) { ret = 0; @@ -2500,7 +2473,9 @@ int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only) * the number of remaining WQEs. 
*/ if (kwq->count < srq->limit) { - kwq->count = get_count(rq, tail, get_rvt_head(rq, ip)); + kwq->count = + rvt_get_rq_count(rq, + get_rvt_head(rq, ip), tail); if (kwq->count < srq->limit) { struct ib_event ev; diff --git a/drivers/infiniband/sw/rdmavt/rc.c b/drivers/infiniband/sw/rdmavt/rc.c index 977906cc0d11..c58735f4c94a 100644 --- a/drivers/infiniband/sw/rdmavt/rc.c +++ b/drivers/infiniband/sw/rdmavt/rc.c @@ -127,9 +127,7 @@ __be32 rvt_compute_aeth(struct rvt_qp *qp) * not atomic, which is OK, since the fuzziness is * resolved as further ACKs go out. */ - credits = head - tail; - if ((int)credits < 0) - credits += qp->r_rq.size; + credits = rvt_get_rq_count(&qp->r_rq, head, tail); } /* * Binary search the credit table to find the code to diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c index e5f998744501..9e1ab701785c 100644 --- a/drivers/interconnect/core.c +++ b/drivers/interconnect/core.c @@ -243,6 +243,7 @@ static int aggregate_requests(struct icc_node *node) { struct icc_provider *p = node->provider; struct icc_req *r; + u32 avg_bw, peak_bw; node->avg_bw = 0; node->peak_bw = 0; @@ -251,9 +252,14 @@ static int aggregate_requests(struct icc_node *node) p->pre_aggregate(node); hlist_for_each_entry(r, &node->req_list, req_node) { - if (!r->enabled) - continue; - p->aggregate(node, r->tag, r->avg_bw, r->peak_bw, + if (r->enabled) { + avg_bw = r->avg_bw; + peak_bw = r->peak_bw; + } else { + avg_bw = 0; + peak_bw = 0; + } + p->aggregate(node, r->tag, avg_bw, peak_bw, &node->avg_bw, &node->peak_bw); } diff --git a/drivers/interconnect/qcom/msm8916.c b/drivers/interconnect/qcom/msm8916.c index e94f3c5228b7..42c6c5581662 100644 --- a/drivers/interconnect/qcom/msm8916.c +++ b/drivers/interconnect/qcom/msm8916.c @@ -197,13 +197,13 @@ DEFINE_QNODE(pcnoc_int_0, MSM8916_PNOC_INT_0, 8, -1, -1, MSM8916_PNOC_SNOC_MAS, DEFINE_QNODE(pcnoc_int_1, MSM8916_PNOC_INT_1, 8, -1, -1, MSM8916_PNOC_SNOC_MAS); DEFINE_QNODE(pcnoc_m_0, MSM8916_PNOC_MAS_0, 8, -1, -1, MSM8916_PNOC_INT_0); DEFINE_QNODE(pcnoc_m_1, MSM8916_PNOC_MAS_1, 8, -1, -1, MSM8916_PNOC_SNOC_MAS); -DEFINE_QNODE(pcnoc_s_0, MSM8916_PNOC_SLV_0, 8, -1, -1, MSM8916_SLAVE_CLK_CTL, MSM8916_SLAVE_TLMM, MSM8916_SLAVE_TCSR, MSM8916_SLAVE_SECURITY, MSM8916_SLAVE_MSS); -DEFINE_QNODE(pcnoc_s_1, MSM8916_PNOC_SLV_1, 8, -1, -1, MSM8916_SLAVE_IMEM_CFG, MSM8916_SLAVE_CRYPTO_0_CFG, MSM8916_SLAVE_MSG_RAM, MSM8916_SLAVE_PDM, MSM8916_SLAVE_PRNG); -DEFINE_QNODE(pcnoc_s_2, MSM8916_PNOC_SLV_2, 8, -1, -1, MSM8916_SLAVE_SPDM, MSM8916_SLAVE_BOOT_ROM, MSM8916_SLAVE_BIMC_CFG, MSM8916_SLAVE_PNOC_CFG, MSM8916_SLAVE_PMIC_ARB); -DEFINE_QNODE(pcnoc_s_3, MSM8916_PNOC_SLV_3, 8, -1, -1, MSM8916_SLAVE_MPM, MSM8916_SLAVE_SNOC_CFG, MSM8916_SLAVE_RBCPR_CFG, MSM8916_SLAVE_QDSS_CFG, MSM8916_SLAVE_DEHR_CFG); -DEFINE_QNODE(pcnoc_s_4, MSM8916_PNOC_SLV_4, 8, -1, -1, MSM8916_SLAVE_VENUS_CFG, MSM8916_SLAVE_CAMERA_CFG, MSM8916_SLAVE_DISPLAY_CFG); -DEFINE_QNODE(pcnoc_s_8, MSM8916_PNOC_SLV_8, 8, -1, -1, MSM8916_SLAVE_USB_HS, MSM8916_SLAVE_SDCC_1, MSM8916_SLAVE_BLSP_1); -DEFINE_QNODE(pcnoc_s_9, MSM8916_PNOC_SLV_9, 8, -1, -1, MSM8916_SLAVE_SDCC_2, MSM8916_SLAVE_LPASS, MSM8916_SLAVE_GRAPHICS_3D_CFG); +DEFINE_QNODE(pcnoc_s_0, MSM8916_PNOC_SLV_0, 4, -1, -1, MSM8916_SLAVE_CLK_CTL, MSM8916_SLAVE_TLMM, MSM8916_SLAVE_TCSR, MSM8916_SLAVE_SECURITY, MSM8916_SLAVE_MSS); +DEFINE_QNODE(pcnoc_s_1, MSM8916_PNOC_SLV_1, 4, -1, -1, MSM8916_SLAVE_IMEM_CFG, MSM8916_SLAVE_CRYPTO_0_CFG, MSM8916_SLAVE_MSG_RAM, MSM8916_SLAVE_PDM, MSM8916_SLAVE_PRNG); +DEFINE_QNODE(pcnoc_s_2, MSM8916_PNOC_SLV_2, 4, 
-1, -1, MSM8916_SLAVE_SPDM, MSM8916_SLAVE_BOOT_ROM, MSM8916_SLAVE_BIMC_CFG, MSM8916_SLAVE_PNOC_CFG, MSM8916_SLAVE_PMIC_ARB); +DEFINE_QNODE(pcnoc_s_3, MSM8916_PNOC_SLV_3, 4, -1, -1, MSM8916_SLAVE_MPM, MSM8916_SLAVE_SNOC_CFG, MSM8916_SLAVE_RBCPR_CFG, MSM8916_SLAVE_QDSS_CFG, MSM8916_SLAVE_DEHR_CFG); +DEFINE_QNODE(pcnoc_s_4, MSM8916_PNOC_SLV_4, 4, -1, -1, MSM8916_SLAVE_VENUS_CFG, MSM8916_SLAVE_CAMERA_CFG, MSM8916_SLAVE_DISPLAY_CFG); +DEFINE_QNODE(pcnoc_s_8, MSM8916_PNOC_SLV_8, 4, -1, -1, MSM8916_SLAVE_USB_HS, MSM8916_SLAVE_SDCC_1, MSM8916_SLAVE_BLSP_1); +DEFINE_QNODE(pcnoc_s_9, MSM8916_PNOC_SLV_9, 4, -1, -1, MSM8916_SLAVE_SDCC_2, MSM8916_SLAVE_LPASS, MSM8916_SLAVE_GRAPHICS_3D_CFG); DEFINE_QNODE(pcnoc_snoc_mas, MSM8916_PNOC_SNOC_MAS, 8, 29, -1, MSM8916_PNOC_SNOC_SLV); DEFINE_QNODE(pcnoc_snoc_slv, MSM8916_PNOC_SNOC_SLV, 8, -1, 45, MSM8916_SNOC_INT_0, MSM8916_SNOC_INT_BIMC, MSM8916_SNOC_INT_1); DEFINE_QNODE(qdss_int, MSM8916_SNOC_QDSS_INT, 8, -1, -1, MSM8916_SNOC_INT_0, MSM8916_SNOC_INT_BIMC); diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c index c3e1fbd1988c..d176df569af8 100644 --- a/drivers/iommu/qcom_iommu.c +++ b/drivers/iommu/qcom_iommu.c @@ -65,6 +65,7 @@ struct qcom_iommu_domain { struct mutex init_mutex; /* Protects iommu pointer */ struct iommu_domain domain; struct qcom_iommu_dev *iommu; + struct iommu_fwspec *fwspec; }; static struct qcom_iommu_domain *to_qcom_iommu_domain(struct iommu_domain *dom) @@ -84,9 +85,9 @@ static struct qcom_iommu_dev * to_iommu(struct device *dev) return dev_iommu_priv_get(dev); } -static struct qcom_iommu_ctx * to_ctx(struct device *dev, unsigned asid) +static struct qcom_iommu_ctx * to_ctx(struct qcom_iommu_domain *d, unsigned asid) { - struct qcom_iommu_dev *qcom_iommu = to_iommu(dev); + struct qcom_iommu_dev *qcom_iommu = d->iommu; if (!qcom_iommu) return NULL; return qcom_iommu->ctxs[asid - 1]; @@ -118,14 +119,12 @@ iommu_readq(struct qcom_iommu_ctx *ctx, unsigned reg) static void qcom_iommu_tlb_sync(void *cookie) { - struct iommu_fwspec *fwspec; - struct device *dev = cookie; + struct qcom_iommu_domain *qcom_domain = cookie; + struct iommu_fwspec *fwspec = qcom_domain->fwspec; unsigned i; - fwspec = dev_iommu_fwspec_get(dev); - for (i = 0; i < fwspec->num_ids; i++) { - struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]); + struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]); unsigned int val, ret; iommu_writel(ctx, ARM_SMMU_CB_TLBSYNC, 0); @@ -139,14 +138,12 @@ static void qcom_iommu_tlb_sync(void *cookie) static void qcom_iommu_tlb_inv_context(void *cookie) { - struct device *dev = cookie; - struct iommu_fwspec *fwspec; + struct qcom_iommu_domain *qcom_domain = cookie; + struct iommu_fwspec *fwspec = qcom_domain->fwspec; unsigned i; - fwspec = dev_iommu_fwspec_get(dev); - for (i = 0; i < fwspec->num_ids; i++) { - struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]); + struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]); iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid); } @@ -156,16 +153,14 @@ static void qcom_iommu_tlb_inv_context(void *cookie) static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size, size_t granule, bool leaf, void *cookie) { - struct device *dev = cookie; - struct iommu_fwspec *fwspec; + struct qcom_iommu_domain *qcom_domain = cookie; + struct iommu_fwspec *fwspec = qcom_domain->fwspec; unsigned i, reg; reg = leaf ? 
ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA; - fwspec = dev_iommu_fwspec_get(dev); - for (i = 0; i < fwspec->num_ids; i++) { - struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]); + struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]); size_t s = size; iova = (iova >> 12) << 12; @@ -256,7 +251,9 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain, }; qcom_domain->iommu = qcom_iommu; - pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, dev); + qcom_domain->fwspec = fwspec; + + pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, qcom_domain); if (!pgtbl_ops) { dev_err(qcom_iommu->dev, "failed to allocate pagetable ops\n"); ret = -ENOMEM; @@ -269,7 +266,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain, domain->geometry.force_aperture = true; for (i = 0; i < fwspec->num_ids; i++) { - struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]); + struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]); if (!ctx->secure_init) { ret = qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, ctx->asid); @@ -419,7 +416,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de pm_runtime_get_sync(qcom_iommu->dev); for (i = 0; i < fwspec->num_ids; i++) { - struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]); + struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]); /* Disable the context bank: */ iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0); diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 81dc5ff08909..a83a1de1e03f 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -2420,7 +2420,7 @@ static void integrity_writer(struct work_struct *w) unsigned prev_free_sectors; /* the following test is not needed, but it tests the replay code */ - if (unlikely(dm_suspended(ic->ti)) && !ic->meta_dev) + if (unlikely(dm_post_suspending(ic->ti)) && !ic->meta_dev) return; spin_lock_irq(&ic->endio_wait.lock); @@ -2481,7 +2481,7 @@ static void integrity_recalc(struct work_struct *w) next_chunk: - if (unlikely(dm_suspended(ic->ti))) + if (unlikely(dm_post_suspending(ic->ti))) goto unlock_ret; range.logical_sector = le64_to_cpu(ic->sb->recalc_sector); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 52449afd58eb..5b9de2f71bb0 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -143,6 +143,7 @@ EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr); #define DMF_NOFLUSH_SUSPENDING 5 #define DMF_DEFERRED_REMOVE 6 #define DMF_SUSPENDED_INTERNALLY 7 +#define DMF_POST_SUSPENDING 8 #define DM_NUMA_NODE NUMA_NO_NODE static int dm_numa_node = DM_NUMA_NODE; @@ -2408,6 +2409,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait) if (!dm_suspended_md(md)) { dm_table_presuspend_targets(map); set_bit(DMF_SUSPENDED, &md->flags); + set_bit(DMF_POST_SUSPENDING, &md->flags); dm_table_postsuspend_targets(map); } /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ @@ -2766,7 +2768,9 @@ retry: if (r) goto out_unlock; + set_bit(DMF_POST_SUSPENDING, &md->flags); dm_table_postsuspend_targets(map); + clear_bit(DMF_POST_SUSPENDING, &md->flags); out_unlock: mutex_unlock(&md->suspend_lock); @@ -2863,7 +2867,9 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE, DMF_SUSPENDED_INTERNALLY); + set_bit(DMF_POST_SUSPENDING, &md->flags); dm_table_postsuspend_targets(map); + clear_bit(DMF_POST_SUSPENDING, &md->flags); } static void __dm_internal_resume(struct mapped_device *md) @@ 
-3024,6 +3030,11 @@ int dm_suspended_md(struct mapped_device *md) return test_bit(DMF_SUSPENDED, &md->flags); } +static int dm_post_suspending_md(struct mapped_device *md) +{ + return test_bit(DMF_POST_SUSPENDING, &md->flags); +} + int dm_suspended_internally_md(struct mapped_device *md) { return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); @@ -3040,6 +3051,12 @@ int dm_suspended(struct dm_target *ti) } EXPORT_SYMBOL_GPL(dm_suspended); +int dm_post_suspending(struct dm_target *ti) +{ + return dm_post_suspending_md(dm_table_get_md(ti->table)); +} +EXPORT_SYMBOL_GPL(dm_post_suspending); + int dm_noflush_suspending(struct dm_target *ti) { return __noflush_suspending(dm_table_get_md(ti->table)); diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c index b0f62cbbdc87..f3a8f113865d 100644 --- a/drivers/misc/habanalabs/command_submission.c +++ b/drivers/misc/habanalabs/command_submission.c @@ -499,11 +499,19 @@ static int validate_queue_index(struct hl_device *hdev, struct asic_fixed_properties *asic = &hdev->asic_prop; struct hw_queue_properties *hw_queue_prop; + /* This must be checked here to prevent out-of-bounds access to + * hw_queues_props array + */ + if (chunk->queue_index >= HL_MAX_QUEUES) { + dev_err(hdev->dev, "Queue index %d is invalid\n", + chunk->queue_index); + return -EINVAL; + } + hw_queue_prop = &asic->hw_queues_props[chunk->queue_index]; - if ((chunk->queue_index >= HL_MAX_QUEUES) || - (hw_queue_prop->type == QUEUE_TYPE_NA)) { - dev_err(hdev->dev, "Queue index %d is invalid\n", + if (hw_queue_prop->type == QUEUE_TYPE_NA) { + dev_err(hdev->dev, "Queue index %d is not applicable\n", chunk->queue_index); return -EINVAL; } diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/debugfs.c index fc4372c18ce2..0bc036e01ee8 100644 --- a/drivers/misc/habanalabs/debugfs.c +++ b/drivers/misc/habanalabs/debugfs.c @@ -36,7 +36,7 @@ static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr, pkt.i2c_reg = i2c_reg; rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - HL_DEVICE_TIMEOUT_USEC, (long *) val); + 0, (long *) val); if (rc) dev_err(hdev->dev, "Failed to read from I2C, error %d\n", rc); @@ -63,7 +63,7 @@ static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr, pkt.value = cpu_to_le64(val); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - HL_DEVICE_TIMEOUT_USEC, NULL); + 0, NULL); if (rc) dev_err(hdev->dev, "Failed to write to I2C, error %d\n", rc); @@ -87,7 +87,7 @@ static void hl_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state) pkt.value = cpu_to_le64(state); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - HL_DEVICE_TIMEOUT_USEC, NULL); + 0, NULL); if (rc) dev_err(hdev->dev, "Failed to set LED %d, error %d\n", led, rc); @@ -981,7 +981,7 @@ static ssize_t hl_clk_gate_read(struct file *f, char __user *buf, if (*ppos) return 0; - sprintf(tmp_buf, "%d\n", hdev->clock_gating); + sprintf(tmp_buf, "0x%llx\n", hdev->clock_gating_mask); rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf, strlen(tmp_buf) + 1); @@ -993,7 +993,7 @@ static ssize_t hl_clk_gate_write(struct file *f, const char __user *buf, { struct hl_dbg_device_entry *entry = file_inode(f)->i_private; struct hl_device *hdev = entry->hdev; - u32 value; + u64 value; ssize_t rc; if (atomic_read(&hdev->in_reset)) { @@ -1002,19 +1002,12 @@ static ssize_t hl_clk_gate_write(struct file *f, const char __user *buf, return 0; 
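The clk_gate debugfs entry switches just below from a boolean to a 64-bit per-engine bitmask parsed as hex (kstrtoull_from_user(..., 16, ...)), and the Gaudi code later in the series walks hdev->clock_gating_mask with BIT_ULL(GAUDI_ENGINE_ID_*). A hedged illustration of composing such a mask in driver code; the helper name is invented for the example, and it assumes the GAUDI_ENGINE_ID_TPC_* values are consecutive, as the GENMASK_ULL() usage in the Gaudi patch suggests:

/*
 * Illustration only: keep clock gating enabled everywhere except the TPC
 * engines (a set bit means "allow clock gating for this engine", per
 * gaudi_set_clock_gating() further down).
 */
static u64 example_clock_gating_mask(void)
{
	u64 mask = U64_MAX;			/* gate every engine */
	int i;

	for (i = GAUDI_ENGINE_ID_TPC_0; i <= GAUDI_ENGINE_ID_TPC_7; i++)
		mask &= ~BIT_ULL(i);		/* but leave the TPCs ungated */

	return mask;
}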
} - rc = kstrtouint_from_user(buf, count, 10, &value); + rc = kstrtoull_from_user(buf, count, 16, &value); if (rc) return rc; - if (value) { - hdev->clock_gating = 1; - if (hdev->asic_funcs->enable_clock_gating) - hdev->asic_funcs->enable_clock_gating(hdev); - } else { - if (hdev->asic_funcs->disable_clock_gating) - hdev->asic_funcs->disable_clock_gating(hdev); - hdev->clock_gating = 0; - } + hdev->clock_gating_mask = value; + hdev->asic_funcs->set_clock_gating(hdev); return count; } diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c index 2b38a119704c..59608d1bac88 100644 --- a/drivers/misc/habanalabs/device.c +++ b/drivers/misc/habanalabs/device.c @@ -608,7 +608,7 @@ int hl_device_set_debug_mode(struct hl_device *hdev, bool enable) hdev->in_debug = 0; if (!hdev->hard_reset_pending) - hdev->asic_funcs->enable_clock_gating(hdev); + hdev->asic_funcs->set_clock_gating(hdev); goto out; } diff --git a/drivers/misc/habanalabs/firmware_if.c b/drivers/misc/habanalabs/firmware_if.c index baf790cf4b78..d27841cb5bcb 100644 --- a/drivers/misc/habanalabs/firmware_if.c +++ b/drivers/misc/habanalabs/firmware_if.c @@ -61,7 +61,7 @@ int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode) pkt.ctl = cpu_to_le32(opcode << ARMCP_PKT_CTL_OPCODE_SHIFT); return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, - sizeof(pkt), HL_DEVICE_TIMEOUT_USEC, NULL); + sizeof(pkt), 0, NULL); } int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg, @@ -144,7 +144,7 @@ int hl_fw_unmask_irq(struct hl_device *hdev, u16 event_type) pkt.value = cpu_to_le64(event_type); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - HL_DEVICE_TIMEOUT_USEC, &result); + 0, &result); if (rc) dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type); @@ -183,7 +183,7 @@ int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr, ARMCP_PKT_CTL_OPCODE_SHIFT); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt, - total_pkt_size, HL_DEVICE_TIMEOUT_USEC, &result); + total_pkt_size, 0, &result); if (rc) dev_err(hdev->dev, "failed to unmask IRQ array\n"); @@ -204,7 +204,7 @@ int hl_fw_test_cpu_queue(struct hl_device *hdev) test_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &test_pkt, - sizeof(test_pkt), HL_DEVICE_TIMEOUT_USEC, &result); + sizeof(test_pkt), 0, &result); if (!rc) { if (result != ARMCP_PACKET_FENCE_VAL) @@ -248,7 +248,7 @@ int hl_fw_send_heartbeat(struct hl_device *hdev) hb_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &hb_pkt, - sizeof(hb_pkt), HL_DEVICE_TIMEOUT_USEC, &result); + sizeof(hb_pkt), 0, &result); if ((rc) || (result != ARMCP_PACKET_FENCE_VAL)) rc = -EIO; diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index 834470d10b46..637a9d608707 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -80,6 +80,7 @@ #define GAUDI_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30) #define GAUDI_PLDM_TPC_KERNEL_WAIT_USEC (HL_DEVICE_TIMEOUT_USEC * 30) #define GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC 1000000 /* 1s */ +#define GAUDI_MSG_TO_CPU_TIMEOUT_USEC 4000000 /* 4s */ #define GAUDI_QMAN0_FENCE_VAL 0x72E91AB9 @@ -98,6 +99,11 @@ #define GAUDI_ARB_WDT_TIMEOUT 0x1000000 +#define GAUDI_CLK_GATE_DEBUGFS_MASK (\ + BIT(GAUDI_ENGINE_ID_MME_0) |\ + BIT(GAUDI_ENGINE_ID_MME_2) |\ + GENMASK_ULL(GAUDI_ENGINE_ID_TPC_7, GAUDI_ENGINE_ID_TPC_0)) + static 
const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = { "gaudi cq 0_0", "gaudi cq 0_1", "gaudi cq 0_2", "gaudi cq 0_3", "gaudi cq 1_0", "gaudi cq 1_1", "gaudi cq 1_2", "gaudi cq 1_3", @@ -106,14 +112,14 @@ static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = { }; static const u8 gaudi_dma_assignment[GAUDI_DMA_MAX] = { - [GAUDI_PCI_DMA_1] = 0, - [GAUDI_PCI_DMA_2] = 1, - [GAUDI_PCI_DMA_3] = 5, - [GAUDI_HBM_DMA_1] = 2, - [GAUDI_HBM_DMA_2] = 3, - [GAUDI_HBM_DMA_3] = 4, - [GAUDI_HBM_DMA_4] = 6, - [GAUDI_HBM_DMA_5] = 7 + [GAUDI_PCI_DMA_1] = GAUDI_ENGINE_ID_DMA_0, + [GAUDI_PCI_DMA_2] = GAUDI_ENGINE_ID_DMA_1, + [GAUDI_PCI_DMA_3] = GAUDI_ENGINE_ID_DMA_5, + [GAUDI_HBM_DMA_1] = GAUDI_ENGINE_ID_DMA_2, + [GAUDI_HBM_DMA_2] = GAUDI_ENGINE_ID_DMA_3, + [GAUDI_HBM_DMA_3] = GAUDI_ENGINE_ID_DMA_4, + [GAUDI_HBM_DMA_4] = GAUDI_ENGINE_ID_DMA_6, + [GAUDI_HBM_DMA_5] = GAUDI_ENGINE_ID_DMA_7 }; static const u8 gaudi_cq_assignment[NUMBER_OF_CMPLT_QUEUES] = { @@ -1819,7 +1825,7 @@ static void gaudi_init_golden_registers(struct hl_device *hdev) gaudi_init_rate_limiter(hdev); - gaudi_disable_clock_gating(hdev); + hdev->asic_funcs->disable_clock_gating(hdev); for (tpc_id = 0, tpc_offset = 0; tpc_id < TPC_NUMBER_OF_ENGINES; @@ -2531,46 +2537,55 @@ static void gaudi_tpc_stall(struct hl_device *hdev) WREG32(mmTPC7_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT); } -static void gaudi_enable_clock_gating(struct hl_device *hdev) +static void gaudi_set_clock_gating(struct hl_device *hdev) { struct gaudi_device *gaudi = hdev->asic_specific; u32 qman_offset; int i; - if (!hdev->clock_gating) - return; - - if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) - return; - /* In case we are during debug session, don't enable the clock gate * as it may interfere */ if (hdev->in_debug) return; - for (i = 0, qman_offset = 0 ; i < PCI_DMA_NUMBER_OF_CHNLS ; i++) { + for (i = GAUDI_PCI_DMA_1, qman_offset = 0 ; i < GAUDI_HBM_DMA_1 ; i++) { + if (!(hdev->clock_gating_mask & + (BIT_ULL(gaudi_dma_assignment[i])))) + continue; + qman_offset = gaudi_dma_assignment[i] * DMA_QMAN_OFFSET; WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset, QMAN_CGM1_PWR_GATE_EN); WREG32(mmDMA0_QM_CGM_CFG + qman_offset, QMAN_UPPER_CP_CGM_PWR_GATE_EN); } - for (; i < HBM_DMA_NUMBER_OF_CHNLS ; i++) { + for (i = GAUDI_HBM_DMA_1 ; i < GAUDI_DMA_MAX ; i++) { + if (!(hdev->clock_gating_mask & + (BIT_ULL(gaudi_dma_assignment[i])))) + continue; + qman_offset = gaudi_dma_assignment[i] * DMA_QMAN_OFFSET; WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset, QMAN_CGM1_PWR_GATE_EN); WREG32(mmDMA0_QM_CGM_CFG + qman_offset, QMAN_COMMON_CP_CGM_PWR_GATE_EN); } - WREG32(mmMME0_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN); - WREG32(mmMME0_QM_CGM_CFG, - QMAN_COMMON_CP_CGM_PWR_GATE_EN); - WREG32(mmMME2_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN); - WREG32(mmMME2_QM_CGM_CFG, - QMAN_COMMON_CP_CGM_PWR_GATE_EN); + if (hdev->clock_gating_mask & (BIT_ULL(GAUDI_ENGINE_ID_MME_0))) { + WREG32(mmMME0_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN); + WREG32(mmMME0_QM_CGM_CFG, QMAN_COMMON_CP_CGM_PWR_GATE_EN); + } + + if (hdev->clock_gating_mask & (BIT_ULL(GAUDI_ENGINE_ID_MME_2))) { + WREG32(mmMME2_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN); + WREG32(mmMME2_QM_CGM_CFG, QMAN_COMMON_CP_CGM_PWR_GATE_EN); + } for (i = 0, qman_offset = 0 ; i < TPC_NUMBER_OF_ENGINES ; i++) { + if (!(hdev->clock_gating_mask & + (BIT_ULL(GAUDI_ENGINE_ID_TPC_0 + i)))) + continue; + WREG32(mmTPC0_QM_CGM_CFG1 + qman_offset, QMAN_CGM1_PWR_GATE_EN); WREG32(mmTPC0_QM_CGM_CFG + qman_offset, @@ -2663,7 +2678,7 @@ static void gaudi_halt_engines(struct 
hl_device *hdev, bool hard_reset) gaudi_stop_hbm_dma_qmans(hdev); gaudi_stop_pci_dma_qmans(hdev); - gaudi_disable_clock_gating(hdev); + hdev->asic_funcs->disable_clock_gating(hdev); msleep(wait_timeout_ms); @@ -3003,7 +3018,7 @@ static int gaudi_hw_init(struct hl_device *hdev) gaudi_init_tpc_qmans(hdev); - gaudi_enable_clock_gating(hdev); + hdev->asic_funcs->set_clock_gating(hdev); gaudi_enable_timestamp(hdev); @@ -3112,7 +3127,9 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset) HW_CAP_HBM_DMA | HW_CAP_PLL | HW_CAP_MMU | HW_CAP_SRAM_SCRAMBLER | - HW_CAP_HBM_SCRAMBLER); + HW_CAP_HBM_SCRAMBLER | + HW_CAP_CLK_GATE); + memset(gaudi->events_stat, 0, sizeof(gaudi->events_stat)); } @@ -3463,6 +3480,9 @@ static int gaudi_send_cpu_message(struct hl_device *hdev, u32 *msg, return 0; } + if (!timeout) + timeout = GAUDI_MSG_TO_CPU_TIMEOUT_USEC; + return hl_fw_send_cpu_message(hdev, GAUDI_QUEUE_ID_CPU_PQ, msg, len, timeout, result); } @@ -3865,6 +3885,12 @@ static int gaudi_validate_cb(struct hl_device *hdev, rc = -EPERM; break; + case PACKET_WREG_BULK: + dev_err(hdev->dev, + "User not allowed to use WREG_BULK\n"); + rc = -EPERM; + break; + case PACKET_LOAD_AND_EXE: rc = gaudi_validate_load_and_exe_pkt(hdev, parser, (struct packet_load_and_exe *) user_pkt); @@ -3880,7 +3906,6 @@ static int gaudi_validate_cb(struct hl_device *hdev, break; case PACKET_WREG_32: - case PACKET_WREG_BULK: case PACKET_MSG_LONG: case PACKET_MSG_SHORT: case PACKET_REPEAT: @@ -4521,13 +4546,18 @@ static int gaudi_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val) int rc = 0; if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) { - if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) { + + if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) && + (hdev->clock_gating_mask & + GAUDI_CLK_GATE_DEBUGFS_MASK)) { + dev_err_ratelimited(hdev->dev, "Can't read register - clock gating is enabled!\n"); rc = -EFAULT; } else { *val = RREG32(addr - CFG_BASE); } + } else if ((addr >= SRAM_BASE_ADDR) && (addr < SRAM_BASE_ADDR + SRAM_BAR_SIZE)) { *val = readl(hdev->pcie_bar[SRAM_BAR_ID] + @@ -4563,13 +4593,18 @@ static int gaudi_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val) int rc = 0; if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) { - if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) { + + if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) && + (hdev->clock_gating_mask & + GAUDI_CLK_GATE_DEBUGFS_MASK)) { + dev_err_ratelimited(hdev->dev, "Can't write register - clock gating is enabled!\n"); rc = -EFAULT; } else { WREG32(addr - CFG_BASE, val); } + } else if ((addr >= SRAM_BASE_ADDR) && (addr < SRAM_BASE_ADDR + SRAM_BAR_SIZE)) { writel(val, hdev->pcie_bar[SRAM_BAR_ID] + @@ -4605,7 +4640,11 @@ static int gaudi_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val) int rc = 0; if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) { - if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) { + + if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) && + (hdev->clock_gating_mask & + GAUDI_CLK_GATE_DEBUGFS_MASK)) { + dev_err_ratelimited(hdev->dev, "Can't read register - clock gating is enabled!\n"); rc = -EFAULT; @@ -4615,6 +4654,7 @@ static int gaudi_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val) *val = (((u64) val_h) << 32) | val_l; } + } else if ((addr >= SRAM_BASE_ADDR) && (addr <= SRAM_BASE_ADDR + SRAM_BAR_SIZE - sizeof(u64))) { *val = readq(hdev->pcie_bar[SRAM_BAR_ID] + @@ -4651,7 +4691,11 @@ static int gaudi_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val) int rc = 0; if 
((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) { - if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) { + + if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) && + (hdev->clock_gating_mask & + GAUDI_CLK_GATE_DEBUGFS_MASK)) { + dev_err_ratelimited(hdev->dev, "Can't write register - clock gating is enabled!\n"); rc = -EFAULT; @@ -4660,6 +4704,7 @@ static int gaudi_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val) WREG32(addr + sizeof(u32) - CFG_BASE, upper_32_bits(val)); } + } else if ((addr >= SRAM_BASE_ADDR) && (addr <= SRAM_BASE_ADDR + SRAM_BAR_SIZE - sizeof(u64))) { writeq(val, hdev->pcie_bar[SRAM_BAR_ID] + @@ -4881,7 +4926,7 @@ static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid) gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_ARUSER, asid); gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_AWUSER, asid); - hdev->asic_funcs->enable_clock_gating(hdev); + hdev->asic_funcs->set_clock_gating(hdev); mutex_unlock(&gaudi->clk_gate_mutex); } @@ -5262,7 +5307,7 @@ static void gaudi_print_ecc_info_generic(struct hl_device *hdev, } if (disable_clock_gating) { - hdev->asic_funcs->enable_clock_gating(hdev); + hdev->asic_funcs->set_clock_gating(hdev); mutex_unlock(&gaudi->clk_gate_mutex); } } @@ -5749,7 +5794,7 @@ static bool gaudi_tpc_read_interrupts(struct hl_device *hdev, u8 tpc_id, /* Clear interrupts */ WREG32(mmTPC0_CFG_TPC_INTR_CAUSE + tpc_offset, 0); - hdev->asic_funcs->enable_clock_gating(hdev); + hdev->asic_funcs->set_clock_gating(hdev); mutex_unlock(&gaudi->clk_gate_mutex); @@ -6265,7 +6310,7 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u32 *mask, if (s) seq_puts(s, "\n"); - hdev->asic_funcs->enable_clock_gating(hdev); + hdev->asic_funcs->set_clock_gating(hdev); mutex_unlock(&gaudi->clk_gate_mutex); @@ -6366,7 +6411,7 @@ static int gaudi_run_tpc_kernel(struct hl_device *hdev, u64 tpc_kernel, dev_err(hdev->dev, "Timeout while waiting for TPC%d icache prefetch\n", tpc_id); - hdev->asic_funcs->enable_clock_gating(hdev); + hdev->asic_funcs->set_clock_gating(hdev); mutex_unlock(&gaudi->clk_gate_mutex); return -EIO; } @@ -6395,7 +6440,7 @@ static int gaudi_run_tpc_kernel(struct hl_device *hdev, u64 tpc_kernel, 1000, kernel_timeout); - hdev->asic_funcs->enable_clock_gating(hdev); + hdev->asic_funcs->set_clock_gating(hdev); mutex_unlock(&gaudi->clk_gate_mutex); if (rc) { @@ -6736,7 +6781,7 @@ static const struct hl_asic_funcs gaudi_funcs = { .mmu_invalidate_cache = gaudi_mmu_invalidate_cache, .mmu_invalidate_cache_range = gaudi_mmu_invalidate_cache_range, .send_heartbeat = gaudi_send_heartbeat, - .enable_clock_gating = gaudi_enable_clock_gating, + .set_clock_gating = gaudi_set_clock_gating, .disable_clock_gating = gaudi_disable_clock_gating, .debug_coresight = gaudi_debug_coresight, .is_device_idle = gaudi_is_device_idle, diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index 0d2952bb58df..88460b2138d8 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -88,6 +88,7 @@ #define GOYA_PLDM_MMU_TIMEOUT_USEC (MMU_CONFIG_TIMEOUT_USEC * 100) #define GOYA_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30) #define GOYA_BOOT_FIT_REQ_TIMEOUT_USEC 1000000 /* 1s */ +#define GOYA_MSG_TO_CPU_TIMEOUT_USEC 4000000 /* 4s */ #define GOYA_QMAN0_FENCE_VAL 0xD169B243 @@ -2830,6 +2831,9 @@ int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len, return 0; } + if (!timeout) + timeout = GOYA_MSG_TO_CPU_TIMEOUT_USEC; + return hl_fw_send_cpu_message(hdev, 
GOYA_QUEUE_ID_CPU_PQ, msg, len, timeout, result); } @@ -4431,8 +4435,8 @@ static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr, pkt->armcp_pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY << ARMCP_PKT_CTL_OPCODE_SHIFT); - rc = goya_send_cpu_message(hdev, (u32 *) pkt, total_pkt_size, - HL_DEVICE_TIMEOUT_USEC, &result); + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt, + total_pkt_size, 0, &result); if (rc) dev_err(hdev->dev, "failed to unmask IRQ array\n"); @@ -4464,8 +4468,8 @@ static int goya_unmask_irq(struct hl_device *hdev, u16 event_type) ARMCP_PKT_CTL_OPCODE_SHIFT); pkt.value = cpu_to_le64(event_type); - rc = goya_send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - HL_DEVICE_TIMEOUT_USEC, &result); + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + 0, &result); if (rc) dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type); @@ -5028,14 +5032,14 @@ int goya_armcp_info_get(struct hl_device *hdev) return 0; } -static void goya_enable_clock_gating(struct hl_device *hdev) +static void goya_set_clock_gating(struct hl_device *hdev) { - + /* clock gating not supported in Goya */ } static void goya_disable_clock_gating(struct hl_device *hdev) { - + /* clock gating not supported in Goya */ } static bool goya_is_device_idle(struct hl_device *hdev, u32 *mask, @@ -5259,7 +5263,7 @@ static const struct hl_asic_funcs goya_funcs = { .mmu_invalidate_cache = goya_mmu_invalidate_cache, .mmu_invalidate_cache_range = goya_mmu_invalidate_cache_range, .send_heartbeat = goya_send_heartbeat, - .enable_clock_gating = goya_enable_clock_gating, + .set_clock_gating = goya_set_clock_gating, .disable_clock_gating = goya_disable_clock_gating, .debug_coresight = goya_debug_coresight, .is_device_idle = goya_is_device_idle, diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h index 1ecdcf8b763a..194d83352696 100644 --- a/drivers/misc/habanalabs/habanalabs.h +++ b/drivers/misc/habanalabs/habanalabs.h @@ -578,8 +578,9 @@ enum hl_pll_frequency { * @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with * ASID-VA-size mask. * @send_heartbeat: send is-alive packet to ArmCP and verify response. - * @enable_clock_gating: enable clock gating for reducing power consumption. - * @disable_clock_gating: disable clock for accessing registers on HBW. + * @set_clock_gating: enable/disable clock gating per engine according to + * clock gating mask in hdev + * @disable_clock_gating: disable clock gating completely * @debug_coresight: perform certain actions on Coresight for debugging. * @is_device_idle: return true if device is idle, false otherwise. * @soft_reset_late_init: perform certain actions needed after soft reset. @@ -587,7 +588,11 @@ enum hl_pll_frequency { * @hw_queues_unlock: release H/W queues lock. * @get_pci_id: retrieve PCI ID. * @get_eeprom_data: retrieve EEPROM data from F/W. - * @send_cpu_message: send buffer to ArmCP. + * @send_cpu_message: send message to F/W. If the message is timedout, the + * driver will eventually reset the device. The timeout can + * be determined by the calling function or it can be 0 and + * then the timeout is the default timeout for the specific + * ASIC * @get_hw_state: retrieve the H/W state * @pci_bars_map: Map PCI BARs. * @set_dram_bar_base: Set DRAM BAR to map specific device address. 
Returns @@ -680,7 +685,7 @@ struct hl_asic_funcs { int (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard, u32 asid, u64 va, u64 size); int (*send_heartbeat)(struct hl_device *hdev); - void (*enable_clock_gating)(struct hl_device *hdev); + void (*set_clock_gating)(struct hl_device *hdev); void (*disable_clock_gating)(struct hl_device *hdev); int (*debug_coresight)(struct hl_device *hdev, void *data); bool (*is_device_idle)(struct hl_device *hdev, u32 *mask, @@ -1398,6 +1403,9 @@ struct hl_device_idle_busy_ts { * @max_power: the max power of the device, as configured by the sysadmin. This * value is saved so in case of hard-reset, the driver will restore * this value and update the F/W after the re-initialization + * @clock_gating_mask: is clock gating enabled. bitmask that represents the + * different engines. See debugfs-driver-habanalabs for + * details. * @in_reset: is device in reset flow. * @curr_pll_profile: current PLL profile. * @cs_active_cnt: number of active command submissions on this device (active @@ -1425,7 +1433,6 @@ struct hl_device_idle_busy_ts { * @init_done: is the initialization of the device done. * @mmu_enable: is MMU enabled. * @mmu_huge_page_opt: is MMU huge pages optimization enabled. - * @clock_gating: is clock gating enabled. * @device_cpu_disabled: is the device CPU disabled (due to timeouts) * @dma_mask: the dma mask that was set for this device * @in_debug: is device under debug. This, together with fpriv_list, enforces @@ -1493,6 +1500,7 @@ struct hl_device { atomic64_t dram_used_mem; u64 timeout_jiffies; u64 max_power; + u64 clock_gating_mask; atomic_t in_reset; enum hl_pll_frequency curr_pll_profile; int cs_active_cnt; @@ -1514,7 +1522,6 @@ struct hl_device { u8 dram_default_page_mapping; u8 pmmu_huge_range; u8 init_done; - u8 clock_gating; u8 device_cpu_disabled; u8 dma_mask; u8 in_debug; diff --git a/drivers/misc/habanalabs/habanalabs_drv.c b/drivers/misc/habanalabs/habanalabs_drv.c index 8652c7e5d7f1..22716da9f85f 100644 --- a/drivers/misc/habanalabs/habanalabs_drv.c +++ b/drivers/misc/habanalabs/habanalabs_drv.c @@ -232,7 +232,7 @@ static void set_driver_behavior_per_device(struct hl_device *hdev) hdev->fw_loading = 1; hdev->cpu_queues_enable = 1; hdev->heartbeat = 1; - hdev->clock_gating = 1; + hdev->clock_gating_mask = ULONG_MAX; hdev->reset_pcilink = 0; hdev->axi_drain = 0; diff --git a/drivers/misc/habanalabs/hwmon.c b/drivers/misc/habanalabs/hwmon.c index 8c6cd77e6af6..b997336fa75f 100644 --- a/drivers/misc/habanalabs/hwmon.c +++ b/drivers/misc/habanalabs/hwmon.c @@ -10,7 +10,6 @@ #include <linux/pci.h> #include <linux/hwmon.h> -#define SENSORS_PKT_TIMEOUT 1000000 /* 1s */ #define HWMON_NR_SENSOR_TYPES (hwmon_pwm + 1) int hl_build_hwmon_channel_info(struct hl_device *hdev, @@ -323,7 +322,7 @@ int hl_get_temperature(struct hl_device *hdev, pkt.type = __cpu_to_le16(attr); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - SENSORS_PKT_TIMEOUT, value); + 0, value); if (rc) { dev_err(hdev->dev, @@ -350,7 +349,7 @@ int hl_set_temperature(struct hl_device *hdev, pkt.value = __cpu_to_le64(value); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - SENSORS_PKT_TIMEOUT, NULL); + 0, NULL); if (rc) dev_err(hdev->dev, @@ -374,7 +373,7 @@ int hl_get_voltage(struct hl_device *hdev, pkt.type = __cpu_to_le16(attr); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - SENSORS_PKT_TIMEOUT, value); + 0, value); if (rc) { dev_err(hdev->dev, @@ -400,7 +399,7 @@ int 
hl_get_current(struct hl_device *hdev, pkt.type = __cpu_to_le16(attr); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - SENSORS_PKT_TIMEOUT, value); + 0, value); if (rc) { dev_err(hdev->dev, @@ -426,7 +425,7 @@ int hl_get_fan_speed(struct hl_device *hdev, pkt.type = __cpu_to_le16(attr); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - SENSORS_PKT_TIMEOUT, value); + 0, value); if (rc) { dev_err(hdev->dev, @@ -452,7 +451,7 @@ int hl_get_pwm_info(struct hl_device *hdev, pkt.type = __cpu_to_le16(attr); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - SENSORS_PKT_TIMEOUT, value); + 0, value); if (rc) { dev_err(hdev->dev, @@ -479,7 +478,7 @@ void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr, pkt.value = cpu_to_le64(value); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - SENSORS_PKT_TIMEOUT, NULL); + 0, NULL); if (rc) dev_err(hdev->dev, @@ -502,7 +501,7 @@ int hl_set_voltage(struct hl_device *hdev, pkt.value = __cpu_to_le64(value); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - SENSORS_PKT_TIMEOUT, NULL); + 0, NULL); if (rc) dev_err(hdev->dev, @@ -527,7 +526,7 @@ int hl_set_current(struct hl_device *hdev, pkt.value = __cpu_to_le64(value); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - SENSORS_PKT_TIMEOUT, NULL); + 0, NULL); if (rc) dev_err(hdev->dev, diff --git a/drivers/misc/habanalabs/sysfs.c b/drivers/misc/habanalabs/sysfs.c index 5d78d5e1c782..70b6b1863c2e 100644 --- a/drivers/misc/habanalabs/sysfs.c +++ b/drivers/misc/habanalabs/sysfs.c @@ -9,9 +9,6 @@ #include <linux/pci.h> -#define SET_CLK_PKT_TIMEOUT 1000000 /* 1s */ -#define SET_PWR_PKT_TIMEOUT 1000000 /* 1s */ - long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr) { struct armcp_packet pkt; @@ -29,7 +26,7 @@ long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr) pkt.pll_index = cpu_to_le32(pll_index); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - SET_CLK_PKT_TIMEOUT, &result); + 0, &result); if (rc) { dev_err(hdev->dev, @@ -54,7 +51,7 @@ void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq) pkt.value = cpu_to_le64(freq); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - SET_CLK_PKT_TIMEOUT, NULL); + 0, NULL); if (rc) dev_err(hdev->dev, @@ -74,7 +71,7 @@ u64 hl_get_max_power(struct hl_device *hdev) ARMCP_PKT_CTL_OPCODE_SHIFT); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - SET_PWR_PKT_TIMEOUT, &result); + 0, &result); if (rc) { dev_err(hdev->dev, "Failed to get max power, error %d\n", rc); @@ -96,7 +93,7 @@ void hl_set_max_power(struct hl_device *hdev, u64 value) pkt.value = cpu_to_le64(value); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - SET_PWR_PKT_TIMEOUT, NULL); + 0, NULL); if (rc) dev_err(hdev->dev, "Failed to set max power, error %d\n", rc); diff --git a/drivers/mmc/host/sdhci-of-aspeed.c b/drivers/mmc/host/sdhci-of-aspeed.c index 56912e30c47e..a1bcc0f4ba9e 100644 --- a/drivers/mmc/host/sdhci-of-aspeed.c +++ b/drivers/mmc/host/sdhci-of-aspeed.c @@ -68,7 +68,7 @@ static void aspeed_sdhci_set_clock(struct sdhci_host *host, unsigned int clock) if (WARN_ON(clock > host->max_clk)) clock = host->max_clk; - for (div = 1; div < 256; div *= 2) { + for (div = 2; div < 256; div *= 2) { if ((parent / div) <= clock) break; } diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c index 
3dd46cd55114..88e7900853db 100644 --- a/drivers/net/bareudp.c +++ b/drivers/net/bareudp.c @@ -407,19 +407,34 @@ free_dst: return err; } +static bool bareudp_proto_valid(struct bareudp_dev *bareudp, __be16 proto) +{ + if (bareudp->ethertype == proto) + return true; + + if (!bareudp->multi_proto_mode) + return false; + + if (bareudp->ethertype == htons(ETH_P_MPLS_UC) && + proto == htons(ETH_P_MPLS_MC)) + return true; + + if (bareudp->ethertype == htons(ETH_P_IP) && + proto == htons(ETH_P_IPV6)) + return true; + + return false; +} + static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev) { struct bareudp_dev *bareudp = netdev_priv(dev); struct ip_tunnel_info *info = NULL; int err; - if (skb->protocol != bareudp->ethertype) { - if (!bareudp->multi_proto_mode || - (skb->protocol != htons(ETH_P_MPLS_MC) && - skb->protocol != htons(ETH_P_IPV6))) { - err = -EINVAL; - goto tx_error; - } + if (!bareudp_proto_valid(bareudp, skb->protocol)) { + err = -EINVAL; + goto tx_error; } info = skb_tunnel_info(skb); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 004919aea5fb..f88cb097b022 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -5053,15 +5053,19 @@ int bond_create(struct net *net, const char *name) bond_dev->rtnl_link_ops = &bond_link_ops; res = register_netdevice(bond_dev); + if (res < 0) { + free_netdev(bond_dev); + rtnl_unlock(); + + return res; + } netif_carrier_off(bond_dev); bond_work_init_all(bond); rtnl_unlock(); - if (res < 0) - free_netdev(bond_dev); - return res; + return 0; } static int __net_init bond_net_init(struct net *net) diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c index b43b51646b11..f0f9138e967f 100644 --- a/drivers/net/bonding/bond_netlink.c +++ b/drivers/net/bonding/bond_netlink.c @@ -456,11 +456,10 @@ static int bond_newlink(struct net *src_net, struct net_device *bond_dev, return err; err = register_netdevice(bond_dev); - - netif_carrier_off(bond_dev); if (!err) { struct bonding *bond = netdev_priv(bond_dev); + netif_carrier_off(bond_dev); bond_work_init_all(bond); } diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index 8d15c3016024..4a9239b2c2e4 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -974,23 +974,6 @@ static void ksz9477_port_mirror_del(struct dsa_switch *ds, int port, PORT_MIRROR_SNIFFER, false); } -static void ksz9477_phy_setup(struct ksz_device *dev, int port, - struct phy_device *phy) -{ - /* Only apply to port with PHY. */ - if (port >= dev->phy_port_cnt) - return; - - /* The MAC actually cannot run in 1000 half-duplex mode. */ - phy_remove_link_mode(phy, - ETHTOOL_LINK_MODE_1000baseT_Half_BIT); - - /* PHY does not support gigabit. 
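Returning briefly to the aspeed_sdhci_set_clock() hunk above: the divider search now starts at 2, so a 1:1 divider is never selected. A worked sketch of that search with hypothetical rates, illustrative only and not the driver code itself:

/*
 * Pick the smallest power-of-two divider, starting at 2, that brings the
 * parent rate down to at most the requested rate.  For example, a 200 MHz
 * parent and a 50 MHz request give div = 4; a request for the full
 * 200 MHz now yields div = 2 (100 MHz) where the old loop returned div = 1.
 */
static unsigned int example_aspeed_divider(unsigned long parent, unsigned int clock)
{
	unsigned int div;

	for (div = 2; div < 256; div *= 2) {
		if ((parent / div) <= clock)
			break;
	}

	return div;
}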
*/ - if (!(dev->features & GBIT_SUPPORT)) - phy_remove_link_mode(phy, - ETHTOOL_LINK_MODE_1000baseT_Full_BIT); -} - static bool ksz9477_get_gbit(struct ksz_device *dev, u8 data) { bool gbit; @@ -1603,7 +1586,6 @@ static const struct ksz_dev_ops ksz9477_dev_ops = { .get_port_addr = ksz9477_get_port_addr, .cfg_port_member = ksz9477_cfg_port_member, .flush_dyn_mac_table = ksz9477_flush_dyn_mac_table, - .phy_setup = ksz9477_phy_setup, .port_setup = ksz9477_port_setup, .r_mib_cnt = ksz9477_r_mib_cnt, .r_mib_pkt = ksz9477_r_mib_pkt, @@ -1617,7 +1599,29 @@ static const struct ksz_dev_ops ksz9477_dev_ops = { int ksz9477_switch_register(struct ksz_device *dev) { - return ksz_switch_register(dev, &ksz9477_dev_ops); + int ret, i; + struct phy_device *phydev; + + ret = ksz_switch_register(dev, &ksz9477_dev_ops); + if (ret) + return ret; + + for (i = 0; i < dev->phy_port_cnt; ++i) { + if (!dsa_is_user_port(dev->ds, i)) + continue; + + phydev = dsa_to_port(dev->ds, i)->slave->phydev; + + /* The MAC actually cannot run in 1000 half-duplex mode. */ + phy_remove_link_mode(phydev, + ETHTOOL_LINK_MODE_1000baseT_Half_BIT); + + /* PHY does not support gigabit. */ + if (!(dev->features & GBIT_SUPPORT)) + phy_remove_link_mode(phydev, + ETHTOOL_LINK_MODE_1000baseT_Full_BIT); + } + return ret; } EXPORT_SYMBOL(ksz9477_switch_register); diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index fd1d6676ae4f..7b6c0dce7536 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -358,8 +358,6 @@ int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy) /* setup slave port */ dev->dev_ops->port_setup(dev, port, false); - if (dev->dev_ops->phy_setup) - dev->dev_ops->phy_setup(dev, port, phy); /* port_stp_state_set() will be called after to enable the port so * there is no need to do anything. diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index f2c9bb68fd33..7d11dd32ec0d 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -119,8 +119,6 @@ struct ksz_dev_ops { u32 (*get_port_addr)(int port, int offset); void (*cfg_port_member)(struct ksz_device *dev, int port, u8 member); void (*flush_dyn_mac_table)(struct ksz_device *dev, int port); - void (*phy_setup)(struct ksz_device *dev, int port, - struct phy_device *phy); void (*port_cleanup)(struct ksz_device *dev, int port); void (*port_setup)(struct ksz_device *dev, int port, bool cpu_port); void (*r_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 *val); diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 7627ea61e0ea..fee16c947c2e 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -664,8 +664,11 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port, const struct phylink_link_state *state) { struct mv88e6xxx_chip *chip = ds->priv; + struct mv88e6xxx_port *p; int err; + p = &chip->ports[port]; + /* FIXME: is this the correct test? If we're in fixed mode on an * internal port, why should we process this any different from * PHY mode? On the other hand, the port may be automedia between @@ -675,10 +678,14 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port, return; mv88e6xxx_reg_lock(chip); - /* FIXME: should we force the link down here - but if we do, how - * do we restore the link force/unforce state? The driver layering - * gets in the way. 
+ /* In inband mode, the link may come up at any time while the link + * is not forced down. Force the link down while we reconfigure the + * interface mode. */ + if (mode == MLO_AN_INBAND && p->interface != state->interface && + chip->info->ops->port_set_link) + chip->info->ops->port_set_link(chip, port, LINK_FORCED_DOWN); + err = mv88e6xxx_port_config_interface(chip, port, state->interface); if (err && err != -EOPNOTSUPP) goto err_unlock; @@ -691,6 +698,15 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port, if (err > 0) err = 0; + /* Undo the forced down state above after completing configuration + * irrespective of its state on entry, which allows the link to come up. + */ + if (mode == MLO_AN_INBAND && p->interface != state->interface && + chip->info->ops->port_set_link) + chip->info->ops->port_set_link(chip, port, LINK_UNFORCED); + + p->interface = state->interface; + err_unlock: mv88e6xxx_reg_unlock(chip); diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index e5430cf2ad71..6476524e8239 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -232,6 +232,7 @@ struct mv88e6xxx_port { u64 atu_full_violation; u64 vtu_member_violation; u64 vtu_miss_violation; + phy_interface_t interface; u8 cmode; bool mirror_ingress; bool mirror_egress; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h index ed5b465bc664..992fedbe4ce3 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h @@ -64,6 +64,7 @@ struct aq_hw_caps_s { u8 rx_rings; bool flow_control; bool is_64_dma; + u32 quirks; u32 priv_data_len; }; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 4435c6374f7e..7c7bf6bf163f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -415,6 +415,15 @@ int aq_nic_init(struct aq_nic_s *self) self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_TP) { self->aq_hw->phy_id = HW_ATL_PHY_ID_MAX; err = aq_phy_init(self->aq_hw); + + /* Disable the PTP on NICs where it's known to cause datapath + * problems. + * Ideally this should have been done by PHY provisioning, but + * many units have been shipped with enabled PTP block already. + */ + if (self->aq_nic_cfg.aq_hw_caps->quirks & AQ_NIC_QUIRK_BAD_PTP) + if (self->aq_hw->phy_id != HW_ATL_PHY_ID_MAX) + aq_phy_disable_ptp(self->aq_hw); } for (i = 0U; i < self->aq_vecs; i++) { diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h index 2ab003065e62..439ce9692dac 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h @@ -81,6 +81,8 @@ struct aq_nic_cfg_s { #define AQ_NIC_FLAG_ERR_UNPLUG 0x40000000U #define AQ_NIC_FLAG_ERR_HW 0x80000000U +#define AQ_NIC_QUIRK_BAD_PTP BIT(0) + #define AQ_NIC_WOL_MODES (WAKE_MAGIC |\ WAKE_PHY) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_phy.c b/drivers/net/ethernet/aquantia/atlantic/aq_phy.c index 51ae921e3e1f..949ac2351701 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_phy.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_phy.c @@ -1,10 +1,14 @@ // SPDX-License-Identifier: GPL-2.0-only -/* aQuantia Corporation Network Driver - * Copyright (C) 2018-2019 aQuantia Corporation. 
All rights reserved +/* Atlantic Network Driver + * + * Copyright (C) 2018-2019 aQuantia Corporation + * Copyright (C) 2019-2020 Marvell International Ltd. */ #include "aq_phy.h" +#define HW_ATL_PTP_DISABLE_MSK BIT(10) + bool aq_mdio_busy_wait(struct aq_hw_s *aq_hw) { int err = 0; @@ -145,3 +149,24 @@ bool aq_phy_init(struct aq_hw_s *aq_hw) return true; } + +void aq_phy_disable_ptp(struct aq_hw_s *aq_hw) +{ + static const u16 ptp_registers[] = { + 0x031e, + 0x031d, + 0x031c, + 0x031b, + }; + u16 val; + int i; + + for (i = 0; i < ARRAY_SIZE(ptp_registers); i++) { + val = aq_phy_read_reg(aq_hw, MDIO_MMD_VEND1, + ptp_registers[i]); + + aq_phy_write_reg(aq_hw, MDIO_MMD_VEND1, + ptp_registers[i], + val & ~HW_ATL_PTP_DISABLE_MSK); + } +} diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_phy.h b/drivers/net/ethernet/aquantia/atlantic/aq_phy.h index 84b72ad04a4a..86cc1ee836e2 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_phy.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_phy.h @@ -1,6 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* aQuantia Corporation Network Driver - * Copyright (C) 2018-2019 aQuantia Corporation. All rights reserved +/* Atlantic Network Driver + * + * Copyright (C) 2018-2019 aQuantia Corporation + * Copyright (C) 2019-2020 Marvell International Ltd. */ #ifndef AQ_PHY_H @@ -29,4 +31,6 @@ bool aq_phy_init_phy_id(struct aq_hw_s *aq_hw); bool aq_phy_init(struct aq_hw_s *aq_hw); +void aq_phy_disable_ptp(struct aq_hw_s *aq_hw); + #endif /* AQ_PHY_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index 14d79f70cad7..2125bc20ab6a 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -93,6 +93,25 @@ const struct aq_hw_caps_s hw_atl_b0_caps_aqc109 = { AQ_NIC_RATE_100M, }; +const struct aq_hw_caps_s hw_atl_b0_caps_aqc111 = { + DEFAULT_B0_BOARD_BASIC_CAPABILITIES, + .media_type = AQ_HW_MEDIA_TYPE_TP, + .link_speed_msk = AQ_NIC_RATE_5G | + AQ_NIC_RATE_2G5 | + AQ_NIC_RATE_1G | + AQ_NIC_RATE_100M, + .quirks = AQ_NIC_QUIRK_BAD_PTP, +}; + +const struct aq_hw_caps_s hw_atl_b0_caps_aqc112 = { + DEFAULT_B0_BOARD_BASIC_CAPABILITIES, + .media_type = AQ_HW_MEDIA_TYPE_TP, + .link_speed_msk = AQ_NIC_RATE_2G5 | + AQ_NIC_RATE_1G | + AQ_NIC_RATE_100M, + .quirks = AQ_NIC_QUIRK_BAD_PTP, +}; + static int hw_atl_b0_hw_reset(struct aq_hw_s *self) { int err = 0; @@ -354,8 +373,13 @@ static int hw_atl_b0_hw_init_tx_tc_rate_limit(struct aq_hw_s *self) /* WSP, if min_rate is set for at least one TC. * RR otherwise. + * + * NB! MAC FW sets arb mode itself if PTP is enabled. We shouldn't + * overwrite it here in that case. */ - hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, min_rate_msk ? 1U : 0U); + if (!nic_cfg->is_ptp) + hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, min_rate_msk ? 1U : 0U); + /* Data TC Arbiter takes precedence over Descriptor TC Arbiter, * leave Descriptor TC Arbiter as RR. 
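The rate-limit hunk above boils down to one decision: the data TC arbiter is set to WSP (1) when any traffic class has a minimum rate, to RR (0) otherwise, and is left untouched while PTP is active because the MAC firmware programs it itself. A distilled sketch of that decision; the wrapper name is invented, only the hw_atl_tps_tx_pkt_shed_data_arb_mode_set() call is real:

/* Illustrative only: arbiter-mode selection as implemented by the hunk above. */
static void example_pick_data_arb_mode(struct aq_hw_s *self, u32 min_rate_msk,
				       bool is_ptp)
{
	if (is_ptp)
		return;		/* firmware owns the arbiter while PTP is enabled */

	/* WSP if at least one TC has a min_rate, RR otherwise */
	hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, min_rate_msk ? 1U : 0U);
}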
*/ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h index 30f468f2084d..16091af17980 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h @@ -18,17 +18,15 @@ extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc100; extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc107; extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc108; extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc109; - -#define hw_atl_b0_caps_aqc111 hw_atl_b0_caps_aqc108 -#define hw_atl_b0_caps_aqc112 hw_atl_b0_caps_aqc109 +extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc111; +extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc112; #define hw_atl_b0_caps_aqc100s hw_atl_b0_caps_aqc100 #define hw_atl_b0_caps_aqc107s hw_atl_b0_caps_aqc107 #define hw_atl_b0_caps_aqc108s hw_atl_b0_caps_aqc108 #define hw_atl_b0_caps_aqc109s hw_atl_b0_caps_aqc109 - -#define hw_atl_b0_caps_aqc111s hw_atl_b0_caps_aqc108 -#define hw_atl_b0_caps_aqc112s hw_atl_b0_caps_aqc109 +#define hw_atl_b0_caps_aqc111s hw_atl_b0_caps_aqc111 +#define hw_atl_b0_caps_aqc112s hw_atl_b0_caps_aqc112 extern const struct aq_hw_ops hw_atl_ops_b0; diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c index 112edbd30823..38cce66ef212 100644 --- a/drivers/net/ethernet/atheros/ag71xx.c +++ b/drivers/net/ethernet/atheros/ag71xx.c @@ -556,7 +556,8 @@ static int ag71xx_mdio_probe(struct ag71xx *ag) ag->mdio_reset = of_reset_control_get_exclusive(np, "mdio"); if (IS_ERR(ag->mdio_reset)) { netif_err(ag, probe, ndev, "Failed to get reset mdio.\n"); - return PTR_ERR(ag->mdio_reset); + err = PTR_ERR(ag->mdio_reset); + goto mdio_err_put_clk; } mii_bus->name = "ag71xx_mdio"; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 6a884df44612..7463a1847ceb 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -3418,7 +3418,7 @@ void bnxt_set_tpa_flags(struct bnxt *bp) */ void bnxt_set_ring_params(struct bnxt *bp) { - u32 ring_size, rx_size, rx_space; + u32 ring_size, rx_size, rx_space, max_rx_cmpl; u32 agg_factor = 0, agg_ring_size = 0; /* 8 for CRC and VLAN */ @@ -3474,7 +3474,15 @@ void bnxt_set_ring_params(struct bnxt *bp) bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT); bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1; - ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size; + max_rx_cmpl = bp->rx_ring_size; + /* MAX TPA needs to be added because TPA_START completions are + * immediately recycled, so the TPA completions are not bound by + * the RX ring size. 
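The remainder of this bnxt hunk (just below) adds bp->max_tpa to the budget when TPA is enabled and then sizes the completion ring as max_rx_cmpl * 2 + agg_ring_size + tx_ring_size; the factor of two presumably reflects that a 32-byte RX/TPA completion occupies two 16-byte slots, per the comment in the hunk. A worked example with purely hypothetical ring sizes:

/* Hypothetical numbers, for illustration only (TPA enabled). */
u32 rx_ring_size  = 2048, max_tpa = 256;
u32 agg_ring_size = 4096, tx_ring_size = 2048;

u32 max_rx_cmpl = rx_ring_size + max_tpa;			/* 2304 */
u32 ring_size   = max_rx_cmpl * 2 + agg_ring_size + tx_ring_size;
								/* 10752 entries */
/* The completion ring is sized for the extra in-flight TPA completions
 * instead of being bounded by the RX ring size alone.
 */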
+ */ + if (bp->flags & BNXT_FLAG_TPA) + max_rx_cmpl += bp->max_tpa; + /* RX and TPA completions are 32-byte, all others are 16-byte */ + ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size; bp->cp_ring_size = ring_size; bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT); @@ -10385,15 +10393,15 @@ static void bnxt_sp_task(struct work_struct *work) &bp->sp_event)) bnxt_hwrm_phy_qcaps(bp); - if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, - &bp->sp_event)) - bnxt_init_ethtool_link_settings(bp); - rc = bnxt_update_link(bp, true); - mutex_unlock(&bp->link_lock); if (rc) netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", rc); + + if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, + &bp->sp_event)) + bnxt_init_ethtool_link_settings(bp); + mutex_unlock(&bp->link_lock); } if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) { int rc; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 6b88143af5ea..b4aa56dc4f9f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -1765,8 +1765,11 @@ static int bnxt_set_pauseparam(struct net_device *dev, if (epause->tx_pause) link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX; - if (netif_running(dev)) + if (netif_running(dev)) { + mutex_lock(&bp->link_lock); rc = bnxt_hwrm_set_pause(bp); + mutex_unlock(&bp->link_lock); + } return rc; } diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index af924a8b885f..e471b14fc6e9 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -543,14 +543,14 @@ static int bcmgenet_hfb_validate_mask(void *mask, size_t size) #define VALIDATE_MASK(x) \ bcmgenet_hfb_validate_mask(&(x), sizeof(x)) -static int bcmgenet_hfb_insert_data(u32 *f, int offset, - void *val, void *mask, size_t size) +static int bcmgenet_hfb_insert_data(struct bcmgenet_priv *priv, u32 f_index, + u32 offset, void *val, void *mask, + size_t size) { - int index; - u32 tmp; + u32 index, tmp; - index = offset / 2; - tmp = f[index]; + index = f_index * priv->hw_params->hfb_filter_size + offset / 2; + tmp = bcmgenet_hfb_readl(priv, index * sizeof(u32)); while (size--) { if (offset++ & 1) { @@ -567,9 +567,10 @@ static int bcmgenet_hfb_insert_data(u32 *f, int offset, tmp |= 0x10000; break; } - f[index++] = tmp; + bcmgenet_hfb_writel(priv, tmp, index++ * sizeof(u32)); if (size) - tmp = f[index]; + tmp = bcmgenet_hfb_readl(priv, + index * sizeof(u32)); } else { tmp &= ~0xCFF00; tmp |= (*(unsigned char *)val++) << 8; @@ -585,44 +586,26 @@ static int bcmgenet_hfb_insert_data(u32 *f, int offset, break; } if (!size) - f[index] = tmp; + bcmgenet_hfb_writel(priv, tmp, index * sizeof(u32)); } } return 0; } -static void bcmgenet_hfb_set_filter(struct bcmgenet_priv *priv, u32 *f_data, - u32 f_length, u32 rx_queue, int f_index) -{ - u32 base = f_index * priv->hw_params->hfb_filter_size; - int i; - - for (i = 0; i < f_length; i++) - bcmgenet_hfb_writel(priv, f_data[i], (base + i) * sizeof(u32)); - - bcmgenet_hfb_set_filter_length(priv, f_index, 2 * f_length); - bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f_index, rx_queue); -} - -static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv, - struct bcmgenet_rxnfc_rule *rule) +static void bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv, + struct bcmgenet_rxnfc_rule *rule) { struct ethtool_rx_flow_spec *fs = 
&rule->fs; - int err = 0, offset = 0, f_length = 0; + u32 offset = 0, f_length = 0, f; u8 val_8, mask_8; __be16 val_16; u16 mask_16; size_t size; - u32 *f_data; - - f_data = kcalloc(priv->hw_params->hfb_filter_size, sizeof(u32), - GFP_KERNEL); - if (!f_data) - return -ENOMEM; + f = fs->location; if (fs->flow_type & FLOW_MAC_EXT) { - bcmgenet_hfb_insert_data(f_data, 0, + bcmgenet_hfb_insert_data(priv, f, 0, &fs->h_ext.h_dest, &fs->m_ext.h_dest, sizeof(fs->h_ext.h_dest)); } @@ -630,11 +613,11 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv, if (fs->flow_type & FLOW_EXT) { if (fs->m_ext.vlan_etype || fs->m_ext.vlan_tci) { - bcmgenet_hfb_insert_data(f_data, 12, + bcmgenet_hfb_insert_data(priv, f, 12, &fs->h_ext.vlan_etype, &fs->m_ext.vlan_etype, sizeof(fs->h_ext.vlan_etype)); - bcmgenet_hfb_insert_data(f_data, 14, + bcmgenet_hfb_insert_data(priv, f, 14, &fs->h_ext.vlan_tci, &fs->m_ext.vlan_tci, sizeof(fs->h_ext.vlan_tci)); @@ -646,15 +629,15 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv, switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { case ETHER_FLOW: f_length += DIV_ROUND_UP(ETH_HLEN, 2); - bcmgenet_hfb_insert_data(f_data, 0, + bcmgenet_hfb_insert_data(priv, f, 0, &fs->h_u.ether_spec.h_dest, &fs->m_u.ether_spec.h_dest, sizeof(fs->h_u.ether_spec.h_dest)); - bcmgenet_hfb_insert_data(f_data, ETH_ALEN, + bcmgenet_hfb_insert_data(priv, f, ETH_ALEN, &fs->h_u.ether_spec.h_source, &fs->m_u.ether_spec.h_source, sizeof(fs->h_u.ether_spec.h_source)); - bcmgenet_hfb_insert_data(f_data, (2 * ETH_ALEN) + offset, + bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset, &fs->h_u.ether_spec.h_proto, &fs->m_u.ether_spec.h_proto, sizeof(fs->h_u.ether_spec.h_proto)); @@ -664,21 +647,21 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv, /* Specify IP Ether Type */ val_16 = htons(ETH_P_IP); mask_16 = 0xFFFF; - bcmgenet_hfb_insert_data(f_data, (2 * ETH_ALEN) + offset, + bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset, &val_16, &mask_16, sizeof(val_16)); - bcmgenet_hfb_insert_data(f_data, 15 + offset, + bcmgenet_hfb_insert_data(priv, f, 15 + offset, &fs->h_u.usr_ip4_spec.tos, &fs->m_u.usr_ip4_spec.tos, sizeof(fs->h_u.usr_ip4_spec.tos)); - bcmgenet_hfb_insert_data(f_data, 23 + offset, + bcmgenet_hfb_insert_data(priv, f, 23 + offset, &fs->h_u.usr_ip4_spec.proto, &fs->m_u.usr_ip4_spec.proto, sizeof(fs->h_u.usr_ip4_spec.proto)); - bcmgenet_hfb_insert_data(f_data, 26 + offset, + bcmgenet_hfb_insert_data(priv, f, 26 + offset, &fs->h_u.usr_ip4_spec.ip4src, &fs->m_u.usr_ip4_spec.ip4src, sizeof(fs->h_u.usr_ip4_spec.ip4src)); - bcmgenet_hfb_insert_data(f_data, 30 + offset, + bcmgenet_hfb_insert_data(priv, f, 30 + offset, &fs->h_u.usr_ip4_spec.ip4dst, &fs->m_u.usr_ip4_spec.ip4dst, sizeof(fs->h_u.usr_ip4_spec.ip4dst)); @@ -688,11 +671,11 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv, /* Only supports 20 byte IPv4 header */ val_8 = 0x45; mask_8 = 0xFF; - bcmgenet_hfb_insert_data(f_data, ETH_HLEN + offset, + bcmgenet_hfb_insert_data(priv, f, ETH_HLEN + offset, &val_8, &mask_8, sizeof(val_8)); size = sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes); - bcmgenet_hfb_insert_data(f_data, + bcmgenet_hfb_insert_data(priv, f, ETH_HLEN + 20 + offset, &fs->h_u.usr_ip4_spec.l4_4_bytes, &fs->m_u.usr_ip4_spec.l4_4_bytes, @@ -701,34 +684,42 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv, break; } + bcmgenet_hfb_set_filter_length(priv, f, 2 * f_length); if (!fs->ring_cookie || fs->ring_cookie == 
RX_CLS_FLOW_WAKE) { /* Ring 0 flows can be handled by the default Descriptor Ring * We'll map them to ring 0, but don't enable the filter */ - bcmgenet_hfb_set_filter(priv, f_data, f_length, 0, - fs->location); + bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, 0); rule->state = BCMGENET_RXNFC_STATE_DISABLED; } else { /* Other Rx rings are direct mapped here */ - bcmgenet_hfb_set_filter(priv, f_data, f_length, - fs->ring_cookie, fs->location); - bcmgenet_hfb_enable_filter(priv, fs->location); + bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, + fs->ring_cookie); + bcmgenet_hfb_enable_filter(priv, f); rule->state = BCMGENET_RXNFC_STATE_ENABLED; } - - kfree(f_data); - - return err; } /* bcmgenet_hfb_clear * * Clear Hardware Filter Block and disable all filtering. */ +static void bcmgenet_hfb_clear_filter(struct bcmgenet_priv *priv, u32 f_index) +{ + u32 base, i; + + base = f_index * priv->hw_params->hfb_filter_size; + for (i = 0; i < priv->hw_params->hfb_filter_size; i++) + bcmgenet_hfb_writel(priv, 0x0, (base + i) * sizeof(u32)); +} + static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv) { u32 i; + if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) + return; + bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL); bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS); bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4); @@ -740,19 +731,18 @@ static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv) bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_LEN_V3PLUS + i * sizeof(u32)); - for (i = 0; i < priv->hw_params->hfb_filter_cnt * - priv->hw_params->hfb_filter_size; i++) - bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32)); + for (i = 0; i < priv->hw_params->hfb_filter_cnt; i++) + bcmgenet_hfb_clear_filter(priv, i); } static void bcmgenet_hfb_init(struct bcmgenet_priv *priv) { int i; + INIT_LIST_HEAD(&priv->rxnfc_list); if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) return; - INIT_LIST_HEAD(&priv->rxnfc_list); for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) { INIT_LIST_HEAD(&priv->rxnfc_rules[i].list); priv->rxnfc_rules[i].state = BCMGENET_RXNFC_STATE_UNUSED; @@ -1437,18 +1427,15 @@ static int bcmgenet_insert_flow(struct net_device *dev, loc_rule = &priv->rxnfc_rules[cmd->fs.location]; if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED) bcmgenet_hfb_disable_filter(priv, cmd->fs.location); - if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) + if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) { list_del(&loc_rule->list); + bcmgenet_hfb_clear_filter(priv, cmd->fs.location); + } loc_rule->state = BCMGENET_RXNFC_STATE_UNUSED; memcpy(&loc_rule->fs, &cmd->fs, sizeof(struct ethtool_rx_flow_spec)); - err = bcmgenet_hfb_create_rxnfc_filter(priv, loc_rule); - if (err) { - netdev_err(dev, "rxnfc: Could not install rule (%d)\n", - err); - return err; - } + bcmgenet_hfb_create_rxnfc_filter(priv, loc_rule); list_add_tail(&loc_rule->list, &priv->rxnfc_list); @@ -1473,8 +1460,10 @@ static int bcmgenet_delete_flow(struct net_device *dev, if (rule->state == BCMGENET_RXNFC_STATE_ENABLED) bcmgenet_hfb_disable_filter(priv, cmd->fs.location); - if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) + if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) { list_del(&rule->list); + bcmgenet_hfb_clear_filter(priv, cmd->fs.location); + } rule->state = BCMGENET_RXNFC_STATE_UNUSED; memset(&rule->fs, 0, sizeof(struct ethtool_rx_flow_spec)); @@ -3999,7 +3988,7 @@ static int bcmgenet_probe(struct platform_device *pdev) if (err) err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) - goto err; + goto err_clk_disable; /* Mii 
wait queue */ init_waitqueue_head(&priv->wq); @@ -4011,14 +4000,14 @@ static int bcmgenet_probe(struct platform_device *pdev) if (IS_ERR(priv->clk_wol)) { dev_dbg(&priv->pdev->dev, "failed to get enet-wol clock\n"); err = PTR_ERR(priv->clk_wol); - goto err; + goto err_clk_disable; } priv->clk_eee = devm_clk_get_optional(&priv->pdev->dev, "enet-eee"); if (IS_ERR(priv->clk_eee)) { dev_dbg(&priv->pdev->dev, "failed to get enet-eee clock\n"); err = PTR_ERR(priv->clk_eee); - goto err; + goto err_clk_disable; } /* If this is an internal GPHY, power it on now, before UniMAC is @@ -4129,8 +4118,9 @@ static int bcmgenet_resume(struct device *d) { struct net_device *dev = dev_get_drvdata(d); struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_rxnfc_rule *rule; unsigned long dma_ctrl; - u32 offset, reg; + u32 reg; int ret; if (!netif_running(dev)) @@ -4161,10 +4151,11 @@ static int bcmgenet_resume(struct device *d) bcmgenet_set_hw_addr(priv, dev->dev_addr); - offset = HFB_FLT_ENABLE_V3PLUS; - bcmgenet_hfb_reg_writel(priv, priv->hfb_en[1], offset); - bcmgenet_hfb_reg_writel(priv, priv->hfb_en[2], offset + sizeof(u32)); - bcmgenet_hfb_reg_writel(priv, priv->hfb_en[0], HFB_CTRL); + /* Restore hardware filters */ + bcmgenet_hfb_clear(priv); + list_for_each_entry(rule, &priv->rxnfc_list, list) + if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) + bcmgenet_hfb_create_rxnfc_filter(priv, rule); if (priv->internal_phy) { reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); @@ -4208,7 +4199,6 @@ static int bcmgenet_suspend(struct device *d) { struct net_device *dev = dev_get_drvdata(d); struct bcmgenet_priv *priv = netdev_priv(dev); - u32 offset; if (!netif_running(dev)) return 0; @@ -4220,11 +4210,7 @@ static int bcmgenet_suspend(struct device *d) if (!device_may_wakeup(d)) phy_suspend(dev->phydev); - /* Preserve filter state and disable filtering */ - priv->hfb_en[0] = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); - offset = HFB_FLT_ENABLE_V3PLUS; - priv->hfb_en[1] = bcmgenet_hfb_reg_readl(priv, offset); - priv->hfb_en[2] = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32)); + /* Disable filtering */ bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL); return 0; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index a12cb59298f4..f6ca01da141d 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -696,7 +696,6 @@ struct bcmgenet_priv { u32 wolopts; u8 sopass[SOPASS_MAX]; bool wol_active; - u32 hfb_en[3]; struct bcmgenet_mib_counters mib; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c index 4ea6a26b04f7..1c86eddb1b51 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c @@ -217,20 +217,28 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv, priv->wol_active = 0; clk_disable_unprepare(priv->clk_wol); + priv->crc_fwd_en = 0; /* Disable Magic Packet Detection */ - reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); - reg &= ~(MPD_EN | MPD_PW_EN); - bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); + if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) { + reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); + if (!(reg & MPD_EN)) + return; /* already reset so skip the rest */ + reg &= ~(MPD_EN | MPD_PW_EN); + bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); + } /* Disable WAKE_FILTER Detection */ - reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); - reg &= ~(RBUF_HFB_EN | 
RBUF_ACPI_EN); - bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); + if (priv->wolopts & WAKE_FILTER) { + reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); + if (!(reg & RBUF_ACPI_EN)) + return; /* already reset so skip the rest */ + reg &= ~(RBUF_HFB_EN | RBUF_ACPI_EN); + bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); + } /* Disable CRC Forward */ reg = bcmgenet_umac_readl(priv, UMAC_CMD); reg &= ~CMD_CRC_FWD; bcmgenet_umac_writel(priv, reg, UMAC_CMD); - priv->crc_fwd_en = 0; } diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index f1f0976e7669..2213e6ab8151 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -3736,7 +3736,7 @@ static int macb_init(struct platform_device *pdev) if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) { val = 0; - if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII) + if (phy_interface_mode_is_rgmii(bp->phy_interface)) val = GEM_BIT(RGMII); else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII && (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 32a45dc51ed7..92eee66cbc84 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -2938,6 +2938,7 @@ static inline int uld_send(struct adapter *adap, struct sk_buff *skb, txq_info = adap->sge.uld_txq_info[tx_uld_type]; if (unlikely(!txq_info)) { WARN_ON(true); + kfree_skb(skb); return NET_XMIT_DROP; } diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 8d13ea370db1..66e67b24a887 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -2446,6 +2446,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) port->reset = devm_reset_control_get_exclusive(dev, NULL); if (IS_ERR(port->reset)) { dev_err(dev, "no reset\n"); + clk_disable_unprepare(port->pclk); return PTR_ERR(port->reset); } reset_control_reset(port->reset); @@ -2501,8 +2502,10 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) IRQF_SHARED, port_names[port->id], port); - if (ret) + if (ret) { + clk_disable_unprepare(port->pclk); return ret; + } ret = register_netdev(netdev); if (!ret) { diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 2972244e6eb0..43570f4911ea 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -2938,7 +2938,7 @@ static int dpaa_eth_probe(struct platform_device *pdev) DMA_BIT_MASK(40)); if (err) { netdev_err(net_dev, "dma_coerce_mask_and_coherent() failed\n"); - return err; + goto free_netdev; } /* If fsl_fm_max_frm is set to a higher value than the all-common 1500, diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index f150cd454fa4..0998ceb1a26e 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -3632,7 +3632,7 @@ static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv) dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent); dpmac_dev = fsl_mc_get_endpoint(dpni_dev); - if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) + if (IS_ERR_OR_NULL(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) return 0; if (dpaa2_mac_is_type_fixed(dpmac_dev, priv->mc_io)) diff --git 
a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index 4fac57dbb3c8..7a9675bd36e8 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -906,6 +906,7 @@ static int enetc_pf_probe(struct pci_dev *pdev, return 0; err_reg_netdev: + enetc_mdio_remove(pf); enetc_of_put_phy(priv); enetc_free_msix(priv); err_alloc_msix: diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index d8d76da51c5e..832a2175636d 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -590,6 +590,7 @@ struct fec_enet_private { void fec_ptp_init(struct platform_device *pdev, int irq_idx); void fec_ptp_stop(struct platform_device *pdev); void fec_ptp_start_cyclecounter(struct net_device *ndev); +void fec_ptp_disable_hwts(struct net_device *ndev); int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr); int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 3982285ed020..cc7fbfc09354 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1294,8 +1294,13 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) ndev->stats.tx_bytes += skb->len; } - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) && - fep->bufdesc_ex) { + /* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who + * are to time stamp the packet, so we still need to check time + * stamping enabled flag. + */ + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS && + fep->hwts_tx_en) && + fep->bufdesc_ex) { struct skb_shared_hwtstamps shhwtstamps; struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; @@ -2723,10 +2728,16 @@ static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) return -ENODEV; if (fep->bufdesc_ex) { - if (cmd == SIOCSHWTSTAMP) - return fec_ptp_set(ndev, rq); - if (cmd == SIOCGHWTSTAMP) - return fec_ptp_get(ndev, rq); + bool use_fec_hwts = !phy_has_hwtstamp(phydev); + + if (cmd == SIOCSHWTSTAMP) { + if (use_fec_hwts) + return fec_ptp_set(ndev, rq); + fec_ptp_disable_hwts(ndev); + } else if (cmd == SIOCGHWTSTAMP) { + if (use_fec_hwts) + return fec_ptp_get(ndev, rq); + } } return phy_mii_ioctl(phydev, rq, cmd); diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index 945643c02615..f8a592c96beb 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -452,6 +452,18 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp, return -EOPNOTSUPP; } +/** + * fec_ptp_disable_hwts - disable hardware time stamping + * @ndev: pointer to net_device + */ +void fec_ptp_disable_hwts(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + fep->hwts_tx_en = 0; + fep->hwts_rx_en = 0; +} + int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr) { struct fec_enet_private *fep = netdev_priv(ndev); diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index b3c69e9038ea..b513b8c5c3b5 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -779,8 +779,12 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) mac_addr = of_get_mac_address(np); - if (!IS_ERR(mac_addr)) + if (!IS_ERR(mac_addr)) { ether_addr_copy(dev->dev_addr, 
mac_addr); + } else { + eth_hw_addr_random(dev); + dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr); + } if (model && !strcasecmp(model, "TSEC")) priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT | diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index d041cac9a487..088550db2de7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -77,6 +77,7 @@ ((ring)->p = ((ring)->p - 1 + (ring)->desc_num) % (ring)->desc_num) enum hns_desc_type { + DESC_TYPE_UNKNOWN, DESC_TYPE_SKB, DESC_TYPE_FRAGLIST_SKB, DESC_TYPE_PAGE, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index c38f3bbe7d97..71ed4c54f6d5 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -1093,16 +1093,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, int k, sizeoflast; dma_addr_t dma; - if (type == DESC_TYPE_SKB) { - struct sk_buff *skb = (struct sk_buff *)priv; - int ret; - - ret = hns3_fill_skb_desc(ring, skb, desc); - if (unlikely(ret < 0)) - return ret; - - dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); - } else if (type == DESC_TYPE_FRAGLIST_SKB) { + if (type == DESC_TYPE_FRAGLIST_SKB || + type == DESC_TYPE_SKB) { struct sk_buff *skb = (struct sk_buff *)priv; dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); @@ -1118,12 +1110,12 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, return -ENOMEM; } + desc_cb->priv = priv; desc_cb->length = size; + desc_cb->dma = dma; + desc_cb->type = type; if (likely(size <= HNS3_MAX_BD_SIZE)) { - desc_cb->priv = priv; - desc_cb->dma = dma; - desc_cb->type = type; desc->addr = cpu_to_le64(dma); desc->tx.send_size = cpu_to_le16(size); desc->tx.bdtp_fe_sc_vld_ra_ri = @@ -1135,18 +1127,11 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, } frag_buf_num = hns3_tx_bd_count(size); - sizeoflast = size & HNS3_TX_LAST_SIZE_M; + sizeoflast = size % HNS3_MAX_BD_SIZE; sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE; /* When frag size is bigger than hardware limit, split this frag */ for (k = 0; k < frag_buf_num; k++) { - /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */ - desc_cb->priv = priv; - desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k; - desc_cb->type = ((type == DESC_TYPE_FRAGLIST_SKB || - type == DESC_TYPE_SKB) && !k) ? - type : DESC_TYPE_PAGE; - /* now, fill the descriptor */ desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k); desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ? 
@@ -1158,7 +1143,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, /* move ring pointer to next */ ring_ptr_move_fw(ring, next_to_use); - desc_cb = &ring->desc_cb[ring->next_to_use]; desc = &ring->desc[ring->next_to_use]; } @@ -1346,6 +1330,10 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig) unsigned int i; for (i = 0; i < ring->desc_num; i++) { + struct hns3_desc *desc = &ring->desc[ring->next_to_use]; + + memset(desc, 0, sizeof(*desc)); + /* check if this is where we started */ if (ring->next_to_use == next_to_use_orig) break; @@ -1353,6 +1341,9 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig) /* rollback one */ ring_ptr_move_bw(ring, next_to_use); + if (!ring->desc_cb[ring->next_to_use].dma) + continue; + /* unmap the descriptor dma address */ if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB || ring->desc_cb[ring->next_to_use].type == @@ -1369,6 +1360,7 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig) ring->desc_cb[ring->next_to_use].length = 0; ring->desc_cb[ring->next_to_use].dma = 0; + ring->desc_cb[ring->next_to_use].type = DESC_TYPE_UNKNOWN; } } @@ -1439,6 +1431,10 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) next_to_use_head = ring->next_to_use; + ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use]); + if (unlikely(ret < 0)) + goto fill_err; + ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB); if (unlikely(ret < 0)) goto fill_err; @@ -4140,8 +4136,8 @@ static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup) return; if (linkup) { - netif_carrier_on(netdev); netif_tx_wake_all_queues(netdev); + netif_carrier_on(netdev); if (netif_msg_link(handle)) netdev_info(netdev, "link up\n"); } else { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 66cd4395f781..a8776620acbc 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -165,8 +165,6 @@ enum hns3_nic_state { #define HNS3_TXD_MSS_S 0 #define HNS3_TXD_MSS_M (0x3fff << HNS3_TXD_MSS_S) -#define HNS3_TX_LAST_SIZE_M 0xffff - #define HNS3_VECTOR_TX_IRQ BIT_ULL(0) #define HNS3_VECTOR_RX_IRQ BIT_ULL(1) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index d6bfdc6520df..36575e72a915 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -2673,11 +2673,10 @@ void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time) delay_time); } -static int hclge_get_mac_link_status(struct hclge_dev *hdev) +static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status) { struct hclge_link_status_cmd *req; struct hclge_desc desc; - int link_status; int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true); @@ -2689,33 +2688,25 @@ static int hclge_get_mac_link_status(struct hclge_dev *hdev) } req = (struct hclge_link_status_cmd *)desc.data; - link_status = req->status & HCLGE_LINK_STATUS_UP_M; + *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ? 
+ HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN; - return !!link_status; + return 0; } -static int hclge_get_mac_phy_link(struct hclge_dev *hdev) +static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status) { - unsigned int mac_state; - int link_stat; + struct phy_device *phydev = hdev->hw.mac.phydev; + + *link_status = HCLGE_LINK_STATUS_DOWN; if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) return 0; - mac_state = hclge_get_mac_link_status(hdev); - - if (hdev->hw.mac.phydev) { - if (hdev->hw.mac.phydev->state == PHY_RUNNING) - link_stat = mac_state & - hdev->hw.mac.phydev->link; - else - link_stat = 0; - - } else { - link_stat = mac_state; - } + if (phydev && (phydev->state != PHY_RUNNING || !phydev->link)) + return 0; - return !!link_stat; + return hclge_get_mac_link_status(hdev, link_status); } static void hclge_update_link_status(struct hclge_dev *hdev) @@ -2725,6 +2716,7 @@ static void hclge_update_link_status(struct hclge_dev *hdev) struct hnae3_handle *rhandle; struct hnae3_handle *handle; int state; + int ret; int i; if (!client) @@ -2733,7 +2725,12 @@ static void hclge_update_link_status(struct hclge_dev *hdev) if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state)) return; - state = hclge_get_mac_phy_link(hdev); + ret = hclge_get_mac_phy_link(hdev, &state); + if (ret) { + clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); + return; + } + if (state != hdev->hw.mac.link) { for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { handle = &hdev->vport[i].nic; @@ -5809,9 +5806,9 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle, /* to avoid rule conflict, when user configure rule by ethtool, * we need to clear all arfs rules */ + spin_lock_bh(&hdev->fd_rule_lock); hclge_clear_arfs_rules(handle); - spin_lock_bh(&hdev->fd_rule_lock); ret = hclge_fd_config_rule(hdev, rule); spin_unlock_bh(&hdev->fd_rule_lock); @@ -5854,6 +5851,7 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle, return ret; } +/* make sure being called after lock up with fd_rule_lock */ static void hclge_del_all_fd_entries(struct hnae3_handle *handle, bool clear_list) { @@ -5866,7 +5864,6 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle, if (!hnae3_dev_fd_supported(hdev)) return; - spin_lock_bh(&hdev->fd_rule_lock); for_each_set_bit(location, hdev->fd_bmap, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location, @@ -5883,8 +5880,6 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle, bitmap_zero(hdev->fd_bmap, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]); } - - spin_unlock_bh(&hdev->fd_rule_lock); } static int hclge_restore_fd_entries(struct hnae3_handle *handle) @@ -6266,7 +6261,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id, u16 flow_id, struct flow_keys *fkeys) { struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_fd_rule_tuples new_tuples; + struct hclge_fd_rule_tuples new_tuples = {}; struct hclge_dev *hdev = vport->back; struct hclge_fd_rule *rule; u16 tmp_queue_id; @@ -6276,19 +6271,17 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id, if (!hnae3_dev_fd_supported(hdev)) return -EOPNOTSUPP; - memset(&new_tuples, 0, sizeof(new_tuples)); - hclge_fd_get_flow_tuples(fkeys, &new_tuples); - - spin_lock_bh(&hdev->fd_rule_lock); - /* when there is already fd rule existed add by user, * arfs should not work */ + spin_lock_bh(&hdev->fd_rule_lock); if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) { 
spin_unlock_bh(&hdev->fd_rule_lock); return -EOPNOTSUPP; } + hclge_fd_get_flow_tuples(fkeys, &new_tuples); + /* check is there flow director filter existed for this flow, * if not, create a new filter for it; * if filter exist with different queue id, modify the filter; @@ -6371,6 +6364,7 @@ static void hclge_rfs_filter_expire(struct hclge_dev *hdev) #endif } +/* make sure being called after lock up with fd_rule_lock */ static void hclge_clear_arfs_rules(struct hnae3_handle *handle) { #ifdef CONFIG_RFS_ACCEL @@ -6423,10 +6417,14 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable) hdev->fd_en = enable; clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE; - if (!enable) + + if (!enable) { + spin_lock_bh(&hdev->fd_rule_lock); hclge_del_all_fd_entries(handle, clear); - else + spin_unlock_bh(&hdev->fd_rule_lock); + } else { hclge_restore_fd_entries(handle); + } } static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) @@ -6524,14 +6522,15 @@ static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret) { #define HCLGE_MAC_LINK_STATUS_NUM 100 + int link_status; int i = 0; int ret; do { - ret = hclge_get_mac_link_status(hdev); - if (ret < 0) + ret = hclge_get_mac_link_status(hdev, &link_status); + if (ret) return ret; - else if (ret == link_ret) + if (link_status == link_ret) return 0; msleep(HCLGE_LINK_STATUS_MS); @@ -6542,9 +6541,6 @@ static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret) static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en, bool is_phy) { -#define HCLGE_LINK_STATUS_DOWN 0 -#define HCLGE_LINK_STATUS_UP 1 - int link_ret; link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN; @@ -6891,8 +6887,9 @@ static void hclge_ae_stop(struct hnae3_handle *handle) int i; set_bit(HCLGE_STATE_DOWN, &hdev->state); - + spin_lock_bh(&hdev->fd_rule_lock); hclge_clear_arfs_rules(handle); + spin_unlock_bh(&hdev->fd_rule_lock); /* If it is not PF reset, the firmware will disable the MAC, * so it only need to stop phy here. @@ -9045,11 +9042,12 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, bool writen_to_tbl = false; int ret = 0; - /* When device is resetting, firmware is unable to handle - * mailbox. Just record the vlan id, and remove it after + /* When device is resetting or reset failed, firmware is unable to + * handle mailbox. Just record the vlan id, and remove it after * reset finished. 
*/ - if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) { + if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || + test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) { set_bit(vlan_id, vport->vlan_del_fail_bmap); return -EBUSY; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 46e6e0fef3ba..9bbdd4557c27 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -317,6 +317,9 @@ enum hclge_link_fail_code { HCLGE_LF_XSFP_ABSENT, }; +#define HCLGE_LINK_STATUS_DOWN 0 +#define HCLGE_LINK_STATUS_UP 1 + #define HCLGE_PG_NUM 4 #define HCLGE_SCH_MODE_SP 0 #define HCLGE_SCH_MODE_DWRR 1 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index a10b022d1951..9162856de1b1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -1592,11 +1592,12 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, if (proto != htons(ETH_P_8021Q)) return -EPROTONOSUPPORT; - /* When device is resetting, firmware is unable to handle - * mailbox. Just record the vlan id, and remove it after + /* When device is resetting or reset failed, firmware is unable to + * handle mailbox. Just record the vlan id, and remove it after * reset finished. */ - if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) && is_kill) { + if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || + test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) { set_bit(vlan_id, hdev->vlan_del_fail_bmap); return -EBUSY; } @@ -3439,23 +3440,36 @@ void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, { struct hnae3_handle *nic = &hdev->nic; struct hclge_vf_to_pf_msg send_msg; + int ret; rtnl_lock(); - hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); - rtnl_unlock(); + + if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || + test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) { + dev_warn(&hdev->pdev->dev, + "is resetting when updating port based vlan info\n"); + rtnl_unlock(); + return; + } + + ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); + if (ret) { + rtnl_unlock(); + return; + } /* send msg to PF and wait update port based vlan info */ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, HCLGE_MBX_PORT_BASE_VLAN_CFG); memcpy(send_msg.data, port_base_vlan_info, data_size); - hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); - - if (state == HNAE3_PORT_BASE_VLAN_DISABLE) - nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE; - else - nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; + ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); + if (!ret) { + if (state == HNAE3_PORT_BASE_VLAN_DISABLE) + nic->port_base_vlan_state = state; + else + nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; + } - rtnl_lock(); hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); rtnl_unlock(); } diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 0fd7eae25fe9..5afb3c9c52d2 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -3206,7 +3206,7 @@ req_rx_irq_failed: req_tx_irq_failed: for (j = 0; j < i; j++) { free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]); - irq_dispose_mapping(adapter->rx_scrq[j]->irq); + irq_dispose_mapping(adapter->tx_scrq[j]->irq); } release_sub_crqs(adapter, 
1); return rc; diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index f999cca37a8a..489bb5b59475 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -301,10 +301,8 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) */ hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown; ret_val = e1000_disable_ulp_lpt_lp(hw, true); - if (ret_val) { + if (ret_val) e_warn("Failed to disable ULP\n"); - goto out; - } ret_val = hw->phy.ops.acquire(hw); if (ret_val) { diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 8bb3db2cbd41..6e5861bfb0fa 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -6224,9 +6224,18 @@ static void igb_reset_task(struct work_struct *work) struct igb_adapter *adapter; adapter = container_of(work, struct igb_adapter, reset_task); + rtnl_lock(); + /* If we're already down or resetting, just bail */ + if (test_bit(__IGB_DOWN, &adapter->state) || + test_bit(__IGB_RESETTING, &adapter->state)) { + rtnl_unlock(); + return; + } + igb_dump(adapter); netdev_err(adapter->netdev, "Reset adapter\n"); igb_reinit_locked(adapter); + rtnl_unlock(); } /** diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index 64786568af0d..75a8c407e815 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -1730,10 +1730,12 @@ static void otx2_reset_task(struct work_struct *work) if (!netif_running(pf->netdev)) return; + rtnl_lock(); otx2_stop(pf->netdev); pf->reset_count++; otx2_open(pf->netdev); netif_trans_update(pf->netdev); + rtnl_unlock(); } static const struct net_device_ops otx2_netdev_ops = { @@ -2111,6 +2113,7 @@ static void otx2_remove(struct pci_dev *pdev) pf = netdev_priv(netdev); + cancel_work_sync(&pf->reset_task); /* Disable link notifications */ otx2_cgx_config_linkevents(pf, false); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c index f4227517dc8e..92a3db69a6cd 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -617,6 +617,8 @@ static void otx2vf_remove(struct pci_dev *pdev) vf = netdev_priv(netdev); + cancel_work_sync(&vf->reset_task); + unregister_netdev(netdev); otx2vf_disable_mbox_intr(vf); otx2_detach_resources(&vf->mbox); diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index f6a1f8666f95..a1c45b39a230 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -171,11 +171,21 @@ static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth, return 0; } -static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed) +static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, + phy_interface_t interface, int speed) { u32 val; int ret; + if (interface == PHY_INTERFACE_MODE_TRGMII) { + mtk_w32(eth, TRGMII_MODE, INTF_MODE); + val = 500000000; + ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val); + if (ret) + dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret); + return; + } + val = (speed == SPEED_1000) ? 
INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100; mtk_w32(eth, val, INTF_MODE); @@ -262,10 +272,9 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode, state->interface)) goto err_phy; } else { - if (state->interface != - PHY_INTERFACE_MODE_TRGMII) - mtk_gmac0_rgmii_adjust(mac->hw, - state->speed); + mtk_gmac0_rgmii_adjust(mac->hw, + state->interface, + state->speed); /* mt7623_pad_clk_setup */ for (i = 0 ; i < NUM_TRGMII_CTRL; i++) @@ -2882,6 +2891,8 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) eth->netdev[id]->irq = eth->irq[0]; eth->netdev[id]->dev.of_node = np; + eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN; + return 0; free_netdev: diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 3d9aa7da95e9..2d3e45780719 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -4356,12 +4356,14 @@ end: static void mlx4_shutdown(struct pci_dev *pdev) { struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); + struct mlx4_dev *dev = persist->dev; mlx4_info(persist->dev, "mlx4_shutdown was called\n"); mutex_lock(&persist->interface_state_mutex); if (persist->interface_state & MLX4_INTERFACE_STATE_UP) mlx4_unload_one(pdev); mutex_unlock(&persist->interface_state_mutex); + mlx4_pci_disable_device(dev); } static const struct pci_error_handlers mlx4_err_handler = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c index bdb71332cbf2..3e44e4d820c5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c @@ -183,13 +183,16 @@ void mlx5e_rep_bond_unslave(struct mlx5_eswitch *esw, static bool mlx5e_rep_is_lag_netdev(struct net_device *netdev) { - struct mlx5e_priv *priv = netdev_priv(netdev); - struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5e_rep_priv *rpriv; + struct mlx5e_priv *priv; /* A given netdev is not a representor or not a slave of LAG configuration */ if (!mlx5e_eswitch_rep(netdev) || !bond_slave_get_rtnl(netdev)) return false; + priv = netdev_priv(netdev); + rpriv = priv->ppriv; + /* Egress acl forward to vport is supported only non-uplink representor */ return rpriv->rep->vport != MLX5_VPORT_UPLINK; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c index eefeb1cdc2ee..245a99f69641 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c @@ -551,19 +551,31 @@ static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb, } } - tun_dst = tun_rx_dst(enc_opts.key.len); + if (key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { + tun_dst = __ip_tun_set_dst(key.enc_ipv4.src, key.enc_ipv4.dst, + key.enc_ip.tos, key.enc_ip.ttl, + key.enc_tp.dst, TUNNEL_KEY, + key32_to_tunnel_id(key.enc_key_id.keyid), + enc_opts.key.len); + } else if (key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { + tun_dst = __ipv6_tun_set_dst(&key.enc_ipv6.src, &key.enc_ipv6.dst, + key.enc_ip.tos, key.enc_ip.ttl, + key.enc_tp.dst, 0, TUNNEL_KEY, + key32_to_tunnel_id(key.enc_key_id.keyid), + enc_opts.key.len); + } else { + netdev_dbg(priv->netdev, + "Couldn't restore tunnel, unsupported addr_type: %d\n", + key.enc_control.addr_type); + return false; + } + if (!tun_dst) { - WARN_ON_ONCE(true); + netdev_dbg(priv->netdev, "Couldn't restore 
tunnel, no tun_dst\n"); return false; } - ip_tunnel_key_init(&tun_dst->u.tun_info.key, - key.enc_ipv4.src, key.enc_ipv4.dst, - key.enc_ip.tos, key.enc_ip.ttl, - 0, /* label */ - key.enc_tp.src, key.enc_tp.dst, - key32_to_tunnel_id(key.enc_key_id.keyid), - TUNNEL_KEY); + tun_dst->u.tun_info.key.tp_src = key.enc_tp.src; if (enc_opts.key.len) ip_tunnel_info_opts_set(&tun_dst->u.tun_info, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c index 951ea26d96bc..e472ed0eacfb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c @@ -301,6 +301,8 @@ static int mlx5e_tc_tun_parse_geneve_params(struct mlx5e_priv *priv, MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type, ETH_P_TEB); } + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS; + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c index 58b13192df23..2805416c32a3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c @@ -80,6 +80,8 @@ static int mlx5e_tc_tun_parse_gretap(struct mlx5e_priv *priv, gre_key.key, be32_to_cpu(enc_keyid.key->keyid)); } + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS; + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c index 37b176801bcc..038a0f1cecec 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c @@ -136,6 +136,8 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv, MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni, be32_to_cpu(enc_keyid.key->keyid)); + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS; + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 081f15074cac..3b892ec301b4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -419,7 +419,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq, &rq->wq_ctrl); if (err) - return err; + goto err_rq_wq_destroy; rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR]; @@ -470,7 +470,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq, &rq->wq_ctrl); if (err) - return err; + goto err_rq_wq_destroy; rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR]; @@ -3069,6 +3069,25 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv) priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE; } +static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev, + enum mlx5_port_status state) +{ + struct mlx5_eswitch *esw = mdev->priv.eswitch; + int vport_admin_state; + + mlx5_set_port_admin_status(mdev, state); + + if (!MLX5_ESWITCH_MANAGER(mdev) || mlx5_eswitch_mode(esw) == MLX5_ESWITCH_OFFLOADS) + return; + + if (state == MLX5_PORT_UP) + vport_admin_state = MLX5_VPORT_ADMIN_STATE_AUTO; + else + vport_admin_state = MLX5_VPORT_ADMIN_STATE_DOWN; + + mlx5_eswitch_set_vport_state(esw, MLX5_VPORT_UPLINK, vport_admin_state); +} + int mlx5e_open_locked(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -3101,7 +3120,7 @@ int mlx5e_open(struct net_device *netdev) 
mutex_lock(&priv->state_lock); err = mlx5e_open_locked(netdev); if (!err) - mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP); + mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_UP); mutex_unlock(&priv->state_lock); return err; @@ -3135,7 +3154,7 @@ int mlx5e_close(struct net_device *netdev) return -ENODEV; mutex_lock(&priv->state_lock); - mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_DOWN); + mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_DOWN); err = mlx5e_close_locked(netdev); mutex_unlock(&priv->state_lock); @@ -5182,7 +5201,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) /* Marking the link as currently not needed by the Driver */ if (!netif_running(netdev)) - mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN); + mlx5e_modify_admin_state(mdev, MLX5_PORT_DOWN); mlx5e_set_netdev_mtu_boundaries(priv); mlx5e_set_dev_port_mtu(priv); @@ -5390,6 +5409,8 @@ err_cleanup_tx: profile->cleanup_tx(priv); out: + set_bit(MLX5E_STATE_DESTROYING, &priv->state); + cancel_work_sync(&priv->update_stats_work); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 006807e04eda..9519a61bd8ec 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -936,6 +936,7 @@ err_close_drop_rq: static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv) { + mlx5e_ethtool_cleanup_steering(priv); rep_vport_rx_rule_destroy(priv); mlx5e_destroy_rep_root_ft(priv); mlx5e_destroy_ttc_table(priv, &priv->fs.ttc); @@ -1080,6 +1081,8 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv) mlx5e_rep_tc_enable(priv); + mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK, + 0, 0, MLX5_VPORT_ADMIN_STATE_AUTO); mlx5_lag_add(mdev, netdev); priv->events_nb.notifier_call = uplink_rep_async_event; mlx5_notifier_register(mdev, &priv->events_nb); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index cc8412151ca0..fcedb5bdca9e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -2356,6 +2356,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, match.key->vlan_priority); *match_level = MLX5_MATCH_L2; + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS; } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 1116ab9bea6c..43005caff09e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1608,7 +1608,7 @@ abort: mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH); } - + esw_destroy_tsar(esw); return err; } @@ -1653,8 +1653,6 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf) else if (esw->mode == MLX5_ESWITCH_OFFLOADS) esw_offloads_disable(esw); - esw_destroy_tsar(esw); - old_mode = esw->mode; esw->mode = MLX5_ESWITCH_NONE; @@ -1664,6 +1662,8 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf) mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH); } + esw_destroy_tsar(esw); + if (clear_vf) mlx5_eswitch_clear_vf_vports_info(esw); } @@ -1826,6 +1826,8 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); + int 
opmod = MLX5_VPORT_STATE_OP_MOD_ESW_VPORT; + int other_vport = 1; int err = 0; if (!ESW_ALLOWED(esw)) @@ -1833,15 +1835,17 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, if (IS_ERR(evport)) return PTR_ERR(evport); + if (vport == MLX5_VPORT_UPLINK) { + opmod = MLX5_VPORT_STATE_OP_MOD_UPLINK; + other_vport = 0; + vport = 0; + } mutex_lock(&esw->state_lock); - err = mlx5_modify_vport_admin_state(esw->dev, - MLX5_VPORT_STATE_OP_MOD_ESW_VPORT, - vport, 1, link_state); + err = mlx5_modify_vport_admin_state(esw->dev, opmod, vport, other_vport, link_state); if (err) { - mlx5_core_warn(esw->dev, - "Failed to set vport %d link state, err = %d", - vport, err); + mlx5_core_warn(esw->dev, "Failed to set vport %d link state, opmod = %d, err = %d", + vport, opmod, err); goto unlock; } @@ -1883,8 +1887,6 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); int err = 0; - if (!ESW_ALLOWED(esw)) - return -EPERM; if (IS_ERR(evport)) return PTR_ERR(evport); if (vlan > 4095 || qos > 7) @@ -1912,6 +1914,9 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, u8 set_flags = 0; int err; + if (!ESW_ALLOWED(esw)) + return -EPERM; + if (vlan || qos) set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index a5175e98c0b3..5785596f13f5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -680,6 +680,8 @@ static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { r static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {} static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; } static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; } +static inline +int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; } static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev) { return ERR_PTR(-EOPNOTSUPP); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 060354bb211a..ed75353c56b8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -236,6 +236,15 @@ static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw, return &esw->offloads.vport_reps[idx]; } +static void +mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw, + struct mlx5_flow_spec *spec, + struct mlx5_esw_flow_attr *attr) +{ + if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) && + attr && attr->in_rep && attr->in_rep->vport == MLX5_VPORT_UPLINK) + spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK; +} static void mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw, @@ -259,9 +268,6 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw, mlx5_eswitch_get_vport_metadata_mask()); spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2; - misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); - if (memchr_inv(misc, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc))) - spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS; } else { misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); MLX5_SET(fte_match_set_misc, misc, source_port, 
attr->in_rep->vport); @@ -279,10 +285,6 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw, spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS; } - - if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) && - attr->in_rep->vport == MLX5_VPORT_UPLINK) - spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK; } struct mlx5_flow_handle * @@ -396,6 +398,8 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, goto err_esw_get; } + mlx5_eswitch_set_rule_flow_source(esw, spec, attr); + if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec)) rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr, &flow_act, dest, i); @@ -462,6 +466,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, i++; mlx5_eswitch_set_rule_source_port(esw, spec, attr); + mlx5_eswitch_set_rule_flow_source(esw, spec, attr); if (attr->outer_match_level != MLX5_MATCH_NONE) spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 13e2fb79c21a..2569bb6228b6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -797,7 +797,7 @@ static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root, return ft; } -/* If reverse if false then return the first flow table in next priority of +/* If reverse is false then return the first flow table in next priority of * prio in the tree, else return the last flow table in the previous priority * of prio in the tree. */ @@ -829,34 +829,16 @@ static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio) return find_closest_ft(prio, true); } -static struct fs_prio *find_fwd_ns_prio(struct mlx5_flow_root_namespace *root, - struct mlx5_flow_namespace *ns) -{ - struct mlx5_flow_namespace *root_ns = &root->ns; - struct fs_prio *iter_prio; - struct fs_prio *prio; - - fs_get_obj(prio, ns->node.parent); - list_for_each_entry(iter_prio, &root_ns->node.children, node.list) { - if (iter_prio == prio && - !list_is_last(&prio->node.children, &iter_prio->node.list)) - return list_next_entry(iter_prio, node.list); - } - return NULL; -} - static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft, struct mlx5_flow_act *flow_act) { - struct mlx5_flow_root_namespace *root = find_root(&ft->node); struct fs_prio *prio; + bool next_ns; - if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS) - prio = find_fwd_ns_prio(root, ft->ns); - else - fs_get_obj(prio, ft->node.parent); + next_ns = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS; + fs_get_obj(prio, next_ns ? ft->ns->node.parent : ft->node.parent); - return (prio) ? 
find_next_chained_ft(prio) : NULL; + return find_next_chained_ft(prio); } static int connect_fts_in_prio(struct mlx5_core_dev *dev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index ef0706d15a5b..2d55b7c22c03 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -273,17 +273,17 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp, if (rq->extts.index >= clock->ptp_info.n_pins) return -EINVAL; + pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index); + if (pin < 0) + return -EBUSY; + if (on) { - pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index); - if (pin < 0) - return -EBUSY; pin_mode = MLX5_PIN_MODE_IN; pattern = !!(rq->extts.flags & PTP_FALLING_EDGE); field_select = MLX5_MTPPS_FS_PIN_MODE | MLX5_MTPPS_FS_PATTERN | MLX5_MTPPS_FS_ENABLE; } else { - pin = rq->extts.index; field_select = MLX5_MTPPS_FS_ENABLE; } @@ -331,12 +331,12 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp, if (rq->perout.index >= clock->ptp_info.n_pins) return -EINVAL; - if (on) { - pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, - rq->perout.index); - if (pin < 0) - return -EBUSY; + pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, + rq->perout.index); + if (pin < 0) + return -EBUSY; + if (on) { pin_mode = MLX5_PIN_MODE_OUT; pattern = MLX5_OUT_PATTERN_PERIODIC; ts.tv_sec = rq->perout.period.sec; @@ -362,7 +362,6 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp, MLX5_MTPPS_FS_ENABLE | MLX5_MTPPS_FS_TIME_STAMP; } else { - pin = rq->perout.index; field_select = MLX5_MTPPS_FS_ENABLE; } @@ -409,10 +408,31 @@ static int mlx5_ptp_enable(struct ptp_clock_info *ptp, return 0; } +enum { + MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN = BIT(0), + MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT = BIT(1), +}; + static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, enum ptp_pin_function func, unsigned int chan) { - return (func == PTP_PF_PHYSYNC) ? 
-EOPNOTSUPP : 0; + struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, + ptp_info); + + switch (func) { + case PTP_PF_NONE: + return 0; + case PTP_PF_EXTTS: + return !(clock->pps_info.pin_caps[pin] & + MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN); + case PTP_PF_PEROUT: + return !(clock->pps_info.pin_caps[pin] & + MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT); + default: + return -EOPNOTSUPP; + } + + return -EOPNOTSUPP; } static const struct ptp_clock_info mlx5_ptp_clock_info = { @@ -432,6 +452,38 @@ static const struct ptp_clock_info mlx5_ptp_clock_info = { .verify = NULL, }; +static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin, + u32 *mtpps, u32 mtpps_size) +{ + u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {}; + + MLX5_SET(mtpps_reg, in, pin, pin); + + return mlx5_core_access_reg(mdev, in, sizeof(in), mtpps, + mtpps_size, MLX5_REG_MTPPS, 0, 0); +} + +static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin) +{ + struct mlx5_core_dev *mdev = clock->mdev; + u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {}; + u8 mode; + int err; + + err = mlx5_query_mtpps_pin_mode(mdev, pin, out, sizeof(out)); + if (err || !MLX5_GET(mtpps_reg, out, enable)) + return PTP_PF_NONE; + + mode = MLX5_GET(mtpps_reg, out, pin_mode); + + if (mode == MLX5_PIN_MODE_IN) + return PTP_PF_EXTTS; + else if (mode == MLX5_PIN_MODE_OUT) + return PTP_PF_PEROUT; + + return PTP_PF_NONE; +} + static int mlx5_init_pin_config(struct mlx5_clock *clock) { int i; @@ -451,8 +503,8 @@ static int mlx5_init_pin_config(struct mlx5_clock *clock) sizeof(clock->ptp_info.pin_config[i].name), "mlx5_pps%d", i); clock->ptp_info.pin_config[i].index = i; - clock->ptp_info.pin_config[i].func = PTP_PF_NONE; - clock->ptp_info.pin_config[i].chan = i; + clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(clock, i); + clock->ptp_info.pin_config[i].chan = 0; } return 0; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index e9ccd333f61d..71b6185b4904 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -710,7 +710,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core) err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_emad_rx_listener, mlxsw_core); if (err) - return err; + goto err_trap_register; err = mlxsw_core->driver->basic_trap_groups_set(mlxsw_core); if (err) @@ -722,6 +722,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core) err_emad_trap_set: mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener, mlxsw_core); +err_trap_register: destroy_workqueue(mlxsw_core->emad_wq); return err; } @@ -1813,7 +1814,7 @@ static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core, err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans, bulk_list, cb, cb_priv, tid); if (err) { - kfree(trans); + kfree_rcu(trans, rcu); return err; } return 0; @@ -2050,11 +2051,13 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, break; } } - rcu_read_unlock(); - if (!found) + if (!found) { + rcu_read_unlock(); goto drop; + } rxl->func(skb, local_port, rxl_item->priv); + rcu_read_unlock(); return; drop: diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c index 08215fed193d..a7d86df7123f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c @@ -45,7 +45,7 @@ static int mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id, static int 
mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module, u16 offset, u16 size, void *data, - unsigned int *p_read_size) + bool qsfp, unsigned int *p_read_size) { char eeprom_tmp[MLXSW_REG_MCIA_EEPROM_SIZE]; char mcia_pl[MLXSW_REG_MCIA_LEN]; @@ -54,6 +54,10 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module, int status; int err; + /* MCIA register accepts buffer size <= 48. Page of size 128 should be + * read by chunks of size 48, 48, 32. Align the size of the last chunk + * to avoid reading after the end of the page. + */ size = min_t(u16, size, MLXSW_REG_MCIA_EEPROM_SIZE); if (offset < MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH && @@ -63,18 +67,25 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module, i2c_addr = MLXSW_REG_MCIA_I2C_ADDR_LOW; if (offset >= MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH) { - page = MLXSW_REG_MCIA_PAGE_GET(offset); - offset -= MLXSW_REG_MCIA_EEPROM_UP_PAGE_LENGTH * page; - /* When reading upper pages 1, 2 and 3 the offset starts at - * 128. Please refer to "QSFP+ Memory Map" figure in SFF-8436 - * specification for graphical depiction. - * MCIA register accepts buffer size <= 48. Page of size 128 - * should be read by chunks of size 48, 48, 32. Align the size - * of the last chunk to avoid reading after the end of the - * page. - */ - if (offset + size > MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH) - size = MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH - offset; + if (qsfp) { + /* When reading upper pages 1, 2 and 3 the offset + * starts at 128. Please refer to "QSFP+ Memory Map" + * figure in SFF-8436 specification for graphical + * depiction. + */ + page = MLXSW_REG_MCIA_PAGE_GET(offset); + offset -= MLXSW_REG_MCIA_EEPROM_UP_PAGE_LENGTH * page; + if (offset + size > MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH) + size = MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH - offset; + } else { + /* When reading upper pages 1, 2 and 3 the offset + * starts at 0 and I2C high address is used. Please refer + * refer to "Memory Organization" figure in SFF-8472 + * specification for graphical depiction. + */ + i2c_addr = MLXSW_REG_MCIA_I2C_ADDR_HIGH; + offset -= MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH; + } } mlxsw_reg_mcia_pack(mcia_pl, module, 0, page, offset, size, i2c_addr); @@ -166,7 +177,7 @@ int mlxsw_env_get_module_info(struct mlxsw_core *mlxsw_core, int module, int err; err = mlxsw_env_query_module_eeprom(mlxsw_core, module, 0, offset, - module_info, &read_size); + module_info, false, &read_size); if (err) return err; @@ -197,7 +208,7 @@ int mlxsw_env_get_module_info(struct mlxsw_core *mlxsw_core, int module, /* Verify if transceiver provides diagnostic monitoring page */ err = mlxsw_env_query_module_eeprom(mlxsw_core, module, SFP_DIAGMON, 1, &diag_mon, - &read_size); + false, &read_size); if (err) return err; @@ -225,17 +236,22 @@ int mlxsw_env_get_module_eeprom(struct net_device *netdev, int offset = ee->offset; unsigned int read_size; int i = 0; + bool qsfp; int err; if (!ee->len) return -EINVAL; memset(data, 0, ee->len); + /* Validate module identifier value. 
*/ + err = mlxsw_env_validate_cable_ident(mlxsw_core, module, &qsfp); + if (err) + return err; while (i < ee->len) { err = mlxsw_env_query_module_eeprom(mlxsw_core, module, offset, ee->len - i, data + i, - &read_size); + qsfp, &read_size); if (err) { netdev_err(netdev, "Eeprom query failed\n"); return err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index fcb88d4271bf..8ac987c8c8bc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -5536,6 +5536,7 @@ enum mlxsw_reg_htgt_trap_group { MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST, MLXSW_REG_HTGT_TRAP_GROUP_SP_NEIGH_DISCOVERY, MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP, + MLXSW_REG_HTGT_TRAP_GROUP_SP_EXTERNAL_ROUTE, MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME, MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP, MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 019ed503aadf..0521e9d48c45 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -5001,15 +5001,6 @@ static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp, static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt) { - /* Packets with link-local destination IP arriving to the router - * are trapped to the CPU, so no need to program specific routes - * for them. Only allow prefix routes (usually one fe80::/64) so - * that packets are trapped for the right reason. - */ - if ((ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_LINKLOCAL) && - (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))) - return true; - /* Multicast routes aren't supported, so ignore them. Neighbour * Discovery packets are specifically trapped. 
*/ @@ -8078,16 +8069,6 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp, mlxsw_sp->router = router; router->mlxsw_sp = mlxsw_sp; - router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event; - err = register_inetaddr_notifier(&router->inetaddr_nb); - if (err) - goto err_register_inetaddr_notifier; - - router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event; - err = register_inet6addr_notifier(&router->inet6addr_nb); - if (err) - goto err_register_inet6addr_notifier; - INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list); err = __mlxsw_sp_router_init(mlxsw_sp); if (err) @@ -8128,12 +8109,6 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp, if (err) goto err_neigh_init; - mlxsw_sp->router->netevent_nb.notifier_call = - mlxsw_sp_router_netevent_event; - err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb); - if (err) - goto err_register_netevent_notifier; - err = mlxsw_sp_mp_hash_init(mlxsw_sp); if (err) goto err_mp_hash_init; @@ -8142,6 +8117,22 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp, if (err) goto err_dscp_init; + router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event; + err = register_inetaddr_notifier(&router->inetaddr_nb); + if (err) + goto err_register_inetaddr_notifier; + + router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event; + err = register_inet6addr_notifier(&router->inet6addr_nb); + if (err) + goto err_register_inet6addr_notifier; + + mlxsw_sp->router->netevent_nb.notifier_call = + mlxsw_sp_router_netevent_event; + err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb); + if (err) + goto err_register_netevent_notifier; + mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event; err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp), &mlxsw_sp->router->fib_nb, @@ -8152,10 +8143,15 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp, return 0; err_register_fib_notifier: -err_dscp_init: -err_mp_hash_init: unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb); err_register_netevent_notifier: + unregister_inet6addr_notifier(&router->inet6addr_nb); +err_register_inet6addr_notifier: + unregister_inetaddr_notifier(&router->inetaddr_nb); +err_register_inetaddr_notifier: + mlxsw_core_flush_owq(); +err_dscp_init: +err_mp_hash_init: mlxsw_sp_neigh_fini(mlxsw_sp); err_neigh_init: mlxsw_sp_vrs_fini(mlxsw_sp); @@ -8174,10 +8170,6 @@ err_ipips_init: err_rifs_init: __mlxsw_sp_router_fini(mlxsw_sp); err_router_init: - unregister_inet6addr_notifier(&router->inet6addr_nb); -err_register_inet6addr_notifier: - unregister_inetaddr_notifier(&router->inetaddr_nb); -err_register_inetaddr_notifier: mutex_destroy(&mlxsw_sp->router->lock); kfree(mlxsw_sp->router); return err; @@ -8188,6 +8180,9 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp) unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp), &mlxsw_sp->router->fib_nb); unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb); + unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb); + unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb); + mlxsw_core_flush_owq(); mlxsw_sp_neigh_fini(mlxsw_sp); mlxsw_sp_vrs_fini(mlxsw_sp); mlxsw_sp_mr_fini(mlxsw_sp); @@ -8197,8 +8192,6 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp) mlxsw_sp_ipips_fini(mlxsw_sp); mlxsw_sp_rifs_fini(mlxsw_sp); __mlxsw_sp_router_fini(mlxsw_sp); - unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb); - unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb); mutex_destroy(&mlxsw_sp->router->lock); kfree(mlxsw_sp->router); } diff --git 
a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c index 157a42c63066..1e38dfe7cf64 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c @@ -328,6 +328,9 @@ mlxsw_sp_trap_policer_items_arr[] = { { .policer = MLXSW_SP_TRAP_POLICER(18, 1024, 128), }, + { + .policer = MLXSW_SP_TRAP_POLICER(19, 1024, 512), + }, }; static const struct mlxsw_sp_trap_group_item mlxsw_sp_trap_group_items_arr[] = { @@ -422,6 +425,11 @@ static const struct mlxsw_sp_trap_group_item mlxsw_sp_trap_group_items_arr[] = { .priority = 2, }, { + .group = DEVLINK_TRAP_GROUP_GENERIC(EXTERNAL_DELIVERY, 19), + .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_EXTERNAL_ROUTE, + .priority = 1, + }, + { .group = DEVLINK_TRAP_GROUP_GENERIC(IPV6, 15), .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6, .priority = 2, @@ -882,11 +890,11 @@ static const struct mlxsw_sp_trap_item mlxsw_sp_trap_items_arr[] = { }, }, { - .trap = MLXSW_SP_TRAP_CONTROL(EXTERNAL_ROUTE, LOCAL_DELIVERY, + .trap = MLXSW_SP_TRAP_CONTROL(EXTERNAL_ROUTE, EXTERNAL_DELIVERY, TRAP), .listeners_arr = { - MLXSW_SP_RXL_MARK(RTR_INGRESS0, IP2ME, TRAP_TO_CPU, - false), + MLXSW_SP_RXL_MARK(RTR_INGRESS0, EXTERNAL_ROUTE, + TRAP_TO_CPU, false), }, }, { diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 9cfe1fd98c30..f17da67a4622 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -748,21 +748,21 @@ void ocelot_get_txtstamp(struct ocelot *ocelot) spin_unlock_irqrestore(&port->tx_skbs.lock, flags); - /* Next ts */ - ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT); + /* Get the h/w timestamp */ + ocelot_get_hwtimestamp(ocelot, &ts); if (unlikely(!skb_match)) continue; - /* Get the h/w timestamp */ - ocelot_get_hwtimestamp(ocelot, &ts); - /* Set the timestamp into the skb */ memset(&shhwtstamps, 0, sizeof(shhwtstamps)); shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec); skb_tstamp_tx(skb_match, &shhwtstamps); dev_kfree_skb_any(skb_match); + + /* Next ts */ + ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT); } } EXPORT_SYMBOL(ocelot_get_txtstamp); diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index 9b63574b6202..b5f1849fd280 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -98,7 +98,7 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo) { struct sk_buff **skb_ptr = NULL; struct sk_buff **temp; -#define NR_SKB_COMPLETED 128 +#define NR_SKB_COMPLETED 16 struct sk_buff *completed[NR_SKB_COMPLETED]; int more; diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c index d2708a57f2ff..4075f5e59955 100644 --- a/drivers/net/ethernet/ni/nixge.c +++ b/drivers/net/ethernet/ni/nixge.c @@ -1299,19 +1299,21 @@ static int nixge_probe(struct platform_device *pdev) netif_napi_add(ndev, &priv->napi, nixge_poll, NAPI_POLL_WEIGHT); err = nixge_of_get_resources(pdev); if (err) - return err; + goto free_netdev; __nixge_hw_set_mac_address(ndev); priv->tx_irq = platform_get_irq_byname(pdev, "tx"); if (priv->tx_irq < 0) { netdev_err(ndev, "could not find 'tx' irq"); - return priv->tx_irq; + err = priv->tx_irq; + goto free_netdev; } priv->rx_irq = platform_get_irq_byname(pdev, "rx"); if (priv->rx_irq < 0) { netdev_err(ndev, "could not find 'rx' irq"); - return priv->rx_irq; + err = priv->rx_irq; + goto free_netdev; } 
priv->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c index e03ea9b18f95..095561924bdc 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c @@ -103,15 +103,18 @@ static void ionic_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) { struct ionic_lif *lif = netdev_priv(netdev); + unsigned int offset; unsigned int size; regs->version = IONIC_DEV_CMD_REG_VERSION; + offset = 0; size = IONIC_DEV_INFO_REG_COUNT * sizeof(u32); - memcpy_fromio(p, lif->ionic->idev.dev_info_regs->words, size); + memcpy_fromio(p + offset, lif->ionic->idev.dev_info_regs->words, size); + offset += size; size = IONIC_DEV_CMD_REG_COUNT * sizeof(u32); - memcpy_fromio(p, lif->ionic->idev.dev_cmd_regs->words, size); + memcpy_fromio(p + offset, lif->ionic->idev.dev_cmd_regs->words, size); } static int ionic_get_link_ksettings(struct net_device *netdev, diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index f49486b6d04d..e55d41546cff 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -96,8 +96,7 @@ static void ionic_link_status_check(struct ionic_lif *lif) u16 link_status; bool link_up; - if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state) || - test_bit(IONIC_LIF_F_QUEUE_RESET, lif->state)) + if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state)) return; link_status = le16_to_cpu(lif->info->status.link_status); @@ -114,16 +113,22 @@ static void ionic_link_status_check(struct ionic_lif *lif) netif_carrier_on(netdev); } - if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) + if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) { + mutex_lock(&lif->queue_lock); ionic_start_queues(lif); + mutex_unlock(&lif->queue_lock); + } } else { if (netif_carrier_ok(netdev)) { netdev_info(netdev, "Link down\n"); netif_carrier_off(netdev); } - if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) + if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) { + mutex_lock(&lif->queue_lock); ionic_stop_queues(lif); + mutex_unlock(&lif->queue_lock); + } } clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state); @@ -863,8 +868,7 @@ static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr) if (f) return 0; - netdev_dbg(lif->netdev, "rx_filter add ADDR %pM (id %d)\n", addr, - ctx.comp.rx_filter_add.filter_id); + netdev_dbg(lif->netdev, "rx_filter add ADDR %pM\n", addr); memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN); err = ionic_adminq_post_wait(lif, &ctx); @@ -893,6 +897,9 @@ static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr) return -ENOENT; } + netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n", + addr, f->filter_id); + ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id); ionic_rx_filter_free(lif, f); spin_unlock_bh(&lif->rx_filters.lock); @@ -901,9 +908,6 @@ static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr) if (err && err != -EEXIST) return err; - netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n", addr, - ctx.cmd.rx_filter_del.filter_id); - return 0; } @@ -1351,13 +1355,11 @@ static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, }; int err; + netdev_dbg(netdev, "rx_filter add VLAN %d\n", vid); err = ionic_adminq_post_wait(lif, &ctx); if (err) return err; - 
netdev_dbg(netdev, "rx_filter add VLAN %d (id %d)\n", vid, - ctx.comp.rx_filter_add.filter_id); - return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx); } @@ -1382,8 +1384,8 @@ static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, return -ENOENT; } - netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n", vid, - le32_to_cpu(ctx.cmd.rx_filter_del.filter_id)); + netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n", + vid, f->filter_id); ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id); ionic_rx_filter_free(lif, f); @@ -1993,10 +1995,7 @@ int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg) bool running; int err = 0; - err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET); - if (err) - return err; - + mutex_lock(&lif->queue_lock); running = netif_running(lif->netdev); if (running) { netif_device_detach(lif->netdev); @@ -2014,7 +2013,7 @@ int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg) } reset_out: - clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state); + mutex_unlock(&lif->queue_lock); return err; } @@ -2161,7 +2160,9 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif) if (test_bit(IONIC_LIF_F_UP, lif->state)) { dev_info(ionic->dev, "Surprise FW stop, stopping queues\n"); + mutex_lock(&lif->queue_lock); ionic_stop_queues(lif); + mutex_unlock(&lif->queue_lock); } if (netif_running(lif->netdev)) { @@ -2280,15 +2281,15 @@ static void ionic_lif_deinit(struct ionic_lif *lif) cancel_work_sync(&lif->deferred.work); cancel_work_sync(&lif->tx_timeout_work); ionic_rx_filters_deinit(lif); + if (lif->netdev->features & NETIF_F_RXHASH) + ionic_lif_rss_deinit(lif); } - if (lif->netdev->features & NETIF_F_RXHASH) - ionic_lif_rss_deinit(lif); - napi_disable(&lif->adminqcq->napi); ionic_lif_qcq_deinit(lif, lif->notifyqcq); ionic_lif_qcq_deinit(lif, lif->adminqcq); + mutex_destroy(&lif->queue_lock); ionic_lif_reset(lif); } @@ -2465,6 +2466,7 @@ static int ionic_lif_init(struct ionic_lif *lif) return err; lif->hw_index = le16_to_cpu(comp.hw_index); + mutex_init(&lif->queue_lock); /* now that we have the hw_index we can figure out our doorbell page */ lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h index ed126dd74e01..8dc2c5d77424 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h @@ -135,7 +135,6 @@ enum ionic_lif_state_flags { IONIC_LIF_F_SW_DEBUG_STATS, IONIC_LIF_F_UP, IONIC_LIF_F_LINK_CHECK_REQUESTED, - IONIC_LIF_F_QUEUE_RESET, IONIC_LIF_F_FW_RESET, /* leave this as last */ @@ -165,6 +164,7 @@ struct ionic_lif { unsigned int hw_index; unsigned int kern_pid; u64 __iomem *kern_dbpage; + struct mutex queue_lock; /* lock for queue structures */ spinlock_t adminq_lock; /* lock for AdminQ operations */ struct ionic_qcq *adminqcq; struct ionic_qcq *notifyqcq; @@ -213,12 +213,6 @@ struct ionic_lif { #define lif_to_txq(lif, i) (&lif_to_txqcq((lif), i)->q) #define lif_to_rxq(lif, i) (&lif_to_txqcq((lif), i)->q) -/* return 0 if successfully set the bit, else non-zero */ -static inline int ionic_wait_for_bit(struct ionic_lif *lif, int bitname) -{ - return wait_on_bit_lock(lif->state, bitname, TASK_INTERRUPTIBLE); -} - static inline u32 ionic_coal_usec_to_hw(struct ionic *ionic, u32 usecs) { u32 mult = le32_to_cpu(ionic->ident.dev.intr_coal_mult); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c 
b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c index 80eeb7696e01..cd0076fc3044 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c @@ -21,13 +21,16 @@ void ionic_rx_filter_free(struct ionic_lif *lif, struct ionic_rx_filter *f) void ionic_rx_filter_replay(struct ionic_lif *lif) { struct ionic_rx_filter_add_cmd *ac; + struct hlist_head new_id_list; struct ionic_admin_ctx ctx; struct ionic_rx_filter *f; struct hlist_head *head; struct hlist_node *tmp; + unsigned int key; unsigned int i; int err; + INIT_HLIST_HEAD(&new_id_list); ac = &ctx.cmd.rx_filter_add; for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) { @@ -58,9 +61,30 @@ void ionic_rx_filter_replay(struct ionic_lif *lif) ac->mac.addr); break; } + spin_lock_bh(&lif->rx_filters.lock); + ionic_rx_filter_free(lif, f); + spin_unlock_bh(&lif->rx_filters.lock); + + continue; } + + /* remove from old id list, save new id in tmp list */ + spin_lock_bh(&lif->rx_filters.lock); + hlist_del(&f->by_id); + spin_unlock_bh(&lif->rx_filters.lock); + f->filter_id = le32_to_cpu(ctx.comp.rx_filter_add.filter_id); + hlist_add_head(&f->by_id, &new_id_list); } } + + /* rebuild the by_id hash lists with the new filter ids */ + spin_lock_bh(&lif->rx_filters.lock); + hlist_for_each_entry_safe(f, tmp, &new_id_list, by_id) { + key = f->filter_id & IONIC_RX_FILTER_HLISTS_MASK; + head = &lif->rx_filters.by_id[key]; + hlist_add_head(&f->by_id, head); + } + spin_unlock_bh(&lif->rx_filters.lock); } int ionic_rx_filters_init(struct ionic_lif *lif) @@ -69,10 +93,12 @@ int ionic_rx_filters_init(struct ionic_lif *lif) spin_lock_init(&lif->rx_filters.lock); + spin_lock_bh(&lif->rx_filters.lock); for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) { INIT_HLIST_HEAD(&lif->rx_filters.by_hash[i]); INIT_HLIST_HEAD(&lif->rx_filters.by_id[i]); } + spin_unlock_bh(&lif->rx_filters.lock); return 0; } @@ -84,11 +110,13 @@ void ionic_rx_filters_deinit(struct ionic_lif *lif) struct hlist_node *tmp; unsigned int i; + spin_lock_bh(&lif->rx_filters.lock); for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) { head = &lif->rx_filters.by_id[i]; hlist_for_each_entry_safe(f, tmp, head, by_id) ionic_rx_filter_free(lif, f); } + spin_unlock_bh(&lif->rx_filters.lock); } int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index, @@ -124,6 +152,7 @@ int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index, f->filter_id = le32_to_cpu(ctx->comp.rx_filter_add.filter_id); f->rxq_index = rxq_index; memcpy(&f->cmd, ac, sizeof(f->cmd)); + netdev_dbg(lif->netdev, "rx_filter add filter_id %d\n", f->filter_id); INIT_HLIST_NODE(&f->by_hash); INIT_HLIST_NODE(&f->by_id); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c index b7f900c11834..85eb8f276a37 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c @@ -161,12 +161,6 @@ static void ionic_rx_clean(struct ionic_queue *q, return; } - /* no packet processing while resetting */ - if (unlikely(test_bit(IONIC_LIF_F_QUEUE_RESET, q->lif->state))) { - stats->dropped++; - return; - } - stats->pkts++; stats->bytes += le16_to_cpu(comp->len); diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 08ba9d54ab63..d13ec88313c3 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -2008,8 +2008,8 @@ static void qed_rdma_set_pf_params(struct qed_hwfn 
*p_hwfn, enum protocol_type proto; if (p_hwfn->mcp_info->func_info.protocol == QED_PCI_ETH_RDMA) { - DP_NOTICE(p_hwfn, - "Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n"); + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n"); p_hwfn->hw_info.personality = QED_PCI_ETH_ROCE; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 9c26fde663b3..dbdac983ccde 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -3102,7 +3102,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) } /* Log and clear previous pglue_b errors if such exist */ - qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt); + qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt, true); /* Enable the PF's internal FID_enable in the PXP */ rc = qed_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt, diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index b7b974f0ef21..5eec1fc6229d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -257,9 +257,10 @@ out: #define PGLUE_ATTENTION_ZLR_VALID (1 << 25) #define PGLUE_ATTENTION_ILT_VALID (1 << 23) -int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + bool hw_init) { + char msg[256]; u32 tmp; tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2); @@ -273,22 +274,23 @@ int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, details = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS); - DP_NOTICE(p_hwfn, - "Illegal write by chip to [%08x:%08x] blocked.\n" - "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" - "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", - addr_hi, addr_lo, details, - (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), - (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), - GET_FIELD(details, - PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0, - tmp, - GET_FIELD(tmp, - PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0, - GET_FIELD(tmp, - PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0, - GET_FIELD(tmp, - PGLUE_ATTENTION_DETAILS2_FID_EN) ? 
1 : 0); + snprintf(msg, sizeof(msg), + "Illegal write by chip to [%08x:%08x] blocked.\n" + "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" + "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]", + addr_hi, addr_lo, details, + (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), + (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), + !!GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VF_VALID), + tmp, + !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_WAS_ERR), + !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_BME), + !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_FID_EN)); + + if (hw_init) + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "%s\n", msg); + else + DP_NOTICE(p_hwfn, "%s\n", msg); } tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2); @@ -321,8 +323,14 @@ int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, } tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL); - if (tmp & PGLUE_ATTENTION_ICPL_VALID) - DP_NOTICE(p_hwfn, "ICPL error - %08x\n", tmp); + if (tmp & PGLUE_ATTENTION_ICPL_VALID) { + snprintf(msg, sizeof(msg), "ICPL error - %08x", tmp); + + if (hw_init) + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "%s\n", msg); + else + DP_NOTICE(p_hwfn, "%s\n", msg); + } tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS); if (tmp & PGLUE_ATTENTION_ZLR_VALID) { @@ -361,7 +369,7 @@ int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, static int qed_pglueb_rbc_attn_cb(struct qed_hwfn *p_hwfn) { - return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt); + return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt, false); } static int qed_fw_assertion(struct qed_hwfn *p_hwfn) @@ -1193,7 +1201,8 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn) index, attn_bits, attn_acks, asserted_bits, deasserted_bits, p_sb_attn_sw->known_attn); } else if (asserted_bits == 0x100) { - DP_INFO(p_hwfn, "MFW indication via attention\n"); + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "MFW indication via attention\n"); } else { DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "MFW indication [deassertion]\n"); diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index e09db3386367..110169e90121 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -442,7 +442,7 @@ int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, #define QED_MAPPING_MEMORY_SIZE(dev) (NUM_OF_SBS(dev)) -int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt); +int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + bool hw_init); #endif diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index a442bcf64b9c..99f7aae102ce 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1450,6 +1450,7 @@ static void ravb_tx_timeout_work(struct work_struct *work) struct ravb_private *priv = container_of(work, struct ravb_private, work); struct net_device *ndev = priv->ndev; + int error; netif_tx_stop_all_queues(ndev); @@ -1458,15 +1459,36 @@ static void ravb_tx_timeout_work(struct work_struct *work) ravb_ptp_stop(ndev); /* Wait for DMA stopping */ - ravb_stop_dma(ndev); + if (ravb_stop_dma(ndev)) { + /* If ravb_stop_dma() fails, the hardware is still operating + * for TX and/or RX. So, this should not call the following + * functions because ravb_dmac_init() is possible to fail too. 
+ * Also, this should not retry ravb_stop_dma() again and again + * here because it's possible to wait forever. So, this just + * re-enables the TX and RX and skips the following + * re-initialization procedure. + */ + ravb_rcv_snd_enable(ndev); + goto out; + } ravb_ring_free(ndev, RAVB_BE); ravb_ring_free(ndev, RAVB_NC); /* Device init */ - ravb_dmac_init(ndev); + error = ravb_dmac_init(ndev); + if (error) { + /* If ravb_dmac_init() fails, descriptors are freed. So, this + * should return here to avoid re-enabling the TX and RX in + * ravb_emac_init(). + */ + netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n", + __func__, error); + return; + } ravb_emac_init(ndev); +out: /* Initialise PTP Clock driver */ if (priv->chip_id == RCAR_GEN2) ravb_ptp_init(ndev, priv->pdev); diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 90410f9d3b1a..1c4fea9c3ec4 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -2274,7 +2274,7 @@ static int smc_drv_probe(struct platform_device *pdev) ret = try_toggle_control_gpio(&pdev->dev, &lp->power_gpio, "power", 0, 0, 100); if (ret) - return ret; + goto out_free_netdev; /* * Optional reset GPIO configured? Minimum 100 ns reset needed @@ -2283,7 +2283,7 @@ static int smc_drv_probe(struct platform_device *pdev) ret = try_toggle_control_gpio(&pdev->dev, &lp->reset_gpio, "reset", 0, 0, 100); if (ret) - return ret; + goto out_free_netdev; /* * Need to wait for optional EEPROM to load, max 750 us according diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c index f2638446b62e..81b554dd7221 100644 --- a/drivers/net/ethernet/socionext/sni_ave.c +++ b/drivers/net/ethernet/socionext/sni_ave.c @@ -1191,7 +1191,7 @@ static int ave_init(struct net_device *ndev) ret = regmap_update_bits(priv->regmap, SG_ETPINMODE, priv->pinmode_mask, priv->pinmode_val); if (ret) - return ret; + goto out_reset_assert; ave_global_reset(ndev); diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 1492648247d9..6d778bc3d012 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -1850,7 +1850,8 @@ static int am65_cpsw_nuss_init_ndev_2g(struct am65_cpsw_common *common) port->ndev->max_mtu = AM65_CPSW_MAX_PACKET_SIZE; port->ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | - NETIF_F_HW_CSUM; + NETIF_F_HW_CSUM | + NETIF_F_HW_TC; port->ndev->features = port->ndev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; port->ndev->vlan_features |= NETIF_F_SG; diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 4661ef865807..dec52b763d50 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -1615,11 +1615,11 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[], struct netlink_ext_ack *extack) { struct geneve_dev *geneve = netdev_priv(dev); + enum ifla_geneve_df df = geneve->df; struct geneve_sock *gs4, *gs6; struct ip_tunnel_info info; bool metadata; bool use_udp6_rx_checksums; - enum ifla_geneve_df df; bool ttl_inherit; int err; diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c index 2a6ec5394966..a4b3fce69ecd 100644 --- a/drivers/net/hippi/rrunner.c +++ b/drivers/net/hippi/rrunner.c @@ -1242,7 +1242,7 @@ static int rr_open(struct net_device *dev) rrpriv->info = NULL; } if (rrpriv->rx_ctrl) { - pci_free_consistent(pdev, sizeof(struct ring_ctrl), + pci_free_consistent(pdev, 256 * sizeof(struct ring_ctrl), rrpriv->rx_ctrl,
rrpriv->rx_ctrl_dma); rrpriv->rx_ctrl = NULL; } diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c index 5a37514e4234..c11f32f644db 100644 --- a/drivers/net/ieee802154/adf7242.c +++ b/drivers/net/ieee802154/adf7242.c @@ -4,7 +4,7 @@ * * Copyright 2009-2017 Analog Devices Inc. * - * http://www.analog.com/ADF7242 + * https://www.analog.com/ADF7242 */ #include <linux/kernel.h> @@ -1262,7 +1262,7 @@ static int adf7242_probe(struct spi_device *spi) WQ_MEM_RECLAIM); if (unlikely(!lp->wqueue)) { ret = -ENOMEM; - goto err_hw_init; + goto err_alloc_wq; } ret = adf7242_hw_init(lp); @@ -1294,6 +1294,8 @@ static int adf7242_probe(struct spi_device *spi) return ret; err_hw_init: + destroy_workqueue(lp->wqueue); +err_alloc_wq: mutex_destroy(&lp->bmux); ieee802154_free_hw(lp->hw); diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c index 2908e0a0d6e1..23950e7a0f81 100644 --- a/drivers/net/netdevsim/netdev.c +++ b/drivers/net/netdevsim/netdev.c @@ -302,7 +302,7 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port) rtnl_lock(); err = nsim_bpf_init(ns); if (err) - goto err_free_netdev; + goto err_rtnl_unlock; nsim_ipsec_init(ns); @@ -316,8 +316,8 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port) err_ipsec_teardown: nsim_ipsec_teardown(ns); nsim_bpf_uninit(ns); +err_rtnl_unlock: rtnl_unlock(); -err_free_netdev: free_netdev(dev); return ERR_PTR(err); } diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index ecbd5e0d685c..acb0aae60755 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -1260,6 +1260,7 @@ static int dp83640_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr) dp83640->hwts_rx_en = 1; dp83640->layer = PTP_CLASS_L4; dp83640->version = PTP_CLASS_V1; + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; break; case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: @@ -1267,6 +1268,7 @@ static int dp83640_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr) dp83640->hwts_rx_en = 1; dp83640->layer = PTP_CLASS_L4; dp83640->version = PTP_CLASS_V2; + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; break; case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: @@ -1274,6 +1276,7 @@ static int dp83640_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr) dp83640->hwts_rx_en = 1; dp83640->layer = PTP_CLASS_L2; dp83640->version = PTP_CLASS_V2; + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; break; case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_SYNC: @@ -1281,6 +1284,7 @@ static int dp83640_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr) dp83640->hwts_rx_en = 1; dp83640->layer = PTP_CLASS_L4 | PTP_CLASS_L2; dp83640->version = PTP_CLASS_V2; + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; break; default: return -ERANGE; diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c index 4e514f5d7c6c..fd3a04d98dc1 100644 --- a/drivers/net/usb/ax88172a.c +++ b/drivers/net/usb/ax88172a.c @@ -187,6 +187,7 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf) ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf, 0); if (ret < ETH_ALEN) { netdev_err(dev->net, "Failed to read MAC address: %d\n", ret); + ret = -EIO; goto free; } memcpy(dev->net->dev_addr, buf, ETH_ALEN); diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index bb8c34d746ab..d2fdb5430d27 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -1390,8 +1390,9 @@ 
static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old) unsigned long flags; if (old) - hso_dbg(0x16, "Termios called with: cflags new[%d] - old[%d]\n", - tty->termios.c_cflag, old->c_cflag); + hso_dbg(0x16, "Termios called with: cflags new[%u] - old[%u]\n", + (unsigned int)tty->termios.c_cflag, + (unsigned int)old->c_cflag); /* the actual setup */ spin_lock_irqsave(&serial->serial_lock, flags); @@ -2260,12 +2261,14 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs, minor = get_free_serial_index(); if (minor < 0) - goto exit; + goto exit2; /* register our minor number */ serial->parent->dev = tty_port_register_device_attr(&serial->port, tty_drv, minor, &serial->parent->interface->dev, serial->parent, hso_serial_dev_groups); + if (IS_ERR(serial->parent->dev)) + goto exit2; /* fill in specific data for later use */ serial->minor = minor; @@ -2310,6 +2313,7 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs, return 0; exit: hso_serial_tty_unregister(serial); +exit2: hso_serial_common_free(serial); return -1; } diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index eccbf4cd7149..442507f25aad 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -377,10 +377,6 @@ struct lan78xx_net { struct tasklet_struct bh; struct delayed_work wq; - struct usb_host_endpoint *ep_blkin; - struct usb_host_endpoint *ep_blkout; - struct usb_host_endpoint *ep_intr; - int msg_enable; struct urb *urb_intr; @@ -2860,78 +2856,12 @@ lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net) return NETDEV_TX_OK; } -static int -lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf) -{ - int tmp; - struct usb_host_interface *alt = NULL; - struct usb_host_endpoint *in = NULL, *out = NULL; - struct usb_host_endpoint *status = NULL; - - for (tmp = 0; tmp < intf->num_altsetting; tmp++) { - unsigned ep; - - in = NULL; - out = NULL; - status = NULL; - alt = intf->altsetting + tmp; - - for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) { - struct usb_host_endpoint *e; - int intr = 0; - - e = alt->endpoint + ep; - switch (e->desc.bmAttributes) { - case USB_ENDPOINT_XFER_INT: - if (!usb_endpoint_dir_in(&e->desc)) - continue; - intr = 1; - /* FALLTHROUGH */ - case USB_ENDPOINT_XFER_BULK: - break; - default: - continue; - } - if (usb_endpoint_dir_in(&e->desc)) { - if (!intr && !in) - in = e; - else if (intr && !status) - status = e; - } else { - if (!out) - out = e; - } - } - if (in && out) - break; - } - if (!alt || !in || !out) - return -EINVAL; - - dev->pipe_in = usb_rcvbulkpipe(dev->udev, - in->desc.bEndpointAddress & - USB_ENDPOINT_NUMBER_MASK); - dev->pipe_out = usb_sndbulkpipe(dev->udev, - out->desc.bEndpointAddress & - USB_ENDPOINT_NUMBER_MASK); - dev->ep_intr = status; - - return 0; -} - static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf) { struct lan78xx_priv *pdata = NULL; int ret; int i; - ret = lan78xx_get_endpoints(dev, intf); - if (ret) { - netdev_warn(dev->net, "lan78xx_get_endpoints failed: %d\n", - ret); - return ret; - } - dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL); pdata = (struct lan78xx_priv *)(dev->data[0]); @@ -3700,6 +3630,7 @@ static void lan78xx_stat_monitor(struct timer_list *t) static int lan78xx_probe(struct usb_interface *intf, const struct usb_device_id *id) { + struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr; struct lan78xx_net *dev; struct net_device *netdev; struct usb_device *udev; @@ -3748,6 +3679,34 
@@ static int lan78xx_probe(struct usb_interface *intf, mutex_init(&dev->stats.access_lock); + if (intf->cur_altsetting->desc.bNumEndpoints < 3) { + ret = -ENODEV; + goto out2; + } + + dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE); + ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in); + if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) { + ret = -ENODEV; + goto out2; + } + + dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE); + ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out); + if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) { + ret = -ENODEV; + goto out2; + } + + ep_intr = &intf->cur_altsetting->endpoint[2]; + if (!usb_endpoint_is_int_in(&ep_intr->desc)) { + ret = -ENODEV; + goto out2; + } + + dev->pipe_intr = usb_rcvintpipe(dev->udev, + usb_endpoint_num(&ep_intr->desc)); + ret = lan78xx_bind(dev, intf); if (ret < 0) goto out2; @@ -3759,18 +3718,7 @@ static int lan78xx_probe(struct usb_interface *intf, netdev->max_mtu = MAX_SINGLE_PACKET_SIZE; netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER); - dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0; - dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1; - dev->ep_intr = (intf->cur_altsetting)->endpoint + 2; - - dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE); - dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE); - - dev->pipe_intr = usb_rcvintpipe(dev->udev, - dev->ep_intr->desc.bEndpointAddress & - USB_ENDPOINT_NUMBER_MASK); - period = dev->ep_intr->desc.bInterval; - + period = ep_intr->desc.bInterval; maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0); buf = kmalloc(maxp, GFP_KERNEL); if (buf) { @@ -3783,6 +3731,7 @@ static int lan78xx_probe(struct usb_interface *intf, usb_fill_int_urb(dev->urb_intr, dev->udev, dev->pipe_intr, buf, maxp, intr_complete, dev, period); + dev->urb_intr->transfer_flags |= URB_FREE_BUFFER; } } diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 89d85dcb200e..a7c3939264b0 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1376,6 +1376,7 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, for (h = 0; h < FDB_HASH_SIZE; ++h) { struct vxlan_fdb *f; + rcu_read_lock(); hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) { struct vxlan_rdst *rd; @@ -1387,8 +1388,10 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, cb->nlh->nlmsg_seq, RTM_NEWNEIGH, NLM_F_MULTI, NULL); - if (err < 0) + if (err < 0) { + rcu_read_unlock(); goto out; + } skip_nh: *idx += 1; continue; @@ -1403,12 +1406,15 @@ skip_nh: cb->nlh->nlmsg_seq, RTM_NEWNEIGH, NLM_F_MULTI, rd); - if (err < 0) + if (err < 0) { + rcu_read_unlock(); goto out; + } skip: *idx += 1; } } + rcu_read_unlock(); } out: return err; @@ -3070,8 +3076,10 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all) if (!do_all && (f->state & (NUD_PERMANENT | NUD_NOARP))) continue; /* the all_zeros_mac entry is deleted at vxlan_uninit */ - if (!is_zero_ether_addr(f->eth_addr)) - vxlan_fdb_destroy(vxlan, f, true, true); + if (is_zero_ether_addr(f->eth_addr) && + f->vni == vxlan->cfg.vni) + continue; + vxlan_fdb_destroy(vxlan, f, true, true); } spin_unlock_bh(&vxlan->hash_lock[h]); } diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c index c84536b03aa8..f70336bb6f52 100644 --- a/drivers/net/wan/hdlc_x25.c +++ b/drivers/net/wan/hdlc_x25.c @@ -71,8 +71,10 @@ static int x25_data_indication(struct net_device *dev, struct sk_buff *skb) { unsigned char *ptr; - if (skb_cow(skb, 1)) + if (skb_cow(skb, 1)) { + kfree_skb(skb); return 
NET_RX_DROP; + } skb_push(skb, 1); skb_reset_network_header(skb); diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index 284832314f31..b2868433718f 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -128,10 +128,12 @@ static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb) { unsigned char *ptr; - skb_push(skb, 1); - - if (skb_cow(skb, 1)) + if (skb_cow(skb, 1)) { + kfree_skb(skb); return NET_RX_DROP; + } + + skb_push(skb, 1); ptr = skb->data; *ptr = X25_IFACE_DATA; diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c index 69773d228ec1..84640a0c13f3 100644 --- a/drivers/net/wan/x25_asy.c +++ b/drivers/net/wan/x25_asy.c @@ -183,7 +183,7 @@ static inline void x25_asy_unlock(struct x25_asy *sl) netif_wake_queue(sl->dev); } -/* Send one completely decapsulated IP datagram to the IP layer. */ +/* Send an LAPB frame to the LAPB module to process. */ static void x25_asy_bump(struct x25_asy *sl) { @@ -195,13 +195,12 @@ static void x25_asy_bump(struct x25_asy *sl) count = sl->rcount; dev->stats.rx_bytes += count; - skb = dev_alloc_skb(count+1); + skb = dev_alloc_skb(count); if (skb == NULL) { netdev_warn(sl->dev, "memory squeeze, dropping packet\n"); dev->stats.rx_dropped++; return; } - skb_push(skb, 1); /* LAPB internal control */ skb_put_data(skb, sl->rbuff, count); skb->protocol = x25_type_trans(skb, sl->dev); err = lapb_data_received(skb->dev, skb); @@ -209,7 +208,6 @@ static void x25_asy_bump(struct x25_asy *sl) kfree_skb(skb); printk(KERN_DEBUG "x25_asy: data received err - %d\n", err); } else { - netif_rx(skb); dev->stats.rx_packets++; } } @@ -356,12 +354,21 @@ static netdev_tx_t x25_asy_xmit(struct sk_buff *skb, */ /* - * Called when I frame data arrives. We did the work above - throw it - * at the net layer. + * Called when I frame data arrives. We add a pseudo header for upper + * layers and pass the frame up.
*/ static int x25_asy_data_indication(struct net_device *dev, struct sk_buff *skb) { + if (skb_cow(skb, 1)) { + kfree_skb(skb); + return NET_RX_DROP; + } + skb_push(skb, 1); + skb->data[0] = X25_IFACE_DATA; + + skb->protocol = x25_type_trans(skb, dev); + return netif_rx(skb); } @@ -657,7 +664,7 @@ static void x25_asy_unesc(struct x25_asy *sl, unsigned char s) switch (s) { case X25_END: if (!test_and_clear_bit(SLF_ERROR, &sl->flags) && - sl->rcount > 2) + sl->rcount >= 2) x25_asy_bump(sl); clear_bit(SLF_ESCAPE, &sl->flags); sl->rcount = 0; diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c index 342a7e58018a..05a61975c83f 100644 --- a/drivers/net/wireless/ath/ath10k/ahb.c +++ b/drivers/net/wireless/ath/ath10k/ahb.c @@ -820,7 +820,7 @@ err_free_irq: ath10k_ahb_release_irq_legacy(ar); err_free_pipes: - ath10k_pci_free_pipes(ar); + ath10k_pci_release_resource(ar); err_resource_deinit: ath10k_ahb_resource_deinit(ar); diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 1d941d53fdc9..cfde7791291a 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -3473,6 +3473,28 @@ int ath10k_pci_setup_resource(struct ath10k *ar) timer_setup(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 0); + ar_pci->attr = kmemdup(pci_host_ce_config_wlan, + sizeof(pci_host_ce_config_wlan), + GFP_KERNEL); + if (!ar_pci->attr) + return -ENOMEM; + + ar_pci->pipe_config = kmemdup(pci_target_ce_config_wlan, + sizeof(pci_target_ce_config_wlan), + GFP_KERNEL); + if (!ar_pci->pipe_config) { + ret = -ENOMEM; + goto err_free_attr; + } + + ar_pci->serv_to_pipe = kmemdup(pci_target_service_to_ce_map_wlan, + sizeof(pci_target_service_to_ce_map_wlan), + GFP_KERNEL); + if (!ar_pci->serv_to_pipe) { + ret = -ENOMEM; + goto err_free_pipe_config; + } + if (QCA_REV_6174(ar) || QCA_REV_9377(ar)) ath10k_pci_override_ce_config(ar); @@ -3480,18 +3502,31 @@ int ath10k_pci_setup_resource(struct ath10k *ar) if (ret) { ath10k_err(ar, "failed to allocate copy engine pipes: %d\n", ret); - return ret; + goto err_free_serv_to_pipe; } return 0; + +err_free_serv_to_pipe: + kfree(ar_pci->serv_to_pipe); +err_free_pipe_config: + kfree(ar_pci->pipe_config); +err_free_attr: + kfree(ar_pci->attr); + return ret; } void ath10k_pci_release_resource(struct ath10k *ar) { + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + ath10k_pci_rx_retry_sync(ar); netif_napi_del(&ar->napi); ath10k_pci_ce_deinit(ar); ath10k_pci_free_pipes(ar); + kfree(ar_pci->attr); + kfree(ar_pci->pipe_config); + kfree(ar_pci->serv_to_pipe); } static const struct ath10k_bus_ops ath10k_pci_bus_ops = { @@ -3601,30 +3636,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev, timer_setup(&ar_pci->ps_timer, ath10k_pci_ps_timer, 0); - ar_pci->attr = kmemdup(pci_host_ce_config_wlan, - sizeof(pci_host_ce_config_wlan), - GFP_KERNEL); - if (!ar_pci->attr) { - ret = -ENOMEM; - goto err_free; - } - - ar_pci->pipe_config = kmemdup(pci_target_ce_config_wlan, - sizeof(pci_target_ce_config_wlan), - GFP_KERNEL); - if (!ar_pci->pipe_config) { - ret = -ENOMEM; - goto err_free; - } - - ar_pci->serv_to_pipe = kmemdup(pci_target_service_to_ce_map_wlan, - sizeof(pci_target_service_to_ce_map_wlan), - GFP_KERNEL); - if (!ar_pci->serv_to_pipe) { - ret = -ENOMEM; - goto err_free; - } - ret = ath10k_pci_setup_resource(ar); if (ret) { ath10k_err(ar, "failed to setup resource: %d\n", ret); @@ -3705,10 +3716,9 @@ err_unsupported: err_free_irq: ath10k_pci_free_irq(ar); - 
ath10k_pci_rx_retry_sync(ar); err_deinit_irq: - ath10k_pci_deinit_irq(ar); + ath10k_pci_release_resource(ar); err_sleep: ath10k_pci_sleep_sync(ar); @@ -3720,29 +3730,18 @@ err_free_pipes: err_core_destroy: ath10k_core_destroy(ar); -err_free: - kfree(ar_pci->attr); - kfree(ar_pci->pipe_config); - kfree(ar_pci->serv_to_pipe); - return ret; } static void ath10k_pci_remove(struct pci_dev *pdev) { struct ath10k *ar = pci_get_drvdata(pdev); - struct ath10k_pci *ar_pci; ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n"); if (!ar) return; - ar_pci = ath10k_pci_priv(ar); - - if (!ar_pci) - return; - ath10k_core_unregister(ar); ath10k_pci_free_irq(ar); ath10k_pci_deinit_irq(ar); @@ -3750,9 +3749,6 @@ static void ath10k_pci_remove(struct pci_dev *pdev) ath10k_pci_sleep_sync(ar); ath10k_pci_release(ar); ath10k_core_destroy(ar); - kfree(ar_pci->attr); - kfree(ar_pci->pipe_config); - kfree(ar_pci->serv_to_pipe); } MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table); diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index 4ed21dad6a8e..3f563e02d17d 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c @@ -733,11 +733,13 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb) return; } + rx_buf->skb = nskb; + usb_fill_int_urb(urb, hif_dev->udev, usb_rcvintpipe(hif_dev->udev, USB_REG_IN_PIPE), nskb->data, MAX_REG_IN_BUF_SIZE, - ath9k_hif_usb_reg_in_cb, nskb, 1); + ath9k_hif_usb_reg_in_cb, rx_buf, 1); } resubmit: diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c index 7987a288917b..27116c7d3f4f 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c @@ -271,6 +271,8 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans, { struct iwl_fw_ini_trigger_tlv *trig = (void *)tlv->data; u32 tp = le32_to_cpu(trig->time_point); + struct iwl_ucode_tlv *dup = NULL; + int ret; if (le32_to_cpu(tlv->length) < sizeof(*trig)) return -EINVAL; @@ -283,10 +285,20 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans, return -EINVAL; } - if (!le32_to_cpu(trig->occurrences)) + if (!le32_to_cpu(trig->occurrences)) { + dup = kmemdup(tlv, sizeof(*tlv) + le32_to_cpu(tlv->length), + GFP_KERNEL); + if (!dup) + return -ENOMEM; + trig = (void *)dup->data; trig->occurrences = cpu_to_le32(-1); + tlv = dup; + } + + ret = iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list); + kfree(dup); - return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list); + return ret; } static int (*dbg_tlv_alloc[])(struct iwl_trans *trans, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index fee01cbbd3ac..27977992fd7f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -1189,17 +1189,15 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta) for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES) iwl_mvm_change_queue_tid(mvm, i); + rcu_read_unlock(); + if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) { ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner, alloc_for_sta); - if (ret) { - rcu_read_unlock(); + if (ret) return ret; - } } - rcu_read_unlock(); - return free_queue; } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 65d65c6baf4c..e02bafb8921f 100644 --- 
a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -582,6 +582,8 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { IWL_DEV_INFO(0x30DC, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name), IWL_DEV_INFO(0x31DC, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name), IWL_DEV_INFO(0x31DC, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name), + IWL_DEV_INFO(0xA370, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name), + IWL_DEV_INFO(0xA370, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name), IWL_DEV_INFO(0x271C, 0x0214, iwl9260_2ac_cfg, iwl9260_1_name), diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index dfe625a53c63..3d7db6ffb599 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -301,6 +301,7 @@ struct mt76_hw_cap { #define MT_DRV_TX_ALIGNED4_SKBS BIT(1) #define MT_DRV_SW_RX_AIRTIME BIT(2) #define MT_DRV_RX_DMA_HDR BIT(3) +#define MT_DRV_HW_MGMT_TXQ BIT(4) struct mt76_driver_ops { u32 drv_flags; diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c index 26cb711b465f..83dfa6da4761 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c @@ -642,8 +642,10 @@ mt7603_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class) { struct mt7603_dev *dev = hw->priv; + mutex_lock(&dev->mt76.mutex); dev->coverage_class = max_t(s16, coverage_class, 0); mt7603_mac_set_timing(dev); + mutex_unlock(&dev->mt76.mutex); } static void mt7603_tx(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c index fd3ef483a87c..d06afcf46d67 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c @@ -234,10 +234,11 @@ mt7615_queues_acq(struct seq_file *s, void *data) int i; for (i = 0; i < 16; i++) { - int j, acs = i / 4, index = i % 4; + int j, wmm_idx = i % MT7615_MAX_WMM_SETS; + int acs = i / MT7615_MAX_WMM_SETS; u32 ctrl, val, qlen = 0; - val = mt76_rr(dev, MT_PLE_AC_QEMPTY(acs, index)); + val = mt76_rr(dev, MT_PLE_AC_QEMPTY(acs, wmm_idx)); ctrl = BIT(31) | BIT(15) | (acs << 8); for (j = 0; j < 32; j++) { @@ -245,11 +246,11 @@ mt7615_queues_acq(struct seq_file *s, void *data) continue; mt76_wr(dev, MT_PLE_FL_Q0_CTRL, - ctrl | (j + (index << 5))); + ctrl | (j + (wmm_idx << 5))); qlen += mt76_get_field(dev, MT_PLE_FL_Q3_CTRL, GENMASK(11, 0)); } - seq_printf(s, "AC%d%d: queued=%d\n", acs, index, qlen); + seq_printf(s, "AC%d%d: queued=%d\n", wmm_idx, acs, qlen); } return 0; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c index 5a124610d4af..e5a965df899a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c @@ -36,10 +36,10 @@ static int mt7622_init_tx_queues_multi(struct mt7615_dev *dev) { static const u8 wmm_queue_map[] = { - MT7622_TXQ_AC0, - MT7622_TXQ_AC1, - MT7622_TXQ_AC2, - MT7622_TXQ_AC3, + [IEEE80211_AC_BK] = MT7622_TXQ_AC0, + [IEEE80211_AC_BE] = MT7622_TXQ_AC1, + [IEEE80211_AC_VI] = MT7622_TXQ_AC2, + [IEEE80211_AC_VO] = MT7622_TXQ_AC3, }; int ret; int i; @@ -100,6 +100,7 @@ mt7615_tx_cleanup(struct mt7615_dev *dev) int i; mt76_queue_tx_cleanup(dev, MT_TXQ_MCU, false); + mt76_queue_tx_cleanup(dev, MT_TXQ_PSD, false); if 
(is_mt7615(&dev->mt76)) { mt76_queue_tx_cleanup(dev, MT_TXQ_BE, false); } else { diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c index edac37e7847b..22e4eabe6578 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c @@ -72,8 +72,7 @@ static int mt7615_eeprom_load(struct mt7615_dev *dev, u32 addr) { int ret; - ret = mt76_eeprom_init(&dev->mt76, MT7615_EEPROM_SIZE + - MT7615_EEPROM_EXTRA_DATA); + ret = mt76_eeprom_init(&dev->mt76, MT7615_EEPROM_FULL_SIZE); if (ret < 0) return ret; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h index 40fed7adc58a..a024dee10362 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h @@ -17,7 +17,7 @@ #define MT7615_EEPROM_TXDPD_SIZE 216 #define MT7615_EEPROM_TXDPD_COUNT (44 + 3) -#define MT7615_EEPROM_EXTRA_DATA (MT7615_EEPROM_TXDPD_OFFSET + \ +#define MT7615_EEPROM_FULL_SIZE (MT7615_EEPROM_TXDPD_OFFSET + \ MT7615_EEPROM_TXDPD_COUNT * \ MT7615_EEPROM_TXDPD_SIZE) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c index 9f1c6ca7a665..d97315ec7265 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -526,22 +526,16 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi, fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2; fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4; - if (ieee80211_is_data(fc) || ieee80211_is_bufferable_mmpdu(fc)) { - q_idx = wmm_idx * MT7615_MAX_WMM_SETS + - skb_get_queue_mapping(skb); - p_fmt = is_usb ? MT_TX_TYPE_SF : MT_TX_TYPE_CT; - } else if (beacon) { - if (ext_phy) - q_idx = MT_LMAC_BCN1; - else - q_idx = MT_LMAC_BCN0; + if (beacon) { p_fmt = MT_TX_TYPE_FW; + q_idx = ext_phy ? MT_LMAC_BCN1 : MT_LMAC_BCN0; + } else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) { + p_fmt = is_usb ? MT_TX_TYPE_SF : MT_TX_TYPE_CT; + q_idx = ext_phy ? MT_LMAC_ALTX1 : MT_LMAC_ALTX0; } else { - if (ext_phy) - q_idx = MT_LMAC_ALTX1; - else - q_idx = MT_LMAC_ALTX0; p_fmt = is_usb ? 
MT_TX_TYPE_SF : MT_TX_TYPE_CT; + q_idx = wmm_idx * MT7615_MAX_WMM_SETS + + mt7615_lmac_mapping(dev, skb_get_queue_mapping(skb)); } val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + sz_txd) | diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h index f0d4b29a52a2..81608ab656b8 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h @@ -124,21 +124,6 @@ enum tx_pkt_type { MT_TX_TYPE_FW, }; -enum tx_pkt_queue_idx { - MT_LMAC_AC00, - MT_LMAC_AC01, - MT_LMAC_AC02, - MT_LMAC_AC03, - MT_LMAC_ALTX0 = 0x10, - MT_LMAC_BMC0, - MT_LMAC_BCN0, - MT_LMAC_PSMP0, - MT_LMAC_ALTX1, - MT_LMAC_BMC1, - MT_LMAC_BCN1, - MT_LMAC_PSMP1, -}; - enum tx_port_idx { MT_TX_PORT_IDX_LMAC, MT_TX_PORT_IDX_MCU diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index c26f99b368d9..beaca8127680 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -397,6 +397,7 @@ mt7615_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue, struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; struct mt7615_dev *dev = mt7615_hw_dev(hw); + queue = mt7615_lmac_mapping(dev, queue); queue += mvif->wmm_idx * MT7615_MAX_WMM_SETS; return mt7615_mcu_set_wmm(dev, queue, params); @@ -735,9 +736,12 @@ static void mt7615_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class) { struct mt7615_phy *phy = mt7615_hw_phy(hw); + struct mt7615_dev *dev = phy->dev; + mutex_lock(&dev->mt76.mutex); phy->coverage_class = max_t(s16, coverage_class, 0); mt7615_mac_set_timing(phy); + mutex_unlock(&dev->mt76.mutex); } static int diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c index e670393506f0..2e99845b9c96 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c @@ -146,7 +146,7 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base, static const struct mt76_driver_ops drv_ops = { /* txwi_size = txd size + txp size */ .txwi_size = MT_TXD_SIZE + sizeof(struct mt7615_txp_common), - .drv_flags = MT_DRV_TXWI_NO_FREE, + .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ, .survey_flags = SURVEY_INFO_TIME_TX | SURVEY_INFO_TIME_RX | SURVEY_INFO_TIME_BSS_RX, diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h index d6176d316bee..3e7d51bf42a4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h @@ -282,6 +282,21 @@ struct mt7615_dev { struct list_head wd_head; }; +enum tx_pkt_queue_idx { + MT_LMAC_AC00, + MT_LMAC_AC01, + MT_LMAC_AC02, + MT_LMAC_AC03, + MT_LMAC_ALTX0 = 0x10, + MT_LMAC_BMC0, + MT_LMAC_BCN0, + MT_LMAC_PSMP0, + MT_LMAC_ALTX1, + MT_LMAC_BMC1, + MT_LMAC_BCN1, + MT_LMAC_PSMP1, +}; + enum { HW_BSSID_0 = 0x0, HW_BSSID_1, @@ -447,6 +462,21 @@ static inline u16 mt7615_wtbl_size(struct mt7615_dev *dev) return MT7615_WTBL_SIZE; } +static inline u8 mt7615_lmac_mapping(struct mt7615_dev *dev, u8 ac) +{ + static const u8 lmac_queue_map[] = { + [IEEE80211_AC_BK] = MT_LMAC_AC00, + [IEEE80211_AC_BE] = MT_LMAC_AC01, + [IEEE80211_AC_VI] = MT_LMAC_AC02, + [IEEE80211_AC_VO] = MT_LMAC_AC03, + }; + + if (WARN_ON_ONCE(ac >= ARRAY_SIZE(lmac_queue_map))) + return MT_LMAC_AC01; /* BE */ + + return lmac_queue_map[ac]; +} + void mt7615_dma_reset(struct 
mt7615_dev *dev); void mt7615_scan_work(struct work_struct *work); void mt7615_roc_work(struct work_struct *work); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c index a50077eb24d7..5be6704770ad 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c @@ -270,7 +270,7 @@ static int mt7663u_probe(struct usb_interface *usb_intf, { static const struct mt76_driver_ops drv_ops = { .txwi_size = MT_USB_TXD_SIZE, - .drv_flags = MT_DRV_RX_DMA_HDR, + .drv_flags = MT_DRV_RX_DMA_HDR | MT_DRV_HW_MGMT_TXQ, .tx_prepare_skb = mt7663u_tx_prepare_skb, .tx_complete_skb = mt7663u_tx_complete_skb, .tx_status_data = mt7663u_tx_status_data, @@ -329,25 +329,26 @@ static int mt7663u_probe(struct usb_interface *usb_intf, if (!mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_PWR_ON, FW_STATE_PWR_ON << 1, 500)) { dev_err(dev->mt76.dev, "Timeout for power on\n"); - return -EIO; + ret = -EIO; + goto error; } alloc_queues: ret = mt76u_alloc_mcu_queue(&dev->mt76); if (ret) - goto error; + goto error_free_q; ret = mt76u_alloc_queues(&dev->mt76); if (ret) - goto error; + goto error_free_q; ret = mt7663u_register_device(dev); if (ret) - goto error_freeq; + goto error_free_q; return 0; -error_freeq: +error_free_q: mt76u_queues_deinit(&dev->mt76); error: mt76u_deinit(&dev->mt76); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c index cbbe986655fe..5fda6e7b120c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c @@ -456,8 +456,9 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev) tasklet_disable(&dev->mt76.tx_tasklet); napi_disable(&dev->mt76.tx_napi); - for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++) + mt76_for_each_q_rx(&dev->mt76, i) { napi_disable(&dev->mt76.napi[i]); + } mutex_lock(&dev->mt76.mutex); @@ -515,7 +516,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev) tasklet_enable(&dev->mt76.pre_tbtt_tasklet); - for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++) { + mt76_for_each_q_rx(&dev->mt76, i) { napi_enable(&dev->mt76.napi[i]); napi_schedule(&dev->mt76.napi[i]); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c index 0575c259f245..05b5650c56c8 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c @@ -716,9 +716,12 @@ static void mt7915_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class) { struct mt7915_phy *phy = mt7915_hw_phy(hw); + struct mt7915_dev *dev = phy->dev; + mutex_lock(&dev->mt76.mutex); phy->coverage_class = max_t(s16, coverage_class, 0); mt7915_mac_set_timing(phy); + mutex_unlock(&dev->mt76.mutex); } static int diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index fca38ea2441f..f10c98aa883c 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -264,6 +264,13 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta, skb_set_queue_mapping(skb, qid); } + if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) && + !ieee80211_is_data(hdr->frame_control) && + !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) { + qid = MT_TXQ_PSD; + skb_set_queue_mapping(skb, qid); + } + if (!(wcid->tx_info & MT_WCID_TX_INFO_SET)) ieee80211_get_tx_rates(info->control.vif, sta, skb, info->control.rates, 1); diff --git 
a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c index fb97ea25b4d4..87382b2f7443 100644 --- a/drivers/net/wireless/mediatek/mt76/usb.c +++ b/drivers/net/wireless/mediatek/mt76/usb.c @@ -1010,17 +1010,18 @@ static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q) static u8 mt76u_ac_to_hwq(struct mt76_dev *dev, u8 ac) { if (mt76_chip(dev) == 0x7663) { - static const u8 wmm_queue_map[] = { - [IEEE80211_AC_VO] = 0, - [IEEE80211_AC_VI] = 1, - [IEEE80211_AC_BE] = 2, - [IEEE80211_AC_BK] = 4, + static const u8 lmac_queue_map[] = { + /* ac to lmac mapping */ + [IEEE80211_AC_BK] = 0, + [IEEE80211_AC_BE] = 1, + [IEEE80211_AC_VI] = 2, + [IEEE80211_AC_VO] = 4, }; - if (WARN_ON(ac >= ARRAY_SIZE(wmm_queue_map))) - return 2; /* BE */ + if (WARN_ON(ac >= ARRAY_SIZE(lmac_queue_map))) + return 1; /* BE */ - return wmm_queue_map[ac]; + return lmac_queue_map[ac]; } return mt76_ac_to_hwq(ac); @@ -1066,11 +1067,16 @@ static int mt76u_alloc_tx(struct mt76_dev *dev) static void mt76u_free_tx(struct mt76_dev *dev) { - struct mt76_queue *q; - int i, j; + int i; for (i = 0; i < IEEE80211_NUM_ACS; i++) { + struct mt76_queue *q; + int j; + q = dev->q_tx[i].q; + if (!q) + continue; + for (j = 0; j < q->ndesc; j++) usb_free_urb(q->entry[j].urb); } @@ -1078,17 +1084,22 @@ static void mt76u_free_tx(struct mt76_dev *dev) void mt76u_stop_tx(struct mt76_dev *dev) { - struct mt76_queue_entry entry; - struct mt76_queue *q; - int i, j, ret; + int ret; ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(&dev->phy), HZ / 5); if (!ret) { + struct mt76_queue_entry entry; + struct mt76_queue *q; + int i, j; + dev_err(dev->dev, "timed out waiting for pending tx\n"); for (i = 0; i < IEEE80211_NUM_ACS; i++) { q = dev->q_tx[i].q; + if (!q) + continue; + for (j = 0; j < q->ndesc; j++) usb_kill_urb(q->entry[j].urb); } @@ -1100,6 +1111,8 @@ void mt76u_stop_tx(struct mt76_dev *dev) */ for (i = 0; i < IEEE80211_NUM_ACS; i++) { q = dev->q_tx[i].q; + if (!q) + continue; /* Assure we are in sync with killed tasklet. 
*/ spin_lock_bh(&q->lock); diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 482c6c8b0fb7..88280057e032 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -63,6 +63,8 @@ module_param_named(max_queues, xennet_max_queues, uint, 0644); MODULE_PARM_DESC(max_queues, "Maximum number of queues per virtual interface"); +#define XENNET_TIMEOUT (5 * HZ) + static const struct ethtool_ops xennet_ethtool_ops; struct netfront_cb { @@ -1334,12 +1336,15 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev) netif_carrier_off(netdev); - xenbus_switch_state(dev, XenbusStateInitialising); - wait_event(module_wq, - xenbus_read_driver_state(dev->otherend) != - XenbusStateClosed && - xenbus_read_driver_state(dev->otherend) != - XenbusStateUnknown); + do { + xenbus_switch_state(dev, XenbusStateInitialising); + err = wait_event_timeout(module_wq, + xenbus_read_driver_state(dev->otherend) != + XenbusStateClosed && + xenbus_read_driver_state(dev->otherend) != + XenbusStateUnknown, XENNET_TIMEOUT); + } while (!err); + return netdev; exit: @@ -2139,28 +2144,43 @@ static const struct attribute_group xennet_dev_group = { }; #endif /* CONFIG_SYSFS */ -static int xennet_remove(struct xenbus_device *dev) +static void xennet_bus_close(struct xenbus_device *dev) { - struct netfront_info *info = dev_get_drvdata(&dev->dev); - - dev_dbg(&dev->dev, "%s\n", dev->nodename); + int ret; - if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) { + if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed) + return; + do { xenbus_switch_state(dev, XenbusStateClosing); - wait_event(module_wq, - xenbus_read_driver_state(dev->otherend) == - XenbusStateClosing || - xenbus_read_driver_state(dev->otherend) == - XenbusStateUnknown); + ret = wait_event_timeout(module_wq, + xenbus_read_driver_state(dev->otherend) == + XenbusStateClosing || + xenbus_read_driver_state(dev->otherend) == + XenbusStateClosed || + xenbus_read_driver_state(dev->otherend) == + XenbusStateUnknown, + XENNET_TIMEOUT); + } while (!ret); + + if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed) + return; + do { xenbus_switch_state(dev, XenbusStateClosed); - wait_event(module_wq, - xenbus_read_driver_state(dev->otherend) == - XenbusStateClosed || - xenbus_read_driver_state(dev->otherend) == - XenbusStateUnknown); - } + ret = wait_event_timeout(module_wq, + xenbus_read_driver_state(dev->otherend) == + XenbusStateClosed || + xenbus_read_driver_state(dev->otherend) == + XenbusStateUnknown, + XENNET_TIMEOUT); + } while (!ret); +} + +static int xennet_remove(struct xenbus_device *dev) +{ + struct netfront_info *info = dev_get_drvdata(&dev->dev); + xennet_bus_close(dev); xennet_disconnect_backend(info); if (info->netdev->reg_state == NETREG_REGISTERED) diff --git a/drivers/nfc/s3fwrn5/core.c b/drivers/nfc/s3fwrn5/core.c index 91d4d5b28a7d..ba6c486d6465 100644 --- a/drivers/nfc/s3fwrn5/core.c +++ b/drivers/nfc/s3fwrn5/core.c @@ -198,6 +198,7 @@ int s3fwrn5_recv_frame(struct nci_dev *ndev, struct sk_buff *skb, case S3FWRN5_MODE_FW: return s3fwrn5_fw_recv_frame(ndev, skb); default: + kfree_skb(skb); return -ENODEV; } } diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index add040168e67..4ee2330c603e 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1102,6 +1102,9 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid, int pos; int len; + if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST) + return 0; + c.identify.opcode = 
nvme_admin_identify; c.identify.nsid = cpu_to_le32(nsid); c.identify.cns = NVME_ID_CNS_NS_DESC_LIST; @@ -1115,18 +1118,6 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid, if (status) { dev_warn(ctrl->device, "Identify Descriptors failed (%d)\n", status); - /* - * Don't treat non-retryable errors as fatal, as we potentially - * already have a NGUID or EUI-64. If we failed with DNR set, - * we want to silently ignore the error as we can still - * identify the device, but if the status has DNR set, we want - * to propagate the error back specifically for the disk - * revalidation flow to make sure we don't abandon the - * device just because of a temporal retry-able error (such - * as path of transport errors). - */ - if (status > 0 && (status & NVME_SC_DNR)) - status = 0; goto free_data; } diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 1de3f9b827aa..09ffc3246f60 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -129,6 +129,13 @@ enum nvme_quirks { * Don't change the value of the temperature threshold feature */ NVME_QUIRK_NO_TEMP_THRESH_CHANGE = (1 << 14), + + /* + * The controller doesn't handle the Identify Namespace + * Identification Descriptor list subcommand despite claiming + * NVMe 1.3 compliance. + */ + NVME_QUIRK_NO_NS_DESC_LIST = (1 << 15), }; /* diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index b1d18f0633c7..d4b1ff747123 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -3099,6 +3099,8 @@ static const struct pci_device_id nvme_id_table[] = { { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ .driver_data = NVME_QUIRK_IDENTIFY_CNS | NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */ + .driver_data = NVME_QUIRK_NO_NS_DESC_LIST, }, { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */ @@ -3122,6 +3124,8 @@ static const struct pci_device_id nvme_id_table[] = { { PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS | NVME_QUIRK_IGNORE_DEV_SUBNQN, }, + { PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001), .driver_data = NVME_QUIRK_SINGLE_VECTOR }, diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 79ef2b8e2b3c..f3a91818167b 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -1382,6 +1382,9 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, if (nctrl->opts->tos >= 0) ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos); + /* Set 10 seconds timeout for icresp recvmsg */ + queue->sock->sk->sk_rcvtimeo = 10 * HZ; + queue->sock->sk->sk_allocation = GFP_ATOMIC; nvme_tcp_set_queue_io_cpu(queue); queue->request = NULL; diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index ce096272f52b..c9338f914a0e 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -4638,8 +4638,7 @@ static int pci_pm_reset(struct pci_dev *dev, int probe) * pcie_wait_for_link_delay - Wait until link is active or inactive * @pdev: Bridge device * @active: waiting for active or inactive? - * @delay: Delay to wait after link has become active (in ms). Specify %0 - * for no delay. + * @delay: Delay to wait after link has become active (in ms) * * Use this to wait till link becomes active or inactive. 
*/ @@ -4680,7 +4679,7 @@ static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, msleep(10); timeout -= 10; } - if (active && ret && delay) + if (active && ret) msleep(delay); else if (ret != active) pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n", @@ -4801,28 +4800,17 @@ void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev) if (!pcie_downstream_port(dev)) return; - /* - * Per PCIe r5.0, sec 6.6.1, for downstream ports that support - * speeds > 5 GT/s, we must wait for link training to complete - * before the mandatory delay. - * - * We can only tell when link training completes via DLL Link - * Active, which is required for downstream ports that support - * speeds > 5 GT/s (sec 7.5.3.6). Unfortunately some common - * devices do not implement Link Active reporting even when it's - * required, so we'll check for that directly instead of checking - * the supported link speed. We assume devices without Link Active - * reporting can train in 100 ms regardless of speed. - */ - if (dev->link_active_reporting) { - pci_dbg(dev, "waiting for link to train\n"); - if (!pcie_wait_for_link_delay(dev, true, 0)) { + if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) { + pci_dbg(dev, "waiting %d ms for downstream link\n", delay); + msleep(delay); + } else { + pci_dbg(dev, "waiting %d ms for downstream link, after activation\n", + delay); + if (!pcie_wait_for_link_delay(dev, true, delay)) { /* Did not train, no need to wait any further */ return; } } - pci_dbg(child, "waiting %d ms to become accessible\n", delay); - msleep(delay); if (!pci_device_is_present(child)) { pci_dbg(child, "waiting additional %d ms to become accessible\n", delay); diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 812bfc32ecb8..2ea61abd5830 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -2330,6 +2330,19 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s); +static void quirk_disable_aspm_l0s_l1(struct pci_dev *dev) +{ + pci_info(dev, "Disabling ASPM L0s/L1\n"); + pci_disable_link_state(dev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); +} + +/* + * ASM1083/1085 PCIe-PCI bridge devices cause AER timeout errors on the + * upstream PCIe root port when ASPM is enabled. At least L0s mode is affected; + * disable both L0s and L1 for now to be safe. 
+ */ +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x1080, quirk_disable_aspm_l0s_l1); + /* * Some Pericom PCIe-to-PCI bridges in reverse mode need the PCIe Retrain * Link bit cleared after starting the link retrain process to allow this diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig index ff1ee159dca2..f8ff30cdafa6 100644 --- a/drivers/pinctrl/qcom/Kconfig +++ b/drivers/pinctrl/qcom/Kconfig @@ -7,6 +7,8 @@ config PINCTRL_MSM select PINCONF select GENERIC_PINCONF select GPIOLIB_IRQCHIP + select IRQ_DOMAIN_HIERARCHY + select IRQ_FASTEOI_HIERARCHY_HANDLERS config PINCTRL_APQ8064 tristate "Qualcomm APQ8064 pin controller driver" diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index 83b7d64bc4c1..c322f30a2064 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c @@ -832,6 +832,52 @@ static void msm_gpio_irq_unmask(struct irq_data *d) msm_gpio_irq_clear_unmask(d, false); } +/** + * msm_gpio_update_dual_edge_parent() - Prime next edge for IRQs handled by parent. + * @d: The irq data. + * + * This is much like msm_gpio_update_dual_edge_pos() but for IRQs that are + * normally handled by the parent irqchip. The logic here is slightly + * different due to what's easy to do with our parent, but in principle it's + * the same. + */ +static void msm_gpio_update_dual_edge_parent(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct msm_pinctrl *pctrl = gpiochip_get_data(gc); + const struct msm_pingroup *g = &pctrl->soc->groups[d->hwirq]; + int loop_limit = 100; + unsigned int val; + unsigned int type; + + /* Read the value and make a guess about what edge we need to catch */ + val = msm_readl_io(pctrl, g) & BIT(g->in_bit); + type = val ? IRQ_TYPE_EDGE_FALLING : IRQ_TYPE_EDGE_RISING; + + do { + /* Set the parent to catch the next edge */ + irq_chip_set_type_parent(d, type); + + /* + * Possibly the line changed between when we last read "val" + * (and decided what edge we needed) and when we set the edge. + * If the value didn't change (or changed and then changed + * back) then we're done. 
+ */ + val = msm_readl_io(pctrl, g) & BIT(g->in_bit); + if (type == IRQ_TYPE_EDGE_RISING) { + if (!val) + return; + type = IRQ_TYPE_EDGE_FALLING; + } else if (type == IRQ_TYPE_EDGE_FALLING) { + if (val) + return; + type = IRQ_TYPE_EDGE_RISING; + } + } while (loop_limit-- > 0); + dev_warn_once(pctrl->dev, "dual-edge irq failed to stabilize\n"); +} + static void msm_gpio_irq_ack(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); @@ -840,8 +886,11 @@ static void msm_gpio_irq_ack(struct irq_data *d) unsigned long flags; u32 val; - if (test_bit(d->hwirq, pctrl->skip_wake_irqs)) + if (test_bit(d->hwirq, pctrl->skip_wake_irqs)) { + if (test_bit(d->hwirq, pctrl->dual_edge_irqs)) + msm_gpio_update_dual_edge_parent(d); return; + } g = &pctrl->soc->groups[d->hwirq]; @@ -860,6 +909,17 @@ static void msm_gpio_irq_ack(struct irq_data *d) raw_spin_unlock_irqrestore(&pctrl->lock, flags); } +static bool msm_gpio_needs_dual_edge_parent_workaround(struct irq_data *d, + unsigned int type) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct msm_pinctrl *pctrl = gpiochip_get_data(gc); + + return type == IRQ_TYPE_EDGE_BOTH && + pctrl->soc->wakeirq_dual_edge_errata && d->parent_data && + test_bit(d->hwirq, pctrl->skip_wake_irqs); +} + static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); @@ -868,11 +928,21 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type) unsigned long flags; u32 val; + if (msm_gpio_needs_dual_edge_parent_workaround(d, type)) { + set_bit(d->hwirq, pctrl->dual_edge_irqs); + irq_set_handler_locked(d, handle_fasteoi_ack_irq); + msm_gpio_update_dual_edge_parent(d); + return 0; + } + if (d->parent_data) irq_chip_set_type_parent(d, type); - if (test_bit(d->hwirq, pctrl->skip_wake_irqs)) + if (test_bit(d->hwirq, pctrl->skip_wake_irqs)) { + clear_bit(d->hwirq, pctrl->dual_edge_irqs); + irq_set_handler_locked(d, handle_fasteoi_irq); return 0; + } g = &pctrl->soc->groups[d->hwirq]; diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h index 9452da18a78b..7486fe08eb9b 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.h +++ b/drivers/pinctrl/qcom/pinctrl-msm.h @@ -113,6 +113,9 @@ struct msm_gpio_wakeirq_map { * @pull_no_keeper: The SoC does not support keeper bias. * @wakeirq_map: The map of wakeup capable GPIOs and the pin at PDC/MPM * @nwakeirq_map: The number of entries in @wakeirq_map + * @wakeirq_dual_edge_errata: If true then GPIOs using the wakeirq_map need + * to be aware that their parent can't handle dual + * edge interrupts. 
*/ struct msm_pinctrl_soc_data { const struct pinctrl_pin_desc *pins; @@ -128,6 +131,7 @@ struct msm_pinctrl_soc_data { const int *reserved_gpios; const struct msm_gpio_wakeirq_map *wakeirq_map; unsigned int nwakeirq_map; + bool wakeirq_dual_edge_errata; }; extern const struct dev_pm_ops msm_pinctrl_dev_pm_ops; diff --git a/drivers/pinctrl/qcom/pinctrl-sc7180.c b/drivers/pinctrl/qcom/pinctrl-sc7180.c index 1b6465a882f2..1d9acad3c1ce 100644 --- a/drivers/pinctrl/qcom/pinctrl-sc7180.c +++ b/drivers/pinctrl/qcom/pinctrl-sc7180.c @@ -1147,6 +1147,7 @@ static const struct msm_pinctrl_soc_data sc7180_pinctrl = { .ntiles = ARRAY_SIZE(sc7180_tiles), .wakeirq_map = sc7180_pdc_map, .nwakeirq_map = ARRAY_SIZE(sc7180_pdc_map), + .wakeirq_dual_edge_errata = true, }; static int sc7180_pinctrl_probe(struct platform_device *pdev) diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 0ba7a65e7c8d..06056e9ec333 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -547,6 +547,15 @@ static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd) scsi_uninit_cmd(cmd); } +static void scsi_run_queue_async(struct scsi_device *sdev) +{ + if (scsi_target(sdev)->single_lun || + !list_empty(&sdev->host->starved_list)) + kblockd_schedule_work(&sdev->requeue_work); + else + blk_mq_run_hw_queues(sdev->request_queue, true); +} + /* Returns false when no more bytes to process, true if there are more */ static bool scsi_end_request(struct request *req, blk_status_t error, unsigned int bytes) @@ -591,11 +600,7 @@ static bool scsi_end_request(struct request *req, blk_status_t error, __blk_mq_end_request(req, error); - if (scsi_target(sdev)->single_lun || - !list_empty(&sdev->host->starved_list)) - kblockd_schedule_work(&sdev->requeue_work); - else - blk_mq_run_hw_queues(q, true); + scsi_run_queue_async(sdev); percpu_ref_put(&q->q_usage_counter); return false; @@ -1702,6 +1707,7 @@ out_put_budget: */ if (req->rq_flags & RQF_DONTPREP) scsi_mq_uninit_cmd(cmd); + scsi_run_queue_async(sdev); break; } return ret; diff --git a/drivers/staging/comedi/drivers/addi_apci_1032.c b/drivers/staging/comedi/drivers/addi_apci_1032.c index 560649be9d13..e035c9f757a1 100644 --- a/drivers/staging/comedi/drivers/addi_apci_1032.c +++ b/drivers/staging/comedi/drivers/addi_apci_1032.c @@ -106,14 +106,22 @@ static int apci1032_cos_insn_config(struct comedi_device *dev, unsigned int *data) { struct apci1032_private *devpriv = dev->private; - unsigned int shift, oldmask; + unsigned int shift, oldmask, himask, lomask; switch (data[0]) { case INSN_CONFIG_DIGITAL_TRIG: if (data[1] != 0) return -EINVAL; shift = data[3]; - oldmask = (1U << shift) - 1; + if (shift < 32) { + oldmask = (1U << shift) - 1; + himask = data[4] << shift; + lomask = data[5] << shift; + } else { + oldmask = 0xffffffffu; + himask = 0; + lomask = 0; + } switch (data[2]) { case COMEDI_DIGITAL_TRIG_DISABLE: devpriv->ctrl = 0; @@ -136,8 +144,8 @@ static int apci1032_cos_insn_config(struct comedi_device *dev, devpriv->mode2 &= oldmask; } /* configure specified channels */ - devpriv->mode1 |= data[4] << shift; - devpriv->mode2 |= data[5] << shift; + devpriv->mode1 |= himask; + devpriv->mode2 |= lomask; break; case COMEDI_DIGITAL_TRIG_ENABLE_LEVELS: if (devpriv->ctrl != (APCI1032_CTRL_INT_ENA | @@ -154,8 +162,8 @@ static int apci1032_cos_insn_config(struct comedi_device *dev, devpriv->mode2 &= oldmask; } /* configure specified channels */ - devpriv->mode1 |= data[4] << shift; - devpriv->mode2 |= data[5] << shift; + devpriv->mode1 |= himask; + devpriv->mode2 |= lomask; 
break; default: return -EINVAL; diff --git a/drivers/staging/comedi/drivers/addi_apci_1500.c b/drivers/staging/comedi/drivers/addi_apci_1500.c index 689acd69a1b9..816dd25b9d0e 100644 --- a/drivers/staging/comedi/drivers/addi_apci_1500.c +++ b/drivers/staging/comedi/drivers/addi_apci_1500.c @@ -452,13 +452,14 @@ static int apci1500_di_cfg_trig(struct comedi_device *dev, struct apci1500_private *devpriv = dev->private; unsigned int trig = data[1]; unsigned int shift = data[3]; - unsigned int hi_mask = data[4] << shift; - unsigned int lo_mask = data[5] << shift; - unsigned int chan_mask = hi_mask | lo_mask; - unsigned int old_mask = (1 << shift) - 1; + unsigned int hi_mask; + unsigned int lo_mask; + unsigned int chan_mask; + unsigned int old_mask; unsigned int pm; unsigned int pt; unsigned int pp; + unsigned int invalid_chan; if (trig > 1) { dev_dbg(dev->class_dev, @@ -466,7 +467,20 @@ static int apci1500_di_cfg_trig(struct comedi_device *dev, return -EINVAL; } - if (chan_mask > 0xffff) { + if (shift <= 16) { + hi_mask = data[4] << shift; + lo_mask = data[5] << shift; + old_mask = (1U << shift) - 1; + invalid_chan = (data[4] | data[5]) >> (16 - shift); + } else { + hi_mask = 0; + lo_mask = 0; + old_mask = 0xffff; + invalid_chan = data[4] | data[5]; + } + chan_mask = hi_mask | lo_mask; + + if (invalid_chan) { dev_dbg(dev->class_dev, "invalid digital trigger channel\n"); return -EINVAL; } diff --git a/drivers/staging/comedi/drivers/addi_apci_1564.c b/drivers/staging/comedi/drivers/addi_apci_1564.c index 10501fe6bb25..1268ba34be5f 100644 --- a/drivers/staging/comedi/drivers/addi_apci_1564.c +++ b/drivers/staging/comedi/drivers/addi_apci_1564.c @@ -331,14 +331,22 @@ static int apci1564_cos_insn_config(struct comedi_device *dev, unsigned int *data) { struct apci1564_private *devpriv = dev->private; - unsigned int shift, oldmask; + unsigned int shift, oldmask, himask, lomask; switch (data[0]) { case INSN_CONFIG_DIGITAL_TRIG: if (data[1] != 0) return -EINVAL; shift = data[3]; - oldmask = (1U << shift) - 1; + if (shift < 32) { + oldmask = (1U << shift) - 1; + himask = data[4] << shift; + lomask = data[5] << shift; + } else { + oldmask = 0xffffffffu; + himask = 0; + lomask = 0; + } switch (data[2]) { case COMEDI_DIGITAL_TRIG_DISABLE: devpriv->ctrl = 0; @@ -362,8 +370,8 @@ static int apci1564_cos_insn_config(struct comedi_device *dev, devpriv->mode2 &= oldmask; } /* configure specified channels */ - devpriv->mode1 |= data[4] << shift; - devpriv->mode2 |= data[5] << shift; + devpriv->mode1 |= himask; + devpriv->mode2 |= lomask; break; case COMEDI_DIGITAL_TRIG_ENABLE_LEVELS: if (devpriv->ctrl != (APCI1564_DI_IRQ_ENA | @@ -380,8 +388,8 @@ static int apci1564_cos_insn_config(struct comedi_device *dev, devpriv->mode2 &= oldmask; } /* configure specified channels */ - devpriv->mode1 |= data[4] << shift; - devpriv->mode2 |= data[5] << shift; + devpriv->mode1 |= himask; + devpriv->mode2 |= lomask; break; default: return -EINVAL; diff --git a/drivers/staging/comedi/drivers/ni_6527.c b/drivers/staging/comedi/drivers/ni_6527.c index 4d1eccb5041d..4518c2680b7c 100644 --- a/drivers/staging/comedi/drivers/ni_6527.c +++ b/drivers/staging/comedi/drivers/ni_6527.c @@ -332,7 +332,7 @@ static int ni6527_intr_insn_config(struct comedi_device *dev, case COMEDI_DIGITAL_TRIG_ENABLE_EDGES: /* check shift amount */ shift = data[3]; - if (shift >= s->n_chan) { + if (shift >= 32) { mask = 0; rising = 0; falling = 0; diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c index 
4689b2170e4f..456603fd26c0 100644 --- a/drivers/staging/wlan-ng/prism2usb.c +++ b/drivers/staging/wlan-ng/prism2usb.c @@ -61,11 +61,25 @@ static int prism2sta_probe_usb(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *dev; - + const struct usb_endpoint_descriptor *epd; + const struct usb_host_interface *iface_desc = interface->cur_altsetting; struct wlandevice *wlandev = NULL; struct hfa384x *hw = NULL; int result = 0; + if (iface_desc->desc.bNumEndpoints != 2) { + result = -ENODEV; + goto failed; + } + + result = -EINVAL; + epd = &iface_desc->endpoint[1].desc; + if (!usb_endpoint_is_bulk_in(epd)) + goto failed; + epd = &iface_desc->endpoint[2].desc; + if (!usb_endpoint_is_bulk_out(epd)) + goto failed; + dev = interface_to_usbdev(interface); wlandev = create_wlan(); if (!wlandev) { diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index fc118f649887..cae61d1ebec5 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c @@ -524,6 +524,7 @@ static void __init serial8250_isa_init_ports(void) */ up->mcr_mask = ~ALPHA_KLUDGE_MCR; up->mcr_force = ALPHA_KLUDGE_MCR; + serial8250_set_defaults(up); } /* chain base port ops to support Remote Supervisor Adapter */ @@ -547,7 +548,6 @@ static void __init serial8250_isa_init_ports(void) port->membase = old_serial_port[i].iomem_base; port->iotype = old_serial_port[i].io_type; port->regshift = old_serial_port[i].iomem_reg_shift; - serial8250_set_defaults(up); port->irqflags |= irqflag; if (serial8250_isa_config != NULL) diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c index ddb6aeb76dc5..04b9af7ed941 100644 --- a/drivers/tty/serial/8250/8250_exar.c +++ b/drivers/tty/serial/8250/8250_exar.c @@ -326,7 +326,17 @@ static void setup_gpio(struct pci_dev *pcidev, u8 __iomem *p) * devices will export them as GPIOs, so we pre-configure them safely * as inputs. */ - u8 dir = pcidev->vendor == PCI_VENDOR_ID_EXAR ? 0xff : 0x00; + + u8 dir = 0x00; + + if ((pcidev->vendor == PCI_VENDOR_ID_EXAR) && + (pcidev->subsystem_vendor != PCI_VENDOR_ID_SEALEVEL)) { + // Configure GPIO as inputs for Commtech adapters + dir = 0xff; + } else { + // Configure GPIO as outputs for SeaLevel adapters + dir = 0x00; + } writeb(0x00, p + UART_EXAR_MPIOINT_7_0); writeb(0x00, p + UART_EXAR_MPIOLVL_7_0); diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c index f839380c2f4c..98b8a3e30733 100644 --- a/drivers/tty/serial/8250/8250_mtk.c +++ b/drivers/tty/serial/8250/8250_mtk.c @@ -306,8 +306,21 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios, } #endif + /* + * Store the requested baud rate before calling the generic 8250 + * set_termios method. Standard 8250 port expects bauds to be + * no higher than (uartclk / 16) so the baud will be clamped if it + * gets out of that bound. Mediatek 8250 port supports speed + * higher than that, therefore we'll get original baud rate back + * after calling the generic set_termios method and recalculate + * the speed later in this method. + */ + baud = tty_termios_baud_rate(termios); + serial8250_do_set_termios(port, termios, old); + tty_termios_encode_baud_rate(termios, baud, baud); + /* * Mediatek UARTs use an extra highspeed register (MTK_UART_HIGHS) * @@ -339,6 +352,11 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios, */ spin_lock_irqsave(&port->lock, flags); + /* + * Update the per-port timeout. 
+ */ + uart_update_timeout(port, termios->c_cflag, baud); + /* set DLAB we have cval saved in up->lcr from the call to the core */ serial_port_out(port, UART_LCR, up->lcr | UART_LCR_DLAB); serial_dl_write(up, quot); diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c index 8de8bac9c6c7..04d1b0807e66 100644 --- a/drivers/tty/serial/serial-tegra.c +++ b/drivers/tty/serial/serial-tegra.c @@ -635,7 +635,7 @@ static void tegra_uart_handle_tx_pio(struct tegra_uart_port *tup) } static void tegra_uart_handle_rx_pio(struct tegra_uart_port *tup, - struct tty_port *tty) + struct tty_port *port) { do { char flag = TTY_NORMAL; @@ -653,16 +653,18 @@ static void tegra_uart_handle_rx_pio(struct tegra_uart_port *tup, ch = (unsigned char) tegra_uart_read(tup, UART_RX); tup->uport.icount.rx++; - if (!uart_handle_sysrq_char(&tup->uport, ch) && tty) - tty_insert_flip_char(tty, ch, flag); + if (uart_handle_sysrq_char(&tup->uport, ch)) + continue; if (tup->uport.ignore_status_mask & UART_LSR_DR) continue; + + tty_insert_flip_char(port, ch, flag); } while (1); } static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup, - struct tty_port *tty, + struct tty_port *port, unsigned int count) { int copied; @@ -672,17 +674,13 @@ static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup, return; tup->uport.icount.rx += count; - if (!tty) { - dev_err(tup->uport.dev, "No tty port\n"); - return; - } if (tup->uport.ignore_status_mask & UART_LSR_DR) return; dma_sync_single_for_cpu(tup->uport.dev, tup->rx_dma_buf_phys, count, DMA_FROM_DEVICE); - copied = tty_insert_flip_string(tty, + copied = tty_insert_flip_string(port, ((unsigned char *)(tup->rx_dma_buf_virt)), count); if (copied != count) { WARN_ON(1); diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c index 672cfa075e28..2833f1418d6d 100644 --- a/drivers/tty/serial/xilinx_uartps.c +++ b/drivers/tty/serial/xilinx_uartps.c @@ -1580,8 +1580,10 @@ static int cdns_uart_probe(struct platform_device *pdev) * If register_console() doesn't assign a value, then the console_port pointer * is cleaned up. 
*/ - if (!console_port) + if (!console_port) { + cdns_uart_console.index = id; console_port = port; + } #endif rc = uart_add_one_port(&cdns_uart_uart_driver, port); @@ -1594,8 +1596,10 @@ static int cdns_uart_probe(struct platform_device *pdev) #ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE /* This is not port which is used for console that's why clean it up */ if (console_port == port && - !(cdns_uart_uart_driver.cons->flags & CON_ENABLED)) + !(cdns_uart_uart_driver.cons->flags & CON_ENABLED)) { console_port = NULL; + cdns_uart_console.index = -1; + } #endif cdns_uart_data->cts_override = of_property_read_bool(pdev->dev.of_node, diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 48a8199f7845..42d8c67a481f 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -1092,10 +1092,19 @@ static const struct tty_port_operations vc_port_ops = { .destruct = vc_port_destruct, }; +/* + * Change # of rows and columns (0 means unchanged/the size of fg_console) + * [this is to be used together with some user program + * like resize that changes the hardware videomode] + */ +#define VC_MAXCOL (32767) +#define VC_MAXROW (32767) + int vc_allocate(unsigned int currcons) /* return 0 on success */ { struct vt_notifier_param param; struct vc_data *vc; + int err; WARN_CONSOLE_UNLOCKED(); @@ -1125,6 +1134,11 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */ if (!*vc->vc_uni_pagedir_loc) con_set_default_unimap(vc); + err = -EINVAL; + if (vc->vc_cols > VC_MAXCOL || vc->vc_rows > VC_MAXROW || + vc->vc_screenbuf_size > KMALLOC_MAX_SIZE || !vc->vc_screenbuf_size) + goto err_free; + err = -ENOMEM; vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_KERNEL); if (!vc->vc_screenbuf) goto err_free; @@ -1143,7 +1157,7 @@ err_free: visual_deinit(vc); kfree(vc); vc_cons[currcons].d = NULL; - return -ENOMEM; + return err; } static inline int resize_screen(struct vc_data *vc, int width, int height, @@ -1158,14 +1172,6 @@ static inline int resize_screen(struct vc_data *vc, int width, int height, return err; } -/* - * Change # of rows and columns (0 means unchanged/the size of fg_console) - * [this is to be used together with some user program - * like resize that changes the hardware videomode] - */ -#define VC_RESIZE_MAXCOL (32767) -#define VC_RESIZE_MAXROW (32767) - /** * vc_do_resize - resizing method for the tty * @tty: tty being resized @@ -1201,7 +1207,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, user = vc->vc_resize_user; vc->vc_resize_user = 0; - if (cols > VC_RESIZE_MAXCOL || lines > VC_RESIZE_MAXROW) + if (cols > VC_MAXCOL || lines > VC_MAXROW) return -EINVAL; new_cols = (cols ? cols : vc->vc_cols); @@ -1212,7 +1218,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, if (new_cols == vc->vc_cols && new_rows == vc->vc_rows) return 0; - if (new_screen_size > KMALLOC_MAX_SIZE) + if (new_screen_size > KMALLOC_MAX_SIZE || !new_screen_size) return -EINVAL; newscreen = kzalloc(new_screen_size, GFP_USER); if (!newscreen) @@ -3393,6 +3399,7 @@ static int __init con_init(void) INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK); tty_port_init(&vc->port); visual_init(vc, currcons, 1); + /* Assuming vc->vc_{cols,rows,screenbuf_size} are sane here. 
*/ vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_NOWAIT); vc_init(vc, vc->vc_rows, vc->vc_cols, currcons || !vc->vc_sw->con_save_screen); diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c index fea555570ad4..45c54d56ecbd 100644 --- a/drivers/usb/host/xhci-mtk-sch.c +++ b/drivers/usb/host/xhci-mtk-sch.c @@ -557,6 +557,10 @@ static bool need_bw_sch(struct usb_host_endpoint *ep, if (is_fs_or_ls(speed) && !has_tt) return false; + /* skip endpoint with zero maxpkt */ + if (usb_endpoint_maxp(&ep->desc) == 0) + return false; + return true; } diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index ef513c2fb843..9234c82e70e4 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -265,6 +265,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == 0x1142) xhci->quirks |= XHCI_TRUST_TX_LENGTH; + if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && + pdev->device == 0x2142) + xhci->quirks |= XHCI_NO_64BIT_SUPPORT; if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI) diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c index 2eaf5c0af80c..ee6bf01775bb 100644 --- a/drivers/usb/host/xhci-tegra.c +++ b/drivers/usb/host/xhci-tegra.c @@ -856,7 +856,7 @@ static int tegra_xusb_init_context(struct tegra_xusb *tegra) if (!tegra->context.ipfs) return -ENOMEM; - tegra->context.fpci = devm_kcalloc(tegra->dev, soc->ipfs.num_offsets, + tegra->context.fpci = devm_kcalloc(tegra->dev, soc->fpci.num_offsets, sizeof(u32), GFP_KERNEL); if (!tegra->context.fpci) return -ENOMEM; diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 6fb4d7ecfa19..b22adf03f584 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -1215,7 +1215,7 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) continue; } - switch (v_req.type) { + switch (vhost32_to_cpu(vq, v_req.type)) { case VIRTIO_SCSI_T_TMF: vc.req = &v_req.tmf; vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req); diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c index ca935c09a261..35ebeeccde4d 100644 --- a/drivers/video/fbdev/core/bitblit.c +++ b/drivers/video/fbdev/core/bitblit.c @@ -216,7 +216,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info, region.color = color; region.rop = ROP_COPY; - if (rw && !bottom_only) { + if ((int) rw > 0 && !bottom_only) { region.dx = info->var.xoffset + rs; region.dy = 0; region.width = rw; @@ -224,7 +224,7 @@ info->fbops->fb_fillrect(info, &region); } - if (bh) { + if ((int) bh > 0) { region.dx = info->var.xoffset; region.dy = info->var.yoffset + bs; region.width = rs; diff --git a/drivers/video/fbdev/core/fbcon_ccw.c b/drivers/video/fbdev/core/fbcon_ccw.c index dfa9a8aa4509..78f3a5621478 100644 --- a/drivers/video/fbdev/core/fbcon_ccw.c +++ b/drivers/video/fbdev/core/fbcon_ccw.c @@ -201,7 +201,7 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info, region.color = color; region.rop = ROP_COPY; - if (rw && !bottom_only) { + if ((int) rw > 0 && !bottom_only) { region.dx = 0; region.dy = info->var.yoffset; region.height = rw; @@ -209,7 +209,7 @@ info->fbops->fb_fillrect(info, &region); } - if (bh) { + if ((int) bh > 0) { region.dx = info->var.xoffset + bs; region.dy = 0; 
region.height = info->var.yres_virtual; diff --git a/drivers/video/fbdev/core/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c index ce08251bfd38..fd098ff17574 100644 --- a/drivers/video/fbdev/core/fbcon_cw.c +++ b/drivers/video/fbdev/core/fbcon_cw.c @@ -184,7 +184,7 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info, region.color = color; region.rop = ROP_COPY; - if (rw && !bottom_only) { + if ((int) rw > 0 && !bottom_only) { region.dx = 0; region.dy = info->var.yoffset + rs; region.height = rw; @@ -192,7 +192,7 @@ info->fbops->fb_fillrect(info, &region); } - if (bh) { + if ((int) bh > 0) { region.dx = info->var.xoffset; region.dy = info->var.yoffset; region.height = info->var.yres; diff --git a/drivers/video/fbdev/core/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c index 1936afc78fec..e165a3fad29a 100644 --- a/drivers/video/fbdev/core/fbcon_ud.c +++ b/drivers/video/fbdev/core/fbcon_ud.c @@ -231,7 +231,7 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info, region.color = color; region.rop = ROP_COPY; - if (rw && !bottom_only) { + if ((int) rw > 0 && !bottom_only) { region.dy = 0; region.dx = info->var.xoffset; region.width = rw; @@ -239,7 +239,7 @@ info->fbops->fb_fillrect(info, &region); } - if (bh) { + if ((int) bh > 0) { region.dy = info->var.yoffset; region.dx = info->var.xoffset; region.height = bh; diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig index 5809e5f5b157..5c92e4a50882 100644 --- a/drivers/virtio/Kconfig +++ b/drivers/virtio/Kconfig @@ -85,7 +85,7 @@ config VIRTIO_MEM depends on VIRTIO depends on MEMORY_HOTPLUG_SPARSE depends on MEMORY_HOTREMOVE - select CONTIG_ALLOC + depends on CONTIG_ALLOC help This driver provides access to virtio-mem paravirtualized memory devices, allowing to hotplug and hotunplug memory. diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 1f157d2f4952..8be02f333b7a 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -578,10 +578,14 @@ static int init_vqs(struct virtio_balloon *vb) static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb) { if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID, - &vb->config_read_bitmap)) + &vb->config_read_bitmap)) { virtio_cread(vb->vdev, struct virtio_balloon_config, free_page_hint_cmd_id, &vb->cmd_id_received_cache); + /* Legacy balloon config space is LE, unlike all other devices. */ + if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1)) + vb->cmd_id_received_cache = le32_to_cpu((__force __le32)vb->cmd_id_received_cache); + } return vb->cmd_id_received_cache; } @@ -974,6 +978,11 @@ static int virtballoon_probe(struct virtio_device *vdev) /* * Let the hypervisor know that we are expecting a * specific value to be written back in balloon pages. + * + * If the PAGE_POISON value was larger than a byte we would + * need to byte swap poison_val here to guarantee it is + * little-endian. However for now it is a single byte so we + * can pass it as-is. 
*/ if (!want_init_on_free()) memset(&poison_val, PAGE_POISON, sizeof(poison_val)); diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index 9d16aaffca9d..627ac0487494 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -641,11 +641,11 @@ static int vm_cmdline_set(const char *device, &vm_cmdline_id, &consumed); /* - * sscanf() must processes at least 2 chunks; also there + * sscanf() must process at least 2 chunks; also there * must be no extra characters after the last chunk, so * str[consumed] must be '\0' */ - if (processed < 2 || str[consumed]) + if (processed < 2 || str[consumed] || irq == 0) return -EINVAL; resources[0].flags = IORESOURCE_MEM; diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index d888e71e66b6..ea10f7bc99ab 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -1461,6 +1461,7 @@ static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans, if (ret < 0 && ret != -ENOENT) { ulist_free(tmp); ulist_free(*roots); + *roots = NULL; return ret; } node = ulist_next(tmp, &uiter); diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index c037ef514b64..613920c17ac1 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -65,11 +65,8 @@ static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags) spin_lock(&fs_info->balance_lock); target = get_restripe_target(fs_info, flags); if (target) { - /* Pick target profile only if it's already available */ - if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) { - spin_unlock(&fs_info->balance_lock); - return extended_to_chunk(target); - } + spin_unlock(&fs_info->balance_lock); + return extended_to_chunk(target); } spin_unlock(&fs_info->balance_lock); @@ -118,12 +115,12 @@ u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags) void btrfs_get_block_group(struct btrfs_block_group *cache) { - atomic_inc(&cache->count); + refcount_inc(&cache->refs); } void btrfs_put_block_group(struct btrfs_block_group *cache) { - if (atomic_dec_and_test(&cache->count)) { + if (refcount_dec_and_test(&cache->refs)) { WARN_ON(cache->pinned > 0); WARN_ON(cache->reserved > 0); @@ -1111,7 +1108,6 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, if (ret < 0) goto out; - mutex_lock(&fs_info->chunk_mutex); spin_lock(&block_group->lock); block_group->removed = 1; /* @@ -1143,8 +1139,6 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, remove_em = (atomic_read(&block_group->frozen) == 0); spin_unlock(&block_group->lock); - mutex_unlock(&fs_info->chunk_mutex); - if (remove_em) { struct extent_map_tree *em_tree; @@ -1532,21 +1526,70 @@ void btrfs_mark_bg_unused(struct btrfs_block_group *bg) spin_unlock(&fs_info->unused_bgs_lock); } +static int read_bg_from_eb(struct btrfs_fs_info *fs_info, struct btrfs_key *key, + struct btrfs_path *path) +{ + struct extent_map_tree *em_tree; + struct extent_map *em; + struct btrfs_block_group_item bg; + struct extent_buffer *leaf; + int slot; + u64 flags; + int ret = 0; + + slot = path->slots[0]; + leaf = path->nodes[0]; + + em_tree = &fs_info->mapping_tree; + read_lock(&em_tree->lock); + em = lookup_extent_mapping(em_tree, key->objectid, key->offset); + read_unlock(&em_tree->lock); + if (!em) { + btrfs_err(fs_info, + "logical %llu len %llu found bg but no related chunk", + key->objectid, key->offset); + return -ENOENT; + } + + if (em->start != key->objectid || em->len != key->offset) { + btrfs_err(fs_info, + "block group %llu len %llu mismatch with chunk %llu len %llu", + 
key->objectid, key->offset, em->start, em->len); + ret = -EUCLEAN; + goto out_free_em; + } + + read_extent_buffer(leaf, &bg, btrfs_item_ptr_offset(leaf, slot), + sizeof(bg)); + flags = btrfs_stack_block_group_flags(&bg) & + BTRFS_BLOCK_GROUP_TYPE_MASK; + + if (flags != (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { + btrfs_err(fs_info, +"block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx", + key->objectid, key->offset, flags, + (BTRFS_BLOCK_GROUP_TYPE_MASK & em->map_lookup->type)); + ret = -EUCLEAN; + } + +out_free_em: + free_extent_map(em); + return ret; +} + static int find_first_block_group(struct btrfs_fs_info *fs_info, struct btrfs_path *path, struct btrfs_key *key) { struct btrfs_root *root = fs_info->extent_root; - int ret = 0; + int ret; struct btrfs_key found_key; struct extent_buffer *leaf; - struct btrfs_block_group_item bg; - u64 flags; int slot; ret = btrfs_search_slot(NULL, root, key, path, 0, 0); if (ret < 0) - goto out; + return ret; while (1) { slot = path->slots[0]; @@ -1563,49 +1606,10 @@ static int find_first_block_group(struct btrfs_fs_info *fs_info, if (found_key.objectid >= key->objectid && found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) { - struct extent_map_tree *em_tree; - struct extent_map *em; - - em_tree = &root->fs_info->mapping_tree; - read_lock(&em_tree->lock); - em = lookup_extent_mapping(em_tree, found_key.objectid, - found_key.offset); - read_unlock(&em_tree->lock); - if (!em) { - btrfs_err(fs_info, - "logical %llu len %llu found bg but no related chunk", - found_key.objectid, found_key.offset); - ret = -ENOENT; - } else if (em->start != found_key.objectid || - em->len != found_key.offset) { - btrfs_err(fs_info, - "block group %llu len %llu mismatch with chunk %llu len %llu", - found_key.objectid, found_key.offset, - em->start, em->len); - ret = -EUCLEAN; - } else { - read_extent_buffer(leaf, &bg, - btrfs_item_ptr_offset(leaf, slot), - sizeof(bg)); - flags = btrfs_stack_block_group_flags(&bg) & - BTRFS_BLOCK_GROUP_TYPE_MASK; - - if (flags != (em->map_lookup->type & - BTRFS_BLOCK_GROUP_TYPE_MASK)) { - btrfs_err(fs_info, -"block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx", - found_key.objectid, - found_key.offset, flags, - (BTRFS_BLOCK_GROUP_TYPE_MASK & - em->map_lookup->type)); - ret = -EUCLEAN; - } else { - ret = 0; - } - } - free_extent_map(em); - goto out; + ret = read_bg_from_eb(fs_info, &found_key, path); + break; } + path->slots[0]++; } out: @@ -1657,19 +1661,12 @@ int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start, return -EIO; map = em->map_lookup; - data_stripe_length = em->len; + data_stripe_length = em->orig_block_len; io_stripe_size = map->stripe_len; - if (map->type & BTRFS_BLOCK_GROUP_RAID10) - data_stripe_length = div_u64(data_stripe_length, - map->num_stripes / map->sub_stripes); - else if (map->type & BTRFS_BLOCK_GROUP_RAID0) - data_stripe_length = div_u64(data_stripe_length, map->num_stripes); - else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { - data_stripe_length = div_u64(data_stripe_length, - nr_data_stripes(map)); + /* For RAID5/6 adjust to a full IO stripe length */ + if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) io_stripe_size = map->stripe_len * nr_data_stripes(map); - } buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS); if (!buf) { @@ -1748,25 +1745,12 @@ static int exclude_super_stripes(struct btrfs_block_group *cache) return ret; while (nr--) { - u64 start, len; - - if (logical[nr] > cache->start + cache->length) - continue; - - if 
(logical[nr] + stripe_len <= cache->start) - continue; - - start = logical[nr]; - if (start < cache->start) { - start = cache->start; - len = (logical[nr] + stripe_len) - start; - } else { - len = min_t(u64, stripe_len, - cache->start + cache->length - start); - } + u64 len = min_t(u64, stripe_len, + cache->start + cache->length - logical[nr]); cache->bytes_super += len; - ret = btrfs_add_excluded_extent(fs_info, start, len); + ret = btrfs_add_excluded_extent(fs_info, logical[nr], + len); if (ret) { kfree(logical); return ret; @@ -1818,7 +1802,7 @@ static struct btrfs_block_group *btrfs_create_block_group_cache( cache->discard_index = BTRFS_DISCARD_INDEX_UNUSED; - atomic_set(&cache->count, 1); + refcount_set(&cache->refs, 1); spin_lock_init(&cache->lock); init_rwsem(&cache->data_rwsem); INIT_LIST_HEAD(&cache->list); @@ -2207,54 +2191,6 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, return 0; } -static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags) -{ - u64 num_devices; - u64 stripped; - - /* - * if restripe for this chunk_type is on pick target profile and - * return, otherwise do the usual balance - */ - stripped = get_restripe_target(fs_info, flags); - if (stripped) - return extended_to_chunk(stripped); - - num_devices = fs_info->fs_devices->rw_devices; - - stripped = BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID56_MASK | - BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10; - - if (num_devices == 1) { - stripped |= BTRFS_BLOCK_GROUP_DUP; - stripped = flags & ~stripped; - - /* turn raid0 into single device chunks */ - if (flags & BTRFS_BLOCK_GROUP_RAID0) - return stripped; - - /* turn mirroring into duplication */ - if (flags & (BTRFS_BLOCK_GROUP_RAID1_MASK | - BTRFS_BLOCK_GROUP_RAID10)) - return stripped | BTRFS_BLOCK_GROUP_DUP; - } else { - /* they already had raid on here, just return */ - if (flags & stripped) - return flags; - - stripped |= BTRFS_BLOCK_GROUP_DUP; - stripped = flags & ~stripped; - - /* switch duplicated blocks with raid1 */ - if (flags & BTRFS_BLOCK_GROUP_DUP) - return stripped | BTRFS_BLOCK_GROUP_RAID1; - - /* this is drive concat, leave it alone */ - } - - return flags; -} - /* * Mark one block group RO, can be called several times for the same block * group. @@ -2300,7 +2236,7 @@ again: * If we are changing raid levels, try to allocate a * corresponding block group with the new raid level. 
*/ - alloc_flags = update_block_group_flags(fs_info, cache->flags); + alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags); if (alloc_flags != cache->flags) { ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE); @@ -2327,7 +2263,7 @@ again: ret = inc_block_group_ro(cache, 0); out: if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) { - alloc_flags = update_block_group_flags(fs_info, cache->flags); + alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags); mutex_lock(&fs_info->chunk_mutex); check_system_chunk(trans, alloc_flags); mutex_unlock(&fs_info->chunk_mutex); @@ -2521,7 +2457,8 @@ again: num_pages *= 16; num_pages *= PAGE_SIZE; - ret = btrfs_check_data_free_space(inode, &data_reserved, 0, num_pages); + ret = btrfs_check_data_free_space(BTRFS_I(inode), &data_reserved, 0, + num_pages); if (ret) goto out_put; @@ -3392,7 +3329,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) ASSERT(list_empty(&block_group->dirty_list)); ASSERT(list_empty(&block_group->io_list)); ASSERT(list_empty(&block_group->bg_list)); - ASSERT(atomic_read(&block_group->count) == 1); + ASSERT(refcount_read(&block_group->refs) == 1); btrfs_put_block_group(block_group); spin_lock(&info->block_group_cache_lock); @@ -3447,7 +3384,6 @@ void btrfs_unfreeze_block_group(struct btrfs_block_group *block_group) spin_unlock(&block_group->lock); if (cleanup) { - mutex_lock(&fs_info->chunk_mutex); em_tree = &fs_info->mapping_tree; write_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, block_group->start, @@ -3455,7 +3391,6 @@ void btrfs_unfreeze_block_group(struct btrfs_block_group *block_group) BUG_ON(!em); /* logic error, can't happen */ remove_extent_mapping(em_tree, em); write_unlock(&em_tree->lock); - mutex_unlock(&fs_info->chunk_mutex); /* once for us and once for the tree */ free_extent_map(em); diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h index b6ee70a039c7..adfd7583a17b 100644 --- a/fs/btrfs/block-group.h +++ b/fs/btrfs/block-group.h @@ -114,8 +114,7 @@ struct btrfs_block_group { /* For block groups in the same raid type */ struct list_head list; - /* Usage count */ - atomic_t count; + refcount_t refs; /* * List of struct btrfs_free_clusters for this block group. diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index e7d709505cb1..c47b6c6fea9f 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -152,6 +152,17 @@ struct btrfs_inode { u64 last_unlink_trans; /* + * The id/generation of the last transaction where this inode was + * either the source or the destination of a clone/dedupe operation. + * Used when logging an inode to know if there are shared extents that + * need special care when logging checksum items, to avoid duplicate + * checksum items in a log (which can lead to a corruption where we end + * up with missing checksum ranges after log replay). + * Protected by the vfs inode lock. + */ + u64 last_reflink_trans; + + /* * Number of bytes outstanding that are going to need csums. This is * used in ENOSPC accounting. 
*/ diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c index 32e11a23b47f..81a8c87a5afb 100644 --- a/fs/btrfs/check-integrity.c +++ b/fs/btrfs/check-integrity.c @@ -631,10 +631,8 @@ static int btrfsic_process_superblock(struct btrfsic_state *state, int pass; selected_super = kzalloc(sizeof(*selected_super), GFP_NOFS); - if (NULL == selected_super) { - pr_info("btrfsic: error, kmalloc failed!\n"); + if (!selected_super) return -ENOMEM; - } list_for_each_entry(device, dev_head, dev_list) { int i; @@ -795,7 +793,6 @@ static int btrfsic_process_superblock_dev_mirror( if (NULL == superblock_tmp) { superblock_tmp = btrfsic_block_alloc(); if (NULL == superblock_tmp) { - pr_info("btrfsic: error, kmalloc failed!\n"); ret = -1; goto out; } @@ -921,9 +918,7 @@ static struct btrfsic_stack_frame *btrfsic_stack_frame_alloc(void) struct btrfsic_stack_frame *sf; sf = kzalloc(sizeof(*sf), GFP_NOFS); - if (NULL == sf) - pr_info("btrfsic: alloc memory failed!\n"); - else + if (sf) sf->magic = BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER; return sf; } @@ -1313,7 +1308,6 @@ static int btrfsic_create_link_to_next_block( if (NULL == l) { l = btrfsic_block_link_alloc(); if (NULL == l) { - pr_info("btrfsic: error, kmalloc failed!\n"); btrfsic_release_block_ctx(next_block_ctx); *next_blockp = NULL; return -1; @@ -1470,7 +1464,6 @@ static int btrfsic_handle_extent_data( mirror_num, &block_was_created); if (NULL == next_block) { - pr_info("btrfsic: error, kmalloc failed!\n"); btrfsic_release_block_ctx(&next_block_ctx); return -1; } @@ -2013,7 +2006,6 @@ again: block = btrfsic_block_alloc(); if (NULL == block) { - pr_info("btrfsic: error, kmalloc failed!\n"); btrfsic_release_block_ctx(&block_ctx); goto continue_loop; } @@ -2234,7 +2226,6 @@ static int btrfsic_process_written_superblock( mirror_num, &was_created); if (NULL == next_block) { - pr_info("btrfsic: error, kmalloc failed!\n"); btrfsic_release_block_ctx(&tmp_next_block_ctx); return -1; } @@ -2542,10 +2533,8 @@ static struct btrfsic_block_link *btrfsic_block_link_lookup_or_add( &state->block_link_hashtable); if (NULL == l) { l = btrfsic_block_link_alloc(); - if (NULL == l) { - pr_info("btrfsic: error, kmalloc failed!\n"); + if (!l) return NULL; - } l->block_ref_to = next_block; l->block_ref_from = from_block; @@ -2589,10 +2578,9 @@ static struct btrfsic_block *btrfsic_block_lookup_or_add( struct btrfsic_dev_state *dev_state; block = btrfsic_block_alloc(); - if (NULL == block) { - pr_info("btrfsic: error, kmalloc failed!\n"); + if (!block) return NULL; - } + dev_state = btrfsic_dev_state_lookup(block_ctx->dev->bdev->bd_dev); if (NULL == dev_state) { pr_info("btrfsic: error, lookup dev_state failed!\n"); @@ -2797,10 +2785,8 @@ int btrfsic_mount(struct btrfs_fs_info *fs_info, return -1; } state = kvzalloc(sizeof(*state), GFP_KERNEL); - if (!state) { - pr_info("btrfs check-integrity: allocation failed!\n"); + if (!state) return -ENOMEM; - } if (!btrfsic_is_initialized) { mutex_init(&btrfsic_mutex); @@ -2829,7 +2815,6 @@ int btrfsic_mount(struct btrfs_fs_info *fs_info, ds = btrfsic_dev_state_alloc(); if (NULL == ds) { - pr_info("btrfs check-integrity: kmalloc() failed!\n"); mutex_unlock(&btrfsic_mutex); return -ENOMEM; } diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index c6e648603f85..1ab56a734e70 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -172,18 +172,17 @@ static inline int compressed_bio_size(struct btrfs_fs_info *fs_info, (DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size; } -static int 
check_compressed_csum(struct btrfs_inode *inode, - struct compressed_bio *cb, +static int check_compressed_csum(struct btrfs_inode *inode, struct bio *bio, u64 disk_start) { struct btrfs_fs_info *fs_info = inode->root->fs_info; SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy); - int ret; struct page *page; unsigned long i; char *kaddr; u8 csum[BTRFS_CSUM_SIZE]; + struct compressed_bio *cb = bio->bi_private; u8 *cb_sum = cb->sums; if (inode->flags & BTRFS_INODE_NODATASUM) @@ -201,15 +200,15 @@ static int check_compressed_csum(struct btrfs_inode *inode, if (memcmp(&csum, cb_sum, csum_size)) { btrfs_print_data_csum_error(inode, disk_start, csum, cb_sum, cb->mirror_num); - ret = -EIO; - goto fail; + if (btrfs_io_bio(bio)->device) + btrfs_dev_stat_inc_and_print( + btrfs_io_bio(bio)->device, + BTRFS_DEV_STAT_CORRUPTION_ERRS); + return -EIO; } cb_sum += csum_size; - } - ret = 0; -fail: - return ret; + return 0; } /* when we finish reading compressed pages from the disk, we @@ -244,7 +243,6 @@ static void end_compressed_bio_read(struct bio *bio) * Record the correct mirror_num in cb->orig_bio so that * read-repair can work properly. */ - ASSERT(btrfs_io_bio(cb->orig_bio)); btrfs_io_bio(cb->orig_bio)->mirror_num = mirror; cb->mirror_num = mirror; @@ -256,7 +254,7 @@ static void end_compressed_bio_read(struct bio *bio) goto csum_failed; inode = cb->inode; - ret = check_compressed_csum(BTRFS_I(inode), cb, + ret = check_compressed_csum(BTRFS_I(inode), bio, (u64)bio->bi_iter.bi_sector << 9); if (ret) goto csum_failed; @@ -405,7 +403,7 @@ out: * This also checksums the file bytes and gets things ready for * the end io hooks. */ -blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start, +blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start, unsigned long len, u64 disk_start, unsigned long compressed_len, struct page **compressed_pages, @@ -413,7 +411,7 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start, unsigned int write_flags, struct cgroup_subsys_state *blkcg_css) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = inode->root->fs_info; struct bio *bio = NULL; struct compressed_bio *cb; unsigned long bytes_left; @@ -421,7 +419,7 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start, struct page *page; u64 first_byte = disk_start; blk_status_t ret; - int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; + int skip_sum = inode->flags & BTRFS_INODE_NODATASUM; WARN_ON(!PAGE_ALIGNED(start)); cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS); @@ -429,7 +427,7 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start, return BLK_STS_RESOURCE; refcount_set(&cb->pending_bios, 0); cb->errors = 0; - cb->inode = inode; + cb->inode = &inode->vfs_inode; cb->start = start; cb->len = len; cb->mirror_num = 0; @@ -455,7 +453,7 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start, int submit = 0; page = compressed_pages[pg_index]; - page->mapping = inode->i_mapping; + page->mapping = inode->vfs_inode.i_mapping; if (bio->bi_iter.bi_size) submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE, bio, 0); diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index 284a3ad31350..9f3dbe372631 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -8,6 +8,8 @@ #include <linux/sizes.h> +struct btrfs_inode; + /* * We want to make sure that amount of 
RAM required to uncompress an extent is * reasonable, so we limit the total size in ram of a compressed extent to @@ -88,7 +90,7 @@ int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start, unsigned long total_out, u64 disk_start, struct bio *bio); -blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start, +blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start, unsigned long len, u64 disk_start, unsigned long compressed_len, struct page **compressed_pages, diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 82ab6e5a386d..70e49d8d4f6c 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -1501,6 +1501,22 @@ static int close_blocks(u64 blocknr, u64 other, u32 blocksize) return 0; } +#ifdef __LITTLE_ENDIAN + +/* + * Compare two keys, on little-endian the disk order is same as CPU order and + * we can avoid the conversion. + */ +static int comp_keys(const struct btrfs_disk_key *disk_key, + const struct btrfs_key *k2) +{ + const struct btrfs_key *k1 = (const struct btrfs_key *)disk_key; + + return btrfs_comp_cpu_keys(k1, k2); +} + +#else + /* * compare two keys in a memcmp fashion */ @@ -1513,6 +1529,7 @@ static int comp_keys(const struct btrfs_disk_key *disk, return btrfs_comp_cpu_keys(&k1, k2); } +#endif /* * same as comp_keys only with two btrfs_key's diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index d404cce8ae40..9c7e466f27a9 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -546,11 +546,6 @@ enum { */ BTRFS_FS_EXCL_OP, /* - * To info transaction_kthread we need an immediate commit so it - * doesn't need to wait for commit_interval - */ - BTRFS_FS_NEED_ASYNC_COMMIT, - /* * Indicate that balance has been set up from the ioctl and is in the * main phase. The fs_info::balance_ctl is initialized. * Set and cleared while holding fs_info::balance_mutex. @@ -779,6 +774,7 @@ struct btrfs_fs_info { u32 thread_pool_size; struct kobject *space_info_kobj; + struct kobject *qgroups_kobj; u64 total_pinned; @@ -1011,6 +1007,8 @@ enum { BTRFS_ROOT_DEAD_TREE, /* The root has a log tree. Used only for subvolume roots. */ BTRFS_ROOT_HAS_LOG_TREE, + /* Qgroup flushing is in progress */ + BTRFS_ROOT_QGROUP_FLUSHING, }; /* @@ -1059,8 +1057,10 @@ struct btrfs_root { wait_queue_head_t log_writer_wait; wait_queue_head_t log_commit_wait[2]; struct list_head log_ctxs[2]; + /* Used only for log trees of subvolumes, not for the log root tree */ atomic_t log_writers; atomic_t log_commit[2]; + /* Used only for log trees of subvolumes, not for the log root tree */ atomic_t log_batch; int log_transid; /* No matter the commit succeeds or not*/ @@ -1075,7 +1075,6 @@ struct btrfs_root { u64 highest_objectid; - u64 defrag_trans_start; struct btrfs_key defrag_progress; struct btrfs_key defrag_max; @@ -1162,6 +1161,7 @@ struct btrfs_root { spinlock_t qgroup_meta_rsv_lock; u64 qgroup_meta_rsv_pertrans; u64 qgroup_meta_rsv_prealloc; + wait_queue_head_t qgroup_flush_wait; /* Number of active swapfiles */ atomic_t nr_swapfiles; @@ -1277,18 +1277,18 @@ static inline u32 BTRFS_MAX_XATTR_SIZE(const struct btrfs_fs_info *info) BTRFS_MOUNT_##opt) #define btrfs_set_and_info(fs_info, opt, fmt, args...) \ -{ \ +do { \ if (!btrfs_test_opt(fs_info, opt)) \ btrfs_info(fs_info, fmt, ##args); \ btrfs_set_opt(fs_info->mount_opt, opt); \ -} +} while (0) #define btrfs_clear_and_info(fs_info, opt, fmt, args...) 
\ -{ \ +do { \ if (btrfs_test_opt(fs_info, opt)) \ btrfs_info(fs_info, fmt, ##args); \ btrfs_clear_opt(fs_info->mount_opt, opt); \ -} +} while (0) /* * Requests for changes that need to be done during transaction commit. @@ -1895,6 +1895,52 @@ BTRFS_SETGET_STACK_FUNCS(disk_key_objectid, struct btrfs_disk_key, BTRFS_SETGET_STACK_FUNCS(disk_key_offset, struct btrfs_disk_key, offset, 64); BTRFS_SETGET_STACK_FUNCS(disk_key_type, struct btrfs_disk_key, type, 8); +#ifdef __LITTLE_ENDIAN + +/* + * Optimized helpers for little-endian architectures where CPU and on-disk + * structures have the same endianness and we can skip conversions. + */ + +static inline void btrfs_disk_key_to_cpu(struct btrfs_key *cpu_key, + const struct btrfs_disk_key *disk_key) +{ + memcpy(cpu_key, disk_key, sizeof(struct btrfs_key)); +} + +static inline void btrfs_cpu_key_to_disk(struct btrfs_disk_key *disk_key, + const struct btrfs_key *cpu_key) +{ + memcpy(disk_key, cpu_key, sizeof(struct btrfs_key)); +} + +static inline void btrfs_node_key_to_cpu(const struct extent_buffer *eb, + struct btrfs_key *cpu_key, int nr) +{ + struct btrfs_disk_key *disk_key = (struct btrfs_disk_key *)cpu_key; + + btrfs_node_key(eb, disk_key, nr); +} + +static inline void btrfs_item_key_to_cpu(const struct extent_buffer *eb, + struct btrfs_key *cpu_key, int nr) +{ + struct btrfs_disk_key *disk_key = (struct btrfs_disk_key *)cpu_key; + + btrfs_item_key(eb, disk_key, nr); +} + +static inline void btrfs_dir_item_key_to_cpu(const struct extent_buffer *eb, + const struct btrfs_dir_item *item, + struct btrfs_key *cpu_key) +{ + struct btrfs_disk_key *disk_key = (struct btrfs_disk_key *)cpu_key; + + btrfs_dir_item_key(eb, item, disk_key); +} + +#else + static inline void btrfs_disk_key_to_cpu(struct btrfs_key *cpu, const struct btrfs_disk_key *disk) { @@ -1936,6 +1982,8 @@ static inline void btrfs_dir_item_key_to_cpu(const struct extent_buffer *eb, btrfs_disk_key_to_cpu(key, &disk_key); } +#endif + /* struct btrfs_header */ BTRFS_SETGET_HEADER_FUNCS(header_bytenr, struct btrfs_header, bytenr, 64); BTRFS_SETGET_HEADER_FUNCS(header_generation, struct btrfs_header, @@ -2232,7 +2280,8 @@ static inline unsigned int leaf_data_end(const struct extent_buffer *leaf) } /* struct btrfs_file_extent_item */ -BTRFS_SETGET_FUNCS(file_extent_type, struct btrfs_file_extent_item, type, 8); +BTRFS_SETGET_STACK_FUNCS(stack_file_extent_type, struct btrfs_file_extent_item, + type, 8); BTRFS_SETGET_STACK_FUNCS(stack_file_extent_disk_bytenr, struct btrfs_file_extent_item, disk_bytenr, 64); BTRFS_SETGET_STACK_FUNCS(stack_file_extent_offset, @@ -2241,6 +2290,8 @@ BTRFS_SETGET_STACK_FUNCS(stack_file_extent_generation, struct btrfs_file_extent_item, generation, 64); BTRFS_SETGET_STACK_FUNCS(stack_file_extent_num_bytes, struct btrfs_file_extent_item, num_bytes, 64); +BTRFS_SETGET_STACK_FUNCS(stack_file_extent_ram_bytes, + struct btrfs_file_extent_item, ram_bytes, 64); BTRFS_SETGET_STACK_FUNCS(stack_file_extent_disk_num_bytes, struct btrfs_file_extent_item, disk_num_bytes, 64); BTRFS_SETGET_STACK_FUNCS(stack_file_extent_compression, @@ -2257,6 +2308,7 @@ static inline u32 btrfs_file_extent_calc_inline_size(u32 datasize) return BTRFS_FILE_EXTENT_INLINE_DATA_START + datasize; } +BTRFS_SETGET_FUNCS(file_extent_type, struct btrfs_file_extent_item, type, 8); BTRFS_SETGET_FUNCS(file_extent_disk_bytenr, struct btrfs_file_extent_item, disk_bytenr, 64); BTRFS_SETGET_FUNCS(file_extent_generation, struct btrfs_file_extent_item, @@ -2508,16 +2560,46 @@ int btrfs_inc_extent_ref(struct 
btrfs_trans_handle *trans, int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr); void btrfs_clear_space_info_full(struct btrfs_fs_info *info); +/* + * Different levels for to flush space when doing space reservations. + * + * The higher the level, the more methods we try to reclaim space. + */ enum btrfs_reserve_flush_enum { /* If we are in the transaction, we can't flush anything.*/ BTRFS_RESERVE_NO_FLUSH, + /* - * Flushing delalloc may cause deadlock somewhere, in this - * case, use FLUSH LIMIT + * Flush space by: + * - Running delayed inode items + * - Allocating a new chunk */ BTRFS_RESERVE_FLUSH_LIMIT, + + /* + * Flush space by: + * - Running delayed inode items + * - Running delayed refs + * - Running delalloc and waiting for ordered extents + * - Allocating a new chunk + */ BTRFS_RESERVE_FLUSH_EVICT, + + /* + * Flush space by above mentioned methods and by: + * - Running delayed iputs + * - Commiting transaction + * + * Can be interruped by fatal signal. + */ BTRFS_RESERVE_FLUSH_ALL, + + /* + * Pretty much the same as FLUSH_ALL, but can also steal space from + * global rsv. + * + * Can be interruped by fatal signal. + */ BTRFS_RESERVE_FLUSH_ALL_STEAL, }; @@ -2831,8 +2913,8 @@ int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans, int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_ordered_sum *sums); -blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio, - u64 file_start, int contig); +blk_status_t btrfs_csum_one_bio(struct btrfs_inode *inode, struct bio *bio, + u64 file_start, int contig); int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, struct list_head *list, int search_commit); void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode, @@ -2875,7 +2957,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, int btrfs_start_delalloc_snapshot(struct btrfs_root *root); int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int nr); -int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, +int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end, unsigned int extra_bits, struct extent_state **cached_state); int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, @@ -2928,7 +3010,7 @@ int btrfs_prealloc_file_range_trans(struct inode *inode, struct btrfs_trans_handle *trans, int mode, u64 start, u64 num_bytes, u64 min_size, loff_t actual_len, u64 *alloc_hint); -int btrfs_run_delalloc_range(struct inode *inode, struct page *locked_page, +int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page, u64 start, u64 end, int *page_started, unsigned long *nr_written, struct writeback_control *wbc); int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end); @@ -2962,7 +3044,7 @@ void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end, int skip_pinned); extern const struct file_operations btrfs_file_operations; int __btrfs_drop_extents(struct btrfs_trans_handle *trans, - struct btrfs_root *root, struct inode *inode, + struct btrfs_root *root, struct btrfs_inode *inode, struct btrfs_path *path, u64 start, u64 end, u64 *drop_end, int drop_cache, int replace_extent, @@ -2978,10 +3060,13 @@ int btrfs_punch_hole_range(struct inode *inode, struct btrfs_path *path, int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, struct btrfs_inode *inode, u64 start, u64 end); int btrfs_release_file(struct inode *inode, struct file *file); -int btrfs_dirty_pages(struct inode 
*inode, struct page **pages, +int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages, size_t num_pages, loff_t pos, size_t write_bytes, struct extent_state **cached); int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end); +int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos, + size_t *write_bytes); +void btrfs_check_nocow_unlock(struct btrfs_inode *inode); /* tree-defrag.c */ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans, @@ -3194,7 +3279,7 @@ do { \ /* Report first abort since mount */ \ if (!test_and_set_bit(BTRFS_FS_STATE_TRANS_ABORTED, \ &((trans)->fs_info->fs_state))) { \ - if ((errno) != -EIO) { \ + if ((errno) != -EIO && (errno) != -EROFS) { \ WARN(1, KERN_DEBUG \ "BTRFS: Transaction aborted (error %d)\n", \ (errno)); \ @@ -3378,7 +3463,7 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans, int btrfs_update_reloc_root(struct btrfs_trans_handle *trans, struct btrfs_root *root); int btrfs_recover_relocation(struct btrfs_root *root); -int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len); +int btrfs_reloc_clone_csums(struct btrfs_inode *inode, u64 file_pos, u64 len); int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *buf, struct extent_buffer *cow); diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c index 1245739a3a6e..0e354e9e57d0 100644 --- a/fs/btrfs/delalloc-space.c +++ b/fs/btrfs/delalloc-space.c @@ -237,10 +237,10 @@ commit_trans: return 0; } -int btrfs_check_data_free_space(struct inode *inode, +int btrfs_check_data_free_space(struct btrfs_inode *inode, struct extent_changeset **reserved, u64 start, u64 len) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = inode->root->fs_info; int ret; /* align the range */ @@ -248,14 +248,14 @@ int btrfs_check_data_free_space(struct inode *inode, round_down(start, fs_info->sectorsize); start = round_down(start, fs_info->sectorsize); - ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode), len); + ret = btrfs_alloc_data_chunk_ondemand(inode, len); if (ret < 0) return ret; /* Use new btrfs_qgroup_reserve_data to reserve precious data space. */ ret = btrfs_qgroup_reserve_data(inode, reserved, start, len); if (ret < 0) - btrfs_free_reserved_data_space_noquota(inode, start, len); + btrfs_free_reserved_data_space_noquota(fs_info, len); else ret = 0; return ret; @@ -269,16 +269,12 @@ int btrfs_check_data_free_space(struct inode *inode, * which we can't sleep and is sure it won't affect qgroup reserved space. * Like clear_bit_hook(). */ -void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start, +void btrfs_free_reserved_data_space_noquota(struct btrfs_fs_info *fs_info, u64 len) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_space_info *data_sinfo; - /* Make sure the range is aligned to sectorsize */ - len = round_up(start + len, fs_info->sectorsize) - - round_down(start, fs_info->sectorsize); - start = round_down(start, fs_info->sectorsize); + ASSERT(IS_ALIGNED(len, fs_info->sectorsize)); data_sinfo = fs_info->data_sinfo; spin_lock(&data_sinfo->lock); @@ -293,17 +289,17 @@ void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start, * This one will handle the per-inode data rsv map for accurate reserved * space framework. 
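In the delalloc-space changes here, btrfs_free_reserved_data_space_noquota() stops rounding its range itself and instead asserts that the caller passed a sector-aligned length, while the quota-aware wrapper that follows keeps doing the rounding. The alignment arithmetic in isolation, as a small illustrative helper with an assumed name:

	#include <linux/types.h>
	#include <linux/kernel.h>
	#include <linux/bug.h>

	/*
	 * Expand [start, start + len) to whole sectors: round the end up and
	 * the start down, as the data reservation wrapper does before calling
	 * the *_noquota variant. Illustrative only.
	 */
	static u64 demo_sector_align_len(u64 start, u64 len, u32 sectorsize)
	{
		u64 aligned = round_up(start + len, sectorsize) -
			      round_down(start, sectorsize);

		WARN_ON(!IS_ALIGNED(aligned, sectorsize));	/* what the callee now asserts */
		return aligned;
	}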
*/ -void btrfs_free_reserved_data_space(struct inode *inode, +void btrfs_free_reserved_data_space(struct btrfs_inode *inode, struct extent_changeset *reserved, u64 start, u64 len) { - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_fs_info *fs_info = inode->root->fs_info; /* Make sure the range is aligned to sectorsize */ - len = round_up(start + len, root->fs_info->sectorsize) - - round_down(start, root->fs_info->sectorsize); - start = round_down(start, root->fs_info->sectorsize); + len = round_up(start + len, fs_info->sectorsize) - + round_down(start, fs_info->sectorsize); + start = round_down(start, fs_info->sectorsize); - btrfs_free_reserved_data_space_noquota(inode, start, len); + btrfs_free_reserved_data_space_noquota(fs_info, len); btrfs_qgroup_free_data(inode, reserved, start, len); } @@ -557,7 +553,7 @@ void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes) * Return 0 for success * Return <0 for error(-ENOSPC or -EQUOT) */ -int btrfs_delalloc_reserve_space(struct inode *inode, +int btrfs_delalloc_reserve_space(struct btrfs_inode *inode, struct extent_changeset **reserved, u64 start, u64 len) { int ret; @@ -565,7 +561,7 @@ int btrfs_delalloc_reserve_space(struct inode *inode, ret = btrfs_check_data_free_space(inode, reserved, start, len); if (ret < 0) return ret; - ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len); + ret = btrfs_delalloc_reserve_metadata(inode, len); if (ret < 0) btrfs_free_reserved_data_space(inode, *reserved, start, len); return ret; @@ -583,10 +579,10 @@ int btrfs_delalloc_reserve_space(struct inode *inode, * list if there are no delalloc bytes left. * Also it will handle the qgroup reserved space. */ -void btrfs_delalloc_release_space(struct inode *inode, +void btrfs_delalloc_release_space(struct btrfs_inode *inode, struct extent_changeset *reserved, u64 start, u64 len, bool qgroup_free) { - btrfs_delalloc_release_metadata(BTRFS_I(inode), len, qgroup_free); + btrfs_delalloc_release_metadata(inode, len, qgroup_free); btrfs_free_reserved_data_space(inode, reserved, start, len); } diff --git a/fs/btrfs/delalloc-space.h b/fs/btrfs/delalloc-space.h index 54466fbd7075..28bf5c3ef430 100644 --- a/fs/btrfs/delalloc-space.h +++ b/fs/btrfs/delalloc-space.h @@ -6,18 +6,18 @@ struct extent_changeset; int btrfs_alloc_data_chunk_ondemand(struct btrfs_inode *inode, u64 bytes); -int btrfs_check_data_free_space(struct inode *inode, +int btrfs_check_data_free_space(struct btrfs_inode *inode, struct extent_changeset **reserved, u64 start, u64 len); -void btrfs_free_reserved_data_space(struct inode *inode, +void btrfs_free_reserved_data_space(struct btrfs_inode *inode, struct extent_changeset *reserved, u64 start, u64 len); -void btrfs_delalloc_release_space(struct inode *inode, +void btrfs_delalloc_release_space(struct btrfs_inode *inode, struct extent_changeset *reserved, u64 start, u64 len, bool qgroup_free); -void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start, +void btrfs_free_reserved_data_space_noquota(struct btrfs_fs_info *fs_info, u64 len); void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes, bool qgroup_free); -int btrfs_delalloc_reserve_space(struct inode *inode, +int btrfs_delalloc_reserve_space(struct btrfs_inode *inode, struct extent_changeset **reserved, u64 start, u64 len); #endif /* BTRFS_DELALLOC_SPACE_H */ diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index b1a148058773..c850d7f44fbe 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1116,6 +1116,7 @@ 
static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info, mutex_init(&root->log_mutex); mutex_init(&root->ordered_extent_mutex); mutex_init(&root->delalloc_mutex); + init_waitqueue_head(&root->qgroup_flush_wait); init_waitqueue_head(&root->log_writer_wait); init_waitqueue_head(&root->log_commit_wait[0]); init_waitqueue_head(&root->log_commit_wait[1]); @@ -1141,10 +1142,6 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info, memset(&root->root_key, 0, sizeof(root->root_key)); memset(&root->root_item, 0, sizeof(root->root_item)); memset(&root->defrag_progress, 0, sizeof(root->defrag_progress)); - if (!dummy) - root->defrag_trans_start = fs_info->generation; - else - root->defrag_trans_start = 0; root->root_key.objectid = objectid; root->anon_dev = 0; @@ -1395,7 +1392,12 @@ alloc_fail: goto out; } -static int btrfs_init_fs_root(struct btrfs_root *root) +/* + * Initialize subvolume root in-memory structure + * + * @anon_dev: anonymous device to attach to the root, if zero, allocate new + */ +static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev) { int ret; unsigned int nofs_flag; @@ -1428,9 +1430,20 @@ static int btrfs_init_fs_root(struct btrfs_root *root) spin_lock_init(&root->ino_cache_lock); init_waitqueue_head(&root->ino_cache_wait); - ret = get_anon_bdev(&root->anon_dev); - if (ret) - goto fail; + /* + * Don't assign anonymous block device to roots that are not exposed to + * userspace, the id pool is limited to 1M + */ + if (is_fstree(root->root_key.objectid) && + btrfs_root_refs(&root->root_item) > 0) { + if (!anon_dev) { + ret = get_anon_bdev(&root->anon_dev); + if (ret) + goto fail; + } else { + root->anon_dev = anon_dev; + } + } mutex_lock(&root->objectid_mutex); ret = btrfs_find_highest_objectid(root, @@ -1534,8 +1547,27 @@ void btrfs_free_fs_info(struct btrfs_fs_info *fs_info) } -struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info, - u64 objectid, bool check_ref) +/* + * Get an in-memory reference of a root structure. + * + * For essential trees like root/extent tree, we grab it from fs_info directly. + * For subvolume trees, we check the cached filesystem roots first. If not + * found, then read it from disk and add it to cached fs roots. + * + * Caller should release the root by calling btrfs_put_root() after the usage. + * + * NOTE: Reloc and log trees can't be read by this function as they share the + * same root objectid. + * + * @objectid: root id + * @anon_dev: preallocated anonymous block device number for new roots, + * pass 0 for new allocation. 
+ * @check_ref: whether to check root item references, If true, return -ENOENT + * for orphan roots + */ +static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info, + u64 objectid, dev_t anon_dev, + bool check_ref) { struct btrfs_root *root; struct btrfs_path *path; @@ -1564,6 +1596,8 @@ struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info, again: root = btrfs_lookup_fs_root(fs_info, objectid); if (root) { + /* Shouldn't get preallocated anon_dev for cached roots */ + ASSERT(!anon_dev); if (check_ref && btrfs_root_refs(&root->root_item) == 0) { btrfs_put_root(root); return ERR_PTR(-ENOENT); @@ -1583,7 +1617,7 @@ again: goto fail; } - ret = btrfs_init_fs_root(root); + ret = btrfs_init_fs_root(root, anon_dev); if (ret) goto fail; @@ -1616,6 +1650,33 @@ fail: return ERR_PTR(ret); } +/* + * Get in-memory reference of a root structure + * + * @objectid: tree objectid + * @check_ref: if set, verify that the tree exists and the item has at least + * one reference + */ +struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info, + u64 objectid, bool check_ref) +{ + return btrfs_get_root_ref(fs_info, objectid, 0, check_ref); +} + +/* + * Get in-memory reference of a root structure, created as new, optionally pass + * the anonymous block device id + * + * @objectid: tree objectid + * @anon_dev: if zero, allocate a new anonymous block device or use the + * parameter value + */ +struct btrfs_root *btrfs_get_new_fs_root(struct btrfs_fs_info *fs_info, + u64 objectid, dev_t anon_dev) +{ + return btrfs_get_root_ref(fs_info, objectid, anon_dev, true); +} + static int btrfs_congested_fn(void *congested_data, int bdi_bits) { struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data; @@ -1749,7 +1810,6 @@ static int transaction_kthread(void *arg) now = ktime_get_seconds(); if (cur->state < TRANS_STATE_COMMIT_START && - !test_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags) && (now < cur->start_time || now - cur->start_time < fs_info->commit_interval)) { spin_unlock(&fs_info->trans_lock); @@ -2001,8 +2061,7 @@ void btrfs_put_root(struct btrfs_root *root) if (root->anon_dev) free_anon_bdev(root->anon_dev); btrfs_drew_lock_destroy(&root->snapshot_lock); - free_extent_buffer(root->node); - free_extent_buffer(root->commit_root); + free_root_extent_buffers(root); kfree(root->free_ino_ctl); kfree(root->free_ino_pinned); #ifdef CONFIG_BTRFS_DEBUG @@ -4058,6 +4117,11 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info) ASSERT(list_empty(&fs_info->delayed_iputs)); set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags); + if (btrfs_check_quota_leak(fs_info)) { + WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG)); + btrfs_err(fs_info, "qgroup reserved space leaked"); + } + btrfs_free_qgroup_config(fs_info); ASSERT(list_empty(&fs_info->delalloc_roots)); diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index bf43245406c4..00dc39d47ed3 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h @@ -67,6 +67,8 @@ void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info); struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info, u64 objectid, bool check_ref); +struct btrfs_root *btrfs_get_new_fs_root(struct btrfs_fs_info *fs_info, + u64 objectid, dev_t anon_dev); void btrfs_free_fs_info(struct btrfs_fs_info *fs_info); int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info); diff --git a/fs/btrfs/extent-io-tree.h b/fs/btrfs/extent-io-tree.h index b6561455b3c4..f39d47a2d01a 100644 --- a/fs/btrfs/extent-io-tree.h +++ b/fs/btrfs/extent-io-tree.h @@ -233,14 +233,11 @@ bool 
btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start, struct extent_state **cached_state); /* This should be reworked in the future and put elsewhere. */ -int get_state_failrec(struct extent_io_tree *tree, u64 start, - struct io_failure_record **failrec); +struct io_failure_record *get_state_failrec(struct extent_io_tree *tree, u64 start); int set_state_failrec(struct extent_io_tree *tree, u64 start, struct io_failure_record *failrec); void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, u64 end); -int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end, - struct io_failure_record **failrec_ret); int free_io_failure(struct extent_io_tree *failure_tree, struct extent_io_tree *io_tree, struct io_failure_record *rec); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index c0bc35f932bf..61ede335f6c3 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -5298,7 +5298,14 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc) goto out; } - trans = btrfs_start_transaction(tree_root, 0); + /* + * Use join to avoid potential EINTR from transaction start. See + * wait_reserve_ticket and the whole reservation callchain. + */ + if (for_reloc) + trans = btrfs_join_transaction(tree_root); + else + trans = btrfs_start_transaction(tree_root, 0); if (IS_ERR(trans)) { err = PTR_ERR(trans); goto out_free; @@ -5466,6 +5473,14 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc) } } + /* + * This subvolume is going to be completely dropped, and won't be + * recorded as dirty roots, thus pertrans meta rsv will not be freed at + * commit transaction time. So free it here manually. + */ + btrfs_qgroup_convert_reserved_meta(root, INT_MAX); + btrfs_qgroup_free_meta_all_pertrans(root); + if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) btrfs_add_dropped_root(trans, root); else diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 608f93438b29..617ea38e6fd7 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1999,7 +1999,8 @@ static int __process_pages_contig(struct address_space *mapping, if (!PageDirty(pages[i]) || pages[i]->mapping != mapping) { unlock_page(pages[i]); - put_page(pages[i]); + for (; i < ret; i++) + put_page(pages[i]); err = -EAGAIN; goto out; } @@ -2017,15 +2018,14 @@ out: return err; } -void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end, +void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end, struct page *locked_page, unsigned clear_bits, unsigned long page_ops) { - clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, clear_bits, 1, 0, - NULL); + clear_extent_bit(&inode->io_tree, start, end, clear_bits, 1, 0, NULL); - __process_pages_contig(inode->i_mapping, locked_page, + __process_pages_contig(inode->vfs_inode.i_mapping, locked_page, start >> PAGE_SHIFT, end >> PAGE_SHIFT, page_ops, NULL); } @@ -2122,12 +2122,11 @@ out: return ret; } -int get_state_failrec(struct extent_io_tree *tree, u64 start, - struct io_failure_record **failrec) +struct io_failure_record *get_state_failrec(struct extent_io_tree *tree, u64 start) { struct rb_node *node; struct extent_state *state; - int ret = 0; + struct io_failure_record *failrec; spin_lock(&tree->lock); /* @@ -2136,18 +2135,19 @@ int get_state_failrec(struct extent_io_tree *tree, u64 start, */ node = tree_search(tree, start); if (!node) { - ret = -ENOENT; + failrec = ERR_PTR(-ENOENT); goto out; } state = rb_entry(node, struct extent_state, rb_node); if 
(state->start != start) { - ret = -ENOENT; + failrec = ERR_PTR(-ENOENT); goto out; } - *failrec = state->failrec; + + failrec = state->failrec; out: spin_unlock(&tree->lock); - return ret; + return failrec; } /* @@ -2377,8 +2377,8 @@ int clean_io_failure(struct btrfs_fs_info *fs_info, if (!ret) return 0; - ret = get_state_failrec(failure_tree, start, &failrec); - if (ret) + failrec = get_state_failrec(failure_tree, start); + if (IS_ERR(failrec)) return 0; BUG_ON(!failrec->this_mirror); @@ -2450,8 +2450,8 @@ void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, u64 end) spin_unlock(&failure_tree->lock); } -int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end, - struct io_failure_record **failrec_ret) +static struct io_failure_record *btrfs_get_io_failure_record(struct inode *inode, + u64 start, u64 end) { struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct io_failure_record *failrec; @@ -2462,65 +2462,8 @@ int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end, int ret; u64 logical; - ret = get_state_failrec(failure_tree, start, &failrec); - if (ret) { - failrec = kzalloc(sizeof(*failrec), GFP_NOFS); - if (!failrec) - return -ENOMEM; - - failrec->start = start; - failrec->len = end - start + 1; - failrec->this_mirror = 0; - failrec->bio_flags = 0; - failrec->in_validation = 0; - - read_lock(&em_tree->lock); - em = lookup_extent_mapping(em_tree, start, failrec->len); - if (!em) { - read_unlock(&em_tree->lock); - kfree(failrec); - return -EIO; - } - - if (em->start > start || em->start + em->len <= start) { - free_extent_map(em); - em = NULL; - } - read_unlock(&em_tree->lock); - if (!em) { - kfree(failrec); - return -EIO; - } - - logical = start - em->start; - logical = em->block_start + logical; - if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { - logical = em->block_start; - failrec->bio_flags = EXTENT_BIO_COMPRESSED; - extent_set_compress_type(&failrec->bio_flags, - em->compress_type); - } - - btrfs_debug(fs_info, - "Get IO Failure Record: (new) logical=%llu, start=%llu, len=%llu", - logical, start, failrec->len); - - failrec->logical = logical; - free_extent_map(em); - - /* set the bits in the private failure tree */ - ret = set_extent_bits(failure_tree, start, end, - EXTENT_LOCKED | EXTENT_DIRTY); - if (ret >= 0) - ret = set_state_failrec(failure_tree, start, failrec); - /* set the bits in the inode's tree */ - if (ret >= 0) - ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED); - if (ret < 0) { - kfree(failrec); - return ret; - } - } else { + failrec = get_state_failrec(failure_tree, start); + if (!IS_ERR(failrec)) { btrfs_debug(fs_info, "Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu, validation=%d", failrec->logical, failrec->start, failrec->len, @@ -2530,11 +2473,66 @@ int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end, * (e.g. with a list for failed_mirror) to make * clean_io_failure() clean all those errors at once. 
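get_state_failrec() and btrfs_get_io_failure_record() are converted here from an int return plus out-parameter to returning the record directly, with errors encoded in the pointer. The ERR_PTR()/IS_ERR()/PTR_ERR() idiom these hunks rely on, shown as a self-contained sketch with made-up names:

	#include <linux/err.h>
	#include <linux/slab.h>
	#include <linux/types.h>

	struct demo_rec {
		u64 start;
	};

	/* Return a record, ERR_PTR(-ENOENT) if missing, ERR_PTR(-ENOMEM) on OOM. */
	static struct demo_rec *demo_get_rec(bool present)
	{
		struct demo_rec *rec;

		if (!present)
			return ERR_PTR(-ENOENT);

		rec = kzalloc(sizeof(*rec), GFP_KERNEL);
		if (!rec)
			return ERR_PTR(-ENOMEM);
		return rec;
	}

	static int demo_caller(void)
	{
		struct demo_rec *rec = demo_get_rec(true);

		if (IS_ERR(rec))
			return PTR_ERR(rec);	/* recover the errno */

		kfree(rec);
		return 0;
	}

Callers such as clean_io_failure() and btrfs_submit_read_repair() below switch from checking the int to IS_ERR()/PTR_ERR() accordingly.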
*/ + + return failrec; } - *failrec_ret = failrec; + failrec = kzalloc(sizeof(*failrec), GFP_NOFS); + if (!failrec) + return ERR_PTR(-ENOMEM); - return 0; + failrec->start = start; + failrec->len = end - start + 1; + failrec->this_mirror = 0; + failrec->bio_flags = 0; + failrec->in_validation = 0; + + read_lock(&em_tree->lock); + em = lookup_extent_mapping(em_tree, start, failrec->len); + if (!em) { + read_unlock(&em_tree->lock); + kfree(failrec); + return ERR_PTR(-EIO); + } + + if (em->start > start || em->start + em->len <= start) { + free_extent_map(em); + em = NULL; + } + read_unlock(&em_tree->lock); + if (!em) { + kfree(failrec); + return ERR_PTR(-EIO); + } + + logical = start - em->start; + logical = em->block_start + logical; + if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { + logical = em->block_start; + failrec->bio_flags = EXTENT_BIO_COMPRESSED; + extent_set_compress_type(&failrec->bio_flags, em->compress_type); + } + + btrfs_debug(fs_info, + "Get IO Failure Record: (new) logical=%llu, start=%llu, len=%llu", + logical, start, failrec->len); + + failrec->logical = logical; + free_extent_map(em); + + /* Set the bits in the private failure tree */ + ret = set_extent_bits(failure_tree, start, end, + EXTENT_LOCKED | EXTENT_DIRTY); + if (ret >= 0) { + ret = set_state_failrec(failure_tree, start, failrec); + /* Set the bits in the inode's tree */ + ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED); + } else if (ret < 0) { + kfree(failrec); + return ERR_PTR(ret); + } + + return failrec; } static bool btrfs_check_repairable(struct inode *inode, bool needs_validation, @@ -2659,16 +2657,15 @@ blk_status_t btrfs_submit_read_repair(struct inode *inode, struct bio *repair_bio; struct btrfs_io_bio *repair_io_bio; blk_status_t status; - int ret; btrfs_debug(fs_info, "repair read error: read error at %llu", start); BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE); - ret = btrfs_get_io_failure_record(inode, start, end, &failrec); - if (ret) - return errno_to_blk_status(ret); + failrec = btrfs_get_io_failure_record(inode, start, end); + if (IS_ERR(failrec)) + return errno_to_blk_status(PTR_ERR(failrec)); need_validation = btrfs_io_needs_validation(inode, failed_bio); @@ -3419,7 +3416,7 @@ static void update_nr_written(struct writeback_control *wbc, * This returns 0 if all went well (page still locked) * This returns < 0 if there were errors (page still locked) */ -static noinline_for_stack int writepage_delalloc(struct inode *inode, +static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode, struct page *page, struct writeback_control *wbc, u64 delalloc_start, unsigned long *nr_written) { @@ -3432,7 +3429,7 @@ static noinline_for_stack int writepage_delalloc(struct inode *inode, while (delalloc_end < page_end) { - found = find_lock_delalloc_range(inode, page, + found = find_lock_delalloc_range(&inode->vfs_inode, page, &delalloc_start, &delalloc_end); if (!found) { @@ -3449,8 +3446,7 @@ static noinline_for_stack int writepage_delalloc(struct inode *inode, * started, so we don't want to return > 0 unless * things are going well. */ - ret = ret < 0 ? ret : -EIO; - goto done; + return ret < 0 ? 
ret : -EIO; } /* * delalloc_end is already one less than the total length, so @@ -3482,10 +3478,7 @@ static noinline_for_stack int writepage_delalloc(struct inode *inode, return 1; } - ret = 0; - -done: - return ret; + return 0; } /* @@ -3496,7 +3489,7 @@ done: * 0 if all went well (page still locked) * < 0 if there were errors (page still locked) */ -static noinline_for_stack int __extent_writepage_io(struct inode *inode, +static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode, struct page *page, struct writeback_control *wbc, struct extent_page_data *epd, @@ -3504,7 +3497,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode, unsigned long nr_written, int *nr_ret) { - struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; + struct extent_io_tree *tree = &inode->io_tree; u64 start = page_offset(page); u64 page_end = start + PAGE_SIZE - 1; u64 end; @@ -3536,7 +3529,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode, update_nr_written(wbc, nr_written + 1); end = page_end; - blocksize = inode->i_sb->s_blocksize; + blocksize = inode->vfs_inode.i_sb->s_blocksize; while (cur <= end) { u64 em_end; @@ -3547,8 +3540,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode, page_end, 1); break; } - em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur, - end - cur + 1); + em = btrfs_get_extent(inode, NULL, 0, cur, end - cur + 1); if (IS_ERR_OR_NULL(em)) { SetPageError(page); ret = PTR_ERR_OR_ZERO(em); @@ -3585,7 +3577,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode, btrfs_set_range_writeback(tree, cur, cur + iosize - 1); if (!PageWriteback(page)) { - btrfs_err(BTRFS_I(inode)->root->fs_info, + btrfs_err(inode->root->fs_info, "page %lu not writeback, cur %llu end %llu", page->index, cur, end); } @@ -3658,15 +3650,16 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, set_page_extent_mapped(page); if (!epd->extent_locked) { - ret = writepage_delalloc(inode, page, wbc, start, &nr_written); + ret = writepage_delalloc(BTRFS_I(inode), page, wbc, start, + &nr_written); if (ret == 1) return 0; if (ret) goto done; } - ret = __extent_writepage_io(inode, page, wbc, epd, - i_size, nr_written, &nr); + ret = __extent_writepage_io(BTRFS_I(inode), page, wbc, epd, i_size, + nr_written, &nr); if (ret == 1) return 0; @@ -4126,7 +4119,7 @@ retry: if (!test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) { ret = flush_write_bio(&epd); } else { - ret = -EUCLEAN; + ret = -EROFS; end_write_bio(&epd, ret); } return ret; @@ -4488,6 +4481,9 @@ int try_release_extent_mapping(struct page *page, gfp_t mask) page->mapping->host->i_size > SZ_16M) { u64 len; while (start <= end) { + struct btrfs_fs_info *fs_info; + u64 cur_gen; + len = end - start + 1; write_lock(&map->lock); em = lookup_extent_mapping(map, start, len); @@ -4501,15 +4497,45 @@ int try_release_extent_mapping(struct page *page, gfp_t mask) free_extent_map(em); break; } - if (!test_range_bit(tree, em->start, - extent_map_end(em) - 1, - EXTENT_LOCKED, 0, NULL)) { - set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, - &btrfs_inode->runtime_flags); - remove_extent_mapping(map, em); - /* once for the rb tree */ - free_extent_map(em); - } + if (test_range_bit(tree, em->start, + extent_map_end(em) - 1, + EXTENT_LOCKED, 0, NULL)) + goto next; + /* + * If it's not in the list of modified extents, used + * by a fast fsync, we can remove it. If it's being + * logged we can safely remove it since fsync took an + * extra reference on the em. 
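The logic added just below keeps an extent map that is on the list of modified extents unless its generation is older than the filesystem's current one, sampling fs_info->generation briefly under trans_lock. That "sample a counter under a spinlock, then compare" pattern in isolation, with generic names:

	#include <linux/spinlock.h>
	#include <linux/types.h>

	/* Stand-in for the fs_info fields used below; names are illustrative. */
	struct demo_fs {
		spinlock_t lock;
		u64 generation;
	};

	/* An entry is droppable only if it predates the current generation. */
	static bool demo_entry_is_stale(struct demo_fs *fs, u64 entry_gen)
	{
		u64 cur;

		spin_lock(&fs->lock);
		cur = fs->generation;
		spin_unlock(&fs->lock);

		return entry_gen < cur;
	}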
+ */ + if (list_empty(&em->list) || + test_bit(EXTENT_FLAG_LOGGING, &em->flags)) + goto remove_em; + /* + * If it's in the list of modified extents, remove it + * only if its generation is older then the current one, + * in which case we don't need it for a fast fsync. + * Otherwise don't remove it, we could be racing with an + * ongoing fast fsync that could miss the new extent. + */ + fs_info = btrfs_inode->root->fs_info; + spin_lock(&fs_info->trans_lock); + cur_gen = fs_info->generation; + spin_unlock(&fs_info->trans_lock); + if (em->generation >= cur_gen) + goto next; +remove_em: + /* + * We only remove extent maps that are not in the list of + * modified extents or that are in the list but with a + * generation lower then the current generation, so there + * is no need to set the full fsync flag on the inode (it + * hurts the fsync performance for workloads with a data + * size that exceeds or is close to the system's memory). + */ + remove_extent_mapping(map, em); + /* once for the rb tree */ + free_extent_map(em); +next: start = extent_map_end(em); write_unlock(&map->lock); @@ -4669,7 +4695,7 @@ static int emit_last_fiemap_cache(struct fiemap_extent_info *fieinfo, } int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, - __u64 start, __u64 len) + u64 start, u64 len) { int ret = 0; u64 off = start; diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 87f60a48f750..00a88f2eb5ab 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -204,7 +204,7 @@ int btree_write_cache_pages(struct address_space *mapping, struct writeback_control *wbc); void extent_readahead(struct readahead_control *rac); int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, - __u64 start, __u64 len); + u64 start, u64 len); void set_page_extent_mapped(struct page *page); struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info, @@ -277,7 +277,7 @@ void clear_extent_buffer_uptodate(struct extent_buffer *eb); int extent_buffer_under_io(const struct extent_buffer *eb); void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end); void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end); -void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end, +void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end, struct page *locked_page, unsigned bits_to_clear, unsigned long page_ops); diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index 706a3128e192..7d5ec71615b8 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -522,10 +522,10 @@ fail: * means this bio can contains potentially discontigous bio vecs * so the logical offset of each should be calculated separately. */ -blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio, +blk_status_t btrfs_csum_one_bio(struct btrfs_inode *inode, struct bio *bio, u64 file_start, int contig) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = inode->root->fs_info; SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); struct btrfs_ordered_sum *sums; struct btrfs_ordered_extent *ordered = NULL; diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index b0d2c976587e..841c516079a9 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -500,18 +500,18 @@ next: * this also makes the decision about creating an inline extent vs * doing real data extents, marking pages dirty and delalloc as required. 
*/ -int btrfs_dirty_pages(struct inode *inode, struct page **pages, +int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages, size_t num_pages, loff_t pos, size_t write_bytes, struct extent_state **cached) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = inode->root->fs_info; int err = 0; int i; u64 num_bytes; u64 start_pos; u64 end_of_last_block; u64 end_pos = pos + write_bytes; - loff_t isize = i_size_read(inode); + loff_t isize = i_size_read(&inode->vfs_inode); unsigned int extra_bits = 0; start_pos = pos & ~((u64) fs_info->sectorsize - 1); @@ -524,13 +524,13 @@ int btrfs_dirty_pages(struct inode *inode, struct page **pages, * The pages may have already been dirty, clear out old accounting so * we can set things up properly */ - clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos, end_of_last_block, + clear_extent_bit(&inode->io_tree, start_pos, end_of_last_block, EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0, cached); - if (!btrfs_is_free_space_inode(BTRFS_I(inode))) { + if (!btrfs_is_free_space_inode(inode)) { if (start_pos >= isize && - !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)) { + !(inode->flags & BTRFS_INODE_PREALLOC)) { /* * There can't be any extents following eof in this case * so just set the delalloc new bit for the range @@ -538,8 +538,7 @@ int btrfs_dirty_pages(struct inode *inode, struct page **pages, */ extra_bits |= EXTENT_DELALLOC_NEW; } else { - err = btrfs_find_new_delalloc_bytes(BTRFS_I(inode), - start_pos, + err = btrfs_find_new_delalloc_bytes(inode, start_pos, num_bytes, cached); if (err) return err; @@ -564,7 +563,7 @@ int btrfs_dirty_pages(struct inode *inode, struct page **pages, * at this time. */ if (end_pos > isize) - i_size_write(inode, end_pos); + i_size_write(&inode->vfs_inode, end_pos); return 0; } @@ -731,7 +730,7 @@ next: * is deleted from the tree. 
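A large part of this series switches internal prototypes from struct inode to struct btrfs_inode, as in btrfs_dirty_pages() above and __btrfs_drop_extents() below. The conversions lean on the usual embedded-inode layout: the VFS inode lives inside the filesystem's inode and is reached via ->vfs_inode, while BTRFS_I() goes the other way with container_of(). Schematically, with generic names rather than the real btrfs definitions:

	#include <linux/fs.h>
	#include <linux/kernel.h>
	#include <linux/types.h>

	struct demo_inode {
		u64 disk_i_size;	/* filesystem-private state, illustrative */
		struct inode vfs_inode;	/* embedded VFS inode */
	};

	/* VFS inode -> filesystem inode, the role BTRFS_I() plays. */
	static inline struct demo_inode *DEMO_I(struct inode *inode)
	{
		return container_of(inode, struct demo_inode, vfs_inode);
	}

	static loff_t demo_i_size(struct demo_inode *di)
	{
		/* the VFS inode is always reachable from the private one */
		return i_size_read(&di->vfs_inode);
	}

Passing struct btrfs_inode directly avoids repeated BTRFS_I() conversions at the call sites and keeps the internal interfaces type-consistent.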
*/ int __btrfs_drop_extents(struct btrfs_trans_handle *trans, - struct btrfs_root *root, struct inode *inode, + struct btrfs_root *root, struct btrfs_inode *inode, struct btrfs_path *path, u64 start, u64 end, u64 *drop_end, int drop_cache, int replace_extent, @@ -744,7 +743,8 @@ int __btrfs_drop_extents(struct btrfs_trans_handle *trans, struct btrfs_ref ref = { 0 }; struct btrfs_key key; struct btrfs_key new_key; - u64 ino = btrfs_ino(BTRFS_I(inode)); + struct inode *vfs_inode = &inode->vfs_inode; + u64 ino = btrfs_ino(inode); u64 search_start = start; u64 disk_bytenr = 0; u64 num_bytes = 0; @@ -762,9 +762,9 @@ int __btrfs_drop_extents(struct btrfs_trans_handle *trans, int leafs_visited = 0; if (drop_cache) - btrfs_drop_extent_cache(BTRFS_I(inode), start, end - 1, 0); + btrfs_drop_extent_cache(inode, start, end - 1, 0); - if (start >= BTRFS_I(inode)->disk_i_size && !replace_extent) + if (start >= inode->disk_i_size && !replace_extent) modify_tree = 0; update_refs = (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) || @@ -935,7 +935,7 @@ next_slot: extent_end - end); btrfs_mark_buffer_dirty(leaf); if (update_refs && disk_bytenr > 0) - inode_sub_bytes(inode, end - key.offset); + inode_sub_bytes(vfs_inode, end - key.offset); break; } @@ -955,7 +955,7 @@ next_slot: start - key.offset); btrfs_mark_buffer_dirty(leaf); if (update_refs && disk_bytenr > 0) - inode_sub_bytes(inode, extent_end - start); + inode_sub_bytes(vfs_inode, extent_end - start); if (end == extent_end) break; @@ -979,7 +979,7 @@ delete_extent_item: if (update_refs && extent_type == BTRFS_FILE_EXTENT_INLINE) { - inode_sub_bytes(inode, + inode_sub_bytes(vfs_inode, extent_end - key.offset); extent_end = ALIGN(extent_end, fs_info->sectorsize); @@ -993,7 +993,7 @@ delete_extent_item: key.offset - extent_offset); ret = btrfs_free_extent(trans, &ref); BUG_ON(ret); /* -ENOMEM */ - inode_sub_bytes(inode, + inode_sub_bytes(vfs_inode, extent_end - key.offset); } @@ -1082,8 +1082,8 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, path = btrfs_alloc_path(); if (!path) return -ENOMEM; - ret = __btrfs_drop_extents(trans, root, inode, path, start, end, NULL, - drop_cache, 0, 0, NULL); + ret = __btrfs_drop_extents(trans, root, BTRFS_I(inode), path, start, + end, NULL, drop_cache, 0, 0, NULL); btrfs_free_path(path); return ret; } @@ -1532,8 +1532,8 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages, return ret; } -static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos, - size_t *write_bytes, bool nowait) +static int check_can_nocow(struct btrfs_inode *inode, loff_t pos, + size_t *write_bytes, bool nowait) { struct btrfs_fs_info *fs_info = inode->root->fs_info; struct btrfs_root *root = inode->root; @@ -1541,6 +1541,9 @@ static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos, u64 num_bytes; int ret; + if (!(inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC))) + return 0; + if (!nowait && !btrfs_drew_try_write_lock(&root->snapshot_lock)) return -EAGAIN; @@ -1583,6 +1586,42 @@ out_unlock: return ret; } +static int check_nocow_nolock(struct btrfs_inode *inode, loff_t pos, + size_t *write_bytes) +{ + return check_can_nocow(inode, pos, write_bytes, true); +} + +/* + * Check if we can do nocow write into the range [@pos, @pos + @write_bytes) + * + * @pos: File offset + * @write_bytes: The length to write, will be updated to the nocow writeable + * range + * + * This function will flush ordered extents in the range to ensure proper + * nocow checks. 
+ * + * Return: + * >0 and update @write_bytes if we can do nocow write + * 0 if we can't do nocow write + * -EAGAIN if we can't get the needed lock or there are ordered extents + * for * (nowait == true) case + * <0 if other error happened + * + * NOTE: Callers need to release the lock by btrfs_check_nocow_unlock(). + */ +int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos, + size_t *write_bytes) +{ + return check_can_nocow(inode, pos, write_bytes, false); +} + +void btrfs_check_nocow_unlock(struct btrfs_inode *inode) +{ + btrfs_drew_write_unlock(&inode->root->snapshot_lock); +} + static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb, struct iov_iter *i) { @@ -1590,7 +1629,6 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb, loff_t pos = iocb->ki_pos; struct inode *inode = file_inode(file); struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct btrfs_root *root = BTRFS_I(inode)->root; struct page **pages = NULL; struct extent_changeset *data_reserved = NULL; u64 release_bytes = 0; @@ -1643,13 +1681,12 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb, fs_info->sectorsize); extent_changeset_release(data_reserved); - ret = btrfs_check_data_free_space(inode, &data_reserved, pos, + ret = btrfs_check_data_free_space(BTRFS_I(inode), + &data_reserved, pos, write_bytes); if (ret < 0) { - if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW | - BTRFS_INODE_PREALLOC)) && - check_can_nocow(BTRFS_I(inode), pos, - &write_bytes, false) > 0) { + if (btrfs_check_nocow_lock(BTRFS_I(inode), pos, + &write_bytes) > 0) { /* * For nodata cow case, no need to reserve * data space. @@ -1674,11 +1711,11 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb, reserve_bytes); if (ret) { if (!only_release_metadata) - btrfs_free_reserved_data_space(inode, + btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved, pos, write_bytes); else - btrfs_drew_write_unlock(&root->snapshot_lock); + btrfs_check_nocow_unlock(BTRFS_I(inode)); break; } @@ -1748,7 +1785,7 @@ again: __pos = round_down(pos, fs_info->sectorsize) + (dirty_pages << PAGE_SHIFT); - btrfs_delalloc_release_space(inode, + btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, __pos, release_bytes, true); } @@ -1758,8 +1795,9 @@ again: fs_info->sectorsize); if (copied > 0) - ret = btrfs_dirty_pages(inode, pages, dirty_pages, - pos, copied, &cached_state); + ret = btrfs_dirty_pages(BTRFS_I(inode), pages, + dirty_pages, pos, copied, + &cached_state); /* * If we have not locked the extent range, because the range's @@ -1782,7 +1820,7 @@ again: release_bytes = 0; if (only_release_metadata) - btrfs_drew_write_unlock(&root->snapshot_lock); + btrfs_check_nocow_unlock(BTRFS_I(inode)); if (only_release_metadata && copied > 0) { lockstart = round_down(pos, @@ -1800,8 +1838,6 @@ again: cond_resched(); balance_dirty_pages_ratelimited(inode->i_mapping); - if (dirty_pages < (fs_info->nodesize >> PAGE_SHIFT) + 1) - btrfs_btree_balance_dirty(fs_info); pos += copied; num_written += copied; @@ -1811,11 +1847,12 @@ again: if (release_bytes) { if (only_release_metadata) { - btrfs_drew_write_unlock(&root->snapshot_lock); + btrfs_check_nocow_unlock(BTRFS_I(inode)); btrfs_delalloc_release_metadata(BTRFS_I(inode), release_bytes, true); } else { - btrfs_delalloc_release_space(inode, data_reserved, + btrfs_delalloc_release_space(BTRFS_I(inode), + data_reserved, round_down(pos, fs_info->sectorsize), release_bytes, true); } @@ -1926,10 +1963,8 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb, 
* We will allocate space in case nodatacow is not set, * so bail */ - if (!(BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW | - BTRFS_INODE_PREALLOC)) || - check_can_nocow(BTRFS_I(inode), pos, &nocow_bytes, - true) <= 0) { + if (check_nocow_nolock(BTRFS_I(inode), pos, &nocow_bytes) + <= 0) { inode_unlock(inode); return -EAGAIN; } @@ -2598,7 +2633,7 @@ int btrfs_punch_hole_range(struct inode *inode, struct btrfs_path *path, cur_offset = start; while (cur_offset < end) { - ret = __btrfs_drop_extents(trans, root, inode, path, + ret = __btrfs_drop_extents(trans, root, BTRFS_I(inode), path, cur_offset, end + 1, &drop_end, 1, 0, 0, NULL); if (ret != -ENOSPC) { @@ -3176,14 +3211,14 @@ reserve_space: if (ret < 0) goto out; space_reserved = true; - ret = btrfs_qgroup_reserve_data(inode, &data_reserved, - alloc_start, bytes_to_reserve); - if (ret) - goto out; ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend, &cached_state); if (ret) goto out; + ret = btrfs_qgroup_reserve_data(BTRFS_I(inode), &data_reserved, + alloc_start, bytes_to_reserve); + if (ret) + goto out; ret = btrfs_prealloc_file_range(inode, mode, alloc_start, alloc_end - alloc_start, i_blocksize(inode), @@ -3199,7 +3234,7 @@ reserve_space: ret = btrfs_fallocate_update_isize(inode, offset + len, mode); out: if (ret && space_reserved) - btrfs_free_reserved_data_space(inode, data_reserved, + btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved, alloc_start, bytes_to_reserve); extent_changeset_free(data_reserved); @@ -3350,8 +3385,9 @@ static long btrfs_fallocate(struct file *file, int mode, free_extent_map(em); break; } - ret = btrfs_qgroup_reserve_data(inode, &data_reserved, - cur_offset, last_byte - cur_offset); + ret = btrfs_qgroup_reserve_data(BTRFS_I(inode), + &data_reserved, cur_offset, + last_byte - cur_offset); if (ret < 0) { cur_offset = last_byte; free_extent_map(em); @@ -3363,8 +3399,9 @@ static long btrfs_fallocate(struct file *file, int mode, * range, free reserved data space first, otherwise * it'll result in false ENOSPC error. */ - btrfs_free_reserved_data_space(inode, data_reserved, - cur_offset, last_byte - cur_offset); + btrfs_free_reserved_data_space(BTRFS_I(inode), + data_reserved, cur_offset, + last_byte - cur_offset); } free_extent_map(em); cur_offset = last_byte; @@ -3381,7 +3418,7 @@ static long btrfs_fallocate(struct file *file, int mode, range->len, i_blocksize(inode), offset + len, &alloc_hint); else - btrfs_free_reserved_data_space(inode, + btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved, range->start, range->len); list_del(&range->list); @@ -3402,7 +3439,7 @@ out: inode_unlock(inode); /* Let go of our reservation. */ if (ret != 0 && !(mode & FALLOC_FL_ZERO_RANGE)) - btrfs_free_reserved_data_space(inode, data_reserved, + btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved, cur_offset, alloc_end - cur_offset); extent_changeset_free(data_reserved); return ret; diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 55955bd424d7..6d961e11639e 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1334,8 +1334,9 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, io_ctl_zero_remaining_pages(io_ctl); /* Everything is written out, now we dirty the pages in the file. 
*/ - ret = btrfs_dirty_pages(inode, io_ctl->pages, io_ctl->num_pages, 0, - i_size_read(inode), &cached_state); + ret = btrfs_dirty_pages(BTRFS_I(inode), io_ctl->pages, + io_ctl->num_pages, 0, i_size_read(inode), + &cached_state); if (ret) goto out_nospc; @@ -2703,8 +2704,7 @@ void btrfs_init_free_space_ctl(struct btrfs_block_group *block_group) * pointed to by the cluster, someone else raced in and freed the * cluster already. In that case, we just return without changing anything */ -static int -__btrfs_return_cluster_to_free_space( +static void __btrfs_return_cluster_to_free_space( struct btrfs_block_group *block_group, struct btrfs_free_cluster *cluster) { @@ -2756,7 +2756,6 @@ __btrfs_return_cluster_to_free_space( out: spin_unlock(&cluster->lock); btrfs_put_block_group(block_group); - return 0; } static void __btrfs_remove_free_space_cache_locked( @@ -2907,12 +2906,11 @@ out: * Otherwise, it'll get a reference on the block group pointed to by the * cluster and remove the cluster from it. */ -int btrfs_return_cluster_to_free_space( +void btrfs_return_cluster_to_free_space( struct btrfs_block_group *block_group, struct btrfs_free_cluster *cluster) { struct btrfs_free_space_ctl *ctl; - int ret; /* first, get a safe pointer to the block group */ spin_lock(&cluster->lock); @@ -2920,28 +2918,27 @@ int btrfs_return_cluster_to_free_space( block_group = cluster->block_group; if (!block_group) { spin_unlock(&cluster->lock); - return 0; + return; } } else if (cluster->block_group != block_group) { /* someone else has already freed it don't redo their work */ spin_unlock(&cluster->lock); - return 0; + return; } - atomic_inc(&block_group->count); + btrfs_get_block_group(block_group); spin_unlock(&cluster->lock); ctl = block_group->free_space_ctl; /* now return any extents the cluster had on it */ spin_lock(&ctl->tree_lock); - ret = __btrfs_return_cluster_to_free_space(block_group, cluster); + __btrfs_return_cluster_to_free_space(block_group, cluster); spin_unlock(&ctl->tree_lock); btrfs_discard_queue_work(&block_group->fs_info->discard_ctl, block_group); /* finally drop our ref */ btrfs_put_block_group(block_group); - return ret; } static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group *block_group, @@ -3358,7 +3355,7 @@ int btrfs_find_space_cluster(struct btrfs_block_group *block_group, list_del_init(&entry->list); if (!ret) { - atomic_inc(&block_group->count); + btrfs_get_block_group(block_group); list_add_tail(&cluster->block_group_list, &block_group->cluster_list); cluster->block_group = block_group; diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index 2e0a8077aa74..e3d5e0ad8f8e 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h @@ -136,7 +136,7 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster); u64 btrfs_alloc_from_cluster(struct btrfs_block_group *block_group, struct btrfs_free_cluster *cluster, u64 bytes, u64 min_start, u64 *max_extent_size); -int btrfs_return_cluster_to_free_space( +void btrfs_return_cluster_to_free_space( struct btrfs_block_group *block_group, struct btrfs_free_cluster *cluster); int btrfs_trim_block_group(struct btrfs_block_group *block_group, diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index 6009e0e939b5..76d2e43817ea 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c @@ -495,7 +495,8 @@ again: /* Just to make sure we have enough space */ prealloc += 8 * PAGE_SIZE; - ret = btrfs_delalloc_reserve_space(inode, &data_reserved, 0, prealloc); + ret = 
btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved, 0, + prealloc); if (ret) goto out_put; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 43c803c16b48..611b3412fbfd 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -80,17 +80,17 @@ struct kmem_cache *btrfs_free_space_bitmap_cachep; static int btrfs_setsize(struct inode *inode, struct iattr *attr); static int btrfs_truncate(struct inode *inode, bool skip_writeback); static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent); -static noinline int cow_file_range(struct inode *inode, +static noinline int cow_file_range(struct btrfs_inode *inode, struct page *locked_page, u64 start, u64 end, int *page_started, unsigned long *nr_written, int unlock); -static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len, - u64 orig_start, u64 block_start, +static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start, + u64 len, u64 orig_start, u64 block_start, u64 block_len, u64 orig_block_len, u64 ram_bytes, int compress_type, int type); -static void __endio_write_update_ordered(struct inode *inode, +static void __endio_write_update_ordered(struct btrfs_inode *inode, const u64 offset, const u64 bytes, const bool uptodate); @@ -104,7 +104,7 @@ static void __endio_write_update_ordered(struct inode *inode, * to be released, which we want to happen only when finishing the ordered * extent (btrfs_finish_ordered_io()). */ -static inline void btrfs_cleanup_ordered_extents(struct inode *inode, +static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode, struct page *locked_page, u64 offset, u64 bytes) { @@ -116,7 +116,7 @@ static inline void btrfs_cleanup_ordered_extents(struct inode *inode, struct page *page; while (index <= end_index) { - page = find_get_page(inode->i_mapping, index); + page = find_get_page(inode->vfs_inode.i_mapping, index); index++; if (!page) continue; @@ -274,15 +274,15 @@ fail: * does the checks required to make sure the data is small enough * to fit as an inline extent. 
*/ -static noinline int cow_file_range_inline(struct inode *inode, u64 start, +static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 start, u64 end, size_t compressed_size, int compress_type, struct page **compressed_pages) { - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_root *root = inode->root; struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_trans_handle *trans; - u64 isize = i_size_read(inode); + u64 isize = i_size_read(&inode->vfs_inode); u64 actual_end = min(end + 1, isize); u64 inline_len = actual_end - start; u64 aligned_end = ALIGN(end, fs_info->sectorsize); @@ -314,7 +314,7 @@ static noinline int cow_file_range_inline(struct inode *inode, u64 start, btrfs_free_path(path); return PTR_ERR(trans); } - trans->block_rsv = &BTRFS_I(inode)->block_rsv; + trans->block_rsv = &inode->block_rsv; if (compressed_size && compressed_pages) extent_item_size = btrfs_file_extent_calc_inline_size( @@ -323,9 +323,9 @@ static noinline int cow_file_range_inline(struct inode *inode, u64 start, extent_item_size = btrfs_file_extent_calc_inline_size( inline_len); - ret = __btrfs_drop_extents(trans, root, inode, path, - start, aligned_end, NULL, - 1, 1, extent_item_size, &extent_inserted); + ret = __btrfs_drop_extents(trans, root, inode, path, start, aligned_end, + NULL, 1, 1, extent_item_size, + &extent_inserted); if (ret) { btrfs_abort_transaction(trans, ret); goto out; @@ -334,7 +334,7 @@ static noinline int cow_file_range_inline(struct inode *inode, u64 start, if (isize > actual_end) inline_len = min_t(u64, isize, actual_end); ret = insert_inline_extent(trans, path, extent_inserted, - root, inode, start, + root, &inode->vfs_inode, start, inline_len, compressed_size, compress_type, compressed_pages); if (ret && ret != -ENOSPC) { @@ -345,8 +345,8 @@ static noinline int cow_file_range_inline(struct inode *inode, u64 start, goto out; } - set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags); - btrfs_drop_extent_cache(BTRFS_I(inode), start, aligned_end - 1, 0); + set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags); + btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0); out: /* * Don't forget to free the reserved space, as for inlined extent @@ -412,10 +412,10 @@ static noinline int add_async_extent(struct async_chunk *cow, /* * Check if the inode has flags compatible with compression */ -static inline bool inode_can_compress(struct inode *inode) +static inline bool inode_can_compress(struct btrfs_inode *inode) { - if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW || - BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) + if (inode->flags & BTRFS_INODE_NODATACOW || + inode->flags & BTRFS_INODE_NODATASUM) return false; return true; } @@ -424,29 +424,30 @@ static inline bool inode_can_compress(struct inode *inode) * Check if the inode needs to be submitted to compression, based on mount * options, defragmentation, properties or heuristics. 
*/ -static inline int inode_need_compress(struct inode *inode, u64 start, u64 end) +static inline int inode_need_compress(struct btrfs_inode *inode, u64 start, + u64 end) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = inode->root->fs_info; if (!inode_can_compress(inode)) { WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG), KERN_ERR "BTRFS: unexpected compression for ino %llu\n", - btrfs_ino(BTRFS_I(inode))); + btrfs_ino(inode)); return 0; } /* force compress */ if (btrfs_test_opt(fs_info, FORCE_COMPRESS)) return 1; /* defrag ioctl */ - if (BTRFS_I(inode)->defrag_compress) + if (inode->defrag_compress) return 1; /* bad compression ratios */ - if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) + if (inode->flags & BTRFS_INODE_NOCOMPRESS) return 0; if (btrfs_test_opt(fs_info, COMPRESS) || - BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS || - BTRFS_I(inode)->prop_compress) - return btrfs_compress_heuristic(inode, start, end); + inode->flags & BTRFS_INODE_COMPRESS || + inode->prop_compress) + return btrfs_compress_heuristic(&inode->vfs_inode, start, end); return 0; } @@ -552,7 +553,7 @@ again: * inode has not been flagged as nocompress. This flag can * change at any time if we discover bad compression ratios. */ - if (inode_need_compress(inode, start, end)) { + if (inode_need_compress(BTRFS_I(inode), start, end)) { WARN_ON(pages); pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS); if (!pages) { @@ -616,11 +617,12 @@ cont: /* we didn't compress the entire range, try * to make an uncompressed inline extent. */ - ret = cow_file_range_inline(inode, start, end, 0, - BTRFS_COMPRESS_NONE, NULL); + ret = cow_file_range_inline(BTRFS_I(inode), start, end, + 0, BTRFS_COMPRESS_NONE, + NULL); } else { /* try making a compressed inline extent */ - ret = cow_file_range_inline(inode, start, end, + ret = cow_file_range_inline(BTRFS_I(inode), start, end, total_compressed, compress_type, pages); } @@ -642,7 +644,8 @@ cont: * our outstanding extent for clearing delalloc for this * range. */ - extent_clear_unlock_delalloc(inode, start, end, NULL, + extent_clear_unlock_delalloc(BTRFS_I(inode), start, end, + NULL, clear_flags, PAGE_UNLOCK | PAGE_CLEAR_DIRTY | @@ -762,14 +765,14 @@ static void free_async_extent_pages(struct async_extent *async_extent) */ static noinline void submit_compressed_extents(struct async_chunk *async_chunk) { - struct inode *inode = async_chunk->inode; - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_inode *inode = BTRFS_I(async_chunk->inode); + struct btrfs_fs_info *fs_info = inode->root->fs_info; struct async_extent *async_extent; u64 alloc_hint = 0; struct btrfs_key ins; struct extent_map *em; - struct btrfs_root *root = BTRFS_I(inode)->root; - struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; + struct btrfs_root *root = inode->root; + struct extent_io_tree *io_tree = &inode->io_tree; int ret = 0; again: @@ -802,7 +805,7 @@ retry: * all those pages down to the drive. */ if (!page_started && !ret) - extent_write_locked_range(inode, + extent_write_locked_range(&inode->vfs_inode, async_extent->start, async_extent->start + async_extent->ram_size - 1, @@ -832,7 +835,7 @@ retry: * will not submit these pages down to lower * layers. 
*/ - extent_range_redirty_for_io(inode, + extent_range_redirty_for_io(&inode->vfs_inode, async_extent->start, async_extent->start + async_extent->ram_size - 1); @@ -867,8 +870,7 @@ retry: BTRFS_ORDERED_COMPRESSED, async_extent->compress_type); if (ret) { - btrfs_drop_extent_cache(BTRFS_I(inode), - async_extent->start, + btrfs_drop_extent_cache(inode, async_extent->start, async_extent->start + async_extent->ram_size - 1, 0); goto out_free_reserve; @@ -884,8 +886,7 @@ retry: NULL, EXTENT_LOCKED | EXTENT_DELALLOC, PAGE_UNLOCK | PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK); - if (btrfs_submit_compressed_write(inode, - async_extent->start, + if (btrfs_submit_compressed_write(inode, async_extent->start, async_extent->ram_size, ins.objectid, ins.offset, async_extent->pages, @@ -896,12 +897,11 @@ retry: const u64 start = async_extent->start; const u64 end = start + async_extent->ram_size - 1; - p->mapping = inode->i_mapping; + p->mapping = inode->vfs_inode.i_mapping; btrfs_writepage_endio_finish_ordered(p, start, end, 0); p->mapping = NULL; - extent_clear_unlock_delalloc(inode, start, end, - NULL, 0, + extent_clear_unlock_delalloc(inode, start, end, NULL, 0, PAGE_END_WRITEBACK | PAGE_SET_ERROR); free_async_extent_pages(async_extent); @@ -929,10 +929,10 @@ out_free: goto again; } -static u64 get_extent_allocation_hint(struct inode *inode, u64 start, +static u64 get_extent_allocation_hint(struct btrfs_inode *inode, u64 start, u64 num_bytes) { - struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; + struct extent_map_tree *em_tree = &inode->extent_tree; struct extent_map *em; u64 alloc_hint = 0; @@ -974,13 +974,13 @@ static u64 get_extent_allocation_hint(struct inode *inode, u64 start, * required to start IO on it. It may be clean and already done with * IO when we return. */ -static noinline int cow_file_range(struct inode *inode, +static noinline int cow_file_range(struct btrfs_inode *inode, struct page *locked_page, u64 start, u64 end, int *page_started, unsigned long *nr_written, int unlock) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_root *root = inode->root; + struct btrfs_fs_info *fs_info = root->fs_info; u64 alloc_hint = 0; u64 num_bytes; unsigned long ram_size; @@ -994,7 +994,7 @@ static noinline int cow_file_range(struct inode *inode, bool extent_reserved = false; int ret = 0; - if (btrfs_is_free_space_inode(BTRFS_I(inode))) { + if (btrfs_is_free_space_inode(inode)) { WARN_ON_ONCE(1); ret = -EINVAL; goto out_unlock; @@ -1004,7 +1004,7 @@ static noinline int cow_file_range(struct inode *inode, num_bytes = max(blocksize, num_bytes); ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy)); - inode_should_defrag(BTRFS_I(inode), start, end, num_bytes, SZ_64K); + inode_should_defrag(inode, start, end, num_bytes, SZ_64K); if (start == 0) { /* lets try to make an inline extent */ @@ -1033,8 +1033,7 @@ static noinline int cow_file_range(struct inode *inode, } alloc_hint = get_extent_allocation_hint(inode, start, num_bytes); - btrfs_drop_extent_cache(BTRFS_I(inode), start, - start + num_bytes - 1, 0); + btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0); /* * Relocation relies on the relocated extents to have exactly the same @@ -1098,7 +1097,7 @@ static noinline int cow_file_range(struct inode *inode, * skip current ordered extent. 
*/ if (ret) - btrfs_drop_extent_cache(BTRFS_I(inode), start, + btrfs_drop_extent_cache(inode, start, start + ram_size - 1, 0); } @@ -1114,8 +1113,7 @@ static noinline int cow_file_range(struct inode *inode, page_ops = unlock ? PAGE_UNLOCK : 0; page_ops |= PAGE_SET_PRIVATE2; - extent_clear_unlock_delalloc(inode, start, - start + ram_size - 1, + extent_clear_unlock_delalloc(inode, start, start + ram_size - 1, locked_page, EXTENT_LOCKED | EXTENT_DELALLOC, page_ops); @@ -1139,7 +1137,7 @@ out: return ret; out_drop_extent_cache: - btrfs_drop_extent_cache(BTRFS_I(inode), start, start + ram_size - 1, 0); + btrfs_drop_extent_cache(inode, start, start + ram_size - 1, 0); out_reserve: btrfs_dec_block_group_reservations(fs_info, ins.objectid); btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1); @@ -1236,13 +1234,13 @@ static noinline void async_cow_free(struct btrfs_work *work) kvfree(async_chunk->pending); } -static int cow_file_range_async(struct inode *inode, +static int cow_file_range_async(struct btrfs_inode *inode, struct writeback_control *wbc, struct page *locked_page, u64 start, u64 end, int *page_started, unsigned long *nr_written) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = inode->root->fs_info; struct cgroup_subsys_state *blkcg_css = wbc_blkcg_css(wbc); struct async_cow *ctx; struct async_chunk *async_chunk; @@ -1254,9 +1252,9 @@ static int cow_file_range_async(struct inode *inode, unsigned nofs_flag; const unsigned int write_flags = wbc_to_write_flags(wbc); - unlock_extent(&BTRFS_I(inode)->io_tree, start, end); + unlock_extent(&inode->io_tree, start, end); - if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS && + if (inode->flags & BTRFS_INODE_NOCOMPRESS && !btrfs_test_opt(fs_info, FORCE_COMPRESS)) { num_chunks = 1; should_compress = false; @@ -1294,9 +1292,9 @@ static int cow_file_range_async(struct inode *inode, * igrab is called higher up in the call chain, take only the * lightweight reference for the callback lifetime */ - ihold(inode); + ihold(&inode->vfs_inode); async_chunk[i].pending = &ctx->num_chunks; - async_chunk[i].inode = inode; + async_chunk[i].inode = &inode->vfs_inode; async_chunk[i].start = start; async_chunk[i].end = cur_end; async_chunk[i].write_flags = write_flags; @@ -1373,15 +1371,15 @@ static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info, return 1; } -static int fallback_to_cow(struct inode *inode, struct page *locked_page, +static int fallback_to_cow(struct btrfs_inode *inode, struct page *locked_page, const u64 start, const u64 end, int *page_started, unsigned long *nr_written) { - const bool is_space_ino = btrfs_is_free_space_inode(BTRFS_I(inode)); - const bool is_reloc_ino = (BTRFS_I(inode)->root->root_key.objectid == + const bool is_space_ino = btrfs_is_free_space_inode(inode); + const bool is_reloc_ino = (inode->root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID); const u64 range_bytes = end + 1 - start; - struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; + struct extent_io_tree *io_tree = &inode->io_tree; u64 range_start = start; u64 count; @@ -1421,7 +1419,7 @@ static int fallback_to_cow(struct inode *inode, struct page *locked_page, EXTENT_NORESERVE, 0); if (count > 0 || is_space_ino || is_reloc_ino) { u64 bytes = count; - struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; + struct btrfs_fs_info *fs_info = inode->root->fs_info; struct btrfs_space_info *sinfo = fs_info->data_sinfo; if (is_space_ino || is_reloc_ino) @@ -1447,21 +1445,21 @@ static int 
fallback_to_cow(struct inode *inode, struct page *locked_page, * If no cow copies or snapshots exist, we write directly to the existing * blocks on disk */ -static noinline int run_delalloc_nocow(struct inode *inode, +static noinline int run_delalloc_nocow(struct btrfs_inode *inode, struct page *locked_page, const u64 start, const u64 end, int *page_started, int force, unsigned long *nr_written) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_fs_info *fs_info = inode->root->fs_info; + struct btrfs_root *root = inode->root; struct btrfs_path *path; u64 cow_start = (u64)-1; u64 cur_offset = start; int ret; bool check_prev = true; - const bool freespace_inode = btrfs_is_free_space_inode(BTRFS_I(inode)); - u64 ino = btrfs_ino(BTRFS_I(inode)); + const bool freespace_inode = btrfs_is_free_space_inode(inode); + u64 ino = btrfs_ino(inode); bool nocow = false; u64 disk_bytenr = 0; @@ -1687,8 +1685,8 @@ out_check: * NOCOW, following one which needs to be COW'ed */ if (cow_start != (u64)-1) { - ret = fallback_to_cow(inode, locked_page, cow_start, - found_key.offset - 1, + ret = fallback_to_cow(inode, locked_page, + cow_start, found_key.offset - 1, page_started, nr_written); if (ret) goto error; @@ -1716,8 +1714,7 @@ out_check: num_bytes, BTRFS_ORDERED_PREALLOC); if (ret) { - btrfs_drop_extent_cache(BTRFS_I(inode), - cur_offset, + btrfs_drop_extent_cache(inode, cur_offset, cur_offset + num_bytes - 1, 0); goto error; @@ -1793,11 +1790,11 @@ error: return ret; } -static inline int need_force_cow(struct inode *inode, u64 start, u64 end) +static inline int need_force_cow(struct btrfs_inode *inode, u64 start, u64 end) { - if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && - !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)) + if (!(inode->flags & BTRFS_INODE_NODATACOW) && + !(inode->flags & BTRFS_INODE_PREALLOC)) return 0; /* @@ -1805,9 +1802,8 @@ static inline int need_force_cow(struct inode *inode, u64 start, u64 end) * if is not zero, it means the file is defragging. * Force cow if given extent needs to be defragged. */ - if (BTRFS_I(inode)->defrag_bytes && - test_range_bit(&BTRFS_I(inode)->io_tree, start, end, - EXTENT_DEFRAG, 0, NULL)) + if (inode->defrag_bytes && + test_range_bit(&inode->io_tree, start, end, EXTENT_DEFRAG, 0, NULL)) return 1; return 0; @@ -1817,26 +1813,25 @@ static inline int need_force_cow(struct inode *inode, u64 start, u64 end) * Function to process delayed allocation (create CoW) for ranges which are * being touched for the first time. 
*/ -int btrfs_run_delalloc_range(struct inode *inode, struct page *locked_page, +int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page, u64 start, u64 end, int *page_started, unsigned long *nr_written, struct writeback_control *wbc) { int ret; int force_cow = need_force_cow(inode, start, end); - if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW && !force_cow) { + if (inode->flags & BTRFS_INODE_NODATACOW && !force_cow) { ret = run_delalloc_nocow(inode, locked_page, start, end, page_started, 1, nr_written); - } else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC && !force_cow) { + } else if (inode->flags & BTRFS_INODE_PREALLOC && !force_cow) { ret = run_delalloc_nocow(inode, locked_page, start, end, page_started, 0, nr_written); } else if (!inode_can_compress(inode) || !inode_need_compress(inode, start, end)) { ret = cow_file_range(inode, locked_page, start, end, - page_started, nr_written, 1); + page_started, nr_written, 1); } else { - set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, - &BTRFS_I(inode)->runtime_flags); + set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags); ret = cow_file_range_async(inode, wbc, locked_page, start, end, page_started, nr_written); } @@ -2085,9 +2080,7 @@ void btrfs_clear_delalloc_extent(struct inode *vfs_inode, if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID && do_list && !(state->state & EXTENT_NORESERVE) && (*bits & EXTENT_CLEAR_DATA_RESV)) - btrfs_free_reserved_data_space_noquota( - &inode->vfs_inode, - state->start, len); + btrfs_free_reserved_data_space_noquota(fs_info, len); percpu_counter_add_batch(&fs_info->delalloc_bytes, -len, fs_info->delalloc_batch); @@ -2163,7 +2156,7 @@ static blk_status_t btrfs_submit_bio_start(void *private_data, struct bio *bio, struct inode *inode = private_data; blk_status_t ret = 0; - ret = btrfs_csum_one_bio(inode, bio, 0, 0); + ret = btrfs_csum_one_bio(BTRFS_I(inode), bio, 0, 0); BUG_ON(ret); /* -ENOMEM */ return 0; } @@ -2228,7 +2221,7 @@ static blk_status_t btrfs_submit_bio_hook(struct inode *inode, struct bio *bio, 0, inode, btrfs_submit_bio_start); goto out; } else if (!skip_sum) { - ret = btrfs_csum_one_bio(inode, bio, 0, 0); + ret = btrfs_csum_one_bio(BTRFS_I(inode), bio, 0, 0); if (ret) goto out; } @@ -2265,13 +2258,13 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans, return 0; } -int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, +int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end, unsigned int extra_bits, struct extent_state **cached_state) { WARN_ON(PAGE_ALIGNED(end)); - return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end, - extra_bits, cached_state); + return set_extent_delalloc(&inode->io_tree, start, end, extra_bits, + cached_state); } /* see btrfs_writepage_start_hook for details on why this is required */ @@ -2288,7 +2281,7 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work) struct extent_state *cached_state = NULL; struct extent_changeset *data_reserved = NULL; struct page *page; - struct inode *inode; + struct btrfs_inode *inode; u64 page_start; u64 page_end; int ret = 0; @@ -2296,7 +2289,7 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work) fixup = container_of(work, struct btrfs_writepage_fixup, work); page = fixup->page; - inode = fixup->inode; + inode = BTRFS_I(fixup->inode); page_start = page_offset(page); page_end = page_offset(page) + PAGE_SIZE - 1; @@ -2333,8 +2326,7 @@ again: * when the page was already properly dealt with. 
*/ if (!ret) { - btrfs_delalloc_release_extents(BTRFS_I(inode), - PAGE_SIZE); + btrfs_delalloc_release_extents(inode, PAGE_SIZE); btrfs_delalloc_release_space(inode, data_reserved, page_start, PAGE_SIZE, true); @@ -2350,20 +2342,18 @@ again: if (ret) goto out_page; - lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, - &cached_state); + lock_extent_bits(&inode->io_tree, page_start, page_end, &cached_state); /* already ordered? We're done */ if (PagePrivate2(page)) goto out_reserved; - ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start, - PAGE_SIZE); + ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE); if (ordered) { - unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, - page_end, &cached_state); + unlock_extent_cached(&inode->io_tree, page_start, page_end, + &cached_state); unlock_page(page); - btrfs_start_ordered_extent(inode, ordered, 1); + btrfs_start_ordered_extent(&inode->vfs_inode, ordered, 1); btrfs_put_ordered_extent(ordered); goto again; } @@ -2383,11 +2373,11 @@ again: BUG_ON(!PageDirty(page)); free_delalloc_space = false; out_reserved: - btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE); + btrfs_delalloc_release_extents(inode, PAGE_SIZE); if (free_delalloc_space) btrfs_delalloc_release_space(inode, data_reserved, page_start, PAGE_SIZE, true); - unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end, + unlock_extent_cached(&inode->io_tree, page_start, page_end, &cached_state); out_page: if (ret) { @@ -2410,7 +2400,7 @@ out_page: * that could need flushing space. Recursing back to fixup worker would * deadlock. */ - btrfs_add_delayed_iput(inode); + btrfs_add_delayed_iput(&inode->vfs_inode); } /* @@ -2466,18 +2456,18 @@ int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end) } static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, - struct inode *inode, u64 file_pos, - u64 disk_bytenr, u64 disk_num_bytes, - u64 num_bytes, u64 ram_bytes, - u8 compression, u8 encryption, - u16 other_encoding, int extent_type) + struct btrfs_inode *inode, u64 file_pos, + struct btrfs_file_extent_item *stack_fi, + u64 qgroup_reserved) { - struct btrfs_root *root = BTRFS_I(inode)->root; - struct btrfs_file_extent_item *fi; + struct btrfs_root *root = inode->root; struct btrfs_path *path; struct extent_buffer *leaf; struct btrfs_key ins; - u64 qg_released; + u64 disk_num_bytes = btrfs_stack_file_extent_disk_num_bytes(stack_fi); + u64 disk_bytenr = btrfs_stack_file_extent_disk_bytenr(stack_fi); + u64 num_bytes = btrfs_stack_file_extent_num_bytes(stack_fi); + u64 ram_bytes = btrfs_stack_file_extent_ram_bytes(stack_fi); int extent_inserted = 0; int ret; @@ -2496,60 +2486,42 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, */ ret = __btrfs_drop_extents(trans, root, inode, path, file_pos, file_pos + num_bytes, NULL, 0, - 1, sizeof(*fi), &extent_inserted); + 1, sizeof(*stack_fi), &extent_inserted); if (ret) goto out; if (!extent_inserted) { - ins.objectid = btrfs_ino(BTRFS_I(inode)); + ins.objectid = btrfs_ino(inode); ins.offset = file_pos; ins.type = BTRFS_EXTENT_DATA_KEY; path->leave_spinning = 1; ret = btrfs_insert_empty_item(trans, root, path, &ins, - sizeof(*fi)); + sizeof(*stack_fi)); if (ret) goto out; } leaf = path->nodes[0]; - fi = btrfs_item_ptr(leaf, path->slots[0], - struct btrfs_file_extent_item); - btrfs_set_file_extent_generation(leaf, fi, trans->transid); - btrfs_set_file_extent_type(leaf, fi, extent_type); - btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr); 
- btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes); - btrfs_set_file_extent_offset(leaf, fi, 0); - btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes); - btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes); - btrfs_set_file_extent_compression(leaf, fi, compression); - btrfs_set_file_extent_encryption(leaf, fi, encryption); - btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding); + btrfs_set_stack_file_extent_generation(stack_fi, trans->transid); + write_extent_buffer(leaf, stack_fi, + btrfs_item_ptr_offset(leaf, path->slots[0]), + sizeof(struct btrfs_file_extent_item)); btrfs_mark_buffer_dirty(leaf); btrfs_release_path(path); - inode_add_bytes(inode, num_bytes); + inode_add_bytes(&inode->vfs_inode, num_bytes); ins.objectid = disk_bytenr; ins.offset = disk_num_bytes; ins.type = BTRFS_EXTENT_ITEM_KEY; - ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode), file_pos, - ram_bytes); + ret = btrfs_inode_set_file_extent_range(inode, file_pos, ram_bytes); if (ret) goto out; - /* - * Release the reserved range from inode dirty range map, as it is - * already moved into delayed_ref_head - */ - ret = btrfs_qgroup_release_data(inode, file_pos, ram_bytes); - if (ret < 0) - goto out; - qg_released = ret; - ret = btrfs_alloc_reserved_file_extent(trans, root, - btrfs_ino(BTRFS_I(inode)), - file_pos, qg_released, &ins); + ret = btrfs_alloc_reserved_file_extent(trans, root, btrfs_ino(inode), + file_pos, qgroup_reserved, &ins); out: btrfs_free_path(path); @@ -2571,7 +2543,33 @@ static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info, btrfs_put_block_group(cache); } -/* as ordered data IO finishes, this gets called so we can finish +static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans, + struct inode *inode, + struct btrfs_ordered_extent *oe) +{ + struct btrfs_file_extent_item stack_fi; + u64 logical_len; + + memset(&stack_fi, 0, sizeof(stack_fi)); + btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_REG); + btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, oe->disk_bytenr); + btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi, + oe->disk_num_bytes); + if (test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags)) + logical_len = oe->truncated_len; + else + logical_len = oe->num_bytes; + btrfs_set_stack_file_extent_num_bytes(&stack_fi, logical_len); + btrfs_set_stack_file_extent_ram_bytes(&stack_fi, logical_len); + btrfs_set_stack_file_extent_compression(&stack_fi, oe->compress_type); + /* Encryption and other encoding is reserved and all 0 */ + + return insert_reserved_file_extent(trans, BTRFS_I(inode), oe->file_offset, + &stack_fi, oe->qgroup_rsv); +} + +/* + * As ordered data IO finishes, this gets called so we can finish * an ordered extent if the range of bytes in the file it covers are * fully written. */ @@ -2622,13 +2620,6 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */ - /* - * For mwrite(mmap + memset to write) case, we still reserve - * space for NOCOW range. 
- * As NOCOW won't cause a new delayed ref, just free the space - */ - btrfs_qgroup_free_data(inode, NULL, start, - ordered_extent->num_bytes); btrfs_inode_safe_disk_i_size_write(inode, 0); if (freespace_inode) trans = btrfs_join_transaction_spacecache(root); @@ -2665,20 +2656,14 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) compress_type = ordered_extent->compress_type; if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { BUG_ON(compress_type); - btrfs_qgroup_free_data(inode, NULL, start, - ordered_extent->num_bytes); ret = btrfs_mark_extent_written(trans, BTRFS_I(inode), ordered_extent->file_offset, ordered_extent->file_offset + logical_len); } else { BUG_ON(root == fs_info->tree_root); - ret = insert_reserved_file_extent(trans, inode, start, - ordered_extent->disk_bytenr, - ordered_extent->disk_num_bytes, - logical_len, logical_len, - compress_type, 0, 0, - BTRFS_FILE_EXTENT_REG); + ret = insert_ordered_extent_file_extent(trans, inode, + ordered_extent); if (!ret) { clear_reserved_extent = false; btrfs_release_delalloc_bytes(fs_info, @@ -2830,6 +2815,9 @@ static int check_data_csum(struct inode *inode, struct btrfs_io_bio *io_bio, zeroit: btrfs_print_data_csum_error(BTRFS_I(inode), start, csum, csum_expected, io_bio->mirror_num); + if (io_bio->device) + btrfs_dev_stat_inc_and_print(io_bio->device, + BTRFS_DEV_STAT_CORRUPTION_ERRS); memset(kaddr + pgoff, 1, len); flush_dcache_page(page); kunmap_atomic(kaddr); @@ -3348,6 +3336,14 @@ cache_index: */ BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans; + /* + * Same logic as for last_unlink_trans. We don't persist the generation + * of the last transaction where this inode was used for a reflink + * operation, so after eviction and reloading the inode we must be + * pessimistic and assume the last transaction that modified the inode. 
+ */ + BTRFS_I(inode)->last_reflink_trans = BTRFS_I(inode)->last_trans; + path->slots[0]++; if (inode->i_nlink != 1 || path->slots[0] >= btrfs_header_nritems(leaf)) @@ -3496,7 +3492,7 @@ static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans, fill_inode_item(trans, leaf, inode_item, inode); btrfs_mark_buffer_dirty(leaf); - btrfs_set_inode_last_trans(trans, inode); + btrfs_set_inode_last_trans(trans, BTRFS_I(inode)); ret = 0; failed: btrfs_free_path(path); @@ -3526,7 +3522,7 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, ret = btrfs_delayed_update_inode(trans, root, inode); if (!ret) - btrfs_set_inode_last_trans(trans, inode); + btrfs_set_inode_last_trans(trans, BTRFS_I(inode)); return ret; } @@ -4041,6 +4037,8 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry) } } + free_anon_bdev(dest->anon_dev); + dest->anon_dev = 0; out_end_trans: trans->block_rsv = NULL; trans->bytes_reserved = 0; @@ -4511,11 +4509,13 @@ int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len, struct extent_state *cached_state = NULL; struct extent_changeset *data_reserved = NULL; char *kaddr; + bool only_release_metadata = false; u32 blocksize = fs_info->sectorsize; pgoff_t index = from >> PAGE_SHIFT; unsigned offset = from & (blocksize - 1); struct page *page; gfp_t mask = btrfs_alloc_write_mask(mapping); + size_t write_bytes = blocksize; int ret = 0; u64 block_start; u64 block_end; @@ -4527,15 +4527,28 @@ int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len, block_start = round_down(from, blocksize); block_end = block_start + blocksize - 1; - ret = btrfs_delalloc_reserve_space(inode, &data_reserved, - block_start, blocksize); - if (ret) + ret = btrfs_check_data_free_space(BTRFS_I(inode), &data_reserved, + block_start, blocksize); + if (ret < 0) { + if (btrfs_check_nocow_lock(BTRFS_I(inode), block_start, + &write_bytes) > 0) { + /* For nocow case, no need to reserve data space */ + only_release_metadata = true; + } else { + goto out; + } + } + ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), blocksize); + if (ret < 0) { + if (!only_release_metadata) + btrfs_free_reserved_data_space(BTRFS_I(inode), + data_reserved, block_start, blocksize); goto out; - + } again: page = find_or_create_page(mapping, index, mask); if (!page) { - btrfs_delalloc_release_space(inode, data_reserved, + btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, block_start, blocksize, true); btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize); ret = -ENOMEM; @@ -4560,7 +4573,7 @@ again: lock_extent_bits(io_tree, block_start, block_end, &cached_state); set_page_extent_mapped(page); - ordered = btrfs_lookup_ordered_extent(inode, block_start); + ordered = btrfs_lookup_ordered_extent(BTRFS_I(inode), block_start); if (ordered) { unlock_extent_cached(io_tree, block_start, block_end, &cached_state); @@ -4575,7 +4588,7 @@ again: EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0, &cached_state); - ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0, + ret = btrfs_set_extent_delalloc(BTRFS_I(inode), block_start, block_end, 0, &cached_state); if (ret) { unlock_extent_cached(io_tree, block_start, block_end, @@ -4600,14 +4613,26 @@ again: set_page_dirty(page); unlock_extent_cached(io_tree, block_start, block_end, &cached_state); + if (only_release_metadata) + set_extent_bit(&BTRFS_I(inode)->io_tree, block_start, + block_end, EXTENT_NORESERVE, NULL, NULL, + GFP_NOFS); + out_unlock: - if (ret) - 
btrfs_delalloc_release_space(inode, data_reserved, block_start, - blocksize, true); + if (ret) { + if (only_release_metadata) + btrfs_delalloc_release_metadata(BTRFS_I(inode), + blocksize, true); + else + btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, + block_start, blocksize, true); + } btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize); unlock_page(page); put_page(page); out: + if (only_release_metadata) + btrfs_check_nocow_unlock(BTRFS_I(inode)); extent_changeset_free(data_reserved); return ret; } @@ -4965,7 +4990,8 @@ static void evict_inode_truncate_pages(struct inode *inode) * Note, end is the bytenr of last byte, so we need + 1 here. */ if (state_flags & EXTENT_DELALLOC) - btrfs_qgroup_free_data(inode, NULL, start, end - start + 1); + btrfs_qgroup_free_data(BTRFS_I(inode), NULL, start, + end - start + 1); clear_extent_bit(io_tree, start, end, EXTENT_LOCKED | EXTENT_DELALLOC | @@ -6040,7 +6066,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, inode_tree_add(inode); trace_btrfs_inode_new(inode); - btrfs_set_inode_last_trans(trans, inode); + btrfs_set_inode_last_trans(trans, BTRFS_I(inode)); btrfs_update_root_times(trans, root); @@ -6849,7 +6875,7 @@ out: return em; } -static struct extent_map *btrfs_create_dio_extent(struct inode *inode, +static struct extent_map *btrfs_create_dio_extent(struct btrfs_inode *inode, const u64 start, const u64 len, const u64 orig_start, @@ -6863,21 +6889,19 @@ static struct extent_map *btrfs_create_dio_extent(struct inode *inode, int ret; if (type != BTRFS_ORDERED_NOCOW) { - em = create_io_em(inode, start, len, orig_start, - block_start, block_len, orig_block_len, - ram_bytes, + em = create_io_em(inode, start, len, orig_start, block_start, + block_len, orig_block_len, ram_bytes, BTRFS_COMPRESS_NONE, /* compress_type */ type); if (IS_ERR(em)) goto out; } - ret = btrfs_add_ordered_extent_dio(inode, start, block_start, - len, block_len, type); + ret = btrfs_add_ordered_extent_dio(inode, start, block_start, len, + block_len, type); if (ret) { if (em) { free_extent_map(em); - btrfs_drop_extent_cache(BTRFS_I(inode), start, - start + len - 1, 0); + btrfs_drop_extent_cache(inode, start, start + len - 1, 0); } em = ERR_PTR(ret); } @@ -6886,11 +6910,11 @@ static struct extent_map *btrfs_create_dio_extent(struct inode *inode, return em; } -static struct extent_map *btrfs_new_extent_direct(struct inode *inode, +static struct extent_map *btrfs_new_extent_direct(struct btrfs_inode *inode, u64 start, u64 len) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_root *root = inode->root; + struct btrfs_fs_info *fs_info = root->fs_info; struct extent_map *em; struct btrfs_key ins; u64 alloc_hint; @@ -6907,15 +6931,32 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode, ins.offset, BTRFS_ORDERED_REGULAR); btrfs_dec_block_group_reservations(fs_info, ins.objectid); if (IS_ERR(em)) - btrfs_free_reserved_extent(fs_info, ins.objectid, - ins.offset, 1); + btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, + 1); return em; } /* - * returns 1 when the nocow is safe, < 1 on error, 0 if the - * block must be cow'd + * Check if we can do nocow write into the range [@offset, @offset + @len) + * + * @offset: File offset + * @len: The length to write, will be updated to the nocow writeable + * range + * @orig_start: (optional) Return the original file offset of the file extent + * @orig_len: (optional) Return the original on-disk length of 
the file extent + * @ram_bytes: (optional) Return the ram_bytes of the file extent + * + * This function will flush ordered extents in the range to ensure proper + * nocow checks for (nowait == false) case. + * + * Return: + * >0 and update @len if we can do nocow write + * 0 if we can't do nocow write + * <0 if error happened + * + * NOTE: This only checks the file extents, caller is responsible to wait for + * any ordered extents. */ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, u64 *orig_start, u64 *orig_block_len, @@ -7142,8 +7183,8 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend, } /* The callers of this must take lock_extent() */ -static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len, - u64 orig_start, u64 block_start, +static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start, + u64 len, u64 orig_start, u64 block_start, u64 block_len, u64 orig_block_len, u64 ram_bytes, int compress_type, int type) @@ -7157,7 +7198,7 @@ static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len, type == BTRFS_ORDERED_NOCOW || type == BTRFS_ORDERED_REGULAR); - em_tree = &BTRFS_I(inode)->extent_tree; + em_tree = &inode->extent_tree; em = alloc_extent_map(); if (!em) return ERR_PTR(-ENOMEM); @@ -7179,8 +7220,8 @@ static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len, } do { - btrfs_drop_extent_cache(BTRFS_I(inode), em->start, - em->start + em->len - 1, 0); + btrfs_drop_extent_cache(inode, em->start, + em->start + em->len - 1, 0); write_lock(&em_tree->lock); ret = add_extent_mapping(em_tree, em, 1); write_unlock(&em_tree->lock); @@ -7259,7 +7300,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map, btrfs_inc_nocow_writers(fs_info, block_start)) { struct extent_map *em2; - em2 = btrfs_create_dio_extent(inode, start, len, + em2 = btrfs_create_dio_extent(BTRFS_I(inode), start, len, orig_start, block_start, len, orig_block_len, ram_bytes, type); @@ -7278,8 +7319,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map, * use the existing or preallocated extent, so does not * need to adjust btrfs_space_info's bytes_may_use. 
*/ - btrfs_free_reserved_data_space_noquota(inode, start, - len); + btrfs_free_reserved_data_space_noquota(fs_info, len); goto skip_cow; } } @@ -7287,7 +7327,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map, /* this will cow the extent */ len = bh_result->b_size; free_extent_map(em); - *map = em = btrfs_new_extent_direct(inode, start, len); + *map = em = btrfs_new_extent_direct(BTRFS_I(inode), start, len); if (IS_ERR(em)) { ret = PTR_ERR(em); goto out; @@ -7438,7 +7478,8 @@ static void btrfs_dio_private_put(struct btrfs_dio_private *dip) return; if (bio_op(dip->dio_bio) == REQ_OP_WRITE) { - __endio_write_update_ordered(dip->inode, dip->logical_offset, + __endio_write_update_ordered(BTRFS_I(dip->inode), + dip->logical_offset, dip->bytes, !dip->dio_bio->bi_status); } else { @@ -7524,18 +7565,18 @@ static blk_status_t btrfs_check_read_dio_bio(struct inode *inode, return err; } -static void __endio_write_update_ordered(struct inode *inode, +static void __endio_write_update_ordered(struct btrfs_inode *inode, const u64 offset, const u64 bytes, const bool uptodate) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = inode->root->fs_info; struct btrfs_ordered_extent *ordered = NULL; struct btrfs_workqueue *wq; u64 ordered_offset = offset; u64 ordered_bytes = bytes; u64 last_offset; - if (btrfs_is_free_space_inode(BTRFS_I(inode))) + if (btrfs_is_free_space_inode(inode)) wq = fs_info->endio_freespace_worker; else wq = fs_info->endio_write_workers; @@ -7543,9 +7584,9 @@ static void __endio_write_update_ordered(struct inode *inode, while (ordered_offset < offset + bytes) { last_offset = ordered_offset; if (btrfs_dec_test_first_ordered_pending(inode, &ordered, - &ordered_offset, - ordered_bytes, - uptodate)) { + &ordered_offset, + ordered_bytes, + uptodate)) { btrfs_init_work(&ordered->work, finish_ordered_fn, NULL, NULL); btrfs_queue_work(wq, &ordered->work); @@ -7572,7 +7613,7 @@ static blk_status_t btrfs_submit_bio_start_direct_io(void *private_data, { struct inode *inode = private_data; blk_status_t ret; - ret = btrfs_csum_one_bio(inode, bio, offset, 1); + ret = btrfs_csum_one_bio(BTRFS_I(inode), bio, offset, 1); BUG_ON(ret); /* -ENOMEM */ return 0; } @@ -7633,7 +7674,7 @@ static inline blk_status_t btrfs_submit_dio_bio(struct bio *bio, * If we aren't doing async submit, calculate the csum of the * bio now. 
*/ - ret = btrfs_csum_one_bio(inode, bio, file_offset, 1); + ret = btrfs_csum_one_bio(BTRFS_I(inode), bio, file_offset, 1); if (ret) goto err; } else { @@ -7883,7 +7924,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) inode_unlock(inode); relock = true; } - ret = btrfs_delalloc_reserve_space(inode, &data_reserved, + ret = btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved, offset, count); if (ret) goto out; @@ -7915,8 +7956,9 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) current->journal_info = NULL; if (ret < 0 && ret != -EIOCBQUEUED) { if (dio_data.reserve) - btrfs_delalloc_release_space(inode, data_reserved, - offset, dio_data.reserve, true); + btrfs_delalloc_release_space(BTRFS_I(inode), + data_reserved, offset, dio_data.reserve, + true); /* * On error we might have left some ordered extents * without submitting corresponding bios for them, so @@ -7925,13 +7967,13 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) */ if (dio_data.unsubmitted_oe_range_start < dio_data.unsubmitted_oe_range_end) - __endio_write_update_ordered(inode, + __endio_write_update_ordered(BTRFS_I(inode), dio_data.unsubmitted_oe_range_start, dio_data.unsubmitted_oe_range_end - dio_data.unsubmitted_oe_range_start, false); } else if (ret >= 0 && (size_t)ret < count) - btrfs_delalloc_release_space(inode, data_reserved, + btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, offset, count - (size_t)ret, true); btrfs_delalloc_release_extents(BTRFS_I(inode), count); } @@ -7946,7 +7988,7 @@ out: } static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, - __u64 start, __u64 len) + u64 start, u64 len) { int ret; @@ -8123,20 +8165,17 @@ again: /* * Qgroup reserved space handler * Page here will be either - * 1) Already written to disk - * In this case, its reserved space is released from data rsv map - * and will be freed by delayed_ref handler finally. - * So even we call qgroup_free_data(), it won't decrease reserved - * space. - * 2) Not written to disk - * This means the reserved space should be freed here. However, - * if a truncate invalidates the page (by clearing PageDirty) - * and the page is accounted for while allocating extent - * in btrfs_check_data_free_space() we let delayed_ref to - * free the entire extent. + * 1) Already written to disk or ordered extent already submitted + * Then its QGROUP_RESERVED bit in io_tree is already cleaned. + * Qgroup will be handled by its qgroup_record then. + * btrfs_qgroup_free_data() call will do nothing here. + * + * 2) Not written to disk yet + * Then btrfs_qgroup_free_data() call will clear the QGROUP_RESERVED + * bit of its io_tree, and free the qgroup reserved data space. + * Since the IO will never happen for this page. */ - if (PageDirty(page)) - btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE); + btrfs_qgroup_free_data(BTRFS_I(inode), NULL, page_start, PAGE_SIZE); if (!inode_evicting) { clear_extent_bit(tree, page_start, page_end, EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW | @@ -8200,8 +8239,8 @@ vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf) * end up waiting indefinitely to get a lock on the page currently * being processed by btrfs_page_mkwrite() function. 
*/ - ret2 = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start, - reserved_space); + ret2 = btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved, + page_start, reserved_space); if (!ret2) { ret2 = file_update_time(vmf->vma->vm_file); reserved = 1; @@ -8248,9 +8287,9 @@ again: fs_info->sectorsize); if (reserved_space < PAGE_SIZE) { end = page_start + reserved_space - 1; - btrfs_delalloc_release_space(inode, data_reserved, - page_start, PAGE_SIZE - reserved_space, - true); + btrfs_delalloc_release_space(BTRFS_I(inode), + data_reserved, page_start, + PAGE_SIZE - reserved_space, true); } } @@ -8265,7 +8304,7 @@ again: EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0, &cached_state); - ret2 = btrfs_set_extent_delalloc(inode, page_start, end, 0, + ret2 = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start, end, 0, &cached_state); if (ret2) { unlock_extent_cached(io_tree, page_start, page_end, @@ -8305,7 +8344,7 @@ out_unlock: unlock_page(page); out: btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE); - btrfs_delalloc_release_space(inode, data_reserved, page_start, + btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, page_start, reserved_space, (ret != 0)); out_noreserve: sb_end_pagefault(inode->i_sb); @@ -8519,6 +8558,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb) ei->index_cnt = (u64)-1; ei->dir_index = 0; ei->last_unlink_trans = 0; + ei->last_reflink_trans = 0; ei->last_log_commit = 0; spin_lock_init(&ei->lock); @@ -8605,7 +8645,7 @@ void btrfs_destroy_inode(struct inode *inode) btrfs_put_ordered_extent(ordered); } } - btrfs_qgroup_check_reserved_leak(inode); + btrfs_qgroup_check_reserved_leak(BTRFS_I(inode)); inode_tree_del(inode); btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0); btrfs_inode_clear_file_extent_range(BTRFS_I(inode), 0, (u64)-1); @@ -9587,6 +9627,31 @@ out_unlock: return err; } +static int insert_prealloc_file_extent(struct btrfs_trans_handle *trans, + struct inode *inode, struct btrfs_key *ins, + u64 file_offset) +{ + struct btrfs_file_extent_item stack_fi; + u64 start = ins->objectid; + u64 len = ins->offset; + int ret; + + memset(&stack_fi, 0, sizeof(stack_fi)); + + btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_PREALLOC); + btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, start); + btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi, len); + btrfs_set_stack_file_extent_num_bytes(&stack_fi, len); + btrfs_set_stack_file_extent_ram_bytes(&stack_fi, len); + btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE); + /* Encryption and other encoding is reserved and all 0 */ + + ret = btrfs_qgroup_release_data(BTRFS_I(inode), file_offset, len); + if (ret < 0) + return ret; + return insert_reserved_file_extent(trans, BTRFS_I(inode), file_offset, + &stack_fi, ret); +} static int __btrfs_prealloc_file_range(struct inode *inode, int mode, u64 start, u64 num_bytes, u64 min_size, loff_t actual_len, u64 *alloc_hint, @@ -9645,11 +9710,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode, btrfs_dec_block_group_reservations(fs_info, ins.objectid); last_alloc = ins.offset; - ret = insert_reserved_file_extent(trans, inode, - cur_offset, ins.objectid, - ins.offset, ins.offset, - ins.offset, 0, 0, 0, - BTRFS_FILE_EXTENT_PREALLOC); + ret = insert_prealloc_file_extent(trans, inode, &ins, cur_offset); if (ret) { btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 0); @@ -9722,7 +9783,7 @@ next: btrfs_end_transaction(trans); } if (clear_offset < end) - 
btrfs_free_reserved_data_space(inode, NULL, clear_offset, + btrfs_free_reserved_data_space(BTRFS_I(inode), NULL, clear_offset, end - clear_offset + 1); return ret; } diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index e8f7c5f00894..bd3511c5ca81 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -164,8 +164,11 @@ static int btrfs_ioctl_getflags(struct file *file, void __user *arg) return 0; } -/* Check if @flags are a supported and valid set of FS_*_FL flags */ -static int check_fsflags(unsigned int flags) +/* + * Check if @flags are a supported and valid set of FS_*_FL flags and that + * the old and new flags are not conflicting + */ +static int check_fsflags(unsigned int old_flags, unsigned int flags) { if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \ FS_NOATIME_FL | FS_NODUMP_FL | \ @@ -174,9 +177,19 @@ static int check_fsflags(unsigned int flags) FS_NOCOW_FL)) return -EOPNOTSUPP; + /* COMPR and NOCOMP on new/old are valid */ if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL)) return -EINVAL; + if ((flags & FS_COMPR_FL) && (flags & FS_NOCOW_FL)) + return -EINVAL; + + /* NOCOW and compression options are mutually exclusive */ + if ((old_flags & FS_NOCOW_FL) && (flags & (FS_COMPR_FL | FS_NOCOMP_FL))) + return -EINVAL; + if ((flags & FS_NOCOW_FL) && (old_flags & (FS_COMPR_FL | FS_NOCOMP_FL))) + return -EINVAL; + return 0; } @@ -190,7 +203,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) unsigned int fsflags, old_fsflags; int ret; const char *comp = NULL; - u32 binode_flags = binode->flags; + u32 binode_flags; if (!inode_owner_or_capable(inode)) return -EPERM; @@ -201,22 +214,23 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) if (copy_from_user(&fsflags, arg, sizeof(fsflags))) return -EFAULT; - ret = check_fsflags(fsflags); - if (ret) - return ret; - ret = mnt_want_write_file(file); if (ret) return ret; inode_lock(inode); - fsflags = btrfs_mask_fsflags_for_type(inode, fsflags); old_fsflags = btrfs_inode_flags_to_fsflags(binode->flags); + ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags); if (ret) goto out_unlock; + ret = check_fsflags(old_fsflags, fsflags); + if (ret) + goto out_unlock; + + binode_flags = binode->flags; if (fsflags & FS_SYNC_FL) binode_flags |= BTRFS_INODE_SYNC; else @@ -566,6 +580,7 @@ static noinline int create_subvol(struct inode *dir, struct inode *inode; int ret; int err; + dev_t anon_dev = 0; u64 objectid; u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID; u64 index = 0; @@ -578,6 +593,10 @@ static noinline int create_subvol(struct inode *dir, if (ret) goto fail_free; + ret = get_anon_bdev(&anon_dev); + if (ret < 0) + goto fail_free; + /* * Don't create subvolume whose level is not zero. Or qgroup will be * screwed up since it assumes subvolume qgroup's level to be 0. 
@@ -660,12 +679,15 @@ static noinline int create_subvol(struct inode *dir, goto fail; key.offset = (u64)-1; - new_root = btrfs_get_fs_root(fs_info, objectid, true); + new_root = btrfs_get_new_fs_root(fs_info, objectid, anon_dev); if (IS_ERR(new_root)) { + free_anon_bdev(anon_dev); ret = PTR_ERR(new_root); btrfs_abort_transaction(trans, ret); goto fail; } + /* Freeing will be done in btrfs_put_root() of new_root */ + anon_dev = 0; btrfs_record_root_in_trans(trans, new_root); @@ -735,6 +757,8 @@ fail: return ret; fail_free: + if (anon_dev) + free_anon_bdev(anon_dev); kfree(root_item); return ret; } @@ -762,6 +786,9 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir, if (!pending_snapshot) return -ENOMEM; + ret = get_anon_bdev(&pending_snapshot->anon_dev); + if (ret < 0) + goto free_pending; pending_snapshot->root_item = kzalloc(sizeof(struct btrfs_root_item), GFP_KERNEL); pending_snapshot->path = btrfs_alloc_path(); @@ -823,10 +850,16 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir, d_instantiate(dentry, inode); ret = 0; + pending_snapshot->anon_dev = 0; fail: + /* Prevent double freeing of anon_dev */ + if (ret && pending_snapshot->snap) + pending_snapshot->snap->anon_dev = 0; btrfs_put_root(pending_snapshot->snap); btrfs_subvolume_release_metadata(fs_info, &pending_snapshot->block_rsv); free_pending: + if (pending_snapshot->anon_dev) + free_anon_bdev(pending_snapshot->anon_dev); kfree(pending_snapshot->root_item); btrfs_free_path(pending_snapshot->path); kfree(pending_snapshot); @@ -1243,7 +1276,7 @@ static int cluster_pages_for_defrag(struct inode *inode, page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1); - ret = btrfs_delalloc_reserve_space(inode, &data_reserved, + ret = btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved, start_index << PAGE_SHIFT, page_cnt << PAGE_SHIFT); if (ret) @@ -1265,7 +1298,7 @@ again: while (1) { lock_extent_bits(tree, page_start, page_end, &cached_state); - ordered = btrfs_lookup_ordered_extent(inode, + ordered = btrfs_lookup_ordered_extent(BTRFS_I(inode), page_start); unlock_extent_cached(tree, page_start, page_end, &cached_state); @@ -1333,7 +1366,7 @@ again: spin_lock(&BTRFS_I(inode)->lock); btrfs_mod_outstanding_extents(BTRFS_I(inode), 1); spin_unlock(&BTRFS_I(inode)->lock); - btrfs_delalloc_release_space(inode, data_reserved, + btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, start_index << PAGE_SHIFT, (page_cnt - i_done) << PAGE_SHIFT, true); } @@ -1361,7 +1394,7 @@ out: unlock_page(pages[i]); put_page(pages[i]); } - btrfs_delalloc_release_space(inode, data_reserved, + btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, start_index << PAGE_SHIFT, page_cnt << PAGE_SHIFT, true); btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT); @@ -3198,11 +3231,15 @@ static long btrfs_ioctl_fs_info(struct btrfs_fs_info *fs_info, struct btrfs_ioctl_fs_info_args *fi_args; struct btrfs_device *device; struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; + u64 flags_in; int ret = 0; - fi_args = kzalloc(sizeof(*fi_args), GFP_KERNEL); - if (!fi_args) - return -ENOMEM; + fi_args = memdup_user(arg, sizeof(*fi_args)); + if (IS_ERR(fi_args)) + return PTR_ERR(fi_args); + + flags_in = fi_args->flags; + memset(fi_args, 0, sizeof(*fi_args)); rcu_read_lock(); fi_args->num_devices = fs_devices->num_devices; @@ -3218,6 +3255,23 @@ static long btrfs_ioctl_fs_info(struct btrfs_fs_info *fs_info, fi_args->sectorsize = fs_info->sectorsize; fi_args->clone_alignment = 
fs_info->sectorsize; + if (flags_in & BTRFS_FS_INFO_FLAG_CSUM_INFO) { + fi_args->csum_type = btrfs_super_csum_type(fs_info->super_copy); + fi_args->csum_size = btrfs_super_csum_size(fs_info->super_copy); + fi_args->flags |= BTRFS_FS_INFO_FLAG_CSUM_INFO; + } + + if (flags_in & BTRFS_FS_INFO_FLAG_GENERATION) { + fi_args->generation = fs_info->generation; + fi_args->flags |= BTRFS_FS_INFO_FLAG_GENERATION; + } + + if (flags_in & BTRFS_FS_INFO_FLAG_METADATA_UUID) { + memcpy(&fi_args->metadata_uuid, fs_devices->metadata_uuid, + sizeof(fi_args->metadata_uuid)); + fi_args->flags |= BTRFS_FS_INFO_FLAG_METADATA_UUID; + } + if (copy_to_user(arg, fi_args, sizeof(*fi_args))) ret = -EFAULT; diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index e13b3d28c063..ebac13389e7e 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -15,6 +15,7 @@ #include "disk-io.h" #include "compression.h" #include "delalloc-space.h" +#include "qgroup.h" static struct kmem_cache *btrfs_ordered_extent_cache; @@ -152,23 +153,39 @@ static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree, return ret; } -/* allocate and add a new ordered_extent into the per-inode tree. +/* + * Allocate and add a new ordered_extent into the per-inode tree. * * The tree is given a single reference on the ordered extent that was * inserted. */ -static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, +static int __btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset, u64 disk_bytenr, u64 num_bytes, u64 disk_num_bytes, int type, int dio, int compress_type) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct btrfs_root *root = BTRFS_I(inode)->root; - struct btrfs_ordered_inode_tree *tree; + struct btrfs_root *root = inode->root; + struct btrfs_fs_info *fs_info = root->fs_info; + struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree; struct rb_node *node; struct btrfs_ordered_extent *entry; + int ret; - tree = &BTRFS_I(inode)->ordered_tree; + if (type == BTRFS_ORDERED_NOCOW || type == BTRFS_ORDERED_PREALLOC) { + /* For nocow write, we can release the qgroup rsv right now */ + ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes); + if (ret < 0) + return ret; + ret = 0; + } else { + /* + * The ordered extent has reserved qgroup space, release now + * and pass the reserved number for qgroup_record to free. 
+ */ + ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes); + if (ret < 0) + return ret; + } entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS); if (!entry) return -ENOMEM; @@ -178,9 +195,10 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, entry->num_bytes = num_bytes; entry->disk_num_bytes = disk_num_bytes; entry->bytes_left = num_bytes; - entry->inode = igrab(inode); + entry->inode = igrab(&inode->vfs_inode); entry->compress_type = compress_type; entry->truncated_len = (u64)-1; + entry->qgroup_rsv = ret; if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE) set_bit(type, &entry->flags); @@ -197,10 +215,8 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, INIT_LIST_HEAD(&entry->root_extent_list); INIT_LIST_HEAD(&entry->work_list); init_completion(&entry->completion); - INIT_LIST_HEAD(&entry->log_list); - INIT_LIST_HEAD(&entry->trans_list); - trace_btrfs_ordered_extent_add(inode, entry); + trace_btrfs_ordered_extent_add(&inode->vfs_inode, entry); spin_lock_irq(&tree->lock); node = tree_insert(&tree->tree, file_offset, @@ -228,14 +244,14 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, * that work has been done at higher layers, so this is truly the * smallest the extent is going to get. */ - spin_lock(&BTRFS_I(inode)->lock); - btrfs_mod_outstanding_extents(BTRFS_I(inode), 1); - spin_unlock(&BTRFS_I(inode)->lock); + spin_lock(&inode->lock); + btrfs_mod_outstanding_extents(inode, 1); + spin_unlock(&inode->lock); return 0; } -int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, +int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset, u64 disk_bytenr, u64 num_bytes, u64 disk_num_bytes, int type) { @@ -244,7 +260,7 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, BTRFS_COMPRESS_NONE); } -int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset, +int btrfs_add_ordered_extent_dio(struct btrfs_inode *inode, u64 file_offset, u64 disk_bytenr, u64 num_bytes, u64 disk_num_bytes, int type) { @@ -253,7 +269,7 @@ int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset, BTRFS_COMPRESS_NONE); } -int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset, +int btrfs_add_ordered_extent_compress(struct btrfs_inode *inode, u64 file_offset, u64 disk_bytenr, u64 num_bytes, u64 disk_num_bytes, int type, int compress_type) @@ -291,12 +307,12 @@ void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry, * file_offset is updated to one byte past the range that is recorded as * complete. This allows you to walk forward in the file. 
*/ -int btrfs_dec_test_first_ordered_pending(struct inode *inode, +int btrfs_dec_test_first_ordered_pending(struct btrfs_inode *inode, struct btrfs_ordered_extent **cached, u64 *file_offset, u64 io_size, int uptodate) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct btrfs_ordered_inode_tree *tree; + struct btrfs_fs_info *fs_info = inode->root->fs_info; + struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree; struct rb_node *node; struct btrfs_ordered_extent *entry = NULL; int ret; @@ -305,7 +321,6 @@ int btrfs_dec_test_first_ordered_pending(struct inode *inode, u64 dec_start; u64 to_dec; - tree = &BTRFS_I(inode)->ordered_tree; spin_lock_irqsave(&tree->lock, flags); node = tree_search(tree, *file_offset); if (!node) { @@ -429,8 +444,6 @@ void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry) trace_btrfs_ordered_extent_put(entry->inode, entry); if (refcount_dec_and_test(&entry->refs)) { - ASSERT(list_empty(&entry->log_list)); - ASSERT(list_empty(&entry->trans_list)); ASSERT(list_empty(&entry->root_extent_list)); ASSERT(RB_EMPTY_NODE(&entry->rb_node)); if (entry->inode) @@ -698,14 +711,14 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len) * find an ordered extent corresponding to file_offset. return NULL if * nothing is found, otherwise take a reference on the extent and return it */ -struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode, +struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode, u64 file_offset) { struct btrfs_ordered_inode_tree *tree; struct rb_node *node; struct btrfs_ordered_extent *entry = NULL; - tree = &BTRFS_I(inode)->ordered_tree; + tree = &inode->ordered_tree; spin_lock_irq(&tree->lock); node = tree_search(tree, file_offset); if (!node) @@ -803,7 +816,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy); int index = 0; - ordered = btrfs_lookup_ordered_extent(inode, offset); + ordered = btrfs_lookup_ordered_extent(BTRFS_I(inode), offset); if (!ordered) return 0; diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index c01c9698250b..d61ea9c880a3 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h @@ -92,6 +92,9 @@ struct btrfs_ordered_extent { /* compression algorithm */ int compress_type; + /* Qgroup reserved space */ + int qgroup_rsv; + /* reference count */ refcount_t refs; @@ -101,12 +104,6 @@ struct btrfs_ordered_extent { /* list of checksums for insertion when the extent io is done */ struct list_head list; - /* If we need to wait on this to be done */ - struct list_head log_list; - - /* If the transaction needs to wait on this ordered extent */ - struct list_head trans_list; - /* used to wait for the BTRFS_ORDERED_COMPLETE bit */ wait_queue_head_t wait; @@ -150,23 +147,23 @@ void btrfs_remove_ordered_extent(struct inode *inode, int btrfs_dec_test_ordered_pending(struct inode *inode, struct btrfs_ordered_extent **cached, u64 file_offset, u64 io_size, int uptodate); -int btrfs_dec_test_first_ordered_pending(struct inode *inode, +int btrfs_dec_test_first_ordered_pending(struct btrfs_inode *inode, struct btrfs_ordered_extent **cached, u64 *file_offset, u64 io_size, int uptodate); -int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, +int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset, u64 disk_bytenr, u64 num_bytes, u64 disk_num_bytes, int type); -int btrfs_add_ordered_extent_dio(struct inode *inode, u64 
file_offset, +int btrfs_add_ordered_extent_dio(struct btrfs_inode *inode, u64 file_offset, u64 disk_bytenr, u64 num_bytes, u64 disk_num_bytes, int type); -int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset, +int btrfs_add_ordered_extent_compress(struct btrfs_inode *inode, u64 file_offset, u64 disk_bytenr, u64 num_bytes, u64 disk_num_bytes, int type, int compress_type); void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry, struct btrfs_ordered_sum *sum); -struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode, +struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode, u64 file_offset); void btrfs_start_ordered_extent(struct inode *inode, struct btrfs_ordered_extent *entry, int wait); diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 5bd4089ad0e1..c0f350c3a0cf 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -11,7 +11,6 @@ #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/btrfs.h> -#include <linux/sizes.h> #include "ctree.h" #include "transaction.h" @@ -22,6 +21,7 @@ #include "extent_io.h" #include "qgroup.h" #include "block-group.h" +#include "sysfs.h" /* TODO XXX FIXME * - subvol delete -> delete when ref goes to 0? delete limits also? @@ -220,10 +220,12 @@ static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info, return qgroup; } -static void __del_qgroup_rb(struct btrfs_qgroup *qgroup) +static void __del_qgroup_rb(struct btrfs_fs_info *fs_info, + struct btrfs_qgroup *qgroup) { struct btrfs_qgroup_list *list; + btrfs_sysfs_del_one_qgroup(fs_info, qgroup); list_del(&qgroup->dirty); while (!list_empty(&qgroup->groups)) { list = list_first_entry(&qgroup->groups, @@ -252,7 +254,7 @@ static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid) return -ENOENT; rb_erase(&qgroup->node, &fs_info->qgroup_tree); - __del_qgroup_rb(qgroup); + __del_qgroup_rb(fs_info, qgroup); return 0; } @@ -351,6 +353,9 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info) goto out; } + ret = btrfs_sysfs_add_qgroups(fs_info); + if (ret < 0) + goto out; /* default this to quota off, in case no status key is found */ fs_info->qgroup_flags = 0; @@ -412,6 +417,10 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info) goto out; } } + ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup); + if (ret < 0) + goto out; + switch (found_key.type) { case BTRFS_QGROUP_INFO_KEY: { struct btrfs_qgroup_info_item *ptr; @@ -500,12 +509,51 @@ out: ulist_free(fs_info->qgroup_ulist); fs_info->qgroup_ulist = NULL; fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN; + btrfs_sysfs_del_qgroups(fs_info); } return ret < 0 ? ret : 0; } /* + * Called in close_ctree() when quota is still enabled. This verifies we don't + * leak some reserved space. + * + * Return false if no reserved space is left. + * Return true if some reserved space is leaked. + */ +bool btrfs_check_quota_leak(struct btrfs_fs_info *fs_info) +{ + struct rb_node *node; + bool ret = false; + + if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) + return ret; + /* + * Since we're unmounting, there is no race and no need to grab qgroup + * lock. And here we don't go post-order to provide a more user + * friendly sorted result. 
+ */ + for (node = rb_first(&fs_info->qgroup_tree); node; node = rb_next(node)) { + struct btrfs_qgroup *qgroup; + int i; + + qgroup = rb_entry(node, struct btrfs_qgroup, node); + for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++) { + if (qgroup->rsv.values[i]) { + ret = true; + btrfs_warn(fs_info, + "qgroup %hu/%llu has unreleased space, type %d rsv %llu", + btrfs_qgroup_level(qgroup->qgroupid), + btrfs_qgroup_subvolid(qgroup->qgroupid), + i, qgroup->rsv.values[i]); + } + } + } + return ret; +} + +/* * This is called from close_ctree() or open_ctree() or btrfs_quota_disable(), * first two are in single-threaded paths.And for the third one, we have set * quota_root to be null with qgroup_lock held before, so it is safe to clean @@ -519,7 +567,7 @@ void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info) while ((n = rb_first(&fs_info->qgroup_tree))) { qgroup = rb_entry(n, struct btrfs_qgroup, node); rb_erase(n, &fs_info->qgroup_tree); - __del_qgroup_rb(qgroup); + __del_qgroup_rb(fs_info, qgroup); } /* * We call btrfs_free_qgroup_config() when unmounting @@ -528,6 +576,7 @@ void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info) */ ulist_free(fs_info->qgroup_ulist); fs_info->qgroup_ulist = NULL; + btrfs_sysfs_del_qgroups(fs_info); } static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src, @@ -900,6 +949,9 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info) goto out; } + ret = btrfs_sysfs_add_qgroups(fs_info); + if (ret < 0) + goto out; /* * 1 for quota root item * 1 for BTRFS_QGROUP_STATUS item @@ -987,6 +1039,11 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info) btrfs_abort_transaction(trans, ret); goto out_free_path; } + ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup); + if (ret < 0) { + btrfs_abort_transaction(trans, ret); + goto out_free_path; + } } ret = btrfs_next_item(tree_root, path); if (ret < 0) { @@ -1011,6 +1068,11 @@ out_add_root: btrfs_abort_transaction(trans, ret); goto out_free_path; } + ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup); + if (ret < 0) { + btrfs_abort_transaction(trans, ret); + goto out_free_path; + } ret = btrfs_commit_transaction(trans); trans = NULL; @@ -1046,6 +1108,7 @@ out: fs_info->qgroup_ulist = NULL; if (trans) btrfs_end_transaction(trans); + btrfs_sysfs_del_qgroups(fs_info); } mutex_unlock(&fs_info->qgroup_ioctl_lock); return ret; @@ -1398,8 +1461,11 @@ int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid) qgroup = add_qgroup_rb(fs_info, qgroupid); spin_unlock(&fs_info->qgroup_lock); - if (IS_ERR(qgroup)) + if (IS_ERR(qgroup)) { ret = PTR_ERR(qgroup); + goto out; + } + ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup); out: mutex_unlock(&fs_info->qgroup_ioctl_lock); return ret; @@ -2818,6 +2884,8 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid, unlock: spin_unlock(&fs_info->qgroup_lock); + if (!ret) + ret = btrfs_sysfs_add_one_qgroup(fs_info, dstgroup); out: if (!committing) mutex_unlock(&fs_info->qgroup_ioctl_lock); @@ -2826,20 +2894,8 @@ out: return ret; } -/* - * Two limits to commit transaction in advance. - * - * For RATIO, it will be 1/RATIO of the remaining limit as threshold. - * For SIZE, it will be in byte unit as threshold. 
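For readers decoding the warning emitted by btrfs_check_quota_leak() above (called from close_ctree() while quota is still enabled): the two leading numbers are the qgroup level and subvolume id, produced by btrfs_qgroup_level() and the new btrfs_qgroup_subvolid() helper, and "type" is the index into rsv.values[], i.e. which kind of reservation (data or one of the metadata reservation types) was still holding bytes at unmount.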
- */ -#define QGROUP_FREE_RATIO 32 -#define QGROUP_FREE_SIZE SZ_32M -static bool qgroup_check_limits(struct btrfs_fs_info *fs_info, - const struct btrfs_qgroup *qg, u64 num_bytes) +static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes) { - u64 free; - u64 threshold; - if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) && qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer) return false; @@ -2848,32 +2904,6 @@ static bool qgroup_check_limits(struct btrfs_fs_info *fs_info, qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl) return false; - /* - * Even if we passed the check, it's better to check if reservation - * for meta_pertrans is pushing us near limit. - * If there is too much pertrans reservation or it's near the limit, - * let's try commit transaction to free some, using transaction_kthread - */ - if ((qg->lim_flags & (BTRFS_QGROUP_LIMIT_MAX_RFER | - BTRFS_QGROUP_LIMIT_MAX_EXCL))) { - if (qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) { - free = qg->max_excl - qgroup_rsv_total(qg) - qg->excl; - threshold = min_t(u64, qg->max_excl / QGROUP_FREE_RATIO, - QGROUP_FREE_SIZE); - } else { - free = qg->max_rfer - qgroup_rsv_total(qg) - qg->rfer; - threshold = min_t(u64, qg->max_rfer / QGROUP_FREE_RATIO, - QGROUP_FREE_SIZE); - } - - /* - * Use transaction_kthread to commit transaction, so we no - * longer need to bother nested transaction nor lock context. - */ - if (free < threshold) - btrfs_commit_transaction_locksafe(fs_info); - } - return true; } @@ -2921,7 +2951,7 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce, qg = unode_aux_to_qgroup(unode); - if (enforce && !qgroup_check_limits(fs_info, qg, num_bytes)) { + if (enforce && !qgroup_check_limits(qg, num_bytes)) { ret = -EDQUOT; goto out; } @@ -3378,28 +3408,132 @@ btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info) } } +#define rbtree_iterate_from_safe(node, next, start) \ + for (node = start; node && ({ next = rb_next(node); 1;}); node = next) + +static int qgroup_unreserve_range(struct btrfs_inode *inode, + struct extent_changeset *reserved, u64 start, + u64 len) +{ + struct rb_node *node; + struct rb_node *next; + struct ulist_node *entry = NULL; + int ret = 0; + + node = reserved->range_changed.root.rb_node; + while (node) { + entry = rb_entry(node, struct ulist_node, rb_node); + if (entry->val < start) + node = node->rb_right; + else if (entry) + node = node->rb_left; + else + break; + } + + /* Empty changeset */ + if (!entry) + return 0; + + if (entry->val > start && rb_prev(&entry->rb_node)) + entry = rb_entry(rb_prev(&entry->rb_node), struct ulist_node, + rb_node); + + rbtree_iterate_from_safe(node, next, &entry->rb_node) { + u64 entry_start; + u64 entry_end; + u64 entry_len; + int clear_ret; + + entry = rb_entry(node, struct ulist_node, rb_node); + entry_start = entry->val; + entry_end = entry->aux; + entry_len = entry_end - entry_start + 1; + + if (entry_start >= start + len) + break; + if (entry_start + entry_len <= start) + continue; + /* + * Now the entry is in [start, start + len), revert the + * EXTENT_QGROUP_RESERVED bit. 
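The new rbtree_iterate_from_safe() helper used by qgroup_unreserve_range() exists for the same reason as list_for_each_entry_safe(): the loop body removes the current node (ulist_del()), so the successor must be fetched before the removal. A kernel-style sketch of that pattern with hypothetical type and function names (range_entry, drop_overlapping), for illustration only:

    #include <linux/types.h>
    #include <linux/rbtree.h>
    #include <linux/slab.h>

    struct range_entry {                    /* hypothetical example type */
            struct rb_node rb_node;
            u64 start;
            u64 len;
    };

    static void drop_overlapping(struct rb_root *root, u64 start, u64 len)
    {
            struct rb_node *node = rb_first(root);
            struct rb_node *next;

            while (node) {
                    struct range_entry *entry =
                            rb_entry(node, struct range_entry, rb_node);

                    next = rb_next(node);   /* must happen before rb_erase() */
                    if (entry->start < start + len &&
                        start < entry->start + entry->len) {
                            rb_erase(node, root);
                            kfree(entry);
                    }
                    node = next;
            }
    }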
+ */ + clear_ret = clear_extent_bits(&inode->io_tree, entry_start, + entry_end, EXTENT_QGROUP_RESERVED); + if (!ret && clear_ret < 0) + ret = clear_ret; + + ulist_del(&reserved->range_changed, entry->val, entry->aux); + if (likely(reserved->bytes_changed >= entry_len)) { + reserved->bytes_changed -= entry_len; + } else { + WARN_ON(1); + reserved->bytes_changed = 0; + } + } + + return ret; +} + /* - * Reserve qgroup space for range [start, start + len). + * Try to free some space for qgroup. * - * This function will either reserve space from related qgroups or doing - * nothing if the range is already reserved. + * For qgroup, there are only 3 ways to free qgroup space: + * - Flush nodatacow write + * Any nodatacow write will free its reserved data space at run_delalloc_range(). + * In theory, we should only flush nodatacow inodes, but it's not yet + * possible, so we need to flush the whole root. * - * Return 0 for successful reserve - * Return <0 for error (including -EQUOT) + * - Wait for ordered extents + * When ordered extents are finished, their reserved metadata is finally + * converted to per_trans status, which can be freed by later commit + * transaction. * - * NOTE: this function may sleep for memory allocation. - * if btrfs_qgroup_reserve_data() is called multiple times with - * same @reserved, caller must ensure when error happens it's OK - * to free *ALL* reserved space. + * - Commit transaction + * This would free the meta_per_trans space. + * In theory this shouldn't provide much space, but any more qgroup space + * is needed. */ -int btrfs_qgroup_reserve_data(struct inode *inode, +static int try_flush_qgroup(struct btrfs_root *root) +{ + struct btrfs_trans_handle *trans; + int ret; + + /* + * We don't want to run flush again and again, so if there is a running + * one, we won't try to start a new flush, but exit directly. 
+ */ + if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) { + wait_event(root->qgroup_flush_wait, + !test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)); + return 0; + } + + ret = btrfs_start_delalloc_snapshot(root); + if (ret < 0) + goto out; + btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1); + + trans = btrfs_join_transaction(root); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); + goto out; + } + + ret = btrfs_commit_transaction(trans); +out: + clear_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state); + wake_up(&root->qgroup_flush_wait); + return ret; +} + +static int qgroup_reserve_data(struct btrfs_inode *inode, struct extent_changeset **reserved_ret, u64 start, u64 len) { - struct btrfs_root *root = BTRFS_I(inode)->root; - struct ulist_node *unode; - struct ulist_iterator uiter; + struct btrfs_root *root = inode->root; struct extent_changeset *reserved; + bool new_reserved = false; u64 orig_reserved; u64 to_reserve; int ret; @@ -3412,6 +3546,7 @@ int btrfs_qgroup_reserve_data(struct inode *inode, if (WARN_ON(!reserved_ret)) return -EINVAL; if (!*reserved_ret) { + new_reserved = true; *reserved_ret = extent_changeset_alloc(); if (!*reserved_ret) return -ENOMEM; @@ -3419,15 +3554,15 @@ int btrfs_qgroup_reserve_data(struct inode *inode, reserved = *reserved_ret; /* Record already reserved space */ orig_reserved = reserved->bytes_changed; - ret = set_record_extent_bits(&BTRFS_I(inode)->io_tree, start, + ret = set_record_extent_bits(&inode->io_tree, start, start + len -1, EXTENT_QGROUP_RESERVED, reserved); /* Newly reserved space */ to_reserve = reserved->bytes_changed - orig_reserved; - trace_btrfs_qgroup_reserve_data(inode, start, len, + trace_btrfs_qgroup_reserve_data(&inode->vfs_inode, start, len, to_reserve, QGROUP_RESERVE); if (ret < 0) - goto cleanup; + goto out; ret = qgroup_reserve(root, to_reserve, true, BTRFS_QGROUP_RSV_DATA); if (ret < 0) goto cleanup; @@ -3435,23 +3570,49 @@ int btrfs_qgroup_reserve_data(struct inode *inode, return ret; cleanup: - /* cleanup *ALL* already reserved ranges */ - ULIST_ITER_INIT(&uiter); - while ((unode = ulist_next(&reserved->range_changed, &uiter))) - clear_extent_bit(&BTRFS_I(inode)->io_tree, unode->val, - unode->aux, EXTENT_QGROUP_RESERVED, 0, 0, NULL); - /* Also free data bytes of already reserved one */ - btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid, - orig_reserved, BTRFS_QGROUP_RSV_DATA); - extent_changeset_release(reserved); + qgroup_unreserve_range(inode, reserved, start, len); +out: + if (new_reserved) { + extent_changeset_release(reserved); + kfree(reserved); + *reserved_ret = NULL; + } return ret; } +/* + * Reserve qgroup space for range [start, start + len). + * + * This function will either reserve space from related qgroups or do nothing + * if the range is already reserved. + * + * Return 0 for successful reservation + * Return <0 for error (including -EQUOT) + * + * NOTE: This function may sleep for memory allocation, dirty page flushing and + * commit transaction. So caller should not hold any dirty page locked. 
+ */ +int btrfs_qgroup_reserve_data(struct btrfs_inode *inode, + struct extent_changeset **reserved_ret, u64 start, + u64 len) +{ + int ret; + + ret = qgroup_reserve_data(inode, reserved_ret, start, len); + if (ret <= 0 && ret != -EDQUOT) + return ret; + + ret = try_flush_qgroup(inode->root); + if (ret < 0) + return ret; + return qgroup_reserve_data(inode, reserved_ret, start, len); +} + /* Free ranges specified by @reserved, normally in error path */ -static int qgroup_free_reserved_data(struct inode *inode, +static int qgroup_free_reserved_data(struct btrfs_inode *inode, struct extent_changeset *reserved, u64 start, u64 len) { - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_root *root = inode->root; struct ulist_node *unode; struct ulist_iterator uiter; struct extent_changeset changeset; @@ -3487,8 +3648,8 @@ static int qgroup_free_reserved_data(struct inode *inode, * EXTENT_QGROUP_RESERVED, we won't double free. * So not need to rush. */ - ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, - free_start, free_start + free_len - 1, + ret = clear_record_extent_bits(&inode->io_tree, free_start, + free_start + free_len - 1, EXTENT_QGROUP_RESERVED, &changeset); if (ret < 0) goto out; @@ -3502,7 +3663,7 @@ out: return ret; } -static int __btrfs_qgroup_release_data(struct inode *inode, +static int __btrfs_qgroup_release_data(struct btrfs_inode *inode, struct extent_changeset *reserved, u64 start, u64 len, int free) { @@ -3510,8 +3671,7 @@ static int __btrfs_qgroup_release_data(struct inode *inode, int trace_op = QGROUP_RELEASE; int ret; - if (!test_bit(BTRFS_FS_QUOTA_ENABLED, - &BTRFS_I(inode)->root->fs_info->flags)) + if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &inode->root->fs_info->flags)) return 0; /* In release case, we shouldn't have @reserved */ @@ -3519,18 +3679,18 @@ static int __btrfs_qgroup_release_data(struct inode *inode, if (free && reserved) return qgroup_free_reserved_data(inode, reserved, start, len); extent_changeset_init(&changeset); - ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, start, - start + len -1, EXTENT_QGROUP_RESERVED, &changeset); + ret = clear_record_extent_bits(&inode->io_tree, start, start + len -1, + EXTENT_QGROUP_RESERVED, &changeset); if (ret < 0) goto out; if (free) trace_op = QGROUP_FREE; - trace_btrfs_qgroup_release_data(inode, start, len, + trace_btrfs_qgroup_release_data(&inode->vfs_inode, start, len, changeset.bytes_changed, trace_op); if (free) - btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info, - BTRFS_I(inode)->root->root_key.objectid, + btrfs_qgroup_free_refroot(inode->root->fs_info, + inode->root->root_key.objectid, changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA); ret = changeset.bytes_changed; out: @@ -3550,7 +3710,7 @@ out: * * NOTE: This function may sleep for memory allocation. */ -int btrfs_qgroup_free_data(struct inode *inode, +int btrfs_qgroup_free_data(struct btrfs_inode *inode, struct extent_changeset *reserved, u64 start, u64 len) { return __btrfs_qgroup_release_data(inode, reserved, start, len, 1); @@ -3571,7 +3731,7 @@ int btrfs_qgroup_free_data(struct inode *inode, * * NOTE: This function may sleep for memory allocation. 
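btrfs_qgroup_reserve_data() above and __btrfs_qgroup_reserve_meta() further below are now thin wrappers around the same pattern: attempt the reservation, and only when it fails with -EDQUOT run try_flush_qgroup() (start delalloc, wait for ordered extents, commit the transaction) and retry exactly once. A minimal model of that control flow, illustrative only and independent of the btrfs types; reserve() and flush() stand in for qgroup_reserve_data()/qgroup_reserve_meta() and try_flush_qgroup():

    #include <errno.h>

    /* Both callbacks return 0 on success or a negative errno. */
    static int reserve_with_flush(int (*reserve)(void *), int (*flush)(void *),
                                  void *arg)
    {
            int ret = reserve(arg);

            if (ret != -EDQUOT)     /* success, or an error a flush cannot fix */
                    return ret;

            ret = flush(arg);       /* free space by flushing and committing */
            if (ret < 0)
                    return ret;

            return reserve(arg);    /* single retry; a second -EDQUOT is final */
    }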
*/ -int btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len) +int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len) { return __btrfs_qgroup_release_data(inode, NULL, start, len, 0); } @@ -3616,7 +3776,7 @@ static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes, return num_bytes; } -int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes, +static int qgroup_reserve_meta(struct btrfs_root *root, int num_bytes, enum btrfs_qgroup_rsv_type type, bool enforce) { struct btrfs_fs_info *fs_info = root->fs_info; @@ -3643,6 +3803,21 @@ int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes, return ret; } +int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes, + enum btrfs_qgroup_rsv_type type, bool enforce) +{ + int ret; + + ret = qgroup_reserve_meta(root, num_bytes, type, enforce); + if (ret <= 0 && ret != -EDQUOT) + return ret; + + ret = try_flush_qgroup(root); + if (ret < 0) + return ret; + return qgroup_reserve_meta(root, num_bytes, type, enforce); +} + void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root) { struct btrfs_fs_info *fs_info = root->fs_info; @@ -3742,7 +3917,7 @@ void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes) * Check qgroup reserved space leaking, normally at destroy inode * time */ -void btrfs_qgroup_check_reserved_leak(struct inode *inode) +void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode) { struct extent_changeset changeset; struct ulist_node *unode; @@ -3750,19 +3925,19 @@ void btrfs_qgroup_check_reserved_leak(struct inode *inode) int ret; extent_changeset_init(&changeset); - ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, 0, (u64)-1, + ret = clear_record_extent_bits(&inode->io_tree, 0, (u64)-1, EXTENT_QGROUP_RESERVED, &changeset); WARN_ON(ret < 0); if (WARN_ON(changeset.bytes_changed)) { ULIST_ITER_INIT(&iter); while ((unode = ulist_next(&changeset.range_changed, &iter))) { - btrfs_warn(BTRFS_I(inode)->root->fs_info, - "leaking qgroup reserved space, ino: %lu, start: %llu, end: %llu", - inode->i_ino, unode->val, unode->aux); + btrfs_warn(inode->root->fs_info, + "leaking qgroup reserved space, ino: %llu, start: %llu, end: %llu", + btrfs_ino(inode), unode->val, unode->aux); } - btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info, - BTRFS_I(inode)->root->root_key.objectid, + btrfs_qgroup_free_refroot(inode->root->fs_info, + inode->root->root_key.objectid, changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA); } diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h index 1bc654459469..50dea9a2d8fb 100644 --- a/fs/btrfs/qgroup.h +++ b/fs/btrfs/qgroup.h @@ -8,6 +8,7 @@ #include <linux/spinlock.h> #include <linux/rbtree.h> +#include <linux/kobject.h> #include "ulist.h" #include "delayed-ref.h" @@ -223,8 +224,18 @@ struct btrfs_qgroup { */ u64 old_refcnt; u64 new_refcnt; + + /* + * Sysfs kobjectid + */ + struct kobject kobj; }; +static inline u64 btrfs_qgroup_subvolid(u64 qgroupid) +{ + return (qgroupid & ((1ULL << BTRFS_QGROUP_LEVEL_SHIFT) - 1)); +} + /* * For qgroup event trace points only */ @@ -344,12 +355,12 @@ int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid, #endif /* New io_tree based accurate qgroup reserve API */ -int btrfs_qgroup_reserve_data(struct inode *inode, +int btrfs_qgroup_reserve_data(struct btrfs_inode *inode, struct extent_changeset **reserved, u64 start, u64 len); -int btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len); -int btrfs_qgroup_free_data(struct inode 
*inode, - struct extent_changeset *reserved, u64 start, u64 len); - +int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len); +int btrfs_qgroup_free_data(struct btrfs_inode *inode, + struct extent_changeset *reserved, u64 start, + u64 len); int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes, enum btrfs_qgroup_rsv_type type, bool enforce); /* Reserve metadata space for pertrans and prealloc type */ @@ -399,7 +410,7 @@ void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root); */ void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes); -void btrfs_qgroup_check_reserved_leak(struct inode *inode); +void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode); /* btrfs_qgroup_swapped_blocks related functions */ void btrfs_qgroup_init_swapped_blocks( @@ -415,5 +426,6 @@ int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans, int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *eb); void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans); +bool btrfs_check_quota_leak(struct btrfs_fs_info *fs_info); #endif diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c index c870ef70f817..255490f42b5d 100644 --- a/fs/btrfs/raid56.c +++ b/fs/btrfs/raid56.c @@ -1083,7 +1083,6 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio, unsigned long bio_max_len) { struct bio *last = bio_list->tail; - u64 last_end = 0; int ret; struct bio *bio; struct btrfs_bio_stripe *stripe; @@ -1098,15 +1097,14 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio, /* see if we can add this page onto our existing bio */ if (last) { - last_end = (u64)last->bi_iter.bi_sector << 9; + u64 last_end = (u64)last->bi_iter.bi_sector << 9; last_end += last->bi_iter.bi_size; /* * we can't merge these if they are from different * devices or if they are not contiguous */ - if (last_end == disk_start && stripe->dev->bdev && - !last->bi_status && + if (last_end == disk_start && !last->bi_status && last->bi_disk == stripe->dev->bdev->bd_disk && last->bi_partno == stripe->dev->bdev->bd_partno) { ret = bio_add_page(last, page, PAGE_SIZE, 0); @@ -1117,6 +1115,7 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio, /* put a new bio on the list */ bio = btrfs_io_bio_alloc(bio_max_len >> PAGE_SHIFT ?: 1); + btrfs_io_bio(bio)->device = stripe->dev; bio->bi_iter.bi_size = 0; bio_set_dev(bio, stripe->dev->bdev); bio->bi_iter.bi_sector = disk_start >> 9; @@ -1325,11 +1324,7 @@ write_data: atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list)); BUG_ON(atomic_read(&rbio->stripes_pending) == 0); - while (1) { - bio = bio_list_pop(&bio_list); - if (!bio) - break; - + while ((bio = bio_list_pop(&bio_list))) { bio->bi_private = rbio; bio->bi_end_io = raid_write_end_io; bio->bi_opf = REQ_OP_WRITE; @@ -1354,7 +1349,6 @@ static int find_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio) { u64 physical = bio->bi_iter.bi_sector; - u64 stripe_start; int i; struct btrfs_bio_stripe *stripe; @@ -1362,9 +1356,7 @@ static int find_bio_stripe(struct btrfs_raid_bio *rbio, for (i = 0; i < rbio->bbio->num_stripes; i++) { stripe = &rbio->bbio->stripes[i]; - stripe_start = stripe->physical; - if (physical >= stripe_start && - physical < stripe_start + rbio->stripe_len && + if (in_range(physical, stripe->physical, rbio->stripe_len) && stripe->dev->bdev && bio->bi_disk == stripe->dev->bdev->bd_disk && bio->bi_partno == stripe->dev->bdev->bd_partno) { @@ -1382,18 +1374,14 @@ static 
int find_bio_stripe(struct btrfs_raid_bio *rbio, static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio) { - u64 logical = bio->bi_iter.bi_sector; - u64 stripe_start; + u64 logical = (u64)bio->bi_iter.bi_sector << 9; int i; - logical <<= 9; - for (i = 0; i < rbio->nr_data; i++) { - stripe_start = rbio->bbio->raid_map[i]; - if (logical >= stripe_start && - logical < stripe_start + rbio->stripe_len) { + u64 stripe_start = rbio->bbio->raid_map[i]; + + if (in_range(logical, stripe_start, rbio->stripe_len)) return i; - } } return -1; } @@ -1567,11 +1555,7 @@ static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio) * not to touch it after that */ atomic_set(&rbio->stripes_pending, bios_to_read); - while (1) { - bio = bio_list_pop(&bio_list); - if (!bio) - break; - + while ((bio = bio_list_pop(&bio_list))) { bio->bi_private = rbio; bio->bi_end_io = raid_rmw_end_io; bio->bi_opf = REQ_OP_READ; @@ -1878,11 +1862,8 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) } /* make sure our ps and qs are in order */ - if (faila > failb) { - int tmp = failb; - failb = faila; - faila = tmp; - } + if (faila > failb) + swap(faila, failb); /* if the q stripe is failed, do a pstripe reconstruction * from the xors. @@ -2102,7 +2083,7 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio) */ if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) { __raid_recover_end_io(rbio); - goto out; + return 0; } else { goto cleanup; } @@ -2113,11 +2094,7 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio) * not to touch it after that */ atomic_set(&rbio->stripes_pending, bios_to_read); - while (1) { - bio = bio_list_pop(&bio_list); - if (!bio) - break; - + while ((bio = bio_list_pop(&bio_list))) { bio->bi_private = rbio; bio->bi_end_io = raid_recover_end_io; bio->bi_opf = REQ_OP_READ; @@ -2126,7 +2103,7 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio) submit_bio(bio); } -out: + return 0; cleanup: @@ -2482,11 +2459,7 @@ submit_write: atomic_set(&rbio->stripes_pending, nr_data); - while (1) { - bio = bio_list_pop(&bio_list); - if (!bio) - break; - + while ((bio = bio_list_pop(&bio_list))) { bio->bi_private = rbio; bio->bi_end_io = raid_write_end_io; bio->bi_opf = REQ_OP_WRITE; @@ -2664,11 +2637,7 @@ static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio) * not to touch it after that */ atomic_set(&rbio->stripes_pending, bios_to_read); - while (1) { - bio = bio_list_pop(&bio_list); - if (!bio) - break; - + while ((bio = bio_list_pop(&bio_list))) { bio->bi_private = rbio; bio->bi_end_io = raid56_parity_scrub_end_io; bio->bi_opf = REQ_OP_READ; diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c index af92525dbb16..7f03dbe5b609 100644 --- a/fs/btrfs/ref-verify.c +++ b/fs/btrfs/ref-verify.c @@ -286,6 +286,8 @@ static struct block_entry *add_block_entry(struct btrfs_fs_info *fs_info, exist_re = insert_root_entry(&exist->roots, re); if (exist_re) kfree(re); + } else { + kfree(re); } kfree(be); return exist; diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c index 040009d1cc31..5cd02514cf4d 100644 --- a/fs/btrfs/reflink.c +++ b/fs/btrfs/reflink.c @@ -68,8 +68,8 @@ static int copy_inline_to_page(struct inode *inode, * reservation here. Also we must not do the reservation while holding * a transaction open, otherwise we would deadlock. 
*/ - ret = btrfs_delalloc_reserve_space(inode, &data_reserved, file_offset, - block_size); + ret = btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved, + file_offset, block_size); if (ret) goto out; @@ -84,7 +84,8 @@ static int copy_inline_to_page(struct inode *inode, clear_extent_bit(&BTRFS_I(inode)->io_tree, file_offset, range_end, EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0, NULL); - ret = btrfs_set_extent_delalloc(inode, file_offset, range_end, 0, NULL); + ret = btrfs_set_extent_delalloc(BTRFS_I(inode), file_offset, range_end, + 0, NULL); if (ret) goto out_unlock; @@ -133,8 +134,8 @@ out_unlock: put_page(page); } if (ret) - btrfs_delalloc_release_space(inode, data_reserved, file_offset, - block_size, true); + btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, + file_offset, block_size, true); btrfs_delalloc_release_extents(BTRFS_I(inode), block_size); out: extent_changeset_free(data_reserved); @@ -336,6 +337,7 @@ static int btrfs_clone(struct inode *src, struct inode *inode, while (1) { u64 next_key_min_offset = key.offset + 1; struct btrfs_file_extent_item *extent; + u64 extent_gen; int type; u32 size; struct btrfs_key new_key; @@ -384,6 +386,7 @@ process_slot: extent = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); + extent_gen = btrfs_file_extent_generation(leaf, extent); comp = btrfs_file_extent_compression(leaf, extent); type = btrfs_file_extent_type(leaf, extent); if (type == BTRFS_FILE_EXTENT_REG || @@ -488,6 +491,19 @@ process_slot: btrfs_release_path(path); + /* + * If this is a new extent update the last_reflink_trans of both + * inodes. This is used by fsync to make sure it does not log + * multiple checksum items with overlapping ranges. For older + * extents we don't need to do it since inode logging skips the + * checksums for older extents. Also ignore holes and inline + * extents because they don't have checksums in the csum tree. + */ + if (extent_gen == trans->transid && disko > 0) { + BTRFS_I(src)->last_reflink_trans = trans->transid; + BTRFS_I(inode)->last_reflink_trans = trans->transid; + } + last_dest_end = ALIGN(new_key.offset + datal, fs_info->sectorsize); ret = clone_finish_inode_update(trans, inode, last_dest_end, diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 3bbae80c752f..4ba1ab9cc76d 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -1686,12 +1686,20 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc, btrfs_unlock_up_safe(path, 0); } - min_reserved = fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2; + /* + * In merge_reloc_root(), we modify the upper level pointer to swap the + * tree blocks between reloc tree and subvolume tree. Thus for tree + * block COW, we COW at most from level 1 to root level for each tree. + * + * Thus the needed metadata size is at most root_level * nodesize, + * and * 2 since we have two trees to COW. 
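To put illustrative numbers on this: with the default 16 KiB nodesize and, say, a level-3 root, the new bound is 16 KiB * 3 * 2 = 96 KiB, whereas the old formula of nodesize * (BTRFS_MAX_LEVEL - 1) * 2 always reserved 16 KiB * 7 * 2 = 224 KiB no matter how shallow the trees actually are (only the formulas come from the patch; the nodesize and level are example values).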
+ */ + min_reserved = fs_info->nodesize * btrfs_root_level(root_item) * 2; memset(&next_key, 0, sizeof(next_key)); while (1) { ret = btrfs_block_rsv_refill(root, rc->block_rsv, min_reserved, - BTRFS_RESERVE_FLUSH_ALL); + BTRFS_RESERVE_FLUSH_LIMIT); if (ret) { err = ret; goto out; @@ -2571,58 +2579,50 @@ out_free_blocks: return err; } -static noinline_for_stack -int prealloc_file_extent_cluster(struct inode *inode, - struct file_extent_cluster *cluster) +static noinline_for_stack int prealloc_file_extent_cluster( + struct btrfs_inode *inode, + struct file_extent_cluster *cluster) { u64 alloc_hint = 0; u64 start; u64 end; - u64 offset = BTRFS_I(inode)->index_cnt; + u64 offset = inode->index_cnt; u64 num_bytes; - int nr = 0; + int nr; int ret = 0; u64 prealloc_start = cluster->start - offset; u64 prealloc_end = cluster->end - offset; - u64 cur_offset; - struct extent_changeset *data_reserved = NULL; + u64 cur_offset = prealloc_start; BUG_ON(cluster->start != cluster->boundary[0]); - inode_lock(inode); - - ret = btrfs_check_data_free_space(inode, &data_reserved, prealloc_start, - prealloc_end + 1 - prealloc_start); + ret = btrfs_alloc_data_chunk_ondemand(inode, + prealloc_end + 1 - prealloc_start); if (ret) - goto out; + return ret; - cur_offset = prealloc_start; - while (nr < cluster->nr) { + inode_lock(&inode->vfs_inode); + for (nr = 0; nr < cluster->nr; nr++) { start = cluster->boundary[nr] - offset; if (nr + 1 < cluster->nr) end = cluster->boundary[nr + 1] - 1 - offset; else end = cluster->end - offset; - lock_extent(&BTRFS_I(inode)->io_tree, start, end); + lock_extent(&inode->io_tree, start, end); num_bytes = end + 1 - start; - if (cur_offset < start) - btrfs_free_reserved_data_space(inode, data_reserved, - cur_offset, start - cur_offset); - ret = btrfs_prealloc_file_range(inode, 0, start, + ret = btrfs_prealloc_file_range(&inode->vfs_inode, 0, start, num_bytes, num_bytes, end + 1, &alloc_hint); cur_offset = end + 1; - unlock_extent(&BTRFS_I(inode)->io_tree, start, end); + unlock_extent(&inode->io_tree, start, end); if (ret) break; - nr++; } + inode_unlock(&inode->vfs_inode); + if (cur_offset < prealloc_end) - btrfs_free_reserved_data_space(inode, data_reserved, - cur_offset, prealloc_end + 1 - cur_offset); -out: - inode_unlock(inode); - extent_changeset_free(data_reserved); + btrfs_free_reserved_data_space_noquota(inode->root->fs_info, + prealloc_end + 1 - cur_offset); return ret; } @@ -2664,7 +2664,8 @@ int setup_extent_mapping(struct inode *inode, u64 start, u64 end, */ int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info) { - return atomic_read(&fs_info->balance_cancel_req); + return atomic_read(&fs_info->balance_cancel_req) || + fatal_signal_pending(current); } ALLOW_ERROR_INJECTION(btrfs_should_cancel_balance, TRUE); @@ -2690,7 +2691,7 @@ static int relocate_file_extent_cluster(struct inode *inode, if (!ra) return -ENOMEM; - ret = prealloc_file_extent_cluster(inode, cluster); + ret = prealloc_file_extent_cluster(BTRFS_I(inode), cluster); if (ret) goto out; @@ -2762,8 +2763,8 @@ static int relocate_file_extent_cluster(struct inode *inode, nr++; } - ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0, - NULL); + ret = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start, + page_end, 0, NULL); if (ret) { unlock_page(page); put_page(page); @@ -3872,9 +3873,9 @@ out: * cloning checksum properly handles the nodatasum extents. * it also saves CPU time to re-calculate the checksum. 
*/ -int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len) +int btrfs_reloc_clone_csums(struct btrfs_inode *inode, u64 file_pos, u64 len) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = inode->root->fs_info; struct btrfs_ordered_sum *sums; struct btrfs_ordered_extent *ordered; int ret; @@ -3885,7 +3886,7 @@ int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len) ordered = btrfs_lookup_ordered_extent(inode, file_pos); BUG_ON(ordered->file_offset != file_pos || ordered->num_bytes != len); - disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt; + disk_bytenr = file_pos + inode->index_cnt; ret = btrfs_lookup_csums_range(fs_info->csum_root, disk_bytenr, disk_bytenr + len - 1, &list, 0); if (ret) diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 016a025e36c7..5a6cb9db512e 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -1616,13 +1616,9 @@ static int scrub_write_page_to_dev_replace(struct scrub_block *sblock, struct scrub_page *spage = sblock->pagev[page_num]; BUG_ON(spage->page == NULL); - if (spage->io_error) { - void *mapped_buffer = kmap_atomic(spage->page); + if (spage->io_error) + clear_page(page_address(spage->page)); - clear_page(mapped_buffer); - flush_dcache_page(spage->page); - kunmap_atomic(mapped_buffer); - } return scrub_add_page_to_wr_bio(sblock->sctx, spage); } @@ -1790,42 +1786,21 @@ static int scrub_checksum_data(struct scrub_block *sblock) struct btrfs_fs_info *fs_info = sctx->fs_info; SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); u8 csum[BTRFS_CSUM_SIZE]; - u8 *on_disk_csum; - struct page *page; - void *buffer; - u64 len; - int index; + struct scrub_page *spage; + char *kaddr; BUG_ON(sblock->page_count < 1); - if (!sblock->pagev[0]->have_csum) + spage = sblock->pagev[0]; + if (!spage->have_csum) return 0; + kaddr = page_address(spage->page); + shash->tfm = fs_info->csum_shash; crypto_shash_init(shash); + crypto_shash_digest(shash, kaddr, PAGE_SIZE, csum); - on_disk_csum = sblock->pagev[0]->csum; - page = sblock->pagev[0]->page; - buffer = kmap_atomic(page); - - len = sctx->fs_info->sectorsize; - index = 0; - for (;;) { - u64 l = min_t(u64, len, PAGE_SIZE); - - crypto_shash_update(shash, buffer, l); - kunmap_atomic(buffer); - len -= l; - if (len == 0) - break; - index++; - BUG_ON(index >= sblock->page_count); - BUG_ON(!sblock->pagev[index]->page); - page = sblock->pagev[index]->page; - buffer = kmap_atomic(page); - } - - crypto_shash_final(shash, csum); - if (memcmp(csum, on_disk_csum, sctx->csum_size)) + if (memcmp(csum, spage->csum, sctx->csum_size)) sblock->checksum_error = 1; return sblock->checksum_error; @@ -1839,20 +1814,15 @@ static int scrub_checksum_tree_block(struct scrub_block *sblock) SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); u8 calculated_csum[BTRFS_CSUM_SIZE]; u8 on_disk_csum[BTRFS_CSUM_SIZE]; - struct page *page; - void *mapped_buffer; - u64 mapped_size; - void *p; - u64 len; - int index; - - shash->tfm = fs_info->csum_shash; - crypto_shash_init(shash); + const int num_pages = sctx->fs_info->nodesize >> PAGE_SHIFT; + int i; + struct scrub_page *spage; + char *kaddr; BUG_ON(sblock->page_count < 1); - page = sblock->pagev[0]->page; - mapped_buffer = kmap_atomic(page); - h = (struct btrfs_header *)mapped_buffer; + spage = sblock->pagev[0]; + kaddr = page_address(spage->page); + h = (struct btrfs_header *)kaddr; memcpy(on_disk_csum, h->csum, sctx->csum_size); /* @@ -1860,40 +1830,29 @@ static int scrub_checksum_tree_block(struct scrub_block *sblock) * a) don't have an 
extent buffer and * b) the page is already kmapped */ - if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h)) + if (spage->logical != btrfs_stack_header_bytenr(h)) sblock->header_error = 1; - if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h)) { + if (spage->generation != btrfs_stack_header_generation(h)) { sblock->header_error = 1; sblock->generation_error = 1; } - if (!scrub_check_fsid(h->fsid, sblock->pagev[0])) + if (!scrub_check_fsid(h->fsid, spage)) sblock->header_error = 1; if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid, BTRFS_UUID_SIZE)) sblock->header_error = 1; - len = sctx->fs_info->nodesize - BTRFS_CSUM_SIZE; - mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE; - p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE; - index = 0; - for (;;) { - u64 l = min_t(u64, len, mapped_size); + shash->tfm = fs_info->csum_shash; + crypto_shash_init(shash); + crypto_shash_update(shash, kaddr + BTRFS_CSUM_SIZE, + PAGE_SIZE - BTRFS_CSUM_SIZE); - crypto_shash_update(shash, p, l); - kunmap_atomic(mapped_buffer); - len -= l; - if (len == 0) - break; - index++; - BUG_ON(index >= sblock->page_count); - BUG_ON(!sblock->pagev[index]->page); - page = sblock->pagev[index]->page; - mapped_buffer = kmap_atomic(page); - mapped_size = PAGE_SIZE; - p = mapped_buffer; + for (i = 1; i < num_pages; i++) { + kaddr = page_address(sblock->pagev[i]->page); + crypto_shash_update(shash, kaddr, PAGE_SIZE); } crypto_shash_final(shash, calculated_csum); @@ -1910,57 +1869,31 @@ static int scrub_checksum_super(struct scrub_block *sblock) struct btrfs_fs_info *fs_info = sctx->fs_info; SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); u8 calculated_csum[BTRFS_CSUM_SIZE]; - u8 on_disk_csum[BTRFS_CSUM_SIZE]; - struct page *page; - void *mapped_buffer; - u64 mapped_size; - void *p; + struct scrub_page *spage; + char *kaddr; int fail_gen = 0; int fail_cor = 0; - u64 len; - int index; - - shash->tfm = fs_info->csum_shash; - crypto_shash_init(shash); BUG_ON(sblock->page_count < 1); - page = sblock->pagev[0]->page; - mapped_buffer = kmap_atomic(page); - s = (struct btrfs_super_block *)mapped_buffer; - memcpy(on_disk_csum, s->csum, sctx->csum_size); + spage = sblock->pagev[0]; + kaddr = page_address(spage->page); + s = (struct btrfs_super_block *)kaddr; - if (sblock->pagev[0]->logical != btrfs_super_bytenr(s)) + if (spage->logical != btrfs_super_bytenr(s)) ++fail_cor; - if (sblock->pagev[0]->generation != btrfs_super_generation(s)) + if (spage->generation != btrfs_super_generation(s)) ++fail_gen; - if (!scrub_check_fsid(s->fsid, sblock->pagev[0])) + if (!scrub_check_fsid(s->fsid, spage)) ++fail_cor; - len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE; - mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE; - p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE; - index = 0; - for (;;) { - u64 l = min_t(u64, len, mapped_size); - - crypto_shash_update(shash, p, l); - kunmap_atomic(mapped_buffer); - len -= l; - if (len == 0) - break; - index++; - BUG_ON(index >= sblock->page_count); - BUG_ON(!sblock->pagev[index]->page); - page = sblock->pagev[index]->page; - mapped_buffer = kmap_atomic(page); - mapped_size = PAGE_SIZE; - p = mapped_buffer; - } + shash->tfm = fs_info->csum_shash; + crypto_shash_init(shash); + crypto_shash_digest(shash, kaddr + BTRFS_CSUM_SIZE, + BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, calculated_csum); - crypto_shash_final(shash, calculated_csum); - if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size)) + if (memcmp(calculated_csum, s->csum, sctx->csum_size)) ++fail_cor; if (fail_cor + fail_gen) { @@ -1973,10 
+1906,10 @@ static int scrub_checksum_super(struct scrub_block *sblock) ++sctx->stat.super_errors; spin_unlock(&sctx->stat_lock); if (fail_cor) - btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev, + btrfs_dev_stat_inc_and_print(spage->dev, BTRFS_DEV_STAT_CORRUPTION_ERRS); else - btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev, + btrfs_dev_stat_inc_and_print(spage->dev, BTRFS_DEV_STAT_GENERATION_ERRS); } @@ -3758,7 +3691,7 @@ static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx, struct btrfs_fs_info *fs_info = sctx->fs_info; if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) - return -EIO; + return -EROFS; /* Seed devices of a new filesystem has their own generation. */ if (scrub_dev->fs_devices != fs_info->fs_devices) diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c index c7bd3fdd7792..475968ccbd1d 100644 --- a/fs/btrfs/space-info.c +++ b/fs/btrfs/space-info.c @@ -468,8 +468,8 @@ again: "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s", cache->start, cache->length, cache->used, cache->pinned, cache->reserved, cache->ro ? "[readonly]" : ""); - btrfs_dump_free_space(cache, bytes); spin_unlock(&cache->lock); + btrfs_dump_free_space(cache, bytes); } if (++index < BTRFS_NR_RAID_TYPES) goto again; diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index c3826ae883f0..5a9dc31d95c9 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -67,6 +67,21 @@ static struct file_system_type btrfs_root_fs_type; static int btrfs_remount(struct super_block *sb, int *flags, char *data); +/* + * Generally the error codes correspond to their respective errors, but there + * are a few special cases. + * + * EUCLEAN: Any sort of corruption that we encounter. The tree-checker for + * instance will return EUCLEAN if any of the blocks are corrupted in + * a way that is problematic. We want to reserve EUCLEAN for these + * sort of corruptions. + * + * EROFS: If we check BTRFS_FS_STATE_ERROR and fail out with a return error, we + * need to use EROFS for this case. We will have no idea of the + * original failure, that will have been reported at the time we tripped + * over the error. Each subsequent error that doesn't have any context + * of the original error should use EROFS when handling BTRFS_FS_STATE_ERROR. 
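The scrub_supers() hunk above is a direct application of this convention: tripping over BTRFS_FS_STATE_ERROR now returns -EROFS rather than -EIO, because the real failure was already reported when the state bit was set. A hedged kernel-style sketch of the pattern (check_something() and found_corruption() are placeholders, not real helpers):

    static int check_something(struct btrfs_fs_info *fs_info)
    {
            if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
                    return -EROFS;          /* error already reported elsewhere */

            if (found_corruption())         /* hypothetical first-hand detection */
                    return -EUCLEAN;        /* EUCLEAN is reserved for corruption */

            return 0;
    }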
+ */ const char * __attribute_const__ btrfs_decode_error(int errno) { char *errstr = "unknown"; @@ -326,7 +341,6 @@ enum { Opt_defrag, Opt_nodefrag, Opt_discard, Opt_nodiscard, Opt_discard_mode, - Opt_nologreplay, Opt_norecovery, Opt_ratio, Opt_rescan_uuid_tree, @@ -340,13 +354,15 @@ enum { Opt_subvolid, Opt_thread_pool, Opt_treelog, Opt_notreelog, - Opt_usebackuproot, Opt_user_subvol_rm_allowed, + /* Rescue options */ + Opt_rescue, + Opt_usebackuproot, + Opt_nologreplay, + /* Deprecated options */ - Opt_alloc_start, Opt_recovery, - Opt_subvolrootid, /* Debugging options */ Opt_check_integrity, @@ -390,7 +406,6 @@ static const match_table_t tokens = { {Opt_discard, "discard"}, {Opt_discard_mode, "discard=%s"}, {Opt_nodiscard, "nodiscard"}, - {Opt_nologreplay, "nologreplay"}, {Opt_norecovery, "norecovery"}, {Opt_ratio, "metadata_ratio=%u"}, {Opt_rescan_uuid_tree, "rescan_uuid_tree"}, @@ -408,13 +423,17 @@ static const match_table_t tokens = { {Opt_thread_pool, "thread_pool=%u"}, {Opt_treelog, "treelog"}, {Opt_notreelog, "notreelog"}, - {Opt_usebackuproot, "usebackuproot"}, {Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"}, + /* Rescue options */ + {Opt_rescue, "rescue=%s"}, + /* Deprecated, with alias rescue=nologreplay */ + {Opt_nologreplay, "nologreplay"}, + /* Deprecated, with alias rescue=usebackuproot */ + {Opt_usebackuproot, "usebackuproot"}, + /* Deprecated options */ - {Opt_alloc_start, "alloc_start=%s"}, {Opt_recovery, "recovery"}, - {Opt_subvolrootid, "subvolrootid=%d"}, /* Debugging options */ {Opt_check_integrity, "check_int"}, @@ -433,6 +452,55 @@ static const match_table_t tokens = { {Opt_err, NULL}, }; +static const match_table_t rescue_tokens = { + {Opt_usebackuproot, "usebackuproot"}, + {Opt_nologreplay, "nologreplay"}, + {Opt_err, NULL}, +}; + +static int parse_rescue_options(struct btrfs_fs_info *info, const char *options) +{ + char *opts; + char *orig; + char *p; + substring_t args[MAX_OPT_ARGS]; + int ret = 0; + + opts = kstrdup(options, GFP_KERNEL); + if (!opts) + return -ENOMEM; + orig = opts; + + while ((p = strsep(&opts, ":")) != NULL) { + int token; + + if (!*p) + continue; + token = match_token(p, rescue_tokens, args); + switch (token){ + case Opt_usebackuproot: + btrfs_info(info, + "trying to use backup root at mount time"); + btrfs_set_opt(info->mount_opt, USEBACKUPROOT); + break; + case Opt_nologreplay: + btrfs_set_and_info(info, NOLOGREPLAY, + "disabling log replay at mount time"); + break; + case Opt_err: + btrfs_info(info, "unrecognized rescue option '%s'", p); + ret = -EINVAL; + goto out; + default: + break; + } + + } +out: + kfree(orig); + return ret; +} + /* * Regular mount options parser. Everything that is needed only when * reading in a new superblock is parsed here. 
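With the token table and parse_rescue_options() above, the rescue behaviours are grouped under a single mount option whose value is split on ':' by strsep(), so an invocation such as mount -o rescue=nologreplay, or a combined mount -o rescue=nologreplay:usebackuproot (illustrative command lines), selects one or both behaviours. The bare nologreplay and usebackuproot spellings keep working as deprecated aliases but now print a warning pointing at the rescue= form, as the hunks below show.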
@@ -479,7 +547,6 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, case Opt_subvol: case Opt_subvol_empty: case Opt_subvolid: - case Opt_subvolrootid: case Opt_device: /* * These are parsed by btrfs_parse_subvol_options or @@ -663,10 +730,6 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, goto out; } break; - case Opt_alloc_start: - btrfs_info(info, - "option alloc_start is obsolete, ignored"); - break; case Opt_acl: #ifdef CONFIG_BTRFS_FS_POSIX_ACL info->sb->s_flags |= SB_POSIXACL; @@ -689,6 +752,8 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, break; case Opt_norecovery: case Opt_nologreplay: + btrfs_warn(info, + "'nologreplay' is deprecated, use 'rescue=nologreplay' instead"); btrfs_set_and_info(info, NOLOGREPLAY, "disabling log replay at mount time"); break; @@ -762,6 +827,8 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, } break; case Opt_inode_cache: + btrfs_warn(info, + "the 'inode_cache' option is deprecated and will have no effect from 5.11"); btrfs_set_pending_and_info(info, INODE_MAP_CACHE, "enabling inode map caching"); break; @@ -791,10 +858,11 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, "disabling auto defrag"); break; case Opt_recovery: - btrfs_warn(info, - "'recovery' is deprecated, use 'usebackuproot' instead"); - fallthrough; case Opt_usebackuproot: + btrfs_warn(info, + "'%s' is deprecated, use 'rescue=usebackuproot' instead", + token == Opt_recovery ? "recovery" : + "usebackuproot"); btrfs_info(info, "trying to use backup root at mount time"); btrfs_set_opt(info->mount_opt, USEBACKUPROOT); @@ -859,6 +927,11 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, } info->commit_interval = intarg; break; + case Opt_rescue: + ret = parse_rescue_options(info, args[0].from); + if (ret < 0) + goto out; + break; #ifdef CONFIG_BTRFS_DEBUG case Opt_fragment_all: btrfs_info(info, "fragmenting all space"); @@ -1020,9 +1093,6 @@ static int btrfs_parse_subvol_options(const char *options, char **subvol_name, *subvol_objectid = subvolid; break; - case Opt_subvolrootid: - pr_warn("BTRFS: 'subvolrootid' mount option is deprecated and has no effect\n"); - break; default: break; } @@ -1344,7 +1414,7 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry) if (btrfs_test_opt(info, NOTREELOG)) seq_puts(seq, ",notreelog"); if (btrfs_test_opt(info, NOLOGREPLAY)) - seq_puts(seq, ",nologreplay"); + seq_puts(seq, ",rescue=nologreplay"); if (btrfs_test_opt(info, FLUSHONCOMMIT)) seq_puts(seq, ",flushoncommit"); if (btrfs_test_opt(info, DISCARD_SYNC)) @@ -1712,11 +1782,6 @@ static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info, new_pool_size); } -static inline void btrfs_remount_prepare(struct btrfs_fs_info *fs_info) -{ - set_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state); -} - static inline void btrfs_remount_begin(struct btrfs_fs_info *fs_info, unsigned long old_opts, int flags) { @@ -1750,8 +1815,6 @@ static inline void btrfs_remount_cleanup(struct btrfs_fs_info *fs_info, else if (btrfs_raw_test_opt(old_opts, DISCARD_ASYNC) && !btrfs_test_opt(fs_info, DISCARD_ASYNC)) btrfs_discard_cleanup(fs_info); - - clear_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state); } static int btrfs_remount(struct super_block *sb, int *flags, char *data) @@ -1767,7 +1830,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data) int ret; sync_filesystem(sb); - btrfs_remount_prepare(fs_info); + set_bit(BTRFS_FS_STATE_REMOUNTING, 
&fs_info->fs_state); if (data) { void *new_sec_opts = NULL; @@ -1889,6 +1952,8 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data) out: wake_up_process(fs_info->transaction_kthread); btrfs_remount_cleanup(fs_info, old_opts); + clear_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state); + return 0; restore: @@ -1903,6 +1968,8 @@ restore: old_thread_pool_size, fs_info->thread_pool_size); fs_info->metadata_ratio = old_metadata_ratio; btrfs_remount_cleanup(fs_info, old_opts); + clear_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state); + return ret; } @@ -2296,9 +2363,7 @@ static int btrfs_unfreeze(struct super_block *sb) static int btrfs_show_devname(struct seq_file *m, struct dentry *root) { struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb); - struct btrfs_fs_devices *cur_devices; struct btrfs_device *dev, *first_dev = NULL; - struct list_head *head; /* * Lightweight locking of the devices. We should not need @@ -2308,18 +2373,13 @@ static int btrfs_show_devname(struct seq_file *m, struct dentry *root) * least until the rcu_read_unlock. */ rcu_read_lock(); - cur_devices = fs_info->fs_devices; - while (cur_devices) { - head = &cur_devices->devices; - list_for_each_entry_rcu(dev, head, dev_list) { - if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) - continue; - if (!dev->name) - continue; - if (!first_dev || dev->devid < first_dev->devid) - first_dev = dev; - } - cur_devices = cur_devices->seed; + list_for_each_entry_rcu(dev, &fs_info->fs_devices->devices, dev_list) { + if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) + continue; + if (!dev->name) + continue; + if (!first_dev || dev->devid < first_dev->devid) + first_dev = dev; } if (first_dev) diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index a39bff64ff24..104c80caaa74 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -19,6 +19,7 @@ #include "volumes.h" #include "space-info.h" #include "block-group.h" +#include "qgroup.h" struct btrfs_feature_attr { struct kobj_attribute kobj_attr; @@ -936,8 +937,12 @@ void btrfs_sysfs_remove_fsid(struct btrfs_fs_devices *fs_devs) void btrfs_sysfs_remove_mounted(struct btrfs_fs_info *fs_info) { + struct kobject *fsid_kobj = &fs_info->fs_devices->fsid_kobj; + btrfs_reset_fs_info_ptr(fs_info); + sysfs_remove_link(fsid_kobj, "bdi"); + if (fs_info->space_info_kobj) { sysfs_remove_files(fs_info->space_info_kobj, allocation_attrs); kobject_del(fs_info->space_info_kobj); @@ -957,8 +962,8 @@ void btrfs_sysfs_remove_mounted(struct btrfs_fs_info *fs_info) } #endif addrm_unknown_feature_attrs(fs_info, false); - sysfs_remove_group(&fs_info->fs_devices->fsid_kobj, &btrfs_feature_attr_group); - sysfs_remove_files(&fs_info->fs_devices->fsid_kobj, btrfs_attrs); + sysfs_remove_group(fsid_kobj, &btrfs_feature_attr_group); + sysfs_remove_files(fsid_kobj, btrfs_attrs); btrfs_sysfs_remove_devices_dir(fs_info->fs_devices, NULL); } @@ -1273,7 +1278,9 @@ int btrfs_sysfs_add_devices_dir(struct btrfs_fs_devices *fs_devices, { int error = 0; struct btrfs_device *dev; + unsigned int nofs_flag; + nofs_flag = memalloc_nofs_save(); list_for_each_entry(dev, &fs_devices->devices, dev_list) { if (one_device && one_device != dev) @@ -1301,6 +1308,7 @@ int btrfs_sysfs_add_devices_dir(struct btrfs_fs_devices *fs_devices, break; } } + memalloc_nofs_restore(nofs_flag); return error; } @@ -1438,6 +1446,10 @@ int btrfs_sysfs_add_mounted(struct btrfs_fs_info *fs_info) if (error) goto failure; + error = sysfs_create_link(fsid_kobj, &fs_info->sb->s_bdi->dev->kobj, "bdi"); + if (error) + goto failure; + 
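The sysfs_create_link() call above adds a "bdi" symlink under the per-filesystem directory /sys/fs/btrfs/<fsid>/, pointing at the device kobject of the superblock's backing_dev_info; the matching sysfs_remove_link() added to btrfs_sysfs_remove_mounted() earlier in this hunk drops it again on unmount.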
fs_info->space_info_kobj = kobject_create_and_add("allocation", fsid_kobj); if (!fs_info->space_info_kobj) { @@ -1455,6 +1467,153 @@ failure: return error; } +static inline struct btrfs_fs_info *qgroup_kobj_to_fs_info(struct kobject *kobj) +{ + return to_fs_info(kobj->parent->parent); +} + +#define QGROUP_ATTR(_member, _show_name) \ +static ssize_t btrfs_qgroup_show_##_member(struct kobject *qgroup_kobj, \ + struct kobj_attribute *a, \ + char *buf) \ +{ \ + struct btrfs_fs_info *fs_info = qgroup_kobj_to_fs_info(qgroup_kobj); \ + struct btrfs_qgroup *qgroup = container_of(qgroup_kobj, \ + struct btrfs_qgroup, kobj); \ + return btrfs_show_u64(&qgroup->_member, &fs_info->qgroup_lock, buf); \ +} \ +BTRFS_ATTR(qgroup, _show_name, btrfs_qgroup_show_##_member) + +#define QGROUP_RSV_ATTR(_name, _type) \ +static ssize_t btrfs_qgroup_rsv_show_##_name(struct kobject *qgroup_kobj, \ + struct kobj_attribute *a, \ + char *buf) \ +{ \ + struct btrfs_fs_info *fs_info = qgroup_kobj_to_fs_info(qgroup_kobj); \ + struct btrfs_qgroup *qgroup = container_of(qgroup_kobj, \ + struct btrfs_qgroup, kobj); \ + return btrfs_show_u64(&qgroup->rsv.values[_type], \ + &fs_info->qgroup_lock, buf); \ +} \ +BTRFS_ATTR(qgroup, rsv_##_name, btrfs_qgroup_rsv_show_##_name) + +QGROUP_ATTR(rfer, referenced); +QGROUP_ATTR(excl, exclusive); +QGROUP_ATTR(max_rfer, max_referenced); +QGROUP_ATTR(max_excl, max_exclusive); +QGROUP_ATTR(lim_flags, limit_flags); +QGROUP_RSV_ATTR(data, BTRFS_QGROUP_RSV_DATA); +QGROUP_RSV_ATTR(meta_pertrans, BTRFS_QGROUP_RSV_META_PERTRANS); +QGROUP_RSV_ATTR(meta_prealloc, BTRFS_QGROUP_RSV_META_PREALLOC); + +static struct attribute *qgroup_attrs[] = { + BTRFS_ATTR_PTR(qgroup, referenced), + BTRFS_ATTR_PTR(qgroup, exclusive), + BTRFS_ATTR_PTR(qgroup, max_referenced), + BTRFS_ATTR_PTR(qgroup, max_exclusive), + BTRFS_ATTR_PTR(qgroup, limit_flags), + BTRFS_ATTR_PTR(qgroup, rsv_data), + BTRFS_ATTR_PTR(qgroup, rsv_meta_pertrans), + BTRFS_ATTR_PTR(qgroup, rsv_meta_prealloc), + NULL +}; +ATTRIBUTE_GROUPS(qgroup); + +static void qgroup_release(struct kobject *kobj) +{ + struct btrfs_qgroup *qgroup = container_of(kobj, struct btrfs_qgroup, kobj); + + memset(&qgroup->kobj, 0, sizeof(*kobj)); +} + +static struct kobj_type qgroup_ktype = { + .sysfs_ops = &kobj_sysfs_ops, + .release = qgroup_release, + .default_groups = qgroup_groups, +}; + +int btrfs_sysfs_add_one_qgroup(struct btrfs_fs_info *fs_info, + struct btrfs_qgroup *qgroup) +{ + struct kobject *qgroups_kobj = fs_info->qgroups_kobj; + int ret; + + if (test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state)) + return 0; + if (qgroup->kobj.state_initialized) + return 0; + if (!qgroups_kobj) + return -EINVAL; + + ret = kobject_init_and_add(&qgroup->kobj, &qgroup_ktype, qgroups_kobj, + "%hu_%llu", btrfs_qgroup_level(qgroup->qgroupid), + btrfs_qgroup_subvolid(qgroup->qgroupid)); + if (ret < 0) + kobject_put(&qgroup->kobj); + + return ret; +} + +void btrfs_sysfs_del_qgroups(struct btrfs_fs_info *fs_info) +{ + struct btrfs_qgroup *qgroup; + struct btrfs_qgroup *next; + + if (test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state)) + return; + + rbtree_postorder_for_each_entry_safe(qgroup, next, + &fs_info->qgroup_tree, node) + btrfs_sysfs_del_one_qgroup(fs_info, qgroup); + kobject_del(fs_info->qgroups_kobj); + kobject_put(fs_info->qgroups_kobj); + fs_info->qgroups_kobj = NULL; +} + +/* Called when qgroups get initialized, thus there is no need for locking */ +int btrfs_sysfs_add_qgroups(struct btrfs_fs_info *fs_info) +{ + struct kobject *fsid_kobj = 
&fs_info->fs_devices->fsid_kobj; + struct btrfs_qgroup *qgroup; + struct btrfs_qgroup *next; + int ret = 0; + + if (test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state)) + return 0; + + ASSERT(fsid_kobj); + if (fs_info->qgroups_kobj) + return 0; + + fs_info->qgroups_kobj = kobject_create_and_add("qgroups", fsid_kobj); + if (!fs_info->qgroups_kobj) { + ret = -ENOMEM; + goto out; + } + rbtree_postorder_for_each_entry_safe(qgroup, next, + &fs_info->qgroup_tree, node) { + ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup); + if (ret < 0) + goto out; + } + +out: + if (ret < 0) + btrfs_sysfs_del_qgroups(fs_info); + return ret; +} + +void btrfs_sysfs_del_one_qgroup(struct btrfs_fs_info *fs_info, + struct btrfs_qgroup *qgroup) +{ + if (test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state)) + return; + + if (qgroup->kobj.state_initialized) { + kobject_del(&qgroup->kobj); + kobject_put(&qgroup->kobj); + } +} /* * Change per-fs features in /sys/fs/btrfs/UUID/features to match current diff --git a/fs/btrfs/sysfs.h b/fs/btrfs/sysfs.h index 718a26c97833..cf839c46a131 100644 --- a/fs/btrfs/sysfs.h +++ b/fs/btrfs/sysfs.h @@ -36,4 +36,11 @@ int btrfs_sysfs_add_space_info_type(struct btrfs_fs_info *fs_info, void btrfs_sysfs_remove_space_info(struct btrfs_space_info *space_info); void btrfs_sysfs_update_devid(struct btrfs_device *device); +int btrfs_sysfs_add_one_qgroup(struct btrfs_fs_info *fs_info, + struct btrfs_qgroup *qgroup); +void btrfs_sysfs_del_qgroups(struct btrfs_fs_info *fs_info); +int btrfs_sysfs_add_qgroups(struct btrfs_fs_info *fs_info); +void btrfs_sysfs_del_one_qgroup(struct btrfs_fs_info *fs_info, + struct btrfs_qgroup *qgroup); + #endif diff --git a/fs/btrfs/tests/free-space-tree-tests.c b/fs/btrfs/tests/free-space-tree-tests.c index 914eea5ba6a7..2c783d2f5228 100644 --- a/fs/btrfs/tests/free-space-tree-tests.c +++ b/fs/btrfs/tests/free-space-tree-tests.c @@ -60,8 +60,6 @@ static int __check_free_space_extents(struct btrfs_trans_handle *trans, if (prev_bit == 0 && bit == 1) { extent_start = offset; } else if (prev_bit == 1 && bit == 0) { - if (i >= num_extents) - goto invalid; if (i >= num_extents || extent_start != extents[i].start || offset - extent_start != extents[i].length) diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c index 24a8c714f56c..894a63a92236 100644 --- a/fs/btrfs/tests/inode-tests.c +++ b/fs/btrfs/tests/inode-tests.c @@ -954,8 +954,8 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize) btrfs_test_inode_set_ops(inode); /* [BTRFS_MAX_EXTENT_SIZE] */ - ret = btrfs_set_extent_delalloc(inode, 0, BTRFS_MAX_EXTENT_SIZE - 1, 0, - NULL); + ret = btrfs_set_extent_delalloc(BTRFS_I(inode), 0, + BTRFS_MAX_EXTENT_SIZE - 1, 0, NULL); if (ret) { test_err("btrfs_set_extent_delalloc returned %d", ret); goto out; @@ -968,7 +968,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize) } /* [BTRFS_MAX_EXTENT_SIZE][sectorsize] */ - ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE, + ret = btrfs_set_extent_delalloc(BTRFS_I(inode), BTRFS_MAX_EXTENT_SIZE, BTRFS_MAX_EXTENT_SIZE + sectorsize - 1, 0, NULL); if (ret) { @@ -999,7 +999,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize) } /* [BTRFS_MAX_EXTENT_SIZE][sectorsize] */ - ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE >> 1, + ret = btrfs_set_extent_delalloc(BTRFS_I(inode), BTRFS_MAX_EXTENT_SIZE >> 1, (BTRFS_MAX_EXTENT_SIZE >> 1) + sectorsize - 1, 0, NULL); @@ -1017,7 +1017,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize) 
/* * [BTRFS_MAX_EXTENT_SIZE+sectorsize][sectorsize HOLE][BTRFS_MAX_EXTENT_SIZE+sectorsize] */ - ret = btrfs_set_extent_delalloc(inode, + ret = btrfs_set_extent_delalloc(BTRFS_I(inode), BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize, (BTRFS_MAX_EXTENT_SIZE << 1) + 3 * sectorsize - 1, 0, NULL); @@ -1035,7 +1035,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize) /* * [BTRFS_MAX_EXTENT_SIZE+sectorsize][sectorsize][BTRFS_MAX_EXTENT_SIZE+sectorsize] */ - ret = btrfs_set_extent_delalloc(inode, + ret = btrfs_set_extent_delalloc(BTRFS_I(inode), BTRFS_MAX_EXTENT_SIZE + sectorsize, BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, 0, NULL); if (ret) { @@ -1069,7 +1069,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize) * Refill the hole again just for good measure, because I thought it * might fail and I'd rather satisfy my paranoia at this point. */ - ret = btrfs_set_extent_delalloc(inode, + ret = btrfs_set_extent_delalloc(BTRFS_I(inode), BTRFS_MAX_EXTENT_SIZE + sectorsize, BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, 0, NULL); if (ret) { diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index b359d4b17658..20c6ac1a5de7 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -937,7 +937,10 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, if (TRANS_ABORTED(trans) || test_bit(BTRFS_FS_STATE_ERROR, &info->fs_state)) { wake_up_process(info->transaction_kthread); - err = -EIO; + if (TRANS_ABORTED(trans)) + err = trans->aborted; + else + err = -EROFS; } kmem_cache_free(btrfs_trans_handle_cachep, trans); @@ -1630,7 +1633,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, } key.offset = (u64)-1; - pending->snap = btrfs_get_fs_root(fs_info, objectid, true); + pending->snap = btrfs_get_new_fs_root(fs_info, objectid, pending->anon_dev); if (IS_ERR(pending->snap)) { ret = PTR_ERR(pending->snap); btrfs_abort_transaction(trans, ret); @@ -2351,7 +2354,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) */ cur_trans->state = TRANS_STATE_COMPLETED; wake_up(&cur_trans->commit_wait); - clear_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags); spin_lock(&fs_info->trans_lock); list_del_init(&cur_trans->list); diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index bf102e64bfb2..d60b055b8695 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -151,18 +151,20 @@ struct btrfs_pending_snapshot { struct btrfs_block_rsv block_rsv; /* extra metadata reservation for relocation */ int error; + /* Preallocated anonymous block device number */ + dev_t anon_dev; bool readonly; struct list_head list; }; static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans, - struct inode *inode) + struct btrfs_inode *inode) { - spin_lock(&BTRFS_I(inode)->lock); - BTRFS_I(inode)->last_trans = trans->transaction->transid; - BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid; - BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit; - spin_unlock(&BTRFS_I(inode)->lock); + spin_lock(&inode->lock); + inode->last_trans = trans->transaction->transid; + inode->last_sub_trans = inode->root->log_transid; + inode->last_log_commit = inode->root->last_log_commit; + spin_unlock(&inode->lock); } /* @@ -208,20 +210,6 @@ int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root); int btrfs_commit_transaction(struct btrfs_trans_handle *trans); int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans, int wait_for_unblock); - -/* - * Try to commit transaction 
asynchronously, so this is safe to call - * even holding a spinlock. - * - * It's done by informing transaction_kthread to commit transaction without - * waiting for commit interval. - */ -static inline void btrfs_commit_transaction_locksafe( - struct btrfs_fs_info *fs_info) -{ - set_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags); - wake_up_process(fs_info->transaction_kthread); -} int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans); int btrfs_should_end_transaction(struct btrfs_trans_handle *trans); void btrfs_throttle(struct btrfs_fs_info *fs_info); diff --git a/fs/btrfs/tree-defrag.c b/fs/btrfs/tree-defrag.c index 16c3a6d2586d..d3f28b8f4ff9 100644 --- a/fs/btrfs/tree-defrag.c +++ b/fs/btrfs/tree-defrag.c @@ -133,10 +133,9 @@ out: ret = 0; } done: - if (ret != -EAGAIN) { + if (ret != -EAGAIN) memset(&root->defrag_progress, 0, sizeof(root->defrag_progress)); - root->defrag_trans_start = trans->transid; - } + return ret; } diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index cd5348f352dd..ea8136dcf71f 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -3116,29 +3116,17 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, btrfs_init_log_ctx(&root_log_ctx, NULL); mutex_lock(&log_root_tree->log_mutex); - atomic_inc(&log_root_tree->log_batch); - atomic_inc(&log_root_tree->log_writers); index2 = log_root_tree->log_transid % 2; list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]); root_log_ctx.log_transid = log_root_tree->log_transid; - mutex_unlock(&log_root_tree->log_mutex); - - mutex_lock(&log_root_tree->log_mutex); - /* * Now we are safe to update the log_root_tree because we're under the * log_mutex, and we're a current writer so we're holding the commit * open until we drop the log_mutex. */ ret = update_log_root(trans, log, &new_root_item); - - if (atomic_dec_and_test(&log_root_tree->log_writers)) { - /* atomic_dec_and_test implies a barrier */ - cond_wake_up_nomb(&log_root_tree->log_writer_wait); - } - if (ret) { if (!list_empty(&root_log_ctx.list)) list_del_init(&root_log_ctx.list); @@ -3184,8 +3172,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, root_log_ctx.log_transid - 1); } - wait_for_writer(log_root_tree); - /* * now that we've moved on to the tree of log tree roots, * check the full commit flag again @@ -3906,6 +3892,7 @@ static int log_inode_item(struct btrfs_trans_handle *trans, } static int log_csums(struct btrfs_trans_handle *trans, + struct btrfs_inode *inode, struct btrfs_root *log_root, struct btrfs_ordered_sum *sums) { @@ -3914,6 +3901,14 @@ static int log_csums(struct btrfs_trans_handle *trans, int ret; /* + * If this inode was not used for reflink operations in the current + * transaction with new extents, then do the fast path, no need to + * worry about logging checksum items with overlapping ranges. + */ + if (inode->last_reflink_trans < trans->transid) + return btrfs_csum_file_blocks(trans, log_root, sums); + + /* * Serialize logging for checksums. This is to avoid racing with the * same checksum being logged by another task that is logging another * file which happens to refer to the same extent as well. 
Such races @@ -4064,7 +4059,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, struct btrfs_ordered_sum, list); if (!ret) - ret = log_csums(trans, log, sums); + ret = log_csums(trans, inode, log, sums); list_del(&sums->list); kfree(sums); } @@ -4123,7 +4118,7 @@ static int log_extent_csums(struct btrfs_trans_handle *trans, struct btrfs_ordered_sum, list); if (!ret) - ret = log_csums(trans, log_root, sums); + ret = log_csums(trans, inode, log_root, sums); list_del(&sums->list); kfree(sums); } @@ -4151,7 +4146,7 @@ static int log_one_extent(struct btrfs_trans_handle *trans, if (ret) return ret; - ret = __btrfs_drop_extents(trans, log, &inode->vfs_inode, path, em->start, + ret = __btrfs_drop_extents(trans, log, inode, path, em->start, em->start + em->len, NULL, 0, 1, sizeof(*fi), &extent_inserted); if (ret) @@ -5123,14 +5118,13 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, const loff_t end, struct btrfs_log_ctx *ctx) { - struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_path *path; struct btrfs_path *dst_path; struct btrfs_key min_key; struct btrfs_key max_key; struct btrfs_root *log = root->log_root; int err = 0; - int ret; + int ret = 0; bool fast_search = false; u64 ino = btrfs_ino(inode); struct extent_map_tree *em_tree = &inode->extent_tree; @@ -5166,15 +5160,19 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, max_key.offset = (u64)-1; /* - * Only run delayed items if we are a dir or a new file. - * Otherwise commit the delayed inode only, which is needed in - * order for the log replay code to mark inodes for link count - * fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items). + * Only run delayed items if we are a directory. We want to make sure + * all directory indexes hit the fs/subvolume tree so we can find them + * and figure out which index ranges have to be logged. + * + * Otherwise commit the delayed inode only if the full sync flag is set, + * as we want to make sure an up to date version is in the subvolume + * tree so copy_inode_items_to_log() / copy_items() can find it and copy + * it to the log tree. For a non full sync, we always log the inode item + * based on the in-memory struct btrfs_inode which is always up to date. */ - if (S_ISDIR(inode->vfs_inode.i_mode) || - inode->generation > fs_info->last_trans_committed) + if (S_ISDIR(inode->vfs_inode.i_mode)) ret = btrfs_commit_inode_delayed_items(trans, inode); - else + else if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags)) ret = btrfs_commit_inode_delayed_inode(inode); if (ret) { diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 0d6e785bcb98..d7670e2a9f39 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -245,7 +245,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, * * global::fs_devs - add, remove, updates to the global list * - * does not protect: manipulation of the fs_devices::devices list! + * does not protect: manipulation of the fs_devices::devices list in general + * but in mount context it could be used to exclude list modifications by eg. + * scan ioctl * * btrfs_device::name - renames (write side), read is RCU * @@ -258,6 +260,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, * may be used to exclude some operations from running concurrently without any * modifications to the list (see write_all_supers) * + * Is not required at mount and close times, because our device list is + * protected by the uuid_mutex at that point. 
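/*
 * Editor's aside (illustrative sketch, not part of this patch): the rule just
 * stated -- that mount/close paths may walk fs_devices::devices under
 * uuid_mutex alone, without device_list_mutex -- can be made explicit with a
 * lockdep assertion, as btrfs_open_devices() does further down. The helper
 * below is hypothetical and only shows the shape of such a mount-time walk:
 */
static void walk_devices_during_mount(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	lockdep_assert_held(&uuid_mutex);	/* exclusion comes from mount */

	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		/* list cannot change here: scan/remove are serialized by uuid_mutex */
	}
}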
+ * * balance_mutex * ------------- * protects balance structures (status, state) and context accessed from @@ -602,6 +607,11 @@ static int btrfs_free_stale_devices(const char *path, return ret; } +/* + * This is only used on mount, and we are protected from competing things + * messing with our fs_devices by the uuid_mutex, thus we do not need the + * fs_devices->device_list_mutex here. + */ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices, struct btrfs_device *device, fmode_t flags, void *holder) @@ -1229,8 +1239,14 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, int ret; lockdep_assert_held(&uuid_mutex); + /* + * The device_list_mutex cannot be taken here in case opening the + * underlying device takes further locks like bd_mutex. + * + * We also don't need the lock here as this is called during mount and + * exclusion is provided by uuid_mutex + */ - mutex_lock(&fs_devices->device_list_mutex); if (fs_devices->opened) { fs_devices->opened++; ret = 0; @@ -1238,7 +1254,6 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, list_sort(NULL, &fs_devices->devices, devid_cmp); ret = open_fs_devices(fs_devices, flags, holder); } - mutex_unlock(&fs_devices->device_list_mutex); return ret; } @@ -3231,7 +3246,7 @@ static int del_balance_item(struct btrfs_fs_info *fs_info) if (!path) return -ENOMEM; - trans = btrfs_start_transaction(root, 0); + trans = btrfs_start_transaction_fallback_global_rsv(root, 0); if (IS_ERR(trans)) { btrfs_free_path(path); return PTR_ERR(trans); @@ -4135,7 +4150,22 @@ int btrfs_balance(struct btrfs_fs_info *fs_info, mutex_lock(&fs_info->balance_mutex); if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req)) btrfs_info(fs_info, "balance: paused"); - else if (ret == -ECANCELED && atomic_read(&fs_info->balance_cancel_req)) + /* + * Balance can be canceled by: + * + * - Regular cancel request + * Then ret == -ECANCELED and balance_cancel_req > 0 + * + * - Fatal signal to "btrfs" process + * Either the signal caught by wait_reserve_ticket() and callers + * got -EINTR, or caught by btrfs_should_cancel_balance() and + * got -ECANCELED. + * Either way, in this case balance_cancel_req = 0, and + * ret == -EINTR or ret == -ECANCELED. + * + * So here we only check the return value to catch canceled balance. 
+ */ + else if (ret == -ECANCELED || ret == -EINTR) btrfs_info(fs_info, "balance: canceled"); else btrfs_info(fs_info, "balance: ended with status: %d", ret); @@ -5522,6 +5552,9 @@ static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes) atomic_set(&bbio->error, 0); refcount_set(&bbio->refs, 1); + bbio->tgtdev_map = (int *)(bbio->stripes + total_stripes); + bbio->raid_map = (u64 *)(bbio->tgtdev_map + real_stripes); + return bbio; } @@ -6144,8 +6177,13 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, ret = -ENOMEM; goto out; } - if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) - bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes); + + for (i = 0; i < num_stripes; i++) { + bbio->stripes[i].physical = map->stripes[stripe_index].physical + + stripe_offset + stripe_nr * map->stripe_len; + bbio->stripes[i].dev = map->stripes[stripe_index].dev; + stripe_index++; + } /* build raid_map */ if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map && @@ -6153,11 +6191,6 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, u64 tmp; unsigned rot; - bbio->raid_map = (u64 *)((void *)bbio->stripes + - sizeof(struct btrfs_bio_stripe) * - num_alloc_stripes + - sizeof(int) * tgtdev_indexes); - /* Work out the disk rotation on this stripe-set */ div_u64_rem(stripe_nr, num_stripes, &rot); @@ -6171,25 +6204,13 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, if (map->type & BTRFS_BLOCK_GROUP_RAID6) bbio->raid_map[(i+rot+1) % num_stripes] = RAID6_Q_STRIPE; - } - - for (i = 0; i < num_stripes; i++) { - bbio->stripes[i].physical = - map->stripes[stripe_index].physical + - stripe_offset + - stripe_nr * map->stripe_len; - bbio->stripes[i].dev = - map->stripes[stripe_index].dev; - stripe_index++; + sort_parity_stripes(bbio, num_stripes); } if (need_full_stripe(op)) max_errors = btrfs_chunk_max_errors(map); - if (bbio->raid_map) - sort_parity_stripes(bbio, num_stripes); - if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL && need_full_stripe(op)) { handle_ops_on_dev_replace(op, &bbio, dev_replace, &num_stripes, @@ -6261,23 +6282,18 @@ static void btrfs_end_bio(struct bio *bio) atomic_inc(&bbio->error); if (bio->bi_status == BLK_STS_IOERR || bio->bi_status == BLK_STS_TARGET) { - unsigned int stripe_index = - btrfs_io_bio(bio)->stripe_index; - struct btrfs_device *dev; - - BUG_ON(stripe_index >= bbio->num_stripes); - dev = bbio->stripes[stripe_index].dev; - if (dev->bdev) { - if (bio_op(bio) == REQ_OP_WRITE) - btrfs_dev_stat_inc_and_print(dev, + struct btrfs_device *dev = btrfs_io_bio(bio)->device; + + ASSERT(dev->bdev); + if (bio_op(bio) == REQ_OP_WRITE) + btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS); - else if (!(bio->bi_opf & REQ_RAHEAD)) - btrfs_dev_stat_inc_and_print(dev, + else if (!(bio->bi_opf & REQ_RAHEAD)) + btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS); - if (bio->bi_opf & REQ_PREFLUSH) - btrfs_dev_stat_inc_and_print(dev, + if (bio->bi_opf & REQ_PREFLUSH) + btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_FLUSH_ERRS); - } } } @@ -6313,13 +6329,12 @@ static void btrfs_end_bio(struct bio *bio) } static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio, - u64 physical, int dev_nr) + u64 physical, struct btrfs_device *dev) { - struct btrfs_device *dev = bbio->stripes[dev_nr].dev; struct btrfs_fs_info *fs_info = bbio->fs_info; bio->bi_private = bbio; - btrfs_io_bio(bio)->stripe_index = dev_nr; + btrfs_io_bio(bio)->device = dev; bio->bi_end_io = btrfs_end_bio; bio->bi_iter.bi_sector 
= physical >> 9; btrfs_debug_in_rcu(fs_info, @@ -6420,8 +6435,7 @@ blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio, else bio = first_bio; - submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical, - dev_nr); + submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical, dev); } btrfs_bio_counter_dec(fs_info); return BLK_STS_OK; @@ -7029,6 +7043,19 @@ out: return ret; } +static void readahead_tree_node_children(struct extent_buffer *node) +{ + int i; + const int nr_items = btrfs_header_nritems(node); + + for (i = 0; i < nr_items; i++) { + u64 start; + + start = btrfs_node_blockptr(node, i); + readahead_tree_block(node->fs_info, start); + } +} + int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info) { struct btrfs_root *root = fs_info->chunk_root; @@ -7039,6 +7066,7 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info) int ret; int slot; u64 total_dev = 0; + u64 last_ra_node = 0; path = btrfs_alloc_path(); if (!path) @@ -7049,7 +7077,14 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info) * otherwise we don't need it. */ mutex_lock(&uuid_mutex); - mutex_lock(&fs_info->chunk_mutex); + + /* + * It is possible for mount and umount to race in such a way that + * we execute this code path, but open_fs_devices failed to clear + * total_rw_bytes. We certainly want it cleared before reading the + * device items, so clear it here. + */ + fs_info->fs_devices->total_rw_bytes = 0; /* * Read all device items, and then all the chunk items. All @@ -7064,6 +7099,8 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info) if (ret < 0) goto error; while (1) { + struct extent_buffer *node; + leaf = path->nodes[0]; slot = path->slots[0]; if (slot >= btrfs_header_nritems(leaf)) { @@ -7074,6 +7111,17 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info) goto error; break; } + /* + * The nodes on level 1 are not locked but we don't need to do + * that during mount time as nothing else can access the tree + */ + node = path->nodes[1]; + if (node) { + if (last_ra_node != node->start) { + readahead_tree_node_children(node); + last_ra_node = node->start; + } + } btrfs_item_key_to_cpu(leaf, &found_key, slot); if (found_key.type == BTRFS_DEV_ITEM_KEY) { struct btrfs_dev_item *dev_item; @@ -7086,7 +7134,9 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info) } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) { struct btrfs_chunk *chunk; chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); + mutex_lock(&fs_info->chunk_mutex); ret = read_one_chunk(&found_key, leaf, chunk); + mutex_unlock(&fs_info->chunk_mutex); if (ret) goto error; } @@ -7116,7 +7166,6 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info) } ret = 0; error: - mutex_unlock(&fs_info->chunk_mutex); mutex_unlock(&uuid_mutex); btrfs_free_path(path); diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 75af2334b2e3..5eea93916fbf 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -288,7 +288,7 @@ struct btrfs_fs_devices { */ struct btrfs_io_bio { unsigned int mirror_num; - unsigned int stripe_index; + struct btrfs_device *device; u64 logical; u8 *csum; u8 csum_inline[BTRFS_BIO_INLINE_CSUM_SIZE]; diff --git a/fs/buffer.c b/fs/buffer.c index 64fe82ec65ff..dc5e05b47646 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -320,9 +320,8 @@ static void decrypt_bh(struct work_struct *work) static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate) { /* Decrypt if needed */ - if (uptodate && IS_ENABLED(CONFIG_FS_ENCRYPTION) && - IS_ENCRYPTED(bh->b_page->mapping->host) && - 
S_ISREG(bh->b_page->mapping->host->i_mode)) { + if (uptodate && + fscrypt_inode_uses_fs_layer_crypto(bh->b_page->mapping->host)) { struct decrypt_bh_ctx *ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC); if (ctx) { @@ -3046,6 +3045,8 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh, */ bio = bio_alloc(GFP_NOIO, 1); + fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO); + bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); bio_set_dev(bio, bh->b_bdev); bio->bi_write_hint = write_hint; diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 49c3ea8aa845..ce95801e9b66 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -2044,7 +2044,6 @@ cifs_rename2(struct inode *source_dir, struct dentry *source_dentry, FILE_UNIX_BASIC_INFO *info_buf_target; unsigned int xid; int rc, tmprc; - bool new_target = d_really_is_negative(target_dentry); if (flags & ~RENAME_NOREPLACE) return -EINVAL; @@ -2121,13 +2120,8 @@ cifs_rename2(struct inode *source_dir, struct dentry *source_dentry, */ unlink_target: - /* - * If the target dentry was created during the rename, try - * unlinking it if it's not negative - */ - if (new_target && - d_really_is_positive(target_dentry) && - (rc == -EACCES || rc == -EEXIST)) { + /* Try unlinking the target dentry if it's not negative */ + if (d_really_is_positive(target_dentry) && (rc == -EACCES || rc == -EEXIST)) { if (d_is_dir(target_dentry)) tmprc = cifs_rmdir(target_dir, target_dentry); else diff --git a/fs/crypto/Kconfig b/fs/crypto/Kconfig index 8046d7c7a3e9..a5f5c30368a2 100644 --- a/fs/crypto/Kconfig +++ b/fs/crypto/Kconfig @@ -4,6 +4,7 @@ config FS_ENCRYPTION select CRYPTO select CRYPTO_HASH select CRYPTO_SKCIPHER + select CRYPTO_LIB_SHA256 select KEYS help Enable encryption of files and directories. This @@ -21,6 +22,11 @@ config FS_ENCRYPTION_ALGS select CRYPTO_CTS select CRYPTO_ECB select CRYPTO_HMAC - select CRYPTO_SHA256 select CRYPTO_SHA512 select CRYPTO_XTS + +config FS_ENCRYPTION_INLINE_CRYPT + bool "Enable fscrypt to use inline crypto" + depends on FS_ENCRYPTION && BLK_INLINE_ENCRYPTION + help + Enable fscrypt to use inline encryption hardware if available. diff --git a/fs/crypto/Makefile b/fs/crypto/Makefile index 232e2bb5a337..652c7180ec6d 100644 --- a/fs/crypto/Makefile +++ b/fs/crypto/Makefile @@ -11,3 +11,4 @@ fscrypto-y := crypto.o \ policy.o fscrypto-$(CONFIG_BLOCK) += bio.o +fscrypto-$(CONFIG_FS_ENCRYPTION_INLINE_CRYPT) += inline_crypt.o diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c index 4fa18fff9c4e..b048a0e38516 100644 --- a/fs/crypto/bio.c +++ b/fs/crypto/bio.c @@ -41,6 +41,53 @@ void fscrypt_decrypt_bio(struct bio *bio) } EXPORT_SYMBOL(fscrypt_decrypt_bio); +static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode, + pgoff_t lblk, sector_t pblk, + unsigned int len) +{ + const unsigned int blockbits = inode->i_blkbits; + const unsigned int blocks_per_page = 1 << (PAGE_SHIFT - blockbits); + struct bio *bio; + int ret, err = 0; + int num_pages = 0; + + /* This always succeeds since __GFP_DIRECT_RECLAIM is set. 
*/ + bio = bio_alloc(GFP_NOFS, BIO_MAX_PAGES); + + while (len) { + unsigned int blocks_this_page = min(len, blocks_per_page); + unsigned int bytes_this_page = blocks_this_page << blockbits; + + if (num_pages == 0) { + fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOFS); + bio_set_dev(bio, inode->i_sb->s_bdev); + bio->bi_iter.bi_sector = + pblk << (blockbits - SECTOR_SHIFT); + bio_set_op_attrs(bio, REQ_OP_WRITE, 0); + } + ret = bio_add_page(bio, ZERO_PAGE(0), bytes_this_page, 0); + if (WARN_ON(ret != bytes_this_page)) { + err = -EIO; + goto out; + } + num_pages++; + len -= blocks_this_page; + lblk += blocks_this_page; + pblk += blocks_this_page; + if (num_pages == BIO_MAX_PAGES || !len || + !fscrypt_mergeable_bio(bio, inode, lblk)) { + err = submit_bio_wait(bio); + if (err) + goto out; + bio_reset(bio); + num_pages = 0; + } + } +out: + bio_put(bio); + return err; +} + /** * fscrypt_zeroout_range() - zero out a range of blocks in an encrypted file * @inode: the file's inode @@ -75,6 +122,10 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk, if (len == 0) return 0; + if (fscrypt_inode_uses_inline_crypto(inode)) + return fscrypt_zeroout_range_inline_crypt(inode, lblk, pblk, + len); + BUILD_BUG_ON(ARRAY_SIZE(pages) > BIO_MAX_PAGES); nr_pages = min_t(unsigned int, ARRAY_SIZE(pages), (len + blocks_per_page - 1) >> blocks_per_page_bits); diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c index ed015cb66c7c..9212325763b0 100644 --- a/fs/crypto/crypto.c +++ b/fs/crypto/crypto.c @@ -84,7 +84,7 @@ void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num, WARN_ON_ONCE(lblk_num > U32_MAX); lblk_num = (u32)(ci->ci_hashed_ino + lblk_num); } else if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) { - memcpy(iv->nonce, ci->ci_nonce, FS_KEY_DERIVATION_NONCE_SIZE); + memcpy(iv->nonce, ci->ci_nonce, FSCRYPT_FILE_NONCE_SIZE); } iv->lblk_num = cpu_to_le64(lblk_num); } @@ -100,7 +100,7 @@ int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw, DECLARE_CRYPTO_WAIT(wait); struct scatterlist dst, src; struct fscrypt_info *ci = inode->i_crypt_info; - struct crypto_skcipher *tfm = ci->ci_ctfm; + struct crypto_skcipher *tfm = ci->ci_enc_key.tfm; int res = 0; if (WARN_ON_ONCE(len <= 0)) diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c index 83ca5f1e7934..011830f84d8d 100644 --- a/fs/crypto/fname.c +++ b/fs/crypto/fname.c @@ -61,30 +61,13 @@ struct fscrypt_nokey_name { */ #define FSCRYPT_NOKEY_NAME_MAX offsetofend(struct fscrypt_nokey_name, sha256) -static struct crypto_shash *sha256_hash_tfm; - -static int fscrypt_do_sha256(const u8 *data, unsigned int data_len, u8 *result) +static void fscrypt_do_sha256(const u8 *data, unsigned int data_len, u8 *result) { - struct crypto_shash *tfm = READ_ONCE(sha256_hash_tfm); - - if (unlikely(!tfm)) { - struct crypto_shash *prev_tfm; - - tfm = crypto_alloc_shash("sha256", 0, 0); - if (IS_ERR(tfm)) { - fscrypt_err(NULL, - "Error allocating SHA-256 transform: %ld", - PTR_ERR(tfm)); - return PTR_ERR(tfm); - } - prev_tfm = cmpxchg(&sha256_hash_tfm, NULL, tfm); - if (prev_tfm) { - crypto_free_shash(tfm); - tfm = prev_tfm; - } - } + struct sha256_state sctx; - return crypto_shash_tfm_digest(tfm, data, data_len, result); + sha256_init(&sctx); + sha256_update(&sctx, data, data_len); + sha256_final(&sctx, result); } static inline bool fscrypt_is_dot_dotdot(const struct qstr *str) @@ -115,7 +98,7 @@ int fscrypt_fname_encrypt(const struct inode *inode, const struct qstr *iname, struct skcipher_request *req = NULL; DECLARE_CRYPTO_WAIT(wait); const struct 
fscrypt_info *ci = inode->i_crypt_info; - struct crypto_skcipher *tfm = ci->ci_ctfm; + struct crypto_skcipher *tfm = ci->ci_enc_key.tfm; union fscrypt_iv iv; struct scatterlist sg; int res; @@ -171,7 +154,7 @@ static int fname_decrypt(const struct inode *inode, DECLARE_CRYPTO_WAIT(wait); struct scatterlist src_sg, dst_sg; const struct fscrypt_info *ci = inode->i_crypt_info; - struct crypto_skcipher *tfm = ci->ci_ctfm; + struct crypto_skcipher *tfm = ci->ci_enc_key.tfm; union fscrypt_iv iv; int res; @@ -349,7 +332,6 @@ int fscrypt_fname_disk_to_usr(const struct inode *inode, const struct qstr qname = FSTR_TO_QSTR(iname); struct fscrypt_nokey_name nokey_name; u32 size; /* size of the unencoded no-key name */ - int err; if (fscrypt_is_dot_dotdot(&qname)) { oname->name[0] = '.'; @@ -387,11 +369,9 @@ int fscrypt_fname_disk_to_usr(const struct inode *inode, } else { memcpy(nokey_name.bytes, iname->name, sizeof(nokey_name.bytes)); /* Compute strong hash of remaining part of name. */ - err = fscrypt_do_sha256(&iname->name[sizeof(nokey_name.bytes)], - iname->len - sizeof(nokey_name.bytes), - nokey_name.sha256); - if (err) - return err; + fscrypt_do_sha256(&iname->name[sizeof(nokey_name.bytes)], + iname->len - sizeof(nokey_name.bytes), + nokey_name.sha256); size = FSCRYPT_NOKEY_NAME_MAX; } oname->len = base64_encode((const u8 *)&nokey_name, size, oname->name); @@ -530,9 +510,8 @@ bool fscrypt_match_name(const struct fscrypt_name *fname, return false; if (memcmp(de_name, nokey_name->bytes, sizeof(nokey_name->bytes))) return false; - if (fscrypt_do_sha256(&de_name[sizeof(nokey_name->bytes)], - de_name_len - sizeof(nokey_name->bytes), sha256)) - return false; + fscrypt_do_sha256(&de_name[sizeof(nokey_name->bytes)], + de_name_len - sizeof(nokey_name->bytes), sha256); return !memcmp(sha256, nokey_name->sha256, sizeof(sha256)); } EXPORT_SYMBOL_GPL(fscrypt_match_name); diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h index eb7fcd2b7fb8..8117a61b6f55 100644 --- a/fs/crypto/fscrypt_private.h +++ b/fs/crypto/fscrypt_private.h @@ -14,12 +14,13 @@ #include <linux/fscrypt.h> #include <linux/siphash.h> #include <crypto/hash.h> +#include <linux/blk-crypto.h> #define CONST_STRLEN(str) (sizeof(str) - 1) -#define FS_KEY_DERIVATION_NONCE_SIZE 16 +#define FSCRYPT_FILE_NONCE_SIZE 16 -#define FSCRYPT_MIN_KEY_SIZE 16 +#define FSCRYPT_MIN_KEY_SIZE 16 #define FSCRYPT_CONTEXT_V1 1 #define FSCRYPT_CONTEXT_V2 2 @@ -30,7 +31,7 @@ struct fscrypt_context_v1 { u8 filenames_encryption_mode; u8 flags; u8 master_key_descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE]; - u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE]; + u8 nonce[FSCRYPT_FILE_NONCE_SIZE]; }; struct fscrypt_context_v2 { @@ -40,7 +41,7 @@ struct fscrypt_context_v2 { u8 flags; u8 __reserved[4]; u8 master_key_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]; - u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE]; + u8 nonce[FSCRYPT_FILE_NONCE_SIZE]; }; /* @@ -166,6 +167,20 @@ struct fscrypt_symlink_data { char encrypted_path[1]; } __packed; +/** + * struct fscrypt_prepared_key - a key prepared for actual encryption/decryption + * @tfm: crypto API transform object + * @blk_key: key for blk-crypto + * + * Normally only one of the fields will be non-NULL. 
+ */ +struct fscrypt_prepared_key { + struct crypto_skcipher *tfm; +#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT + struct fscrypt_blk_crypto_key *blk_key; +#endif +}; + /* * fscrypt_info - the "encryption key" for an inode * @@ -175,12 +190,20 @@ struct fscrypt_symlink_data { */ struct fscrypt_info { - /* The actual crypto transform used for encryption and decryption */ - struct crypto_skcipher *ci_ctfm; + /* The key in a form prepared for actual encryption/decryption */ + struct fscrypt_prepared_key ci_enc_key; - /* True if the key should be freed when this fscrypt_info is freed */ + /* True if ci_enc_key should be freed when this fscrypt_info is freed */ bool ci_owns_key; +#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT + /* + * True if this inode will use inline encryption (blk-crypto) instead of + * the traditional filesystem-layer encryption. + */ + bool ci_inlinecrypt; +#endif + /* * Encryption mode used for this inode. It corresponds to either the * contents or filenames encryption mode, depending on the inode type. @@ -205,7 +228,7 @@ struct fscrypt_info { /* * If non-NULL, then encryption is done using the master key directly - * and ci_ctfm will equal ci_direct_key->dk_ctfm. + * and ci_enc_key will equal ci_direct_key->dk_key. */ struct fscrypt_direct_key *ci_direct_key; @@ -221,7 +244,7 @@ struct fscrypt_info { union fscrypt_policy ci_policy; /* This inode's nonce, copied from the fscrypt_context */ - u8 ci_nonce[FS_KEY_DERIVATION_NONCE_SIZE]; + u8 ci_nonce[FSCRYPT_FILE_NONCE_SIZE]; /* Hashed inode number. Only set for IV_INO_LBLK_32 */ u32 ci_hashed_ino; @@ -257,9 +280,10 @@ union fscrypt_iv { __le64 lblk_num; /* per-file nonce; only set in DIRECT_KEY mode */ - u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE]; + u8 nonce[FSCRYPT_FILE_NONCE_SIZE]; }; u8 raw[FSCRYPT_MAX_IV_SIZE]; + __le64 dun[FSCRYPT_MAX_IV_SIZE / sizeof(__le64)]; }; void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num, @@ -288,13 +312,13 @@ int fscrypt_init_hkdf(struct fscrypt_hkdf *hkdf, const u8 *master_key, * outputs are unique and cryptographically isolated, i.e. knowledge of one * output doesn't reveal another. 
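/*
 * Editor's aside (illustrative, not part of this patch): the HKDF context byte
 * is what keeps the derived subkeys cryptographically isolated even when two
 * derivations use the same info string (the per-file nonce). The two calls
 * below are taken from later hunks in this series (per-file contents key vs.
 * dirhash key) and are wrapped in a hypothetical helper only to make the
 * "context + info" split concrete:
 */
static int example_derive_isolated_subkeys(struct fscrypt_master_key *mk,
					   struct fscrypt_info *ci,
					   u8 *derived_key)
{
	int err;

	err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf,
				  HKDF_CONTEXT_PER_FILE_ENC_KEY,
				  ci->ci_nonce, FSCRYPT_FILE_NONCE_SIZE,
				  derived_key, ci->ci_mode->keysize);
	if (err)
		return err;

	/* Same HKDF, same nonce, different context => unrelated key material. */
	return fscrypt_hkdf_expand(&mk->mk_secret.hkdf, HKDF_CONTEXT_DIRHASH_KEY,
				   ci->ci_nonce, FSCRYPT_FILE_NONCE_SIZE,
				   (u8 *)&ci->ci_dirhash_key,
				   sizeof(ci->ci_dirhash_key));
}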
*/ -#define HKDF_CONTEXT_KEY_IDENTIFIER 1 -#define HKDF_CONTEXT_PER_FILE_ENC_KEY 2 -#define HKDF_CONTEXT_DIRECT_KEY 3 -#define HKDF_CONTEXT_IV_INO_LBLK_64_KEY 4 -#define HKDF_CONTEXT_DIRHASH_KEY 5 -#define HKDF_CONTEXT_IV_INO_LBLK_32_KEY 6 -#define HKDF_CONTEXT_INODE_HASH_KEY 7 +#define HKDF_CONTEXT_KEY_IDENTIFIER 1 /* info=<empty> */ +#define HKDF_CONTEXT_PER_FILE_ENC_KEY 2 /* info=file_nonce */ +#define HKDF_CONTEXT_DIRECT_KEY 3 /* info=mode_num */ +#define HKDF_CONTEXT_IV_INO_LBLK_64_KEY 4 /* info=mode_num||fs_uuid */ +#define HKDF_CONTEXT_DIRHASH_KEY 5 /* info=file_nonce */ +#define HKDF_CONTEXT_IV_INO_LBLK_32_KEY 6 /* info=mode_num||fs_uuid */ +#define HKDF_CONTEXT_INODE_HASH_KEY 7 /* info=<empty> */ int fscrypt_hkdf_expand(const struct fscrypt_hkdf *hkdf, u8 context, const u8 *info, unsigned int infolen, @@ -302,6 +326,78 @@ int fscrypt_hkdf_expand(const struct fscrypt_hkdf *hkdf, u8 context, void fscrypt_destroy_hkdf(struct fscrypt_hkdf *hkdf); +/* inline_crypt.c */ +#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT +int fscrypt_select_encryption_impl(struct fscrypt_info *ci); + +static inline bool +fscrypt_using_inline_encryption(const struct fscrypt_info *ci) +{ + return ci->ci_inlinecrypt; +} + +int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key, + const u8 *raw_key, + const struct fscrypt_info *ci); + +void fscrypt_destroy_inline_crypt_key(struct fscrypt_prepared_key *prep_key); + +/* + * Check whether the crypto transform or blk-crypto key has been allocated in + * @prep_key, depending on which encryption implementation the file will use. + */ +static inline bool +fscrypt_is_key_prepared(struct fscrypt_prepared_key *prep_key, + const struct fscrypt_info *ci) +{ + /* + * The two smp_load_acquire()'s here pair with the smp_store_release()'s + * in fscrypt_prepare_inline_crypt_key() and fscrypt_prepare_key(). + * I.e., in some cases (namely, if this prep_key is a per-mode + * encryption key) another task can publish blk_key or tfm concurrently, + * executing a RELEASE barrier. We need to use smp_load_acquire() here + * to safely ACQUIRE the memory the other task published. + */ + if (fscrypt_using_inline_encryption(ci)) + return smp_load_acquire(&prep_key->blk_key) != NULL; + return smp_load_acquire(&prep_key->tfm) != NULL; +} + +#else /* CONFIG_FS_ENCRYPTION_INLINE_CRYPT */ + +static inline int fscrypt_select_encryption_impl(struct fscrypt_info *ci) +{ + return 0; +} + +static inline bool +fscrypt_using_inline_encryption(const struct fscrypt_info *ci) +{ + return false; +} + +static inline int +fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key, + const u8 *raw_key, + const struct fscrypt_info *ci) +{ + WARN_ON(1); + return -EOPNOTSUPP; +} + +static inline void +fscrypt_destroy_inline_crypt_key(struct fscrypt_prepared_key *prep_key) +{ +} + +static inline bool +fscrypt_is_key_prepared(struct fscrypt_prepared_key *prep_key, + const struct fscrypt_info *ci) +{ + return smp_load_acquire(&prep_key->tfm) != NULL; +} +#endif /* !CONFIG_FS_ENCRYPTION_INLINE_CRYPT */ + /* keyring.c */ /* @@ -395,9 +491,9 @@ struct fscrypt_master_key { * Per-mode encryption keys for the various types of encryption policies * that use them. Allocated and derived on-demand. 
*/ - struct crypto_skcipher *mk_direct_keys[__FSCRYPT_MODE_MAX + 1]; - struct crypto_skcipher *mk_iv_ino_lblk_64_keys[__FSCRYPT_MODE_MAX + 1]; - struct crypto_skcipher *mk_iv_ino_lblk_32_keys[__FSCRYPT_MODE_MAX + 1]; + struct fscrypt_prepared_key mk_direct_keys[__FSCRYPT_MODE_MAX + 1]; + struct fscrypt_prepared_key mk_iv_ino_lblk_64_keys[__FSCRYPT_MODE_MAX + 1]; + struct fscrypt_prepared_key mk_iv_ino_lblk_32_keys[__FSCRYPT_MODE_MAX + 1]; /* Hash key for inode numbers. Initialized only when needed. */ siphash_key_t mk_ino_hash_key; @@ -461,13 +557,15 @@ struct fscrypt_mode { int keysize; int ivsize; int logged_impl_name; + enum blk_crypto_mode_num blk_crypto_mode; }; extern struct fscrypt_mode fscrypt_modes[]; -struct crypto_skcipher *fscrypt_allocate_skcipher(struct fscrypt_mode *mode, - const u8 *raw_key, - const struct inode *inode); +int fscrypt_prepare_key(struct fscrypt_prepared_key *prep_key, + const u8 *raw_key, const struct fscrypt_info *ci); + +void fscrypt_destroy_prepared_key(struct fscrypt_prepared_key *prep_key); int fscrypt_set_per_file_enc_key(struct fscrypt_info *ci, const u8 *raw_key); diff --git a/fs/crypto/inline_crypt.c b/fs/crypto/inline_crypt.c new file mode 100644 index 000000000000..b6b8574caa13 --- /dev/null +++ b/fs/crypto/inline_crypt.c @@ -0,0 +1,367 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Inline encryption support for fscrypt + * + * Copyright 2019 Google LLC + */ + +/* + * With "inline encryption", the block layer handles the decryption/encryption + * as part of the bio, instead of the filesystem doing the crypto itself via + * crypto API. See Documentation/block/inline-encryption.rst. fscrypt still + * provides the key and IV to use. + */ + +#include <linux/blk-crypto.h> +#include <linux/blkdev.h> +#include <linux/buffer_head.h> +#include <linux/sched/mm.h> + +#include "fscrypt_private.h" + +struct fscrypt_blk_crypto_key { + struct blk_crypto_key base; + int num_devs; + struct request_queue *devs[]; +}; + +static int fscrypt_get_num_devices(struct super_block *sb) +{ + if (sb->s_cop->get_num_devices) + return sb->s_cop->get_num_devices(sb); + return 1; +} + +static void fscrypt_get_devices(struct super_block *sb, int num_devs, + struct request_queue **devs) +{ + if (num_devs == 1) + devs[0] = bdev_get_queue(sb->s_bdev); + else + sb->s_cop->get_devices(sb, devs); +} + +static unsigned int fscrypt_get_dun_bytes(const struct fscrypt_info *ci) +{ + struct super_block *sb = ci->ci_inode->i_sb; + unsigned int flags = fscrypt_policy_flags(&ci->ci_policy); + int ino_bits = 64, lblk_bits = 64; + + if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) + return offsetofend(union fscrypt_iv, nonce); + + if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) + return sizeof(__le64); + + if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) + return sizeof(__le32); + + /* Default case: IVs are just the file logical block number */ + if (sb->s_cop->get_ino_and_lblk_bits) + sb->s_cop->get_ino_and_lblk_bits(sb, &ino_bits, &lblk_bits); + return DIV_ROUND_UP(lblk_bits, 8); +} + +/* Enable inline encryption for this file if supported. 
*/ +int fscrypt_select_encryption_impl(struct fscrypt_info *ci) +{ + const struct inode *inode = ci->ci_inode; + struct super_block *sb = inode->i_sb; + struct blk_crypto_config crypto_cfg; + int num_devs; + struct request_queue **devs; + int i; + + /* The file must need contents encryption, not filenames encryption */ + if (!fscrypt_needs_contents_encryption(inode)) + return 0; + + /* The crypto mode must have a blk-crypto counterpart */ + if (ci->ci_mode->blk_crypto_mode == BLK_ENCRYPTION_MODE_INVALID) + return 0; + + /* The filesystem must be mounted with -o inlinecrypt */ + if (!(sb->s_flags & SB_INLINECRYPT)) + return 0; + + /* + * When a page contains multiple logically contiguous filesystem blocks, + * some filesystem code only calls fscrypt_mergeable_bio() for the first + * block in the page. This is fine for most of fscrypt's IV generation + * strategies, where contiguous blocks imply contiguous IVs. But it + * doesn't work with IV_INO_LBLK_32. For now, simply exclude + * IV_INO_LBLK_32 with blocksize != PAGE_SIZE from inline encryption. + */ + if ((fscrypt_policy_flags(&ci->ci_policy) & + FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) && + sb->s_blocksize != PAGE_SIZE) + return 0; + + /* + * On all the filesystem's devices, blk-crypto must support the crypto + * configuration that the file would use. + */ + crypto_cfg.crypto_mode = ci->ci_mode->blk_crypto_mode; + crypto_cfg.data_unit_size = sb->s_blocksize; + crypto_cfg.dun_bytes = fscrypt_get_dun_bytes(ci); + num_devs = fscrypt_get_num_devices(sb); + devs = kmalloc_array(num_devs, sizeof(*devs), GFP_NOFS); + if (!devs) + return -ENOMEM; + fscrypt_get_devices(sb, num_devs, devs); + + for (i = 0; i < num_devs; i++) { + if (!blk_crypto_config_supported(devs[i], &crypto_cfg)) + goto out_free_devs; + } + + ci->ci_inlinecrypt = true; +out_free_devs: + kfree(devs); + + return 0; +} + +int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key, + const u8 *raw_key, + const struct fscrypt_info *ci) +{ + const struct inode *inode = ci->ci_inode; + struct super_block *sb = inode->i_sb; + enum blk_crypto_mode_num crypto_mode = ci->ci_mode->blk_crypto_mode; + int num_devs = fscrypt_get_num_devices(sb); + int queue_refs = 0; + struct fscrypt_blk_crypto_key *blk_key; + int err; + int i; + unsigned int flags; + + blk_key = kzalloc(struct_size(blk_key, devs, num_devs), GFP_NOFS); + if (!blk_key) + return -ENOMEM; + + blk_key->num_devs = num_devs; + fscrypt_get_devices(sb, num_devs, blk_key->devs); + + err = blk_crypto_init_key(&blk_key->base, raw_key, crypto_mode, + fscrypt_get_dun_bytes(ci), sb->s_blocksize); + if (err) { + fscrypt_err(inode, "error %d initializing blk-crypto key", err); + goto fail; + } + + /* + * We have to start using blk-crypto on all the filesystem's devices. + * We also have to save all the request_queue's for later so that the + * key can be evicted from them. This is needed because some keys + * aren't destroyed until after the filesystem was already unmounted + * (namely, the per-mode keys in struct fscrypt_master_key). + */ + for (i = 0; i < num_devs; i++) { + if (!blk_get_queue(blk_key->devs[i])) { + fscrypt_err(inode, "couldn't get request_queue"); + err = -EAGAIN; + goto fail; + } + queue_refs++; + + flags = memalloc_nofs_save(); + err = blk_crypto_start_using_key(&blk_key->base, + blk_key->devs[i]); + memalloc_nofs_restore(flags); + if (err) { + fscrypt_err(inode, + "error %d starting to use blk-crypto", err); + goto fail; + } + } + /* + * Pairs with the smp_load_acquire() in fscrypt_is_key_prepared(). 
+ * I.e., here we publish ->blk_key with a RELEASE barrier so that + * concurrent tasks can ACQUIRE it. Note that this concurrency is only + * possible for per-mode keys, not for per-file keys. + */ + smp_store_release(&prep_key->blk_key, blk_key); + return 0; + +fail: + for (i = 0; i < queue_refs; i++) + blk_put_queue(blk_key->devs[i]); + kzfree(blk_key); + return err; +} + +void fscrypt_destroy_inline_crypt_key(struct fscrypt_prepared_key *prep_key) +{ + struct fscrypt_blk_crypto_key *blk_key = prep_key->blk_key; + int i; + + if (blk_key) { + for (i = 0; i < blk_key->num_devs; i++) { + blk_crypto_evict_key(blk_key->devs[i], &blk_key->base); + blk_put_queue(blk_key->devs[i]); + } + kzfree(blk_key); + } +} + +bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode) +{ + return inode->i_crypt_info->ci_inlinecrypt; +} +EXPORT_SYMBOL_GPL(__fscrypt_inode_uses_inline_crypto); + +static void fscrypt_generate_dun(const struct fscrypt_info *ci, u64 lblk_num, + u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE]) +{ + union fscrypt_iv iv; + int i; + + fscrypt_generate_iv(&iv, lblk_num, ci); + + BUILD_BUG_ON(FSCRYPT_MAX_IV_SIZE > BLK_CRYPTO_MAX_IV_SIZE); + memset(dun, 0, BLK_CRYPTO_MAX_IV_SIZE); + for (i = 0; i < ci->ci_mode->ivsize/sizeof(dun[0]); i++) + dun[i] = le64_to_cpu(iv.dun[i]); +} + +/** + * fscrypt_set_bio_crypt_ctx() - prepare a file contents bio for inline crypto + * @bio: a bio which will eventually be submitted to the file + * @inode: the file's inode + * @first_lblk: the first file logical block number in the I/O + * @gfp_mask: memory allocation flags - these must be a waiting mask so that + * bio_crypt_set_ctx can't fail. + * + * If the contents of the file should be encrypted (or decrypted) with inline + * encryption, then assign the appropriate encryption context to the bio. + * + * Normally the bio should be newly allocated (i.e. no pages added yet), as + * otherwise fscrypt_mergeable_bio() won't work as intended. + * + * The encryption context will be freed automatically when the bio is freed. + */ +void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode, + u64 first_lblk, gfp_t gfp_mask) +{ + const struct fscrypt_info *ci; + u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE]; + + if (!fscrypt_inode_uses_inline_crypto(inode)) + return; + ci = inode->i_crypt_info; + + fscrypt_generate_dun(ci, first_lblk, dun); + bio_crypt_set_ctx(bio, &ci->ci_enc_key.blk_key->base, dun, gfp_mask); +} +EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx); + +/* Extract the inode and logical block number from a buffer_head. */ +static bool bh_get_inode_and_lblk_num(const struct buffer_head *bh, + const struct inode **inode_ret, + u64 *lblk_num_ret) +{ + struct page *page = bh->b_page; + const struct address_space *mapping; + const struct inode *inode; + + /* + * The ext4 journal (jbd2) can submit a buffer_head it directly created + * for a non-pagecache page. fscrypt doesn't care about these. 
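/*
 * Editor's worked example (illustrative, not part of this patch): the logical
 * block number computed just below combines the page index with the
 * buffer_head's offset inside the page. With 4K pages (PAGE_SHIFT == 12) and
 * 1K filesystem blocks (i_blkbits == 10) there are four blocks per page, so
 * the third block (offset 2048) of page index 3 is logical block
 * (3 << 2) + (2048 >> 10) == 14. A standalone sketch of the same arithmetic,
 * with a hypothetical helper name:
 */
static u64 example_lblk_num(u64 page_index, unsigned int offset_in_page,
			    unsigned int blkbits)
{
	return (page_index << (PAGE_SHIFT - blkbits)) +
	       (offset_in_page >> blkbits);
}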
+ */ + mapping = page_mapping(page); + if (!mapping) + return false; + inode = mapping->host; + + *inode_ret = inode; + *lblk_num_ret = ((u64)page->index << (PAGE_SHIFT - inode->i_blkbits)) + + (bh_offset(bh) >> inode->i_blkbits); + return true; +} + +/** + * fscrypt_set_bio_crypt_ctx_bh() - prepare a file contents bio for inline + * crypto + * @bio: a bio which will eventually be submitted to the file + * @first_bh: the first buffer_head for which I/O will be submitted + * @gfp_mask: memory allocation flags + * + * Same as fscrypt_set_bio_crypt_ctx(), except this takes a buffer_head instead + * of an inode and block number directly. + */ +void fscrypt_set_bio_crypt_ctx_bh(struct bio *bio, + const struct buffer_head *first_bh, + gfp_t gfp_mask) +{ + const struct inode *inode; + u64 first_lblk; + + if (bh_get_inode_and_lblk_num(first_bh, &inode, &first_lblk)) + fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, gfp_mask); +} +EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx_bh); + +/** + * fscrypt_mergeable_bio() - test whether data can be added to a bio + * @bio: the bio being built up + * @inode: the inode for the next part of the I/O + * @next_lblk: the next file logical block number in the I/O + * + * When building a bio which may contain data which should undergo inline + * encryption (or decryption) via fscrypt, filesystems should call this function + * to ensure that the resulting bio contains only contiguous data unit numbers. + * This will return false if the next part of the I/O cannot be merged with the + * bio because either the encryption key would be different or the encryption + * data unit numbers would be discontiguous. + * + * fscrypt_set_bio_crypt_ctx() must have already been called on the bio. + * + * Return: true iff the I/O is mergeable + */ +bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode, + u64 next_lblk) +{ + const struct bio_crypt_ctx *bc = bio->bi_crypt_context; + u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE]; + + if (!!bc != fscrypt_inode_uses_inline_crypto(inode)) + return false; + if (!bc) + return true; + + /* + * Comparing the key pointers is good enough, as all I/O for each key + * uses the same pointer. I.e., there's currently no need to support + * merging requests where the keys are the same but the pointers differ. + */ + if (bc->bc_key != &inode->i_crypt_info->ci_enc_key.blk_key->base) + return false; + + fscrypt_generate_dun(inode->i_crypt_info, next_lblk, next_dun); + return bio_crypt_dun_is_contiguous(bc, bio->bi_iter.bi_size, next_dun); +} +EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio); + +/** + * fscrypt_mergeable_bio_bh() - test whether data can be added to a bio + * @bio: the bio being built up + * @next_bh: the next buffer_head for which I/O will be submitted + * + * Same as fscrypt_mergeable_bio(), except this takes a buffer_head instead of + * an inode and block number directly. 
+ * + * Return: true iff the I/O is mergeable + */ +bool fscrypt_mergeable_bio_bh(struct bio *bio, + const struct buffer_head *next_bh) +{ + const struct inode *inode; + u64 next_lblk; + + if (!bh_get_inode_and_lblk_num(next_bh, &inode, &next_lblk)) + return !bio->bi_crypt_context; + + return fscrypt_mergeable_bio(bio, inode, next_lblk); +} +EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio_bh); diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c index e24eb48bfbe1..71d56f8e2870 100644 --- a/fs/crypto/keyring.c +++ b/fs/crypto/keyring.c @@ -45,9 +45,9 @@ static void free_master_key(struct fscrypt_master_key *mk) wipe_master_key_secret(&mk->mk_secret); for (i = 0; i <= __FSCRYPT_MODE_MAX; i++) { - crypto_free_skcipher(mk->mk_direct_keys[i]); - crypto_free_skcipher(mk->mk_iv_ino_lblk_64_keys[i]); - crypto_free_skcipher(mk->mk_iv_ino_lblk_32_keys[i]); + fscrypt_destroy_prepared_key(&mk->mk_direct_keys[i]); + fscrypt_destroy_prepared_key(&mk->mk_iv_ino_lblk_64_keys[i]); + fscrypt_destroy_prepared_key(&mk->mk_iv_ino_lblk_32_keys[i]); } key_put(mk->mk_users); @@ -213,7 +213,11 @@ static int allocate_filesystem_keyring(struct super_block *sb) if (IS_ERR(keyring)) return PTR_ERR(keyring); - /* Pairs with READ_ONCE() in fscrypt_find_master_key() */ + /* + * Pairs with the smp_load_acquire() in fscrypt_find_master_key(). + * I.e., here we publish ->s_master_keys with a RELEASE barrier so that + * concurrent tasks can ACQUIRE it. + */ smp_store_release(&sb->s_master_keys, keyring); return 0; } @@ -234,8 +238,13 @@ struct key *fscrypt_find_master_key(struct super_block *sb, struct key *keyring; char description[FSCRYPT_MK_DESCRIPTION_SIZE]; - /* pairs with smp_store_release() in allocate_filesystem_keyring() */ - keyring = READ_ONCE(sb->s_master_keys); + /* + * Pairs with the smp_store_release() in allocate_filesystem_keyring(). + * I.e., another task can publish ->s_master_keys concurrently, + * executing a RELEASE barrier. We need to use smp_load_acquire() here + * to safely ACQUIRE the memory the other task published. + */ + keyring = smp_load_acquire(&sb->s_master_keys); if (keyring == NULL) return ERR_PTR(-ENOKEY); /* No keyring yet, so no keys yet. 
*/ diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c index 1129adfa097d..fea6226afc2b 100644 --- a/fs/crypto/keysetup.c +++ b/fs/crypto/keysetup.c @@ -19,6 +19,7 @@ struct fscrypt_mode fscrypt_modes[] = { .cipher_str = "xts(aes)", .keysize = 64, .ivsize = 16, + .blk_crypto_mode = BLK_ENCRYPTION_MODE_AES_256_XTS, }, [FSCRYPT_MODE_AES_256_CTS] = { .friendly_name = "AES-256-CTS-CBC", @@ -31,6 +32,7 @@ struct fscrypt_mode fscrypt_modes[] = { .cipher_str = "essiv(cbc(aes),sha256)", .keysize = 16, .ivsize = 16, + .blk_crypto_mode = BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV, }, [FSCRYPT_MODE_AES_128_CTS] = { .friendly_name = "AES-128-CTS-CBC", @@ -43,6 +45,7 @@ struct fscrypt_mode fscrypt_modes[] = { .cipher_str = "adiantum(xchacha12,aes)", .keysize = 32, .ivsize = 32, + .blk_crypto_mode = BLK_ENCRYPTION_MODE_ADIANTUM, }, }; @@ -64,9 +67,9 @@ select_encryption_mode(const union fscrypt_policy *policy, } /* Create a symmetric cipher object for the given encryption mode and key */ -struct crypto_skcipher *fscrypt_allocate_skcipher(struct fscrypt_mode *mode, - const u8 *raw_key, - const struct inode *inode) +static struct crypto_skcipher * +fscrypt_allocate_skcipher(struct fscrypt_mode *mode, const u8 *raw_key, + const struct inode *inode) { struct crypto_skcipher *tfm; int err; @@ -109,30 +112,56 @@ err_free_tfm: return ERR_PTR(err); } -/* Given a per-file encryption key, set up the file's crypto transform object */ -int fscrypt_set_per_file_enc_key(struct fscrypt_info *ci, const u8 *raw_key) +/* + * Prepare the crypto transform object or blk-crypto key in @prep_key, given the + * raw key, encryption mode, and flag indicating which encryption implementation + * (fs-layer or blk-crypto) will be used. + */ +int fscrypt_prepare_key(struct fscrypt_prepared_key *prep_key, + const u8 *raw_key, const struct fscrypt_info *ci) { struct crypto_skcipher *tfm; + if (fscrypt_using_inline_encryption(ci)) + return fscrypt_prepare_inline_crypt_key(prep_key, raw_key, ci); + tfm = fscrypt_allocate_skcipher(ci->ci_mode, raw_key, ci->ci_inode); if (IS_ERR(tfm)) return PTR_ERR(tfm); + /* + * Pairs with the smp_load_acquire() in fscrypt_is_key_prepared(). + * I.e., here we publish ->tfm with a RELEASE barrier so that + * concurrent tasks can ACQUIRE it. Note that this concurrency is only + * possible for per-mode keys, not for per-file keys. + */ + smp_store_release(&prep_key->tfm, tfm); + return 0; +} - ci->ci_ctfm = tfm; +/* Destroy a crypto transform object and/or blk-crypto key. 
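/*
 * Editor's aside (illustrative, not part of this patch): the
 * smp_store_release()/smp_load_acquire() pairing used above for publishing
 * prep_key->tfm can be modelled with C11 release/acquire atomics. This is a
 * minimal userspace sketch of the same publish/consume idea, not kernel code;
 * the struct and function names are hypothetical:
 */
#include <stdatomic.h>
#include <stddef.h>

struct prepared_key {
	void *_Atomic tfm;	/* published only once fully initialized */
};

/* Publisher: finish initializing the object, then publish it with RELEASE. */
static void publish_key(struct prepared_key *key, void *tfm)
{
	atomic_store_explicit(&key->tfm, tfm, memory_order_release);
}

/* Consumer: ACQUIRE load; a non-NULL result implies the init is visible. */
static int key_is_prepared(struct prepared_key *key)
{
	return atomic_load_explicit(&key->tfm, memory_order_acquire) != NULL;
}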
*/ +void fscrypt_destroy_prepared_key(struct fscrypt_prepared_key *prep_key) +{ + crypto_free_skcipher(prep_key->tfm); + fscrypt_destroy_inline_crypt_key(prep_key); +} + +/* Given a per-file encryption key, set up the file's crypto transform object */ +int fscrypt_set_per_file_enc_key(struct fscrypt_info *ci, const u8 *raw_key) +{ ci->ci_owns_key = true; - return 0; + return fscrypt_prepare_key(&ci->ci_enc_key, raw_key, ci); } static int setup_per_mode_enc_key(struct fscrypt_info *ci, struct fscrypt_master_key *mk, - struct crypto_skcipher **tfms, + struct fscrypt_prepared_key *keys, u8 hkdf_context, bool include_fs_uuid) { const struct inode *inode = ci->ci_inode; const struct super_block *sb = inode->i_sb; struct fscrypt_mode *mode = ci->ci_mode; const u8 mode_num = mode - fscrypt_modes; - struct crypto_skcipher *tfm; + struct fscrypt_prepared_key *prep_key; u8 mode_key[FSCRYPT_MAX_KEY_SIZE]; u8 hkdf_info[sizeof(mode_num) + sizeof(sb->s_uuid)]; unsigned int hkdf_infolen = 0; @@ -141,16 +170,15 @@ static int setup_per_mode_enc_key(struct fscrypt_info *ci, if (WARN_ON(mode_num > __FSCRYPT_MODE_MAX)) return -EINVAL; - /* pairs with smp_store_release() below */ - tfm = READ_ONCE(tfms[mode_num]); - if (likely(tfm != NULL)) { - ci->ci_ctfm = tfm; + prep_key = &keys[mode_num]; + if (fscrypt_is_key_prepared(prep_key, ci)) { + ci->ci_enc_key = *prep_key; return 0; } mutex_lock(&fscrypt_mode_key_setup_mutex); - if (tfms[mode_num]) + if (fscrypt_is_key_prepared(prep_key, ci)) goto done_unlock; BUILD_BUG_ON(sizeof(mode_num) != 1); @@ -167,16 +195,12 @@ static int setup_per_mode_enc_key(struct fscrypt_info *ci, mode_key, mode->keysize); if (err) goto out_unlock; - tfm = fscrypt_allocate_skcipher(mode, mode_key, inode); + err = fscrypt_prepare_key(prep_key, mode_key, ci); memzero_explicit(mode_key, mode->keysize); - if (IS_ERR(tfm)) { - err = PTR_ERR(tfm); + if (err) goto out_unlock; - } - /* pairs with READ_ONCE() above */ - smp_store_release(&tfms[mode_num], tfm); done_unlock: - ci->ci_ctfm = tfm; + ci->ci_enc_key = *prep_key; err = 0; out_unlock: mutex_unlock(&fscrypt_mode_key_setup_mutex); @@ -189,7 +213,7 @@ int fscrypt_derive_dirhash_key(struct fscrypt_info *ci, int err; err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf, HKDF_CONTEXT_DIRHASH_KEY, - ci->ci_nonce, FS_KEY_DERIVATION_NONCE_SIZE, + ci->ci_nonce, FSCRYPT_FILE_NONCE_SIZE, (u8 *)&ci->ci_dirhash_key, sizeof(ci->ci_dirhash_key)); if (err) @@ -270,8 +294,7 @@ static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci, err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf, HKDF_CONTEXT_PER_FILE_ENC_KEY, - ci->ci_nonce, - FS_KEY_DERIVATION_NONCE_SIZE, + ci->ci_nonce, FSCRYPT_FILE_NONCE_SIZE, derived_key, ci->ci_mode->keysize); if (err) return err; @@ -310,6 +333,10 @@ static int setup_file_encryption_key(struct fscrypt_info *ci, struct fscrypt_key_specifier mk_spec; int err; + err = fscrypt_select_encryption_impl(ci); + if (err) + return err; + switch (ci->ci_policy.version) { case FSCRYPT_POLICY_V1: mk_spec.type = FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR; @@ -402,7 +429,7 @@ static void put_crypt_info(struct fscrypt_info *ci) if (ci->ci_direct_key) fscrypt_put_direct_key(ci->ci_direct_key); else if (ci->ci_owns_key) - crypto_free_skcipher(ci->ci_ctfm); + fscrypt_destroy_prepared_key(&ci->ci_enc_key); key = ci->ci_master_key; if (key) { @@ -472,7 +499,7 @@ int fscrypt_get_encryption_info(struct inode *inode) } memcpy(crypt_info->ci_nonce, fscrypt_context_nonce(&ctx), - FS_KEY_DERIVATION_NONCE_SIZE); + FSCRYPT_FILE_NONCE_SIZE); if 
(!fscrypt_supported_policy(&crypt_info->ci_policy, inode)) { res = -EINVAL; @@ -491,7 +518,17 @@ int fscrypt_get_encryption_info(struct inode *inode) if (res) goto out; + /* + * Multiple tasks may race to set ->i_crypt_info, so use + * cmpxchg_release(). This pairs with the smp_load_acquire() in + * fscrypt_get_info(). I.e., here we publish ->i_crypt_info with a + * RELEASE barrier so that other tasks can ACQUIRE it. + */ if (cmpxchg_release(&inode->i_crypt_info, NULL, crypt_info) == NULL) { + /* + * We won the race and set ->i_crypt_info to our crypt_info. + * Now link it into the master key's inode list. + */ if (master_key) { struct fscrypt_master_key *mk = master_key->payload.data[0]; @@ -562,7 +599,7 @@ EXPORT_SYMBOL(fscrypt_free_inode); */ int fscrypt_drop_inode(struct inode *inode) { - const struct fscrypt_info *ci = READ_ONCE(inode->i_crypt_info); + const struct fscrypt_info *ci = fscrypt_get_info(inode); const struct fscrypt_master_key *mk; /* diff --git a/fs/crypto/keysetup_v1.c b/fs/crypto/keysetup_v1.c index 801b48c0cd7f..e4e707fb1100 100644 --- a/fs/crypto/keysetup_v1.c +++ b/fs/crypto/keysetup_v1.c @@ -45,7 +45,7 @@ static DEFINE_SPINLOCK(fscrypt_direct_keys_lock); * key is longer, then only the first 'derived_keysize' bytes are used. */ static int derive_key_aes(const u8 *master_key, - const u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE], + const u8 nonce[FSCRYPT_FILE_NONCE_SIZE], u8 *derived_key, unsigned int derived_keysize) { int res = 0; @@ -68,7 +68,7 @@ static int derive_key_aes(const u8 *master_key, skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, crypto_req_done, &wait); - res = crypto_skcipher_setkey(tfm, nonce, FS_KEY_DERIVATION_NONCE_SIZE); + res = crypto_skcipher_setkey(tfm, nonce, FSCRYPT_FILE_NONCE_SIZE); if (res < 0) goto out; @@ -146,7 +146,7 @@ struct fscrypt_direct_key { struct hlist_node dk_node; refcount_t dk_refcount; const struct fscrypt_mode *dk_mode; - struct crypto_skcipher *dk_ctfm; + struct fscrypt_prepared_key dk_key; u8 dk_descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE]; u8 dk_raw[FSCRYPT_MAX_KEY_SIZE]; }; @@ -154,7 +154,7 @@ struct fscrypt_direct_key { static void free_direct_key(struct fscrypt_direct_key *dk) { if (dk) { - crypto_free_skcipher(dk->dk_ctfm); + fscrypt_destroy_prepared_key(&dk->dk_key); kzfree(dk); } } @@ -199,6 +199,8 @@ find_or_insert_direct_key(struct fscrypt_direct_key *to_insert, continue; if (ci->ci_mode != dk->dk_mode) continue; + if (!fscrypt_is_key_prepared(&dk->dk_key, ci)) + continue; if (crypto_memneq(raw_key, dk->dk_raw, ci->ci_mode->keysize)) continue; /* using existing tfm with same (descriptor, mode, raw_key) */ @@ -231,13 +233,9 @@ fscrypt_get_direct_key(const struct fscrypt_info *ci, const u8 *raw_key) return ERR_PTR(-ENOMEM); refcount_set(&dk->dk_refcount, 1); dk->dk_mode = ci->ci_mode; - dk->dk_ctfm = fscrypt_allocate_skcipher(ci->ci_mode, raw_key, - ci->ci_inode); - if (IS_ERR(dk->dk_ctfm)) { - err = PTR_ERR(dk->dk_ctfm); - dk->dk_ctfm = NULL; + err = fscrypt_prepare_key(&dk->dk_key, raw_key, ci); + if (err) goto err_free_dk; - } memcpy(dk->dk_descriptor, ci->ci_policy.v1.master_key_descriptor, FSCRYPT_KEY_DESCRIPTOR_SIZE); memcpy(dk->dk_raw, raw_key, ci->ci_mode->keysize); @@ -259,7 +257,7 @@ static int setup_v1_file_key_direct(struct fscrypt_info *ci, if (IS_ERR(dk)) return PTR_ERR(dk); ci->ci_direct_key = dk; - ci->ci_ctfm = dk->dk_ctfm; + ci->ci_enc_key = dk->dk_key; return 0; } diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c index d23ff162c78b..2d73fd39ad96 100644 --- 
a/fs/crypto/policy.c +++ b/fs/crypto/policy.c @@ -78,6 +78,20 @@ static bool supported_iv_ino_lblk_policy(const struct fscrypt_policy_v2 *policy, int ino_bits = 64, lblk_bits = 64; /* + * IV_INO_LBLK_* exist only because of hardware limitations, and + * currently the only known use case for them involves AES-256-XTS. + * That's also all we test currently. For these reasons, for now only + * allow AES-256-XTS here. This can be relaxed later if a use case for + * IV_INO_LBLK_* with other encryption modes arises. + */ + if (policy->contents_encryption_mode != FSCRYPT_MODE_AES_256_XTS) { + fscrypt_warn(inode, + "Can't use %s policy with contents mode other than AES-256-XTS", + type); + return false; + } + + /* * It's unsafe to include inode numbers in the IVs if the filesystem can * potentially renumber inodes, e.g. via filesystem shrinking. */ @@ -338,7 +352,7 @@ static int fscrypt_get_policy(struct inode *inode, union fscrypt_policy *policy) union fscrypt_context ctx; int ret; - ci = READ_ONCE(inode->i_crypt_info); + ci = fscrypt_get_info(inode); if (ci) { /* key available, use the cached policy */ *policy = ci->ci_policy; @@ -529,7 +543,7 @@ int fscrypt_ioctl_get_nonce(struct file *filp, void __user *arg) if (!fscrypt_context_is_valid(&ctx, ret)) return -EINVAL; if (copy_to_user(arg, fscrypt_context_nonce(&ctx), - FS_KEY_DERIVATION_NONCE_SIZE)) + FSCRYPT_FILE_NONCE_SIZE)) return -EFAULT; return 0; } @@ -627,7 +641,7 @@ int fscrypt_inherit_context(struct inode *parent, struct inode *child, if (res < 0) return res; - ci = READ_ONCE(parent->i_crypt_info); + ci = fscrypt_get_info(parent); if (ci == NULL) return -ENOKEY; diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c index 12c66f5d92dd..28bb5689333a 100644 --- a/fs/efivarfs/super.c +++ b/fs/efivarfs/super.c @@ -201,6 +201,9 @@ static int efivarfs_fill_super(struct super_block *sb, struct fs_context *fc) sb->s_d_op = &efivarfs_d_ops; sb->s_time_gran = 1; + if (!efivar_supports_writes()) + sb->s_flags |= SB_RDONLY; + inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0, true); if (!inode) return -ENOMEM; @@ -252,9 +255,6 @@ static struct file_system_type efivarfs_type = { static __init int efivarfs_init(void) { - if (!efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES)) - return -ENODEV; - if (!efivars_kobject()) return -ENODEV; diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 10dd470876b3..44bad4bb8831 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -1096,7 +1096,7 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len, } if (unlikely(err)) { page_zero_new_buffers(page, from, to); - } else if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)) { + } else if (fscrypt_inode_uses_fs_layer_crypto(inode)) { for (i = 0; i < nr_wait; i++) { int err2; @@ -3737,7 +3737,7 @@ static int __ext4_block_zero_page_range(handle_t *handle, /* Uhhuh. Read error. Complain and punt. */ if (!buffer_uptodate(bh)) goto unlock; - if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode)) { + if (fscrypt_inode_uses_fs_layer_crypto(inode)) { /* We expect the key to be set. */ BUG_ON(!fscrypt_has_encryption_key(inode)); err = fscrypt_decrypt_pagecache_blocks(page, blocksize, diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index de6fe969f773..defd2e10dfd1 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c @@ -402,6 +402,7 @@ static void io_submit_init_bio(struct ext4_io_submit *io, * __GFP_DIRECT_RECLAIM is set, see comments for bio_alloc_bioset(). 
*/ bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES); + fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO); bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); bio_set_dev(bio, bh->b_bdev); bio->bi_end_io = ext4_end_bio; @@ -418,7 +419,8 @@ static void io_submit_add_bh(struct ext4_io_submit *io, { int ret; - if (io->io_bio && bh->b_blocknr != io->io_next_block) { + if (io->io_bio && (bh->b_blocknr != io->io_next_block || + !fscrypt_mergeable_bio_bh(io->io_bio, bh))) { submit_and_retry: ext4_io_submit(io); } @@ -506,7 +508,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io, * (e.g. holes) to be unnecessarily encrypted, but this is rare and * can't happen in the common case of blocksize == PAGE_SIZE. */ - if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode) && nr_to_submit) { + if (fscrypt_inode_uses_fs_layer_crypto(inode) && nr_to_submit) { gfp_t gfp_flags = GFP_NOFS; unsigned int enc_bytes = round_up(len, i_blocksize(inode)); diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c index 5761e9961682..f2df2db0786c 100644 --- a/fs/ext4/readpage.c +++ b/fs/ext4/readpage.c @@ -195,7 +195,7 @@ static void ext4_set_bio_post_read_ctx(struct bio *bio, { unsigned int post_read_steps = 0; - if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)) + if (fscrypt_inode_uses_fs_layer_crypto(inode)) post_read_steps |= 1 << STEP_DECRYPT; if (ext4_need_verity(inode, first_idx)) @@ -230,6 +230,7 @@ int ext4_mpage_readpages(struct inode *inode, const unsigned blkbits = inode->i_blkbits; const unsigned blocks_per_page = PAGE_SIZE >> blkbits; const unsigned blocksize = 1 << blkbits; + sector_t next_block; sector_t block_in_file; sector_t last_block; sector_t last_block_in_file; @@ -258,7 +259,8 @@ int ext4_mpage_readpages(struct inode *inode, if (page_has_buffers(page)) goto confused; - block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits); + block_in_file = next_block = + (sector_t)page->index << (PAGE_SHIFT - blkbits); last_block = block_in_file + nr_pages * blocks_per_page; last_block_in_file = (ext4_readpage_limit(inode) + blocksize - 1) >> blkbits; @@ -358,7 +360,8 @@ int ext4_mpage_readpages(struct inode *inode, * This page will go to BIO. Do we need to send this * BIO off first? 
*/ - if (bio && (last_block_in_bio != blocks[0] - 1)) { + if (bio && (last_block_in_bio != blocks[0] - 1 || + !fscrypt_mergeable_bio(bio, inode, next_block))) { submit_and_realloc: submit_bio(bio); bio = NULL; @@ -370,6 +373,8 @@ int ext4_mpage_readpages(struct inode *inode, */ bio = bio_alloc(GFP_KERNEL, min_t(int, nr_pages, BIO_MAX_PAGES)); + fscrypt_set_bio_crypt_ctx(bio, inode, next_block, + GFP_KERNEL); ext4_set_bio_post_read_ctx(bio, inode, page->index); bio_set_dev(bio, bdev); bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 330957ed1f05..0907f907c47d 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1508,6 +1508,7 @@ enum { Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit, Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback, Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption, + Opt_inlinecrypt, Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota, Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota, Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err, @@ -1610,6 +1611,7 @@ static const match_table_t tokens = { {Opt_max_dir_size_kb, "max_dir_size_kb=%u"}, {Opt_test_dummy_encryption, "test_dummy_encryption=%s"}, {Opt_test_dummy_encryption, "test_dummy_encryption"}, + {Opt_inlinecrypt, "inlinecrypt"}, {Opt_nombcache, "nombcache"}, {Opt_nombcache, "no_mbcache"}, /* for backward compatibility */ {Opt_removed, "check=none"}, /* mount option from ext2/3 */ @@ -1946,6 +1948,13 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token, case Opt_nolazytime: sb->s_flags &= ~SB_LAZYTIME; return 1; + case Opt_inlinecrypt: +#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT + sb->s_flags |= SB_INLINECRYPT; +#else + ext4_msg(sb, KERN_ERR, "inline encryption not supported"); +#endif + return 1; } for (m = ext4_mount_opts; m->token != Opt_err; m++) @@ -2404,6 +2413,9 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb, fscrypt_show_test_dummy_encryption(seq, sep, sb); + if (sb->s_flags & SB_INLINECRYPT) + SEQ_OPTS_PUTS("inlinecrypt"); + if (test_opt(sb, DAX_ALWAYS)) { if (IS_EXT2_SB(sb)) SEQ_OPTS_PUTS("dax"); diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c index 1e02a8c106b0..29e50fbe7eca 100644 --- a/fs/f2fs/compress.c +++ b/fs/f2fs/compress.c @@ -1086,7 +1086,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc, .submitted = false, .io_type = io_type, .io_wbc = wbc, - .encrypted = f2fs_encrypted_file(cc->inode), + .encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode), }; struct dnode_of_data dn; struct node_info ni; diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 326c63879ddc..b9642607c07d 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -14,6 +14,7 @@ #include <linux/pagevec.h> #include <linux/blkdev.h> #include <linux/bio.h> +#include <linux/blk-crypto.h> #include <linux/swap.h> #include <linux/prefetch.h> #include <linux/uio.h> @@ -459,6 +460,33 @@ static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages) return bio; } +static void f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode, + pgoff_t first_idx, + const struct f2fs_io_info *fio, + gfp_t gfp_mask) +{ + /* + * The f2fs garbage collector sets ->encrypted_page when it wants to + * read/write raw data without encryption. 
+ */ + if (!fio || !fio->encrypted_page) + fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask); +} + +static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode, + pgoff_t next_idx, + const struct f2fs_io_info *fio) +{ + /* + * The f2fs garbage collector sets ->encrypted_page when it wants to + * read/write raw data without encryption. + */ + if (fio && fio->encrypted_page) + return !bio_has_crypt_ctx(bio); + + return fscrypt_mergeable_bio(bio, inode, next_idx); +} + static inline void __submit_bio(struct f2fs_sb_info *sbi, struct bio *bio, enum page_type type) { @@ -684,6 +712,9 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio) /* Allocate a new bio */ bio = __bio_alloc(fio, 1); + f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host, + fio->page->index, fio, GFP_NOIO); + if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { bio_put(bio); return -EFAULT; @@ -763,9 +794,10 @@ static void del_bio_entry(struct bio_entry *be) kmem_cache_free(bio_entry_slab, be); } -static int add_ipu_page(struct f2fs_sb_info *sbi, struct bio **bio, +static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio, struct page *page) { + struct f2fs_sb_info *sbi = fio->sbi; enum temp_type temp; bool found = false; int ret = -EAGAIN; @@ -782,13 +814,19 @@ static int add_ipu_page(struct f2fs_sb_info *sbi, struct bio **bio, found = true; - if (bio_add_page(*bio, page, PAGE_SIZE, 0) == - PAGE_SIZE) { + f2fs_bug_on(sbi, !page_is_mergeable(sbi, *bio, + *fio->last_block, + fio->new_blkaddr)); + if (f2fs_crypt_mergeable_bio(*bio, + fio->page->mapping->host, + fio->page->index, fio) && + bio_add_page(*bio, page, PAGE_SIZE, 0) == + PAGE_SIZE) { ret = 0; break; } - /* bio is full */ + /* page can't be merged into bio; submit the bio */ del_bio_entry(be); __submit_bio(sbi, *bio, DATA); break; @@ -880,11 +918,13 @@ alloc_new: if (!bio) { bio = __bio_alloc(fio, BIO_MAX_PAGES); __attach_io_flag(fio); + f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host, + fio->page->index, fio, GFP_NOIO); bio_set_op_attrs(bio, fio->op, fio->op_flags); add_bio_entry(fio->sbi, bio, page, fio->temp); } else { - if (add_ipu_page(fio->sbi, &bio, page)) + if (add_ipu_page(fio, &bio, page)) goto alloc_new; } @@ -936,8 +976,11 @@ next: inc_page_count(sbi, WB_DATA_TYPE(bio_page)); - if (io->bio && !io_is_mergeable(sbi, io->bio, io, fio, - io->last_block_in_bio, fio->new_blkaddr)) + if (io->bio && + (!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio, + fio->new_blkaddr) || + !f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host, + bio_page->index, fio))) __submit_merged_bio(io); alloc_new: if (io->bio == NULL) { @@ -949,6 +992,8 @@ alloc_new: goto skip; } io->bio = __bio_alloc(fio, BIO_MAX_PAGES); + f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host, + bio_page->index, fio, GFP_NOIO); io->fio = *fio; } @@ -993,11 +1038,14 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr, for_write); if (!bio) return ERR_PTR(-ENOMEM); + + f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS); + f2fs_target_device(sbi, blkaddr, bio); bio->bi_end_io = f2fs_read_end_io; bio_set_op_attrs(bio, REQ_OP_READ, op_flag); - if (f2fs_encrypted_file(inode)) + if (fscrypt_inode_uses_fs_layer_crypto(inode)) post_read_steps |= 1 << STEP_DECRYPT; if (f2fs_compressed_file(inode)) post_read_steps |= 1 << STEP_DECOMPRESS_NOWQ; @@ -2073,8 +2121,9 @@ zero_out: * This page will go to BIO. Do we need to send this * BIO off first? 
*/ - if (bio && !page_is_mergeable(F2FS_I_SB(inode), bio, - *last_block_in_bio, block_nr)) { + if (bio && (!page_is_mergeable(F2FS_I_SB(inode), bio, + *last_block_in_bio, block_nr) || + !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) { submit_and_realloc: __submit_bio(F2FS_I_SB(inode), bio, DATA); bio = NULL; @@ -2204,8 +2253,9 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret, blkaddr = data_blkaddr(dn.inode, dn.node_page, dn.ofs_in_node + i + 1); - if (bio && !page_is_mergeable(sbi, bio, - *last_block_in_bio, blkaddr)) { + if (bio && (!page_is_mergeable(sbi, bio, + *last_block_in_bio, blkaddr) || + !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) { submit_and_realloc: __submit_bio(sbi, bio, DATA); bio = NULL; @@ -2421,6 +2471,9 @@ int f2fs_encrypt_one_page(struct f2fs_io_info *fio) /* wait for GCed page writeback via META_MAPPING */ f2fs_wait_on_block_writeback(inode, fio->old_blkaddr); + if (fscrypt_inode_uses_inline_crypto(inode)) + return 0; + retry_encrypt: fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page, PAGE_SIZE, 0, gfp_flags); @@ -2594,7 +2647,7 @@ got_it: f2fs_unlock_op(fio->sbi); err = f2fs_inplace_write_data(fio); if (err) { - if (f2fs_encrypted_file(inode)) + if (fscrypt_inode_uses_fs_layer_crypto(inode)) fscrypt_finalize_bounce_page(&fio->encrypted_page); if (PageWriteback(page)) end_page_writeback(page); diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 20e56b0fa46a..23c49c313fb6 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -138,6 +138,7 @@ enum { Opt_alloc, Opt_fsync, Opt_test_dummy_encryption, + Opt_inlinecrypt, Opt_checkpoint_disable, Opt_checkpoint_disable_cap, Opt_checkpoint_disable_cap_perc, @@ -204,6 +205,7 @@ static match_table_t f2fs_tokens = { {Opt_fsync, "fsync_mode=%s"}, {Opt_test_dummy_encryption, "test_dummy_encryption=%s"}, {Opt_test_dummy_encryption, "test_dummy_encryption"}, + {Opt_inlinecrypt, "inlinecrypt"}, {Opt_checkpoint_disable, "checkpoint=disable"}, {Opt_checkpoint_disable_cap, "checkpoint=disable:%u"}, {Opt_checkpoint_disable_cap_perc, "checkpoint=disable:%u%%"}, @@ -833,6 +835,13 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount) if (ret) return ret; break; + case Opt_inlinecrypt: +#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT + sb->s_flags |= SB_INLINECRYPT; +#else + f2fs_info(sbi, "inline encryption not supported"); +#endif + break; case Opt_checkpoint_disable_cap_perc: if (args->from && match_int(args, &arg)) return -EINVAL; @@ -1590,6 +1599,9 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root) fscrypt_show_test_dummy_encryption(seq, ',', sbi->sb); + if (sbi->sb->s_flags & SB_INLINECRYPT) + seq_puts(seq, ",inlinecrypt"); + if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT) seq_printf(seq, ",alloc_mode=%s", "default"); else if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE) @@ -1624,6 +1636,8 @@ static void default_options(struct f2fs_sb_info *sbi) F2FS_OPTION(sbi).compress_ext_cnt = 0; F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON; + sbi->sb->s_flags &= ~SB_INLINECRYPT; + set_opt(sbi, INLINE_XATTR); set_opt(sbi, INLINE_DATA); set_opt(sbi, INLINE_DENTRY); @@ -2470,6 +2484,25 @@ static void f2fs_get_ino_and_lblk_bits(struct super_block *sb, *lblk_bits_ret = 8 * sizeof(block_t); } +static int f2fs_get_num_devices(struct super_block *sb) +{ + struct f2fs_sb_info *sbi = F2FS_SB(sb); + + if (f2fs_is_multi_device(sbi)) + return sbi->s_ndevs; + return 1; +} + +static void f2fs_get_devices(struct super_block *sb, + struct 
request_queue **devs) +{ + struct f2fs_sb_info *sbi = F2FS_SB(sb); + int i; + + for (i = 0; i < sbi->s_ndevs; i++) + devs[i] = bdev_get_queue(FDEV(i).bdev); +} + static const struct fscrypt_operations f2fs_cryptops = { .key_prefix = "f2fs:", .get_context = f2fs_get_context, @@ -2479,6 +2512,8 @@ static const struct fscrypt_operations f2fs_cryptops = { .max_namelen = F2FS_NAME_LEN, .has_stable_inodes = f2fs_has_stable_inodes, .get_ino_and_lblk_bits = f2fs_get_ino_and_lblk_bits, + .get_num_devices = f2fs_get_num_devices, + .get_devices = f2fs_get_devices, }; #endif diff --git a/fs/io_uring.c b/fs/io_uring.c index 74bc4a04befa..493e5047e67c 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -605,6 +605,7 @@ enum { struct async_poll { struct io_poll_iocb poll; + struct io_poll_iocb *double_poll; struct io_wq_work work; }; @@ -4159,9 +4160,9 @@ static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll) return false; } -static void io_poll_remove_double(struct io_kiocb *req) +static void io_poll_remove_double(struct io_kiocb *req, void *data) { - struct io_poll_iocb *poll = (struct io_poll_iocb *) req->io; + struct io_poll_iocb *poll = data; lockdep_assert_held(&req->ctx->completion_lock); @@ -4181,7 +4182,7 @@ static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error) { struct io_ring_ctx *ctx = req->ctx; - io_poll_remove_double(req); + io_poll_remove_double(req, req->io); req->poll.done = true; io_cqring_fill_event(req, error ? error : mangle_poll(mask)); io_commit_cqring(ctx); @@ -4198,10 +4199,9 @@ static void io_poll_task_handler(struct io_kiocb *req, struct io_kiocb **nxt) hash_del(&req->hash_node); io_poll_complete(req, req->result, 0); - req->flags |= REQ_F_COMP_LOCKED; - io_put_req_find_next(req, nxt); spin_unlock_irq(&ctx->completion_lock); + io_put_req_find_next(req, nxt); io_cqring_ev_posted(ctx); } @@ -4224,21 +4224,21 @@ static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode, int sync, void *key) { struct io_kiocb *req = wait->private; - struct io_poll_iocb *poll = (struct io_poll_iocb *) req->io; + struct io_poll_iocb *poll = req->apoll->double_poll; __poll_t mask = key_to_poll(key); /* for instances that support it check for an event match first: */ if (mask && !(mask & poll->events)) return 0; - if (req->poll.head) { + if (poll && poll->head) { bool done; - spin_lock(&req->poll.head->lock); - done = list_empty(&req->poll.wait.entry); + spin_lock(&poll->head->lock); + done = list_empty(&poll->wait.entry); if (!done) - list_del_init(&req->poll.wait.entry); - spin_unlock(&req->poll.head->lock); + list_del_init(&poll->wait.entry); + spin_unlock(&poll->head->lock); if (!done) __io_async_wake(req, poll, mask, io_poll_task_func); } @@ -4258,7 +4258,8 @@ static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events, } static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt, - struct wait_queue_head *head) + struct wait_queue_head *head, + struct io_poll_iocb **poll_ptr) { struct io_kiocb *req = pt->req; @@ -4269,7 +4270,7 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt, */ if (unlikely(poll->head)) { /* already have a 2nd entry, fail a third attempt */ - if (req->io) { + if (*poll_ptr) { pt->error = -EINVAL; return; } @@ -4281,7 +4282,7 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt, io_init_poll_iocb(poll, req->poll.events, io_poll_double_wake); refcount_inc(&req->refs); poll->wait.private = req; - req->io = (void *) poll; + 
*poll_ptr = poll; } pt->error = 0; @@ -4293,8 +4294,9 @@ static void io_async_queue_proc(struct file *file, struct wait_queue_head *head, struct poll_table_struct *p) { struct io_poll_table *pt = container_of(p, struct io_poll_table, pt); + struct async_poll *apoll = pt->req->apoll; - __io_queue_proc(&pt->req->apoll->poll, pt, head); + __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll); } static void io_sq_thread_drop_mm(struct io_ring_ctx *ctx) @@ -4344,11 +4346,13 @@ static void io_async_task_func(struct callback_head *cb) } } + io_poll_remove_double(req, apoll->double_poll); spin_unlock_irq(&ctx->completion_lock); /* restore ->work in case we need to retry again */ if (req->flags & REQ_F_WORK_INITIALIZED) memcpy(&req->work, &apoll->work, sizeof(req->work)); + kfree(apoll->double_poll); kfree(apoll); if (!canceled) { @@ -4436,7 +4440,6 @@ static bool io_arm_poll_handler(struct io_kiocb *req) struct async_poll *apoll; struct io_poll_table ipt; __poll_t mask, ret; - bool had_io; if (!req->file || !file_can_poll(req->file)) return false; @@ -4448,11 +4451,11 @@ static bool io_arm_poll_handler(struct io_kiocb *req) apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC); if (unlikely(!apoll)) return false; + apoll->double_poll = NULL; req->flags |= REQ_F_POLLED; if (req->flags & REQ_F_WORK_INITIALIZED) memcpy(&apoll->work, &req->work, sizeof(req->work)); - had_io = req->io != NULL; io_get_req_task(req); req->apoll = apoll; @@ -4470,13 +4473,11 @@ static bool io_arm_poll_handler(struct io_kiocb *req) ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, io_async_wake); if (ret) { - ipt.error = 0; - /* only remove double add if we did it here */ - if (!had_io) - io_poll_remove_double(req); + io_poll_remove_double(req, apoll->double_poll); spin_unlock_irq(&ctx->completion_lock); if (req->flags & REQ_F_WORK_INITIALIZED) memcpy(&req->work, &apoll->work, sizeof(req->work)); + kfree(apoll->double_poll); kfree(apoll); return false; } @@ -4507,11 +4508,13 @@ static bool io_poll_remove_one(struct io_kiocb *req) bool do_complete; if (req->opcode == IORING_OP_POLL_ADD) { - io_poll_remove_double(req); + io_poll_remove_double(req, req->io); do_complete = __io_poll_remove_one(req, &req->poll); } else { struct async_poll *apoll = req->apoll; + io_poll_remove_double(req, apoll->double_poll); + /* non-poll requests have submit ref still */ do_complete = __io_poll_remove_one(req, &apoll->poll); if (do_complete) { @@ -4524,6 +4527,7 @@ static bool io_poll_remove_one(struct io_kiocb *req) if (req->flags & REQ_F_WORK_INITIALIZED) memcpy(&req->work, &apoll->work, sizeof(req->work)); + kfree(apoll->double_poll); kfree(apoll); } } @@ -4624,7 +4628,7 @@ static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head, { struct io_poll_table *pt = container_of(p, struct io_poll_table, pt); - __io_queue_proc(&pt->req->poll, pt, head); + __io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->io); } static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) @@ -4653,6 +4657,10 @@ static int io_poll_add(struct io_kiocb *req) struct io_poll_table ipt; __poll_t mask; + /* ->work is in union with hash_node and others */ + io_req_work_drop_env(req); + req->flags &= ~REQ_F_WORK_INITIALIZED; + INIT_HLIST_NODE(&req->hash_node); INIT_LIST_HEAD(&req->list); ipt.pt._qproc = io_poll_queue_proc; @@ -4732,7 +4740,9 @@ static int io_timeout_remove_prep(struct io_kiocb *req, { if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; - if (sqe->flags || sqe->ioprio 
|| sqe->buf_index || sqe->len) + if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) + return -EINVAL; + if (sqe->ioprio || sqe->buf_index || sqe->len) return -EINVAL; req->timeout.addr = READ_ONCE(sqe->addr); @@ -4910,8 +4920,9 @@ static int io_async_cancel_prep(struct io_kiocb *req, { if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; - if (sqe->flags || sqe->ioprio || sqe->off || sqe->len || - sqe->cancel_flags) + if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) + return -EINVAL; + if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags) return -EINVAL; req->cancel.addr = READ_ONCE(sqe->addr); @@ -4929,7 +4940,9 @@ static int io_async_cancel(struct io_kiocb *req) static int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { - if (sqe->flags || sqe->ioprio || sqe->rw_flags) + if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) + return -EINVAL; + if (sqe->ioprio || sqe->rw_flags) return -EINVAL; req->files_update.offset = READ_ONCE(sqe->off); @@ -5720,6 +5733,7 @@ fail_req: * Never try inline submit of IOSQE_ASYNC is set, go straight * to async execution. */ + io_req_init_async(req); req->work.flags |= IO_WQ_WORK_CONCURRENT; io_queue_async_work(req); } else { diff --git a/fs/locks.c b/fs/locks.c index 7df0f9fa66f4..938fe325bc54 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -1282,6 +1282,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request, if (!new_fl) goto out; locks_copy_lock(new_fl, request); + locks_move_blocks(new_fl, request); request = new_fl; new_fl = NULL; locks_insert_lock_ctx(request, &fl->fl_list); diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index cce2510b2cca..c9056316a0b3 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -507,6 +507,17 @@ find_any_file(struct nfs4_file *f) return ret; } +static struct nfsd_file *find_deleg_file(struct nfs4_file *f) +{ + struct nfsd_file *ret = NULL; + + spin_lock(&f->fi_lock); + if (f->fi_deleg_file) + ret = nfsd_file_get(f->fi_deleg_file); + spin_unlock(&f->fi_lock); + return ret; +} + static atomic_long_t num_delegations; unsigned long max_delegations; @@ -2444,6 +2455,8 @@ static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st) oo = ols->st_stateowner; nf = st->sc_file; file = find_any_file(nf); + if (!file) + return 0; seq_printf(s, "- "); nfs4_show_stateid(s, &st->sc_stateid); @@ -2481,6 +2494,8 @@ static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st) oo = ols->st_stateowner; nf = st->sc_file; file = find_any_file(nf); + if (!file) + return 0; seq_printf(s, "- "); nfs4_show_stateid(s, &st->sc_stateid); @@ -2513,7 +2528,9 @@ static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st) ds = delegstateid(st); nf = st->sc_file; - file = nf->fi_deleg_file; + file = find_deleg_file(nf); + if (!file) + return 0; seq_printf(s, "- "); nfs4_show_stateid(s, &st->sc_stateid); @@ -2529,6 +2546,7 @@ static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st) seq_printf(s, ", "); nfs4_show_fname(s, file); seq_printf(s, " }\n"); + nfsd_file_put(file); return 0; } diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c index 64f61330564a..76bb1c846845 100644 --- a/fs/squashfs/block.c +++ b/fs/squashfs/block.c @@ -175,7 +175,7 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length, /* Extract the length of the metadata block */ data = page_address(bvec->bv_page) + bvec->bv_offset; length = data[offset]; - if (offset <= bvec->bv_len - 1) { + if 
(offset < bvec->bv_len - 1) { length |= data[offset + 1] << 8; } else { if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all))) { diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 52de29000c7e..6e264dded46e 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -339,7 +339,6 @@ out: return ret; } -/* Should pair with userfaultfd_signal_pending() */ static inline long userfaultfd_get_blocking_state(unsigned int flags) { if (flags & FAULT_FLAG_INTERRUPTIBLE) @@ -351,18 +350,6 @@ static inline long userfaultfd_get_blocking_state(unsigned int flags) return TASK_UNINTERRUPTIBLE; } -/* Should pair with userfaultfd_get_blocking_state() */ -static inline bool userfaultfd_signal_pending(unsigned int flags) -{ - if (flags & FAULT_FLAG_INTERRUPTIBLE) - return signal_pending(current); - - if (flags & FAULT_FLAG_KILLABLE) - return fatal_signal_pending(current); - - return false; -} - /* * The locking rules involved in returning VM_FAULT_RETRY depending on * FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NOWAIT and @@ -516,33 +503,9 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason) vmf->flags, reason); mmap_read_unlock(mm); - if (likely(must_wait && !READ_ONCE(ctx->released) && - !userfaultfd_signal_pending(vmf->flags))) { + if (likely(must_wait && !READ_ONCE(ctx->released))) { wake_up_poll(&ctx->fd_wqh, EPOLLIN); schedule(); - ret |= VM_FAULT_MAJOR; - - /* - * False wakeups can orginate even from rwsem before - * up_read() however userfaults will wait either for a - * targeted wakeup on the specific uwq waitqueue from - * wake_userfault() or for signals or for uffd - * release. - */ - while (!READ_ONCE(uwq.waken)) { - /* - * This needs the full smp_store_mb() - * guarantee as the state write must be - * visible to other CPUs before reading - * uwq.waken from other CPUs. - */ - set_current_state(blocking_state); - if (READ_ONCE(uwq.waken) || - READ_ONCE(ctx->released) || - userfaultfd_signal_pending(vmf->flags)) - break; - schedule(); - } } __set_current_state(TASK_RUNNING); diff --git a/fs/verity/open.c b/fs/verity/open.c index d007db0c9304..bfe0280c14e4 100644 --- a/fs/verity/open.c +++ b/fs/verity/open.c @@ -221,11 +221,20 @@ out: void fsverity_set_info(struct inode *inode, struct fsverity_info *vi) { /* - * Multiple processes may race to set ->i_verity_info, so use cmpxchg. - * This pairs with the READ_ONCE() in fsverity_get_info(). + * Multiple tasks may race to set ->i_verity_info, so use + * cmpxchg_release(). This pairs with the smp_load_acquire() in + * fsverity_get_info(). I.e., here we publish ->i_verity_info with a + * RELEASE barrier so that other tasks can ACQUIRE it. */ - if (cmpxchg(&inode->i_verity_info, NULL, vi) != NULL) + if (cmpxchg_release(&inode->i_verity_info, NULL, vi) != NULL) { + /* Lost the race, so free the fsverity_info we allocated. */ fsverity_free_info(vi); + /* + * Afterwards, the caller may access ->i_verity_info directly, + * so make sure to ACQUIRE the winning fsverity_info. 
+ */ + (void)fsverity_get_info(inode); + } } void fsverity_free_info(struct fsverity_info *vi) diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c index 07bc42d62673..abfb17f88f9a 100644 --- a/fs/zonefs/super.c +++ b/fs/zonefs/super.c @@ -607,14 +607,14 @@ static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from) int nr_pages; ssize_t ret; - nr_pages = iov_iter_npages(from, BIO_MAX_PAGES); - if (!nr_pages) - return 0; - max = queue_max_zone_append_sectors(bdev_get_queue(bdev)); max = ALIGN_DOWN(max << SECTOR_SHIFT, inode->i_sb->s_blocksize); iov_iter_truncate(from, max); + nr_pages = iov_iter_npages(from, BIO_MAX_PAGES); + if (!nr_pages) + return 0; + bio = bio_alloc_bioset(GFP_NOFS, nr_pages, &fs_bio_set); if (!bio) return -ENOMEM; @@ -1119,7 +1119,7 @@ static int zonefs_create_zgroup(struct zonefs_zone_data *zd, char *file_name; struct dentry *dir; unsigned int n = 0; - int ret = -ENOMEM; + int ret; /* If the group is empty, there is nothing to do */ if (!zd->nr_zones[type]) @@ -1135,8 +1135,10 @@ static int zonefs_create_zgroup(struct zonefs_zone_data *zd, zgroup_name = "seq"; dir = zonefs_create_inode(sb->s_root, zgroup_name, NULL, type); - if (!dir) + if (!dir) { + ret = -ENOMEM; goto free; + } /* * The first zone contains the super block: skip it. @@ -1174,8 +1176,10 @@ static int zonefs_create_zgroup(struct zonefs_zone_data *zd, * Use the file number within its group as file name. */ snprintf(file_name, ZONEFS_NAME_MAX - 1, "%u", n); - if (!zonefs_create_inode(dir, file_name, zone, type)) + if (!zonefs_create_inode(dir, file_name, zone, type)) { + ret = -ENOMEM; goto free; + } n++; } diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h index b0b163b9efc6..bdcac69fa6bd 100644 --- a/include/acpi/actbl3.h +++ b/include/acpi/actbl3.h @@ -415,6 +415,13 @@ struct acpi_table_tpm2 { /* Platform-specific data follows */ }; +/* Optional trailer for revision 4 holding platform-specific data */ +struct acpi_tpm2_phy { + u8 start_method_specific[12]; + u32 log_area_minimum_length; + u64 log_area_start_address; +}; + /* Values for start_method above */ #define ACPI_TPM2_NOT_ALLOWED 0 diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index 8b1e020e9a03..30a3aab312e6 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -456,7 +456,7 @@ static inline void writesq(volatile void __iomem *addr, const void *buffer, #if !defined(inb) && !defined(_inb) #define _inb _inb -static inline u16 _inb(unsigned long addr) +static inline u8 _inb(unsigned long addr) { u8 val; @@ -482,7 +482,7 @@ static inline u16 _inw(unsigned long addr) #if !defined(inl) && !defined(_inl) #define _inl _inl -static inline u16 _inl(unsigned long addr) +static inline u32 _inl(unsigned long addr) { u32 val; diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index db600ef218d7..052e0f05a984 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -341,7 +341,8 @@ #define PAGE_ALIGNED_DATA(page_align) \ . = ALIGN(page_align); \ - *(.data..page_aligned) + *(.data..page_aligned) \ + . = ALIGN(page_align); #define READ_MOSTLY_DATA(align) \ . = ALIGN(align); \ @@ -737,7 +738,9 @@ . = ALIGN(bss_align); \ .bss : AT(ADDR(.bss) - LOAD_OFFSET) { \ BSS_FIRST_SECTIONS \ + . = ALIGN(PAGE_SIZE); \ *(.bss..page_aligned) \ + . 
= ALIGN(PAGE_SIZE); \ *(.dynbss) \ *(BSS_MAIN) \ *(COMMON) \ diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h index 2b4d2b06ccbd..fcde59c65a81 100644 --- a/include/crypto/acompress.h +++ b/include/crypto/acompress.h @@ -106,6 +106,24 @@ struct acomp_alg { */ struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type, u32 mask); +/** + * crypto_alloc_acomp_node() -- allocate ACOMPRESS tfm handle with desired NUMA node + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * compression algorithm e.g. "deflate" + * @type: specifies the type of the algorithm + * @mask: specifies the mask for the algorithm + * @node: specifies the NUMA node the ZIP hardware belongs to + * + * Allocate a handle for a compression algorithm. Drivers should try to use + * (de)compressors on the specified NUMA node. + * The returned struct crypto_acomp is the handle that is required for any + * subsequent API invocation for the compression operations. + * + * Return: allocated handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ +struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type, + u32 mask, int node); static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm) { diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 00a9cf98debe..143d884d65c7 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -116,7 +116,7 @@ struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, void *crypto_spawn_tfm2(struct crypto_spawn *spawn); struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb); -int crypto_check_attr_type(struct rtattr **tb, u32 type); +int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret); const char *crypto_attr_alg_name(struct rtattr *rta); int crypto_attr_u32(struct rtattr *rta, u32 *num); int crypto_inst_setname(struct crypto_instance *inst, const char *name, @@ -235,18 +235,29 @@ static inline struct crypto_async_request *crypto_get_backlog( container_of(queue->backlog, struct crypto_async_request, list); } -static inline int crypto_requires_off(u32 type, u32 mask, u32 off) +static inline u32 crypto_requires_off(struct crypto_attr_type *algt, u32 off) { - return (type ^ off) & mask & off; + return (algt->type ^ off) & algt->mask & off; } /* - * Returns CRYPTO_ALG_ASYNC if type/mask requires the use of sync algorithms. - * Otherwise returns zero. + * When an algorithm uses another algorithm (e.g., if it's an instance of a + * template), these are the flags that should always be set on the "outer" + * algorithm if any "inner" algorithm has them set. */ -static inline int crypto_requires_sync(u32 type, u32 mask) +#define CRYPTO_ALG_INHERITED_FLAGS \ + (CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK | \ + CRYPTO_ALG_ALLOCATES_MEMORY) + +/* + * Given the type and mask that specify the flags restrictions on a template + * instance being created, return the mask that should be passed to + * crypto_grab_*() (along with type=0) to honor any request the user made to + * have any of the CRYPTO_ALG_INHERITED_FLAGS clear. 
+ */ +static inline u32 crypto_algt_inherited_mask(struct crypto_attr_type *algt) { - return crypto_requires_off(type, mask, CRYPTO_ALG_ASYNC); + return crypto_requires_off(algt, CRYPTO_ALG_INHERITED_FLAGS); } noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size); diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h index 2676f4fbd4c1..3a1c72fdb7cf 100644 --- a/include/crypto/chacha.h +++ b/include/crypto/chacha.h @@ -25,11 +25,7 @@ #define CHACHA_BLOCK_SIZE 64 #define CHACHAPOLY_IV_SIZE 12 -#ifdef CONFIG_X86_64 -#define CHACHA_STATE_WORDS ((CHACHA_BLOCK_SIZE + 12) / sizeof(u32)) -#else #define CHACHA_STATE_WORDS (CHACHA_BLOCK_SIZE / sizeof(u32)) -#endif /* 192-bit nonce, then 64-bit stream position */ #define XCHACHA_IV_SIZE 32 diff --git a/include/crypto/chacha20poly1305.h b/include/crypto/chacha20poly1305.h index 234ee28078ef..d2ac3ff7dc1e 100644 --- a/include/crypto/chacha20poly1305.h +++ b/include/crypto/chacha20poly1305.h @@ -45,4 +45,6 @@ bool chacha20poly1305_decrypt_sg_inplace(struct scatterlist *src, size_t src_len const u64 nonce, const u8 key[CHACHA20POLY1305_KEY_SIZE]); +bool chacha20poly1305_selftest(void); + #endif /* __CHACHA20POLY1305_H */ diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 4829d2367eda..19ce91f2359f 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -687,7 +687,7 @@ static inline void ahash_request_set_crypt(struct ahash_request *req, * The message digest API is able to maintain state information for the * caller. * - * The synchronous message digest API can store user-related context in in its + * The synchronous message digest API can store user-related context in its * shash_desc request data structure. */ diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h index 088c1ded2714..ee6412314f8f 100644 --- a/include/crypto/if_alg.h +++ b/include/crypto/if_alg.h @@ -135,6 +135,7 @@ struct af_alg_async_req { * SG? * @enc: Cryptographic operation to be performed when * recvmsg is invoked. + * @init: True if metadata has been sent. * @len: Length of memory allocated for this data structure. 
*/ struct af_alg_ctx { @@ -151,6 +152,7 @@ struct af_alg_ctx { bool more; bool merge; bool enc; + bool init; unsigned int len; }; @@ -226,7 +228,7 @@ unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset); void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst, size_t dst_offset); void af_alg_wmem_wakeup(struct sock *sk); -int af_alg_wait_for_data(struct sock *sk, unsigned flags); +int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min); int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, unsigned int ivsize); ssize_t af_alg_sendpage(struct socket *sock, struct page *page, diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h index 229d37681a9d..7fd7126f593a 100644 --- a/include/crypto/internal/geniv.h +++ b/include/crypto/internal/geniv.h @@ -20,7 +20,7 @@ struct aead_geniv_ctx { }; struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, - struct rtattr **tb, u32 type, u32 mask); + struct rtattr **tb); int aead_init_geniv(struct crypto_aead *tfm); void aead_exit_geniv(struct crypto_aead *tfm); diff --git a/include/crypto/sha.h b/include/crypto/sha.h index 10753ff71d46..4ff3da816630 100644 --- a/include/crypto/sha.h +++ b/include/crypto/sha.h @@ -147,6 +147,7 @@ static inline void sha256_init(struct sha256_state *sctx) } void sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len); void sha256_final(struct sha256_state *sctx, u8 *out); +void sha256(const u8 *data, unsigned int len, u8 *out); static inline void sha224_init(struct sha256_state *sctx) { diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index 141e7690f9c3..5663f71198b3 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h @@ -18,7 +18,7 @@ * @iv: Initialisation Vector * @src: Source SG list * @dst: Destination SG list - * @base: Underlying async request request + * @base: Underlying async request * @__ctx: Start of private context data */ struct skcipher_request { diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h index 6c3ef49b46b3..e73dea5c7333 100644 --- a/include/drm/drm_mode_config.h +++ b/include/drm/drm_mode_config.h @@ -866,6 +866,18 @@ struct drm_mode_config { bool prefer_shadow_fbdev; /** + * @fbdev_use_iomem: + * + * Set to true if framebuffer reside in iomem. + * When set to true memcpy_toio() is used when copying the framebuffer in + * drm_fb_helper.drm_fb_helper_dirty_blit_real(). + * + * FIXME: This should be replaced with a per-mapping is_iomem + * flag (like ttm does), and then used everywhere in fbdev code. + */ + bool fbdev_use_iomem; + + /** * @quirk_addfb_prefer_xbgr_30bpp: * * Special hack for legacy ADDFB to keep nouveau userspace happy. Should diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 763863dbc079..ef90e07c9635 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -16,9 +16,8 @@ #include <linux/kernel.h> #include <linux/list.h> #include <linux/bug.h> +#include <linux/refcount.h> #include <linux/slab.h> -#include <linux/string.h> -#include <linux/uaccess.h> #include <linux/completion.h> /* @@ -61,8 +60,8 @@ #define CRYPTO_ALG_ASYNC 0x00000080 /* - * Set this bit if and only if the algorithm requires another algorithm of - * the same type to handle corner cases. + * Set if the algorithm (or an algorithm which it uses) requires another + * algorithm of the same type to handle corner cases. 
*/ #define CRYPTO_ALG_NEED_FALLBACK 0x00000100 @@ -102,6 +101,38 @@ #define CRYPTO_NOLOAD 0x00008000 /* + * The algorithm may allocate memory during request processing, i.e. during + * encryption, decryption, or hashing. Users can request an algorithm with this + * flag unset if they can't handle memory allocation failures. + * + * This flag is currently only implemented for algorithms of type "skcipher", + * "aead", "ahash", "shash", and "cipher". Algorithms of other types might not + * have this flag set even if they allocate memory. + * + * In some edge cases, algorithms can allocate memory regardless of this flag. + * To avoid these cases, users must obey the following usage constraints: + * skcipher: + * - The IV buffer and all scatterlist elements must be aligned to the + * algorithm's alignmask. + * - If the data were to be divided into chunks of size + * crypto_skcipher_walksize() (with any remainder going at the end), no + * chunk can cross a page boundary or a scatterlist element boundary. + * aead: + * - The IV buffer and all scatterlist elements must be aligned to the + * algorithm's alignmask. + * - The first scatterlist element must contain all the associated data, + * and its pages must be !PageHighMem. + * - If the plaintext/ciphertext were to be divided into chunks of size + * crypto_aead_walksize() (with the remainder going at the end), no chunk + * can cross a page boundary or a scatterlist element boundary. + * ahash: + * - The result buffer must be aligned to the algorithm's alignmask. + * - crypto_ahash_finup() must not be used unless the algorithm implements + * ->finup() natively. + */ +#define CRYPTO_ALG_ALLOCATES_MEMORY 0x00010000 + +/* * Transform masks and values (for crt_flags). */ #define CRYPTO_TFM_NEED_KEY 0x00000001 @@ -595,6 +626,8 @@ int crypto_has_alg(const char *name, u32 type, u32 mask); struct crypto_tfm { u32 crt_flags; + + int node; void (*exit)(struct crypto_tfm *tfm); diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 8750f2dc5613..73dec4b5d5be 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -426,6 +426,7 @@ const char *dm_device_name(struct mapped_device *md); int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid); struct gendisk *dm_disk(struct mapped_device *md); int dm_suspended(struct dm_target *ti); +int dm_post_suspending(struct dm_target *ti); int dm_noflush_suspending(struct dm_target *ti); void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors); union map_info *dm_get_rq_mapinfo(struct request *rq); diff --git a/include/linux/efi.h b/include/linux/efi.h index bb35f3305e55..05c47f857383 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -994,6 +994,7 @@ int efivars_register(struct efivars *efivars, int efivars_unregister(struct efivars *efivars); struct kobject *efivars_kobject(void); +int efivar_supports_writes(void); int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *), void *data, bool duplicates, struct list_head *head); diff --git a/include/linux/fs.h b/include/linux/fs.h index f5abba86107d..0a6394178360 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1381,6 +1381,7 @@ extern int send_sigurg(struct fown_struct *fown); #define SB_NODIRATIME 2048 /* Do not update directory access times */ #define SB_SILENT 32768 #define SB_POSIXACL (1<<16) /* VFS does not apply the umask */ +#define SB_INLINECRYPT (1<<17) /* Use blk-crypto for encrypted files */ #define SB_KERNMOUNT (1<<22) /* this is a 
kern_mount call */ #define SB_I_VERSION (1<<23) /* Update inode I_version field */ #define SB_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */ diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index 2862ca5fea33..991ff8575d0e 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -69,12 +69,20 @@ struct fscrypt_operations { bool (*has_stable_inodes)(struct super_block *sb); void (*get_ino_and_lblk_bits)(struct super_block *sb, int *ino_bits_ret, int *lblk_bits_ret); + int (*get_num_devices)(struct super_block *sb); + void (*get_devices)(struct super_block *sb, + struct request_queue **devs); }; -static inline bool fscrypt_has_encryption_key(const struct inode *inode) +static inline struct fscrypt_info *fscrypt_get_info(const struct inode *inode) { - /* pairs with cmpxchg_release() in fscrypt_get_encryption_info() */ - return READ_ONCE(inode->i_crypt_info) != NULL; + /* + * Pairs with the cmpxchg_release() in fscrypt_get_encryption_info(). + * I.e., another task may publish ->i_crypt_info concurrently, executing + * a RELEASE barrier. We need to use smp_load_acquire() here to safely + * ACQUIRE the memory the other task published. + */ + return smp_load_acquire(&inode->i_crypt_info); } /** @@ -231,9 +239,9 @@ static inline void fscrypt_set_ops(struct super_block *sb, } #else /* !CONFIG_FS_ENCRYPTION */ -static inline bool fscrypt_has_encryption_key(const struct inode *inode) +static inline struct fscrypt_info *fscrypt_get_info(const struct inode *inode) { - return false; + return NULL; } static inline bool fscrypt_needs_contents_encryption(const struct inode *inode) @@ -537,6 +545,99 @@ static inline void fscrypt_set_ops(struct super_block *sb, #endif /* !CONFIG_FS_ENCRYPTION */ +/* inline_crypt.c */ +#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT + +bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode); + +void fscrypt_set_bio_crypt_ctx(struct bio *bio, + const struct inode *inode, u64 first_lblk, + gfp_t gfp_mask); + +void fscrypt_set_bio_crypt_ctx_bh(struct bio *bio, + const struct buffer_head *first_bh, + gfp_t gfp_mask); + +bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode, + u64 next_lblk); + +bool fscrypt_mergeable_bio_bh(struct bio *bio, + const struct buffer_head *next_bh); + +#else /* CONFIG_FS_ENCRYPTION_INLINE_CRYPT */ + +static inline bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode) +{ + return false; +} + +static inline void fscrypt_set_bio_crypt_ctx(struct bio *bio, + const struct inode *inode, + u64 first_lblk, gfp_t gfp_mask) { } + +static inline void fscrypt_set_bio_crypt_ctx_bh( + struct bio *bio, + const struct buffer_head *first_bh, + gfp_t gfp_mask) { } + +static inline bool fscrypt_mergeable_bio(struct bio *bio, + const struct inode *inode, + u64 next_lblk) +{ + return true; +} + +static inline bool fscrypt_mergeable_bio_bh(struct bio *bio, + const struct buffer_head *next_bh) +{ + return true; +} +#endif /* !CONFIG_FS_ENCRYPTION_INLINE_CRYPT */ + +/** + * fscrypt_inode_uses_inline_crypto() - test whether an inode uses inline + * encryption + * @inode: an inode. If encrypted, its key must be set up. + * + * Return: true if the inode requires file contents encryption and if the + * encryption should be done in the block layer via blk-crypto rather + * than in the filesystem layer. 
+ */ +static inline bool fscrypt_inode_uses_inline_crypto(const struct inode *inode) +{ + return fscrypt_needs_contents_encryption(inode) && + __fscrypt_inode_uses_inline_crypto(inode); +} + +/** + * fscrypt_inode_uses_fs_layer_crypto() - test whether an inode uses fs-layer + * encryption + * @inode: an inode. If encrypted, its key must be set up. + * + * Return: true if the inode requires file contents encryption and if the + * encryption should be done in the filesystem layer rather than in the + * block layer via blk-crypto. + */ +static inline bool fscrypt_inode_uses_fs_layer_crypto(const struct inode *inode) +{ + return fscrypt_needs_contents_encryption(inode) && + !__fscrypt_inode_uses_inline_crypto(inode); +} + +/** + * fscrypt_has_encryption_key() - check whether an inode has had its key set up + * @inode: the inode to check + * + * Return: %true if the inode has had its encryption key set up, else %false. + * + * Usually this should be preceded by fscrypt_get_encryption_info() to try to + * set up the key first. + */ +static inline bool fscrypt_has_encryption_key(const struct inode *inode) +{ + return fscrypt_get_info(inode) != NULL; +} + /** * fscrypt_require_key() - require an inode's encryption key * @inode: the inode we need the key for diff --git a/include/linux/fsverity.h b/include/linux/fsverity.h index 78201a6d35f6..c1144a450392 100644 --- a/include/linux/fsverity.h +++ b/include/linux/fsverity.h @@ -115,8 +115,13 @@ struct fsverity_operations { static inline struct fsverity_info *fsverity_get_info(const struct inode *inode) { - /* pairs with the cmpxchg() in fsverity_set_info() */ - return READ_ONCE(inode->i_verity_info); + /* + * Pairs with the cmpxchg_release() in fsverity_set_info(). + * I.e., another task may publish ->i_verity_info concurrently, + * executing a RELEASE barrier. We need to use smp_load_acquire() here + * to safely ACQUIRE the memory the other task published. + */ + return smp_load_acquire(&inode->i_verity_info); } /* enable.c */ diff --git a/include/linux/i2c.h b/include/linux/i2c.h index b8b8963f8bb9..4e7714c88f95 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -56,7 +56,7 @@ struct property_entry; * on a bus (or read from them). Apart from two basic transfer functions to * transmit one message at a time, a more complex version can be used to * transmit an arbitrary number of messages without interruption. - * @count must be be less than 64k since msg.len is u16. + * @count must be less than 64k since msg.len is u16. */ int i2c_transfer_buffer_flags(const struct i2c_client *client, char *buf, int count, u16 flags); @@ -1001,7 +1001,7 @@ static inline u32 i2c_acpi_find_bus_speed(struct device *dev) static inline struct i2c_client *i2c_acpi_new_device(struct device *dev, int index, struct i2c_board_info *info) { - return NULL; + return ERR_PTR(-ENODEV); } static inline struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle) { diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h index 0beaa3eba155..c75e4d3d8833 100644 --- a/include/linux/io-mapping.h +++ b/include/linux/io-mapping.h @@ -107,9 +107,12 @@ io_mapping_init_wc(struct io_mapping *iomap, resource_size_t base, unsigned long size) { + iomap->iomem = ioremap_wc(base, size); + if (!iomap->iomem) + return NULL; + iomap->base = base; iomap->size = size; - iomap->iomem = ioremap_wc(base, size); #if defined(pgprot_noncached_wc) /* archs can't agree on a name ... 
*/ iomap->prot = pgprot_noncached_wc(PAGE_KERNEL); #elif defined(pgprot_writecombine) diff --git a/include/linux/list.h b/include/linux/list.h index aff44d34f4e4..0d0d17a10d25 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -283,6 +283,24 @@ static inline int list_empty(const struct list_head *head) } /** + * list_del_init_careful - deletes entry from list and reinitialize it. + * @entry: the element to delete from the list. + * + * This is the same as list_del_init(), except designed to be used + * together with list_empty_careful() in a way to guarantee ordering + * of other memory operations. + * + * Any memory operations done before a list_del_init_careful() are + * guaranteed to be visible after a list_empty_careful() test. + */ +static inline void list_del_init_careful(struct list_head *entry) +{ + __list_del_entry(entry); + entry->prev = entry; + smp_store_release(&entry->next, entry); +} + +/** * list_empty_careful - tests whether a list is empty and not being modified * @head: the list to test * @@ -297,7 +315,7 @@ static inline int list_empty(const struct list_head *head) */ static inline int list_empty_careful(const struct list_head *head) { - struct list_head *next = head->next; + struct list_head *next = smp_load_acquire(&head->next); return (next == head) && (next == head->prev); } diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 073b79eacc99..1340e02b14ef 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -4381,6 +4381,7 @@ struct mlx5_ifc_query_vport_state_out_bits { enum { MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT = 0x0, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT = 0x1, + MLX5_VPORT_STATE_OP_MOD_UPLINK = 0x2, }; struct mlx5_ifc_arm_monitor_counter_in_bits { diff --git a/include/linux/mpi.h b/include/linux/mpi.h index 7bd6d8af0004..5d906dfbf3ed 100644 --- a/include/linux/mpi.h +++ b/include/linux/mpi.h @@ -63,6 +63,9 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod); int mpi_cmp_ui(MPI u, ulong v); int mpi_cmp(MPI u, MPI v); +/*-- mpi-sub-ui.c --*/ +int mpi_sub_ui(MPI w, MPI u, unsigned long vval); + /*-- mpi-bit.c --*/ void mpi_normalize(MPI a); unsigned mpi_get_nbits(MPI a); diff --git a/include/linux/padata.h b/include/linux/padata.h index 7302efff5e65..a433f13fc4bf 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -67,17 +67,6 @@ struct padata_serial_queue { }; /** - * struct padata_parallel_queue - The percpu padata parallel queue - * - * @reorder: List to wait for reordering after parallel processing. - * @num_obj: Number of objects that are processed by this cpu. - */ -struct padata_parallel_queue { - struct padata_list reorder; - atomic_t num_obj; -}; - -/** * struct padata_cpumask - The cpumasks for the parallel/serial workers * * @pcpu: cpumask for the parallel workers. @@ -93,7 +82,7 @@ struct padata_cpumask { * that depends on the cpumask in use. * * @ps: padata_shell object. - * @pqueue: percpu padata queues used for parallelization. + * @reorder_list: percpu reorder lists * @squeue: percpu padata queues used for serialuzation. * @refcnt: Number of objects holding a reference on this parallel_data. * @seq_nr: Sequence number of the parallelized data object. 
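The list_del_init_careful()/list_empty_careful() pair added to include/linux/list.h above encodes a release/acquire handshake. A minimal sketch of the intended pairing follows; the waiter structure and the names waker_side/waiter_side/done are illustrative only (not part of the patch), and the entry is assumed to have been queued earlier, with only the waker removing it:

#include <linux/kernel.h>
#include <linux/list.h>

struct waiter {
	struct list_head entry;	/* linked on some wait list by the waiter */
	int done;		/* condition published by the waker */
};

/* Waker: store the condition, then unlink the entry with release semantics. */
static void waker_side(struct waiter *w)
{
	WRITE_ONCE(w->done, 1);			/* ordered before ... */
	list_del_init_careful(&w->entry);	/* ... the smp_store_release() on ->next */
}

/* Waiter: the smp_load_acquire() in list_empty_careful() pairs with the release above. */
static void waiter_side(struct waiter *w)
{
	if (list_empty_careful(&w->entry))	/* entry observed as removed? */
		WARN_ON(!READ_ONCE(w->done));	/* then the waker's earlier store is visible */
}

This mirrors the way autoremove_wake_function() and the page-bit wait code changed later in this series rely on the guarantee.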
@@ -105,7 +94,7 @@ struct padata_cpumask { */ struct parallel_data { struct padata_shell *ps; - struct padata_parallel_queue __percpu *pqueue; + struct padata_list __percpu *reorder_list; struct padata_serial_queue __percpu *squeue; atomic_t refcnt; unsigned int seq_nr; @@ -167,7 +156,6 @@ struct padata_mt_job { * @serial_wq: The workqueue used for serial work. * @pslist: List of padata_shell objects attached to this instance. * @cpumask: User supplied cpumasks for parallel and serial works. - * @rcpumask: Actual cpumasks based on user cpumask and cpu_online_mask. * @kobj: padata instance kernel object. * @lock: padata instance lock. * @flags: padata flags. @@ -179,7 +167,6 @@ struct padata_instance { struct workqueue_struct *serial_wq; struct list_head pslist; struct padata_cpumask cpumask; - struct padata_cpumask rcpumask; struct kobject kobj; struct mutex lock; u8 flags; @@ -194,7 +181,7 @@ extern void __init padata_init(void); static inline void __init padata_init(void) {} #endif -extern struct padata_instance *padata_alloc_possible(const char *name); +extern struct padata_instance *padata_alloc(const char *name); extern void padata_free(struct padata_instance *pinst); extern struct padata_shell *padata_alloc_shell(struct padata_instance *pinst); extern void padata_free_shell(struct padata_shell *ps); @@ -204,6 +191,4 @@ extern void padata_do_serial(struct padata_priv *padata); extern void __init padata_do_multithreaded(struct padata_mt_job *job); extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, cpumask_var_t cpumask); -extern int padata_start(struct padata_instance *pinst); -extern void padata_stop(struct padata_instance *pinst); #endif diff --git a/include/linux/random.h b/include/linux/random.h index 45e1f8fa742b..9ab7443bd91b 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -11,6 +11,7 @@ #include <linux/kernel.h> #include <linux/list.h> #include <linux/once.h> +#include <asm/percpu.h> #include <uapi/linux/random.h> @@ -119,6 +120,8 @@ struct rnd_state { __u32 s1, s2, s3, s4; }; +DECLARE_PER_CPU(struct rnd_state, net_rand_state); + u32 prandom_u32_state(struct rnd_state *state); void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes); void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state); diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 70ebef866cc8..68dab3e08aad 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -33,7 +33,7 @@ * of two or more hash tables when the rhashtable is being resized. * The end of the chain is marked with a special nulls marks which has * the least significant bit set but otherwise stores the address of - * the hash bucket. This allows us to be be sure we've found the end + * the hash bucket. This allows us to be sure we've found the end * of the right list. * The value stored in the hash bucket has BIT(0) used as a lock bit. 
* This bit must be atomically set before any changes are made to @@ -84,7 +84,7 @@ struct bucket_table { struct lockdep_map dep_map; - struct rhash_lock_head *buckets[] ____cacheline_aligned_in_smp; + struct rhash_lock_head __rcu *buckets[] ____cacheline_aligned_in_smp; }; /* @@ -261,13 +261,12 @@ void rhashtable_free_and_destroy(struct rhashtable *ht, void *arg); void rhashtable_destroy(struct rhashtable *ht); -struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl, - unsigned int hash); -struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl, - unsigned int hash); -struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht, - struct bucket_table *tbl, - unsigned int hash); +struct rhash_lock_head __rcu **rht_bucket_nested( + const struct bucket_table *tbl, unsigned int hash); +struct rhash_lock_head __rcu **__rht_bucket_nested( + const struct bucket_table *tbl, unsigned int hash); +struct rhash_lock_head __rcu **rht_bucket_nested_insert( + struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash); #define rht_dereference(p, ht) \ rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht)) @@ -284,21 +283,21 @@ struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht, #define rht_entry(tpos, pos, member) \ ({ tpos = container_of(pos, typeof(*tpos), member); 1; }) -static inline struct rhash_lock_head *const *rht_bucket( +static inline struct rhash_lock_head __rcu *const *rht_bucket( const struct bucket_table *tbl, unsigned int hash) { return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) : &tbl->buckets[hash]; } -static inline struct rhash_lock_head **rht_bucket_var( +static inline struct rhash_lock_head __rcu **rht_bucket_var( struct bucket_table *tbl, unsigned int hash) { return unlikely(tbl->nest) ? __rht_bucket_nested(tbl, hash) : &tbl->buckets[hash]; } -static inline struct rhash_lock_head **rht_bucket_insert( +static inline struct rhash_lock_head __rcu **rht_bucket_insert( struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash) { return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) : @@ -325,7 +324,7 @@ static inline struct rhash_lock_head **rht_bucket_insert( */ static inline void rht_lock(struct bucket_table *tbl, - struct rhash_lock_head **bkt) + struct rhash_lock_head __rcu **bkt) { local_bh_disable(); bit_spin_lock(0, (unsigned long *)bkt); @@ -333,7 +332,7 @@ static inline void rht_lock(struct bucket_table *tbl, } static inline void rht_lock_nested(struct bucket_table *tbl, - struct rhash_lock_head **bucket, + struct rhash_lock_head __rcu **bucket, unsigned int subclass) { local_bh_disable(); @@ -342,18 +341,18 @@ static inline void rht_lock_nested(struct bucket_table *tbl, } static inline void rht_unlock(struct bucket_table *tbl, - struct rhash_lock_head **bkt) + struct rhash_lock_head __rcu **bkt) { lock_map_release(&tbl->dep_map); bit_spin_unlock(0, (unsigned long *)bkt); local_bh_enable(); } -static inline struct rhash_head __rcu *__rht_ptr( - struct rhash_lock_head *const *bkt) +static inline struct rhash_head *__rht_ptr( + struct rhash_lock_head *p, struct rhash_lock_head __rcu *const *bkt) { - return (struct rhash_head __rcu *) - ((unsigned long)*bkt & ~BIT(0) ?: + return (struct rhash_head *) + ((unsigned long)p & ~BIT(0) ?: (unsigned long)RHT_NULLS_MARKER(bkt)); } @@ -365,47 +364,41 @@ static inline struct rhash_head __rcu *__rht_ptr( * access is guaranteed, such as when destroying the table. 
*/ static inline struct rhash_head *rht_ptr_rcu( - struct rhash_lock_head *const *bkt) + struct rhash_lock_head __rcu *const *bkt) { - struct rhash_head __rcu *p = __rht_ptr(bkt); - - return rcu_dereference(p); + return __rht_ptr(rcu_dereference(*bkt), bkt); } static inline struct rhash_head *rht_ptr( - struct rhash_lock_head *const *bkt, + struct rhash_lock_head __rcu *const *bkt, struct bucket_table *tbl, unsigned int hash) { - return rht_dereference_bucket(__rht_ptr(bkt), tbl, hash); + return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt); } static inline struct rhash_head *rht_ptr_exclusive( - struct rhash_lock_head *const *bkt) + struct rhash_lock_head __rcu *const *bkt) { - return rcu_dereference_protected(__rht_ptr(bkt), 1); + return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt); } -static inline void rht_assign_locked(struct rhash_lock_head **bkt, +static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt, struct rhash_head *obj) { - struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt; - if (rht_is_a_nulls(obj)) obj = NULL; - rcu_assign_pointer(*p, (void *)((unsigned long)obj | BIT(0))); + rcu_assign_pointer(*bkt, (void *)((unsigned long)obj | BIT(0))); } static inline void rht_assign_unlock(struct bucket_table *tbl, - struct rhash_lock_head **bkt, + struct rhash_lock_head __rcu **bkt, struct rhash_head *obj) { - struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt; - if (rht_is_a_nulls(obj)) obj = NULL; lock_map_release(&tbl->dep_map); - rcu_assign_pointer(*p, obj); + rcu_assign_pointer(*bkt, (void *)obj); preempt_enable(); __release(bitlock); local_bh_enable(); @@ -593,7 +586,7 @@ static inline struct rhash_head *__rhashtable_lookup( .ht = ht, .key = key, }; - struct rhash_lock_head *const *bkt; + struct rhash_lock_head __rcu *const *bkt; struct bucket_table *tbl; struct rhash_head *he; unsigned int hash; @@ -709,7 +702,7 @@ static inline void *__rhashtable_insert_fast( .ht = ht, .key = key, }; - struct rhash_lock_head **bkt; + struct rhash_lock_head __rcu **bkt; struct rhash_head __rcu **pprev; struct bucket_table *tbl; struct rhash_head *head; @@ -995,7 +988,7 @@ static inline int __rhashtable_remove_fast_one( struct rhash_head *obj, const struct rhashtable_params params, bool rhlist) { - struct rhash_lock_head **bkt; + struct rhash_lock_head __rcu **bkt; struct rhash_head __rcu **pprev; struct rhash_head *he; unsigned int hash; @@ -1147,7 +1140,7 @@ static inline int __rhashtable_replace_fast( struct rhash_head *obj_old, struct rhash_head *obj_new, const struct rhashtable_params params) { - struct rhash_lock_head **bkt; + struct rhash_lock_head __rcu **bkt; struct rhash_head __rcu **pprev; struct rhash_head *he; unsigned int hash; diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 9aac824c523c..a1bbaa1c1a3a 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -220,7 +220,9 @@ struct tcp_sock { } rack; u16 advmss; /* Advertised MSS */ u8 compressed_ack; - u8 dup_ack_counter; + u8 dup_ack_counter:2, + tlp_retrans:1, /* TLP is a retransmission */ + unused:5; u32 chrono_start; /* Start time in jiffies of a TCP chrono */ u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */ u8 chrono_type:2, /* current chronograph type */ @@ -243,7 +245,7 @@ struct tcp_sock { save_syn:1, /* Save headers of SYN packet */ is_cwnd_limited:1,/* forward progress limited by snd_cwnd? */ syn_smc:1; /* SYN includes SMC */ - u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. 
*/ + u32 tlp_high_seq; /* snd_nxt at the time of TLP */ u32 tcp_tx_delay; /* delay (in usec) added to TX packets */ u64 tcp_wstamp_ns; /* departure time for next sent data packet */ diff --git a/include/linux/tpm.h b/include/linux/tpm.h index 03e9b184411b..8f4ff39f51e7 100644 --- a/include/linux/tpm.h +++ b/include/linux/tpm.h @@ -96,6 +96,7 @@ struct tpm_space { u8 *context_buf; u32 session_tbl[3]; u8 *session_buf; + u32 buf_size; }; struct tpm_bios_log { diff --git a/include/linux/tpm_eventlog.h b/include/linux/tpm_eventlog.h index 64356b199e94..739ba9a03ec1 100644 --- a/include/linux/tpm_eventlog.h +++ b/include/linux/tpm_eventlog.h @@ -211,9 +211,16 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event, efispecid = (struct tcg_efi_specid_event_head *)event_header->event; - /* Check if event is malformed. */ + /* + * Perform validation of the event in order to identify malformed + * events. This function may be asked to parse arbitrary byte sequences + * immediately following a valid event log. The caller expects this + * function to recognize that the byte sequence is not a valid event + * and to return an event size of 0. + */ if (memcmp(efispecid->signature, TCG_SPECID_SIG, - sizeof(TCG_SPECID_SIG)) || count > efispecid->num_algs) { + sizeof(TCG_SPECID_SIG)) || + !efispecid->num_algs || count != efispecid->num_algs) { size = 0; goto out; } diff --git a/include/linux/xattr.h b/include/linux/xattr.h index 47eaa34f8761..c5afaf8ca7a2 100644 --- a/include/linux/xattr.h +++ b/include/linux/xattr.h @@ -15,6 +15,7 @@ #include <linux/slab.h> #include <linux/types.h> #include <linux/spinlock.h> +#include <linux/mm.h> #include <uapi/linux/xattr.h> struct inode; @@ -94,7 +95,7 @@ static inline void simple_xattrs_free(struct simple_xattrs *xattrs) list_for_each_entry_safe(xattr, node, &xattrs->head, list) { kfree(xattr->name); - kfree(xattr); + kvfree(xattr); } } diff --git a/include/net/addrconf.h b/include/net/addrconf.h index fdb07105384c..8418b7d38468 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -274,6 +274,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr); int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr); +void __ipv6_sock_ac_close(struct sock *sk); void ipv6_sock_ac_close(struct sock *sk); int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr); diff --git a/include/net/devlink.h b/include/net/devlink.h index 1df6dfec26c2..95b0322a2a82 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -718,6 +718,7 @@ enum devlink_trap_group_generic_id { DEVLINK_TRAP_GROUP_GENERIC_ID_PIM, DEVLINK_TRAP_GROUP_GENERIC_ID_UC_LB, DEVLINK_TRAP_GROUP_GENERIC_ID_LOCAL_DELIVERY, + DEVLINK_TRAP_GROUP_GENERIC_ID_EXTERNAL_DELIVERY, DEVLINK_TRAP_GROUP_GENERIC_ID_IPV6, DEVLINK_TRAP_GROUP_GENERIC_ID_PTP_EVENT, DEVLINK_TRAP_GROUP_GENERIC_ID_PTP_GENERAL, @@ -915,6 +916,8 @@ enum devlink_trap_group_generic_id { "uc_loopback" #define DEVLINK_TRAP_GROUP_GENERIC_NAME_LOCAL_DELIVERY \ "local_delivery" +#define DEVLINK_TRAP_GROUP_GENERIC_NAME_EXTERNAL_DELIVERY \ + "external_delivery" #define DEVLINK_TRAP_GROUP_GENERIC_NAME_IPV6 \ "ipv6" #define DEVLINK_TRAP_GROUP_GENERIC_NAME_PTP_EVENT \ diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index 6315324b9dc2..3eaf25f68b79 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -5,7 +5,6 @@ #include <linux/list.h> #include <linux/netlink.h> #include <net/flow_dissector.h> -#include 
<linux/rhashtable.h> struct flow_match { struct flow_dissector *dissector; diff --git a/include/net/xfrm.h b/include/net/xfrm.h index c7d213c9f9d8..51f65d23ebaf 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -941,7 +941,7 @@ struct xfrm_dst { static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst) { #ifdef CONFIG_XFRM - if (dst->xfrm) { + if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) { const struct xfrm_dst *xdst = (const struct xfrm_dst *) dst; return xdst->path; @@ -953,7 +953,7 @@ static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst) static inline struct dst_entry *xfrm_dst_child(const struct dst_entry *dst) { #ifdef CONFIG_XFRM - if (dst->xfrm) { + if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) { struct xfrm_dst *xdst = (struct xfrm_dst *) dst; return xdst->child; } @@ -1630,13 +1630,16 @@ int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk, void *); void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net); int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl); -struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id, - u8 type, int dir, +struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, + const struct xfrm_mark *mark, + u32 if_id, u8 type, int dir, struct xfrm_selector *sel, struct xfrm_sec_ctx *ctx, int delete, int *err); -struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id, u8, - int dir, u32 id, int delete, int *err); +struct xfrm_policy *xfrm_policy_byid(struct net *net, + const struct xfrm_mark *mark, u32 if_id, + u8 type, int dir, u32 id, int delete, + int *err); int xfrm_policy_flush(struct net *net, u8 type, bool task_valid); void xfrm_policy_hash_rebuild(struct net *net); u32 xfrm_get_acqseq(void); diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h index c4369a6c2951..2f1fc23602cb 100644 --- a/include/rdma/rdmavt_qp.h +++ b/include/rdma/rdmavt_qp.h @@ -305,6 +305,25 @@ struct rvt_rq { spinlock_t lock ____cacheline_aligned_in_smp; }; +/** + * rvt_get_rq_count - count numbers of request work queue entries + * in circular buffer + * @rq: data structure for request queue entry + * @head: head indices of the circular buffer + * @tail: tail indices of the circular buffer + * + * Return - total number of entries in the Receive Queue + */ + +static inline u32 rvt_get_rq_count(struct rvt_rq *rq, u32 head, u32 tail) +{ + u32 count = head - tail; + + if ((s32)count < 0) + count += rq->size; + return count; +} + /* * This structure holds the information that the send tasklet needs * to send a RDMA read response or atomic operation. diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index 360b0f9d2220..863335ecb7e8 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -31,13 +31,6 @@ struct extent_io_tree; struct prelim_ref; struct btrfs_space_info; -TRACE_DEFINE_ENUM(FLUSH_DELAYED_ITEMS_NR); -TRACE_DEFINE_ENUM(FLUSH_DELAYED_ITEMS); -TRACE_DEFINE_ENUM(FLUSH_DELALLOC); -TRACE_DEFINE_ENUM(FLUSH_DELALLOC_WAIT); -TRACE_DEFINE_ENUM(ALLOC_CHUNK); -TRACE_DEFINE_ENUM(COMMIT_TRANS); - #define show_ref_type(type) \ __print_symbolic(type, \ { BTRFS_TREE_BLOCK_REF_KEY, "TREE_BLOCK_REF" }, \ @@ -67,30 +60,72 @@ TRACE_DEFINE_ENUM(COMMIT_TRANS); (obj >= BTRFS_ROOT_TREE_OBJECTID && \ obj <= BTRFS_QUOTA_TREE_OBJECTID)) ? 
__show_root_type(obj) : "-" -#define show_fi_type(type) \ - __print_symbolic(type, \ - { BTRFS_FILE_EXTENT_INLINE, "INLINE" }, \ - { BTRFS_FILE_EXTENT_REG, "REG" }, \ - { BTRFS_FILE_EXTENT_PREALLOC, "PREALLOC"}) +#define FLUSH_ACTIONS \ + EM( BTRFS_RESERVE_NO_FLUSH, "BTRFS_RESERVE_NO_FLUSH") \ + EM( BTRFS_RESERVE_FLUSH_LIMIT, "BTRFS_RESERVE_FLUSH_LIMIT") \ + EM( BTRFS_RESERVE_FLUSH_ALL, "BTRFS_RESERVE_FLUSH_ALL") \ + EMe(BTRFS_RESERVE_FLUSH_ALL_STEAL, "BTRFS_RESERVE_FLUSH_ALL_STEAL") + +#define FI_TYPES \ + EM( BTRFS_FILE_EXTENT_INLINE, "INLINE") \ + EM( BTRFS_FILE_EXTENT_REG, "REG") \ + EMe(BTRFS_FILE_EXTENT_PREALLOC, "PREALLOC") + +#define QGROUP_RSV_TYPES \ + EM( BTRFS_QGROUP_RSV_DATA, "DATA") \ + EM( BTRFS_QGROUP_RSV_META_PERTRANS, "META_PERTRANS") \ + EMe(BTRFS_QGROUP_RSV_META_PREALLOC, "META_PREALLOC") + +#define IO_TREE_OWNER \ + EM( IO_TREE_FS_PINNED_EXTENTS, "PINNED_EXTENTS") \ + EM( IO_TREE_FS_EXCLUDED_EXTENTS, "EXCLUDED_EXTENTS") \ + EM( IO_TREE_INODE_IO, "INODE_IO") \ + EM( IO_TREE_INODE_IO_FAILURE, "INODE_IO_FAILURE") \ + EM( IO_TREE_RELOC_BLOCKS, "RELOC_BLOCKS") \ + EM( IO_TREE_TRANS_DIRTY_PAGES, "TRANS_DIRTY_PAGES") \ + EM( IO_TREE_ROOT_DIRTY_LOG_PAGES, "ROOT_DIRTY_LOG_PAGES") \ + EM( IO_TREE_INODE_FILE_EXTENT, "INODE_FILE_EXTENT") \ + EM( IO_TREE_LOG_CSUM_RANGE, "LOG_CSUM_RANGE") \ + EMe(IO_TREE_SELFTEST, "SELFTEST") + +#define FLUSH_STATES \ + EM( FLUSH_DELAYED_ITEMS_NR, "FLUSH_DELAYED_ITEMS_NR") \ + EM( FLUSH_DELAYED_ITEMS, "FLUSH_DELAYED_ITEMS") \ + EM( FLUSH_DELALLOC, "FLUSH_DELALLOC") \ + EM( FLUSH_DELALLOC_WAIT, "FLUSH_DELALLOC_WAIT") \ + EM( FLUSH_DELAYED_REFS_NR, "FLUSH_DELAYED_REFS_NR") \ + EM( FLUSH_DELAYED_REFS, "FLUSH_ELAYED_REFS") \ + EM( ALLOC_CHUNK, "ALLOC_CHUNK") \ + EM( ALLOC_CHUNK_FORCE, "ALLOC_CHUNK_FORCE") \ + EM( RUN_DELAYED_IPUTS, "RUN_DELAYED_IPUTS") \ + EMe(COMMIT_TRANS, "COMMIT_TRANS") + +/* + * First define the enums in the above macros to be exported to userspace via + * TRACE_DEFINE_ENUM(). 
+ */ + +#undef EM +#undef EMe +#define EM(a, b) TRACE_DEFINE_ENUM(a); +#define EMe(a, b) TRACE_DEFINE_ENUM(a); + +FLUSH_ACTIONS +FI_TYPES +QGROUP_RSV_TYPES +IO_TREE_OWNER +FLUSH_STATES + +/* + * Now redefine the EM and EMe macros to map the enums to the strings that will + * be printed in the output + */ + +#undef EM +#undef EMe +#define EM(a, b) {a, b}, +#define EMe(a, b) {a, b} -#define show_qgroup_rsv_type(type) \ - __print_symbolic(type, \ - { BTRFS_QGROUP_RSV_DATA, "DATA" }, \ - { BTRFS_QGROUP_RSV_META_PERTRANS, "META_PERTRANS" }, \ - { BTRFS_QGROUP_RSV_META_PREALLOC, "META_PREALLOC" }) - -#define show_extent_io_tree_owner(owner) \ - __print_symbolic(owner, \ - { IO_TREE_FS_PINNED_EXTENTS, "PINNED_EXTENTS" }, \ - { IO_TREE_FS_EXCLUDED_EXTENTS, "EXCLUDED_EXTENTS" }, \ - { IO_TREE_INODE_IO, "INODE_IO" }, \ - { IO_TREE_INODE_IO_FAILURE, "INODE_IO_FAILURE" }, \ - { IO_TREE_RELOC_BLOCKS, "RELOC_BLOCKS" }, \ - { IO_TREE_TRANS_DIRTY_PAGES, "TRANS_DIRTY_PAGES" }, \ - { IO_TREE_ROOT_DIRTY_LOG_PAGES, "ROOT_DIRTY_LOG_PAGES" }, \ - { IO_TREE_INODE_FILE_EXTENT, "INODE_FILE_EXTENT" }, \ - { IO_TREE_LOG_CSUM_RANGE, "LOG_CSUM_RANGE" }, \ - { IO_TREE_SELFTEST, "SELFTEST" }) #define BTRFS_GROUP_FLAGS \ { BTRFS_BLOCK_GROUP_DATA, "DATA"}, \ @@ -380,7 +415,7 @@ DECLARE_EVENT_CLASS(btrfs__file_extent_item_regular, __entry->disk_isize, __entry->extent_start, __entry->extent_end, __entry->num_bytes, __entry->ram_bytes, __entry->disk_bytenr, __entry->disk_num_bytes, - __entry->extent_offset, show_fi_type(__entry->extent_type), + __entry->extent_offset, __print_symbolic(__entry->extent_type, FI_TYPES), __entry->compression) ); @@ -421,7 +456,7 @@ DECLARE_EVENT_CLASS( "extent_type=%s compression=%u", show_root_type(__entry->root_obj), __entry->ino, __entry->isize, __entry->disk_isize, __entry->extent_start, - __entry->extent_end, show_fi_type(__entry->extent_type), + __entry->extent_end, __print_symbolic(__entry->extent_type, FI_TYPES), __entry->compression) ); @@ -1042,12 +1077,6 @@ TRACE_EVENT(btrfs_space_reservation, __entry->bytes) ); -#define show_flush_action(action) \ - __print_symbolic(action, \ - { BTRFS_RESERVE_NO_FLUSH, "BTRFS_RESERVE_NO_FLUSH"}, \ - { BTRFS_RESERVE_FLUSH_LIMIT, "BTRFS_RESERVE_FLUSH_LIMIT"}, \ - { BTRFS_RESERVE_FLUSH_ALL, "BTRFS_RESERVE_FLUSH_ALL"}) - TRACE_EVENT(btrfs_trigger_flush, TP_PROTO(const struct btrfs_fs_info *fs_info, u64 flags, u64 bytes, @@ -1071,25 +1100,13 @@ TRACE_EVENT(btrfs_trigger_flush, TP_printk_btrfs("%s: flush=%d(%s) flags=%llu(%s) bytes=%llu", __get_str(reason), __entry->flush, - show_flush_action(__entry->flush), + __print_symbolic(__entry->flush, FLUSH_ACTIONS), __entry->flags, __print_flags((unsigned long)__entry->flags, "|", BTRFS_GROUP_FLAGS), __entry->bytes) ); -#define show_flush_state(state) \ - __print_symbolic(state, \ - { FLUSH_DELAYED_ITEMS_NR, "FLUSH_DELAYED_ITEMS_NR"}, \ - { FLUSH_DELAYED_ITEMS, "FLUSH_DELAYED_ITEMS"}, \ - { FLUSH_DELALLOC, "FLUSH_DELALLOC"}, \ - { FLUSH_DELALLOC_WAIT, "FLUSH_DELALLOC_WAIT"}, \ - { FLUSH_DELAYED_REFS_NR, "FLUSH_DELAYED_REFS_NR"}, \ - { FLUSH_DELAYED_REFS, "FLUSH_ELAYED_REFS"}, \ - { ALLOC_CHUNK, "ALLOC_CHUNK"}, \ - { ALLOC_CHUNK_FORCE, "ALLOC_CHUNK_FORCE"}, \ - { RUN_DELAYED_IPUTS, "RUN_DELAYED_IPUTS"}, \ - { COMMIT_TRANS, "COMMIT_TRANS"}) TRACE_EVENT(btrfs_flush_space, @@ -1114,7 +1131,7 @@ TRACE_EVENT(btrfs_flush_space, TP_printk_btrfs("state=%d(%s) flags=%llu(%s) num_bytes=%llu ret=%d", __entry->state, - show_flush_state(__entry->state), + __print_symbolic(__entry->state, FLUSH_STATES), __entry->flags, 
__print_flags((unsigned long)__entry->flags, "|", BTRFS_GROUP_FLAGS), @@ -1690,7 +1707,7 @@ TRACE_EVENT(qgroup_update_reserve, ), TP_printk_btrfs("qgid=%llu type=%s cur_reserved=%llu diff=%lld", - __entry->qgid, show_qgroup_rsv_type(__entry->type), + __entry->qgid, __print_symbolic(__entry->type, QGROUP_RSV_TYPES), __entry->cur_reserved, __entry->diff) ); @@ -1714,7 +1731,7 @@ TRACE_EVENT(qgroup_meta_reserve, TP_printk_btrfs("refroot=%llu(%s) type=%s diff=%lld", show_root_type(__entry->refroot), - show_qgroup_rsv_type(__entry->type), __entry->diff) + __print_symbolic(__entry->type, QGROUP_RSV_TYPES), __entry->diff) ); TRACE_EVENT(qgroup_meta_convert, @@ -1735,8 +1752,8 @@ TRACE_EVENT(qgroup_meta_convert, TP_printk_btrfs("refroot=%llu(%s) type=%s->%s diff=%lld", show_root_type(__entry->refroot), - show_qgroup_rsv_type(BTRFS_QGROUP_RSV_META_PREALLOC), - show_qgroup_rsv_type(BTRFS_QGROUP_RSV_META_PERTRANS), + __print_symbolic(BTRFS_QGROUP_RSV_META_PREALLOC, QGROUP_RSV_TYPES), + __print_symbolic(BTRFS_QGROUP_RSV_META_PERTRANS, QGROUP_RSV_TYPES), __entry->diff) ); @@ -1762,7 +1779,7 @@ TRACE_EVENT(qgroup_meta_free_all_pertrans, TP_printk_btrfs("refroot=%llu(%s) type=%s diff=%lld", show_root_type(__entry->refroot), - show_qgroup_rsv_type(__entry->type), __entry->diff) + __print_symbolic(__entry->type, QGROUP_RSV_TYPES), __entry->diff) ); DECLARE_EVENT_CLASS(btrfs__prelim_ref, @@ -1920,7 +1937,7 @@ TRACE_EVENT(btrfs_set_extent_bit, TP_printk_btrfs( "io_tree=%s ino=%llu root=%llu start=%llu len=%llu set_bits=%s", - show_extent_io_tree_owner(__entry->owner), __entry->ino, + __print_symbolic(__entry->owner, IO_TREE_OWNER), __entry->ino, __entry->rootid, __entry->start, __entry->len, __print_flags(__entry->set_bits, "|", EXTENT_FLAGS)) ); @@ -1959,7 +1976,7 @@ TRACE_EVENT(btrfs_clear_extent_bit, TP_printk_btrfs( "io_tree=%s ino=%llu root=%llu start=%llu len=%llu clear_bits=%s", - show_extent_io_tree_owner(__entry->owner), __entry->ino, + __print_symbolic(__entry->owner, IO_TREE_OWNER), __entry->ino, __entry->rootid, __entry->start, __entry->len, __print_flags(__entry->clear_bits, "|", EXTENT_FLAGS)) ); @@ -2000,7 +2017,7 @@ TRACE_EVENT(btrfs_convert_extent_bit, TP_printk_btrfs( "io_tree=%s ino=%llu root=%llu start=%llu len=%llu set_bits=%s clear_bits=%s", - show_extent_io_tree_owner(__entry->owner), __entry->ino, + __print_symbolic(__entry->owner, IO_TREE_OWNER), __entry->ino, __entry->rootid, __entry->start, __entry->len, __print_flags(__entry->set_bits , "|", EXTENT_FLAGS), __print_flags(__entry->clear_bits, "|", EXTENT_FLAGS)) diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h index e6b6cb0f8bc6..2c39d15a2beb 100644 --- a/include/uapi/linux/btrfs.h +++ b/include/uapi/linux/btrfs.h @@ -243,6 +243,18 @@ struct btrfs_ioctl_dev_info_args { __u8 path[BTRFS_DEVICE_PATH_NAME_MAX]; /* out */ }; +/* + * Retrieve information about the filesystem + */ + +/* Request information about checksum type and size */ +#define BTRFS_FS_INFO_FLAG_CSUM_INFO (1 << 0) + +/* Request information about filesystem generation */ +#define BTRFS_FS_INFO_FLAG_GENERATION (1 << 1) +/* Request information about filesystem metadata UUID */ +#define BTRFS_FS_INFO_FLAG_METADATA_UUID (1 << 2) + struct btrfs_ioctl_fs_info_args { __u64 max_id; /* out */ __u64 num_devices; /* out */ @@ -250,8 +262,13 @@ struct btrfs_ioctl_fs_info_args { __u32 nodesize; /* out */ __u32 sectorsize; /* out */ __u32 clone_alignment; /* out */ - __u32 reserved32; - __u64 reserved[122]; /* pad to 1k */ + /* See BTRFS_FS_INFO_FLAG_* */ + __u16 
csum_type; /* out */ + __u16 csum_size; /* out */ + __u64 flags; /* in/out */ + __u64 generation; /* out */ + __u8 metadata_uuid[BTRFS_FSID_SIZE]; /* out */ + __u8 reserved[944]; /* pad to 1k */ }; /* diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h index a3f3975df0de..9ba64ca6b4ac 100644 --- a/include/uapi/linux/btrfs_tree.h +++ b/include/uapi/linux/btrfs_tree.h @@ -913,9 +913,9 @@ struct btrfs_free_space_info { #define BTRFS_FREE_SPACE_USING_BITMAPS (1ULL << 0) #define BTRFS_QGROUP_LEVEL_SHIFT 48 -static inline __u64 btrfs_qgroup_level(__u64 qgroupid) +static inline __u16 btrfs_qgroup_level(__u64 qgroupid) { - return qgroupid >> BTRFS_QGROUP_LEVEL_SHIFT; + return (__u16)(qgroupid >> BTRFS_QGROUP_LEVEL_SHIFT); } /* diff --git a/kernel/audit.c b/kernel/audit.c index 8c201f414226..b2301bdc9773 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -1851,7 +1851,6 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, } audit_get_stamp(ab->ctx, &t, &serial); - audit_clear_dummy(ab->ctx); audit_log_format(ab, "audit(%llu.%03lu:%u): ", (unsigned long long)t.tv_sec, t.tv_nsec/1000000, serial); diff --git a/kernel/audit.h b/kernel/audit.h index f0233dc40b17..ddc22878433d 100644 --- a/kernel/audit.h +++ b/kernel/audit.h @@ -290,13 +290,6 @@ extern int audit_signal_info_syscall(struct task_struct *t); extern void audit_filter_inodes(struct task_struct *tsk, struct audit_context *ctx); extern struct list_head *audit_killed_trees(void); - -static inline void audit_clear_dummy(struct audit_context *ctx) -{ - if (ctx) - ctx->dummy = 0; -} - #else /* CONFIG_AUDITSYSCALL */ #define auditsc_get_stamp(c, t, s) 0 #define audit_put_watch(w) {} @@ -330,7 +323,6 @@ static inline int audit_signal_info_syscall(struct task_struct *t) } #define audit_filter_inodes(t, c) AUDIT_DISABLED -#define audit_clear_dummy(c) {} #endif /* CONFIG_AUDITSYSCALL */ extern char *audit_unpack_string(void **bufp, size_t *remain, size_t len); diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 468a23390457..fd840c40abf7 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -1417,6 +1417,9 @@ static void audit_log_proctitle(void) struct audit_context *context = audit_context(); struct audit_buffer *ab; + if (!context || context->dummy) + return; + ab = audit_log_start(context, GFP_KERNEL, AUDIT_PROCTITLE); if (!ab) return; /* audit_panic or being filtered */ diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 9a1a98dd9e97..0443600146dc 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -4058,6 +4058,11 @@ static int __btf_resolve_helper_id(struct bpf_verifier_log *log, void *fn, const char *tname, *sym; u32 btf_id, i; + if (!btf_vmlinux) { + bpf_log(log, "btf_vmlinux doesn't exist\n"); + return -EINVAL; + } + if (IS_ERR(btf_vmlinux)) { bpf_log(log, "btf_vmlinux is malformed\n"); return -EINVAL; diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index b4b288a3c3c9..b32cc8ce8ff6 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -779,15 +779,20 @@ static void htab_elem_free_rcu(struct rcu_head *head) htab_elem_free(htab, l); } -static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) +static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l) { struct bpf_map *map = &htab->map; + void *ptr; if (map->ops->map_fd_put_ptr) { - void *ptr = fd_htab_map_get_ptr(map, l); - + ptr = fd_htab_map_get_ptr(map, l); map->ops->map_fd_put_ptr(ptr); } +} + +static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) +{ + 
htab_put_fd_value(htab, l); if (htab_is_prealloc(htab)) { __pcpu_freelist_push(&htab->freelist, &l->fnode); @@ -839,6 +844,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, */ pl_new = this_cpu_ptr(htab->extra_elems); l_new = *pl_new; + htab_put_fd_value(htab, old_elem); *pl_new = old_elem; } else { struct pcpu_freelist_node *l; diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index bb0862873dba..5f8b0c52fd2e 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -2199,7 +2199,7 @@ static void handle_swbp(struct pt_regs *regs) if (!uprobe) { if (is_swbp > 0) { /* No matching uprobe; signal SIGTRAP. */ - send_sig(SIGTRAP, current, 0); + force_sig(SIGTRAP); } else { /* * Either we raced with uprobe_unregister() or we can't diff --git a/kernel/padata.c b/kernel/padata.c index 4373f7adaa40..16cb894dc272 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -250,13 +250,11 @@ EXPORT_SYMBOL(padata_do_parallel); static struct padata_priv *padata_find_next(struct parallel_data *pd, bool remove_object) { - struct padata_parallel_queue *next_queue; struct padata_priv *padata; struct padata_list *reorder; int cpu = pd->cpu; - next_queue = per_cpu_ptr(pd->pqueue, cpu); - reorder = &next_queue->reorder; + reorder = per_cpu_ptr(pd->reorder_list, cpu); spin_lock(&reorder->lock); if (list_empty(&reorder->list)) { @@ -291,7 +289,7 @@ static void padata_reorder(struct parallel_data *pd) int cb_cpu; struct padata_priv *padata; struct padata_serial_queue *squeue; - struct padata_parallel_queue *next_queue; + struct padata_list *reorder; /* * We need to ensure that only one cpu can work on dequeueing of @@ -339,9 +337,8 @@ static void padata_reorder(struct parallel_data *pd) */ smp_mb(); - next_queue = per_cpu_ptr(pd->pqueue, pd->cpu); - if (!list_empty(&next_queue->reorder.list) && - padata_find_next(pd, false)) + reorder = per_cpu_ptr(pd->reorder_list, pd->cpu); + if (!list_empty(&reorder->list) && padata_find_next(pd, false)) queue_work(pinst->serial_wq, &pd->reorder_work); } @@ -401,17 +398,16 @@ void padata_do_serial(struct padata_priv *padata) { struct parallel_data *pd = padata->pd; int hashed_cpu = padata_cpu_hash(pd, padata->seq_nr); - struct padata_parallel_queue *pqueue = per_cpu_ptr(pd->pqueue, - hashed_cpu); + struct padata_list *reorder = per_cpu_ptr(pd->reorder_list, hashed_cpu); struct padata_priv *cur; - spin_lock(&pqueue->reorder.lock); + spin_lock(&reorder->lock); /* Sort in ascending order of sequence number. 
*/ - list_for_each_entry_reverse(cur, &pqueue->reorder.list, list) + list_for_each_entry_reverse(cur, &reorder->list, list) if (cur->seq_nr < padata->seq_nr) break; list_add(&padata->list, &cur->list); - spin_unlock(&pqueue->reorder.lock); + spin_unlock(&reorder->lock); /* * Ensure the addition to the reorder list is ordered correctly @@ -441,28 +437,6 @@ static int padata_setup_cpumasks(struct padata_instance *pinst) return err; } -static int pd_setup_cpumasks(struct parallel_data *pd, - const struct cpumask *pcpumask, - const struct cpumask *cbcpumask) -{ - int err = -ENOMEM; - - if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL)) - goto out; - if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) - goto free_pcpu_mask; - - cpumask_copy(pd->cpumask.pcpu, pcpumask); - cpumask_copy(pd->cpumask.cbcpu, cbcpumask); - - return 0; - -free_pcpu_mask: - free_cpumask_var(pd->cpumask.pcpu); -out: - return err; -} - static void __init padata_mt_helper(struct work_struct *w) { struct padata_work *pw = container_of(w, struct padata_work, pw_work); @@ -575,17 +549,15 @@ static void padata_init_squeues(struct parallel_data *pd) } } -/* Initialize all percpu queues used by parallel workers */ -static void padata_init_pqueues(struct parallel_data *pd) +/* Initialize per-CPU reorder lists */ +static void padata_init_reorder_list(struct parallel_data *pd) { int cpu; - struct padata_parallel_queue *pqueue; + struct padata_list *list; for_each_cpu(cpu, pd->cpumask.pcpu) { - pqueue = per_cpu_ptr(pd->pqueue, cpu); - - __padata_list_init(&pqueue->reorder); - atomic_set(&pqueue->num_obj, 0); + list = per_cpu_ptr(pd->reorder_list, cpu); + __padata_list_init(list); } } @@ -593,30 +565,31 @@ static void padata_init_pqueues(struct parallel_data *pd) static struct parallel_data *padata_alloc_pd(struct padata_shell *ps) { struct padata_instance *pinst = ps->pinst; - const struct cpumask *cbcpumask; - const struct cpumask *pcpumask; struct parallel_data *pd; - cbcpumask = pinst->rcpumask.cbcpu; - pcpumask = pinst->rcpumask.pcpu; - pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL); if (!pd) goto err; - pd->pqueue = alloc_percpu(struct padata_parallel_queue); - if (!pd->pqueue) + pd->reorder_list = alloc_percpu(struct padata_list); + if (!pd->reorder_list) goto err_free_pd; pd->squeue = alloc_percpu(struct padata_serial_queue); if (!pd->squeue) - goto err_free_pqueue; + goto err_free_reorder_list; pd->ps = ps; - if (pd_setup_cpumasks(pd, pcpumask, cbcpumask)) + + if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL)) goto err_free_squeue; + if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) + goto err_free_pcpu; + + cpumask_and(pd->cpumask.pcpu, pinst->cpumask.pcpu, cpu_online_mask); + cpumask_and(pd->cpumask.cbcpu, pinst->cpumask.cbcpu, cpu_online_mask); - padata_init_pqueues(pd); + padata_init_reorder_list(pd); padata_init_squeues(pd); pd->seq_nr = -1; atomic_set(&pd->refcnt, 1); @@ -626,10 +599,12 @@ static struct parallel_data *padata_alloc_pd(struct padata_shell *ps) return pd; +err_free_pcpu: + free_cpumask_var(pd->cpumask.pcpu); err_free_squeue: free_percpu(pd->squeue); -err_free_pqueue: - free_percpu(pd->pqueue); +err_free_reorder_list: + free_percpu(pd->reorder_list); err_free_pd: kfree(pd); err: @@ -640,7 +615,7 @@ static void padata_free_pd(struct parallel_data *pd) { free_cpumask_var(pd->cpumask.pcpu); free_cpumask_var(pd->cpumask.cbcpu); - free_percpu(pd->pqueue); + free_percpu(pd->reorder_list); free_percpu(pd->squeue); kfree(pd); } @@ -682,12 +657,6 @@ static int padata_replace(struct 
padata_instance *pinst) pinst->flags |= PADATA_RESET; - cpumask_and(pinst->rcpumask.pcpu, pinst->cpumask.pcpu, - cpu_online_mask); - - cpumask_and(pinst->rcpumask.cbcpu, pinst->cpumask.cbcpu, - cpu_online_mask); - list_for_each_entry(ps, &pinst->pslist, list) { err = padata_replace_one(ps); if (err) @@ -789,43 +758,6 @@ out: } EXPORT_SYMBOL(padata_set_cpumask); -/** - * padata_start - start the parallel processing - * - * @pinst: padata instance to start - * - * Return: 0 on success or negative error code - */ -int padata_start(struct padata_instance *pinst) -{ - int err = 0; - - mutex_lock(&pinst->lock); - - if (pinst->flags & PADATA_INVALID) - err = -EINVAL; - - __padata_start(pinst); - - mutex_unlock(&pinst->lock); - - return err; -} -EXPORT_SYMBOL(padata_start); - -/** - * padata_stop - stop the parallel processing - * - * @pinst: padata instance to stop - */ -void padata_stop(struct padata_instance *pinst) -{ - mutex_lock(&pinst->lock); - __padata_stop(pinst); - mutex_unlock(&pinst->lock); -} -EXPORT_SYMBOL(padata_stop); - #ifdef CONFIG_HOTPLUG_CPU static int __padata_add_cpu(struct padata_instance *pinst, int cpu) @@ -907,9 +839,6 @@ static void __padata_free(struct padata_instance *pinst) WARN_ON(!list_empty(&pinst->pslist)); - padata_stop(pinst); - free_cpumask_var(pinst->rcpumask.cbcpu); - free_cpumask_var(pinst->rcpumask.pcpu); free_cpumask_var(pinst->cpumask.pcpu); free_cpumask_var(pinst->cpumask.cbcpu); destroy_workqueue(pinst->serial_wq); @@ -1044,18 +973,12 @@ static struct kobj_type padata_attr_type = { }; /** - * padata_alloc - allocate and initialize a padata instance and specify - * cpumasks for serial and parallel workers. - * + * padata_alloc - allocate and initialize a padata instance * @name: used to identify the instance - * @pcpumask: cpumask that will be used for padata parallelization - * @cbcpumask: cpumask that will be used for padata serialization * * Return: new instance on success, NULL on error */ -static struct padata_instance *padata_alloc(const char *name, - const struct cpumask *pcpumask, - const struct cpumask *cbcpumask) +struct padata_instance *padata_alloc(const char *name) { struct padata_instance *pinst; @@ -1081,26 +1004,16 @@ static struct padata_instance *padata_alloc(const char *name, free_cpumask_var(pinst->cpumask.pcpu); goto err_free_serial_wq; } - if (!padata_validate_cpumask(pinst, pcpumask) || - !padata_validate_cpumask(pinst, cbcpumask)) - goto err_free_masks; - - if (!alloc_cpumask_var(&pinst->rcpumask.pcpu, GFP_KERNEL)) - goto err_free_masks; - if (!alloc_cpumask_var(&pinst->rcpumask.cbcpu, GFP_KERNEL)) - goto err_free_rcpumask_pcpu; INIT_LIST_HEAD(&pinst->pslist); - cpumask_copy(pinst->cpumask.pcpu, pcpumask); - cpumask_copy(pinst->cpumask.cbcpu, cbcpumask); - cpumask_and(pinst->rcpumask.pcpu, pcpumask, cpu_online_mask); - cpumask_and(pinst->rcpumask.cbcpu, cbcpumask, cpu_online_mask); + cpumask_copy(pinst->cpumask.pcpu, cpu_possible_mask); + cpumask_copy(pinst->cpumask.cbcpu, cpu_possible_mask); if (padata_setup_cpumasks(pinst)) - goto err_free_rcpumask_cbcpu; + goto err_free_masks; - pinst->flags = 0; + __padata_start(pinst); kobject_init(&pinst->kobj, &padata_attr_type); mutex_init(&pinst->lock); @@ -1116,10 +1029,6 @@ static struct padata_instance *padata_alloc(const char *name, return pinst; -err_free_rcpumask_cbcpu: - free_cpumask_var(pinst->rcpumask.cbcpu); -err_free_rcpumask_pcpu: - free_cpumask_var(pinst->rcpumask.pcpu); err_free_masks: free_cpumask_var(pinst->cpumask.pcpu); free_cpumask_var(pinst->cpumask.cbcpu); @@ -1133,21 
+1042,7 @@ err_free_inst: err: return NULL; } - -/** - * padata_alloc_possible - Allocate and initialize padata instance. - * Use the cpu_possible_mask for serial and - * parallel workers. - * - * @name: used to identify the instance - * - * Return: new instance on success, NULL on error - */ -struct padata_instance *padata_alloc_possible(const char *name) -{ - return padata_alloc(name, cpu_possible_mask, cpu_possible_mask); -} -EXPORT_SYMBOL(padata_alloc_possible); +EXPORT_SYMBOL(padata_alloc); /** * padata_free - free a padata instance diff --git a/kernel/sched/core.c b/kernel/sched/core.c index e15543cb8481..2142c6767682 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4119,9 +4119,6 @@ static void __sched notrace __schedule(bool preempt) local_irq_disable(); rcu_note_context_switch(preempt); - /* See deactivate_task() below. */ - prev_state = prev->state; - /* * Make sure that signal_pending_state()->signal_pending() below * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE) @@ -4145,11 +4142,16 @@ static void __sched notrace __schedule(bool preempt) update_rq_clock(rq); switch_count = &prev->nivcsw; + /* - * We must re-load prev->state in case ttwu_remote() changed it - * before we acquired rq->lock. + * We must load prev->state once (task_struct::state is volatile), such + * that: + * + * - we form a control dependency vs deactivate_task() below. + * - ptrace_{,un}freeze_traced() can change ->state underneath us. */ - if (!preempt && prev_state && prev_state == prev->state) { + prev_state = prev->state; + if (!preempt && prev_state) { if (signal_pending_state(prev_state, prev)) { prev->state = TASK_RUNNING; } else { @@ -4163,10 +4165,12 @@ static void __sched notrace __schedule(bool preempt) /* * __schedule() ttwu() - * prev_state = prev->state; if (READ_ONCE(p->on_rq) && ...) - * LOCK rq->lock goto out; - * smp_mb__after_spinlock(); smp_acquire__after_ctrl_dep(); - * p->on_rq = 0; p->state = TASK_WAKING; + * prev_state = prev->state; if (p->on_rq && ...) + * if (prev_state) goto out; + * p->on_rq = 0; smp_acquire__after_ctrl_dep(); + * p->state = TASK_WAKING + * + * Where __schedule() and ttwu() have matching control dependencies. * * After this, schedule() must not care about p->state any more. */ @@ -4481,6 +4485,7 @@ asmlinkage __visible void __sched preempt_schedule_irq(void) int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags, void *key) { + WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~WF_SYNC); return try_to_wake_up(curr->private, mode, wake_flags); } EXPORT_SYMBOL(default_wake_function); diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c index ba059fbfc53a..01f5d3020589 100644 --- a/kernel/sched/wait.c +++ b/kernel/sched/wait.c @@ -389,7 +389,7 @@ int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, i int ret = default_wake_function(wq_entry, mode, sync, key); if (ret) - list_del_init(&wq_entry->entry); + list_del_init_careful(&wq_entry->entry); return ret; } diff --git a/kernel/signal.c b/kernel/signal.c index ee22ec78fd6d..6f16f7c5d375 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -719,7 +719,7 @@ static int dequeue_synchronous_signal(kernel_siginfo_t *info) * Return the first synchronous signal in the queue. 
*/ list_for_each_entry(q, &pending->list, list) { - /* Synchronous signals have a postive si_code */ + /* Synchronous signals have a positive si_code */ if ((q->info.si_code > SI_USER) && (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) { sync = q; diff --git a/kernel/time/timer.c b/kernel/time/timer.c index df1ff803acc4..026ac01af9da 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -43,6 +43,7 @@ #include <linux/sched/debug.h> #include <linux/slab.h> #include <linux/compat.h> +#include <linux/random.h> #include <linux/uaccess.h> #include <asm/unistd.h> @@ -1742,6 +1743,13 @@ void update_process_times(int user_tick) scheduler_tick(); if (IS_ENABLED(CONFIG_POSIX_TIMERS)) run_posix_cpu_timers(); + + /* The current CPU might make use of net randoms without receiving IRQs + * to renew them often enough. Let's update the net_rand_state from a + * non-constant value that's not affine to the number of calls to make + * sure it's updated when there's some activity (we don't care in idle). + */ + this_cpu_add(net_rand_state.s1, rol32(jiffies, 24) + user_tick); } /** diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c index 8cc01a603416..1ed2ed487097 100644 --- a/lib/crc-t10dif.c +++ b/lib/crc-t10dif.c @@ -17,64 +17,69 @@ #include <linux/notifier.h> static struct crypto_shash __rcu *crct10dif_tfm; -static struct static_key crct10dif_fallback __read_mostly; +static DEFINE_STATIC_KEY_TRUE(crct10dif_fallback); static DEFINE_MUTEX(crc_t10dif_mutex); +static struct work_struct crct10dif_rehash_work; -static int crc_t10dif_rehash(struct notifier_block *self, unsigned long val, void *data) +static int crc_t10dif_notify(struct notifier_block *self, unsigned long val, void *data) { struct crypto_alg *alg = data; - struct crypto_shash *new, *old; if (val != CRYPTO_MSG_ALG_LOADED || - static_key_false(&crct10dif_fallback) || - strncmp(alg->cra_name, CRC_T10DIF_STRING, strlen(CRC_T10DIF_STRING))) - return 0; + strcmp(alg->cra_name, CRC_T10DIF_STRING)) + return NOTIFY_DONE; + + schedule_work(&crct10dif_rehash_work); + return NOTIFY_OK; +} + +static void crc_t10dif_rehash(struct work_struct *work) +{ + struct crypto_shash *new, *old; mutex_lock(&crc_t10dif_mutex); old = rcu_dereference_protected(crct10dif_tfm, lockdep_is_held(&crc_t10dif_mutex)); - if (!old) { - mutex_unlock(&crc_t10dif_mutex); - return 0; - } - new = crypto_alloc_shash("crct10dif", 0, 0); + new = crypto_alloc_shash(CRC_T10DIF_STRING, 0, 0); if (IS_ERR(new)) { mutex_unlock(&crc_t10dif_mutex); - return 0; + return; } rcu_assign_pointer(crct10dif_tfm, new); mutex_unlock(&crc_t10dif_mutex); - synchronize_rcu(); - crypto_free_shash(old); - return 0; + if (old) { + synchronize_rcu(); + crypto_free_shash(old); + } else { + static_branch_disable(&crct10dif_fallback); + } } static struct notifier_block crc_t10dif_nb = { - .notifier_call = crc_t10dif_rehash, + .notifier_call = crc_t10dif_notify, }; __u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len) { struct { struct shash_desc shash; - char ctx[2]; + __u16 crc; } desc; int err; - if (static_key_false(&crct10dif_fallback)) + if (static_branch_unlikely(&crct10dif_fallback)) return crc_t10dif_generic(crc, buffer, len); rcu_read_lock(); desc.shash.tfm = rcu_dereference(crct10dif_tfm); - *(__u16 *)desc.ctx = crc; - + desc.crc = crc; err = crypto_shash_update(&desc.shash, buffer, len); rcu_read_unlock(); BUG_ON(err); - return *(__u16 *)desc.ctx; + return desc.crc; } EXPORT_SYMBOL(crc_t10dif_update); @@ -86,19 +91,17 @@ EXPORT_SYMBOL(crc_t10dif); static int __init 
crc_t10dif_mod_init(void) { + INIT_WORK(&crct10dif_rehash_work, crc_t10dif_rehash); crypto_register_notifier(&crc_t10dif_nb); - crct10dif_tfm = crypto_alloc_shash("crct10dif", 0, 0); - if (IS_ERR(crct10dif_tfm)) { - static_key_slow_inc(&crct10dif_fallback); - crct10dif_tfm = NULL; - } + crc_t10dif_rehash(&crct10dif_rehash_work); return 0; } static void __exit crc_t10dif_mod_fini(void) { crypto_unregister_notifier(&crc_t10dif_nb); - crypto_free_shash(crct10dif_tfm); + cancel_work_sync(&crct10dif_rehash_work); + crypto_free_shash(rcu_dereference_protected(crct10dif_tfm, 1)); } module_init(crc_t10dif_mod_init); @@ -106,15 +109,23 @@ module_exit(crc_t10dif_mod_fini); static int crc_t10dif_transform_show(char *buffer, const struct kernel_param *kp) { - if (static_key_false(&crct10dif_fallback)) + struct crypto_shash *tfm; + int len; + + if (static_branch_unlikely(&crct10dif_fallback)) return sprintf(buffer, "fallback\n"); - return sprintf(buffer, "%s\n", - crypto_tfm_alg_driver_name(crypto_shash_tfm(crct10dif_tfm))); + rcu_read_lock(); + tfm = rcu_dereference(crct10dif_tfm); + len = snprintf(buffer, PAGE_SIZE, "%s\n", + crypto_shash_driver_name(tfm)); + rcu_read_unlock(); + + return len; } -module_param_call(transform, NULL, crc_t10dif_transform_show, NULL, 0644); +module_param_call(transform, NULL, crc_t10dif_transform_show, NULL, 0444); -MODULE_DESCRIPTION("T10 DIF CRC calculation"); +MODULE_DESCRIPTION("T10 DIF CRC calculation (library API)"); MODULE_LICENSE("GPL"); MODULE_SOFTDEP("pre: crct10dif"); diff --git a/lib/crypto/chacha20poly1305.c b/lib/crypto/chacha20poly1305.c index ad0699ce702f..431e04280332 100644 --- a/lib/crypto/chacha20poly1305.c +++ b/lib/crypto/chacha20poly1305.c @@ -21,8 +21,6 @@ #define CHACHA_KEY_WORDS (CHACHA_KEY_SIZE / sizeof(u32)) -bool __init chacha20poly1305_selftest(void); - static void chacha_load_key(u32 *k, const u8 *in) { k[0] = get_unaligned_le32(in); diff --git a/lib/crypto/sha256.c b/lib/crypto/sha256.c index 2e621697c5c3..2321f6cb322f 100644 --- a/lib/crypto/sha256.c +++ b/lib/crypto/sha256.c @@ -280,4 +280,14 @@ void sha224_final(struct sha256_state *sctx, u8 *out) } EXPORT_SYMBOL(sha224_final); +void sha256(const u8 *data, unsigned int len, u8 *out) +{ + struct sha256_state sctx; + + sha256_init(&sctx); + sha256_update(&sctx, data, len); + sha256_final(&sctx, out); +} +EXPORT_SYMBOL(sha256); + MODULE_LICENSE("GPL"); diff --git a/lib/mpi/Makefile b/lib/mpi/Makefile index d5874a7f5ff9..43b8fce14079 100644 --- a/lib/mpi/Makefile +++ b/lib/mpi/Makefile @@ -16,6 +16,7 @@ mpi-y = \ mpicoder.o \ mpi-bit.o \ mpi-cmp.o \ + mpi-sub-ui.o \ mpih-cmp.o \ mpih-div.o \ mpih-mul.o \ diff --git a/lib/mpi/mpi-sub-ui.c b/lib/mpi/mpi-sub-ui.c new file mode 100644 index 000000000000..b41b082b5f3e --- /dev/null +++ b/lib/mpi/mpi-sub-ui.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* mpi-sub-ui.c - Subtract an unsigned integer from an MPI. + * + * Copyright 1991, 1993, 1994, 1996, 1999-2002, 2004, 2012, 2013, 2015 + * Free Software Foundation, Inc. + * + * This file was based on the GNU MP Library source file: + * https://gmplib.org/repo/gmp-6.2/file/510b83519d1c/mpz/aors_ui.h + * + * The GNU MP Library is free software; you can redistribute it and/or modify + * it under the terms of either: + * + * * the GNU Lesser General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your + * option) any later version. 
+ * + * or + * + * * the GNU General Public License as published by the Free Software + * Foundation; either version 2 of the License, or (at your option) any + * later version. + * + * or both in parallel, as here. + * + * The GNU MP Library is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received copies of the GNU General Public License and the + * GNU Lesser General Public License along with the GNU MP Library. If not, + * see https://www.gnu.org/licenses/. + */ + +#include "mpi-internal.h" + +int mpi_sub_ui(MPI w, MPI u, unsigned long vval) +{ + if (u->nlimbs == 0) { + if (mpi_resize(w, 1) < 0) + return -ENOMEM; + w->d[0] = vval; + w->nlimbs = (vval != 0); + w->sign = (vval != 0); + return 0; + } + + /* If not space for W (and possible carry), increase space. */ + if (mpi_resize(w, u->nlimbs + 1)) + return -ENOMEM; + + if (u->sign) { + mpi_limb_t cy; + + cy = mpihelp_add_1(w->d, u->d, u->nlimbs, (mpi_limb_t) vval); + w->d[u->nlimbs] = cy; + w->nlimbs = u->nlimbs + cy; + w->sign = 1; + } else { + /* The signs are different. Need exact comparison to determine + * which operand to subtract from which. + */ + if (u->nlimbs == 1 && u->d[0] < vval) { + w->d[0] = vval - u->d[0]; + w->nlimbs = 1; + w->sign = 1; + } else { + mpihelp_sub_1(w->d, u->d, u->nlimbs, (mpi_limb_t) vval); + /* Size can decrease with at most one limb. */ + w->nlimbs = (u->nlimbs - (w->d[u->nlimbs - 1] == 0)); + w->sign = 0; + } + } + + mpi_normalize(w); + return 0; +} +EXPORT_SYMBOL_GPL(mpi_sub_ui); diff --git a/lib/random32.c b/lib/random32.c index 763b920a6206..3d749abb9e80 100644 --- a/lib/random32.c +++ b/lib/random32.c @@ -48,7 +48,7 @@ static inline void prandom_state_selftest(void) } #endif -static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy; +DEFINE_PER_CPU(struct rnd_state, net_rand_state); /** * prandom_u32_state - seeded pseudo-random number generator. 
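For reference, the one-shot sha256() helper added to lib/crypto/sha256.c above simply chains sha256_init(), sha256_update() and sha256_final(). A hypothetical caller could look like the sketch below; the header carrying the new declaration is assumed here to be <crypto/sha.h>, where the incremental sha256_*() API lives in this kernel version:

#include <crypto/sha.h>		/* assumed location of the sha256() declaration */
#include <linux/string.h>

static void digest_buffer(const u8 *data, unsigned int len)
{
	u8 digest[SHA256_DIGEST_SIZE];

	/* One call instead of sha256_init() + sha256_update() + sha256_final(). */
	sha256(data, len, digest);

	/* ... consume digest, then wipe it if the input was sensitive ... */
	memzero_explicit(digest, sizeof(digest));
}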
diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 9f6890aedd1a..c949c1e3b87c 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -31,7 +31,7 @@ union nested_table { union nested_table __rcu *table; - struct rhash_lock_head *bucket; + struct rhash_lock_head __rcu *bucket; }; static u32 head_hashfn(struct rhashtable *ht, @@ -222,7 +222,7 @@ static struct bucket_table *rhashtable_last_table(struct rhashtable *ht, } static int rhashtable_rehash_one(struct rhashtable *ht, - struct rhash_lock_head **bkt, + struct rhash_lock_head __rcu **bkt, unsigned int old_hash) { struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); @@ -275,7 +275,7 @@ static int rhashtable_rehash_chain(struct rhashtable *ht, unsigned int old_hash) { struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); - struct rhash_lock_head **bkt = rht_bucket_var(old_tbl, old_hash); + struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash); int err; if (!bkt) @@ -485,7 +485,7 @@ fail: } static void *rhashtable_lookup_one(struct rhashtable *ht, - struct rhash_lock_head **bkt, + struct rhash_lock_head __rcu **bkt, struct bucket_table *tbl, unsigned int hash, const void *key, struct rhash_head *obj) { @@ -535,12 +535,10 @@ static void *rhashtable_lookup_one(struct rhashtable *ht, return ERR_PTR(-ENOENT); } -static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht, - struct rhash_lock_head **bkt, - struct bucket_table *tbl, - unsigned int hash, - struct rhash_head *obj, - void *data) +static struct bucket_table *rhashtable_insert_one( + struct rhashtable *ht, struct rhash_lock_head __rcu **bkt, + struct bucket_table *tbl, unsigned int hash, struct rhash_head *obj, + void *data) { struct bucket_table *new_tbl; struct rhash_head *head; @@ -591,7 +589,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key, { struct bucket_table *new_tbl; struct bucket_table *tbl; - struct rhash_lock_head **bkt; + struct rhash_lock_head __rcu **bkt; unsigned int hash; void *data; @@ -1173,8 +1171,8 @@ void rhashtable_destroy(struct rhashtable *ht) } EXPORT_SYMBOL_GPL(rhashtable_destroy); -struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl, - unsigned int hash) +struct rhash_lock_head __rcu **__rht_bucket_nested( + const struct bucket_table *tbl, unsigned int hash) { const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); unsigned int index = hash & ((1 << tbl->nest) - 1); @@ -1202,10 +1200,10 @@ struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl, } EXPORT_SYMBOL_GPL(__rht_bucket_nested); -struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl, - unsigned int hash) +struct rhash_lock_head __rcu **rht_bucket_nested( + const struct bucket_table *tbl, unsigned int hash) { - static struct rhash_lock_head *rhnull; + static struct rhash_lock_head __rcu *rhnull; if (!rhnull) INIT_RHT_NULLS_HEAD(rhnull); @@ -1213,9 +1211,8 @@ struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl, } EXPORT_SYMBOL_GPL(rht_bucket_nested); -struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht, - struct bucket_table *tbl, - unsigned int hash) +struct rhash_lock_head __rcu **rht_bucket_nested_insert( + struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash) { const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); unsigned int index = hash & ((1 << tbl->nest) - 1); diff --git a/mm/filemap.c b/mm/filemap.c index 385759c4ce4b..991503bbf922 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ 
-1002,6 +1002,7 @@ struct wait_page_queue { static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg) { + int ret; struct wait_page_key *key = arg; struct wait_page_queue *wait_page = container_of(wait, struct wait_page_queue, wait); @@ -1014,17 +1015,35 @@ static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, return 0; /* - * Stop walking if it's locked. - * Is this safe if put_and_wait_on_page_locked() is in use? - * Yes: the waker must hold a reference to this page, and if PG_locked - * has now already been set by another task, that task must also hold - * a reference to the *same usage* of this page; so there is no need - * to walk on to wake even the put_and_wait_on_page_locked() callers. + * If it's an exclusive wait, we get the bit for it, and + * stop walking if we can't. + * + * If it's a non-exclusive wait, then the fact that this + * wake function was called means that the bit already + * was cleared, and we don't care if somebody then + * re-took it. */ - if (test_bit(key->bit_nr, &key->page->flags)) - return -1; + ret = 0; + if (wait->flags & WQ_FLAG_EXCLUSIVE) { + if (test_and_set_bit(key->bit_nr, &key->page->flags)) + return -1; + ret = 1; + } + wait->flags |= WQ_FLAG_WOKEN; - return autoremove_wake_function(wait, mode, sync, key); + wake_up_state(wait->private, mode); + + /* + * Ok, we have successfully done what we're waiting for, + * and we can unconditionally remove the wait entry. + * + * Note that this has to be the absolute last thing we do, + * since after list_del_init(&wait->entry) the wait entry + * might be de-allocated and the process might even have + * exited. + */ + list_del_init_careful(&wait->entry); + return ret; } static void wake_up_page_bit(struct page *page, int bit_nr) @@ -1103,16 +1122,31 @@ enum behavior { */ }; +/* + * Attempt to check (or get) the page bit, and mark the + * waiter woken if successful. + */ +static inline bool trylock_page_bit_common(struct page *page, int bit_nr, + struct wait_queue_entry *wait) +{ + if (wait->flags & WQ_FLAG_EXCLUSIVE) { + if (test_and_set_bit(bit_nr, &page->flags)) + return false; + } else if (test_bit(bit_nr, &page->flags)) + return false; + + wait->flags |= WQ_FLAG_WOKEN; + return true; +} + static inline int wait_on_page_bit_common(wait_queue_head_t *q, struct page *page, int bit_nr, int state, enum behavior behavior) { struct wait_page_queue wait_page; wait_queue_entry_t *wait = &wait_page.wait; - bool bit_is_set; bool thrashing = false; bool delayacct = false; unsigned long pflags; - int ret = 0; if (bit_nr == PG_locked && !PageUptodate(page) && PageWorkingset(page)) { @@ -1130,48 +1164,47 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q, wait_page.page = page; wait_page.bit_nr = bit_nr; - for (;;) { - spin_lock_irq(&q->lock); + /* + * Do one last check whether we can get the + * page bit synchronously. + * + * Do the SetPageWaiters() marking before that + * to let any waker we _just_ missed know they + * need to wake us up (otherwise they'll never + * even go to the slow case that looks at the + * page queue), and add ourselves to the wait + * queue if we need to sleep. + * + * This part needs to be done under the queue + * lock to avoid races. 
+ */ + spin_lock_irq(&q->lock); + SetPageWaiters(page); + if (!trylock_page_bit_common(page, bit_nr, wait)) + __add_wait_queue_entry_tail(q, wait); + spin_unlock_irq(&q->lock); - if (likely(list_empty(&wait->entry))) { - __add_wait_queue_entry_tail(q, wait); - SetPageWaiters(page); - } + /* + * From now on, all the logic will be based on + * the WQ_FLAG_WOKEN flag, and the and the page + * bit testing (and setting) will be - or has + * already been - done by the wake function. + * + * We can drop our reference to the page. + */ + if (behavior == DROP) + put_page(page); + for (;;) { set_current_state(state); - spin_unlock_irq(&q->lock); - - bit_is_set = test_bit(bit_nr, &page->flags); - if (behavior == DROP) - put_page(page); - - if (likely(bit_is_set)) - io_schedule(); - - if (behavior == EXCLUSIVE) { - if (!test_and_set_bit_lock(bit_nr, &page->flags)) - break; - } else if (behavior == SHARED) { - if (!test_bit(bit_nr, &page->flags)) - break; - } - - if (signal_pending_state(state, current)) { - ret = -EINTR; + if (signal_pending_state(state, current)) break; - } - if (behavior == DROP) { - /* - * We can no longer safely access page->flags: - * even if CONFIG_MEMORY_HOTREMOVE is not enabled, - * there is a risk of waiting forever on a page reused - * for something that keeps it locked indefinitely. - * But best check for -EINTR above before breaking. - */ + if (wait->flags & WQ_FLAG_WOKEN) break; - } + + io_schedule(); } finish_wait(q, wait); @@ -1190,7 +1223,7 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q, * bother with signals either. */ - return ret; + return wait->flags & WQ_FLAG_WOKEN ? 0 : -EINTR; } void wait_on_page_bit(struct page *page, int bit_nr) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index fab4485b9e52..590111ea6975 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -45,7 +45,10 @@ int hugetlb_max_hstate __read_mostly; unsigned int default_hstate_idx; struct hstate hstates[HUGE_MAX_HSTATE]; +#ifdef CONFIG_CMA static struct cma *hugetlb_cma[MAX_NUMNODES]; +#endif +static unsigned long hugetlb_cma_size __initdata; /* * Minimum page order among possible hugepage sizes, set to a proper value @@ -1235,9 +1238,10 @@ static void free_gigantic_page(struct page *page, unsigned int order) * If the page isn't allocated using the cma allocator, * cma_release() returns false. 
*/ - if (IS_ENABLED(CONFIG_CMA) && - cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order)) +#ifdef CONFIG_CMA + if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order)) return; +#endif free_contig_range(page_to_pfn(page), 1 << order); } @@ -1248,7 +1252,8 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask, { unsigned long nr_pages = 1UL << huge_page_order(h); - if (IS_ENABLED(CONFIG_CMA)) { +#ifdef CONFIG_CMA + { struct page *page; int node; @@ -1262,6 +1267,7 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask, return page; } } +#endif return alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask); } @@ -2571,7 +2577,7 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h) for (i = 0; i < h->max_huge_pages; ++i) { if (hstate_is_gigantic(h)) { - if (IS_ENABLED(CONFIG_CMA) && hugetlb_cma[0]) { + if (hugetlb_cma_size) { pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n"); break; } @@ -5654,7 +5660,6 @@ void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason) } #ifdef CONFIG_CMA -static unsigned long hugetlb_cma_size __initdata; static bool cma_reserve_called __initdata; static int __init cmdline_parse_hugetlb_cma(char *p) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index b043c40a21d4..700f5160f3e4 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -958,6 +958,9 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address, return SCAN_ADDRESS_RANGE; if (!hugepage_vma_check(vma, vma->vm_flags)) return SCAN_VMA_CHECK; + /* Anon VMA expected */ + if (!vma->anon_vma || vma->vm_ops) + return SCAN_VMA_CHECK; return 0; } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 19622328e4b5..13f559af1ab6 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -5669,7 +5669,6 @@ static void __mem_cgroup_clear_mc(void) if (!mem_cgroup_is_root(mc.to)) page_counter_uncharge(&mc.to->memory, mc.moved_swap); - mem_cgroup_id_get_many(mc.to, mc.moved_swap); css_put_many(&mc.to->css, mc.moved_swap); mc.moved_swap = 0; @@ -5860,7 +5859,8 @@ put: /* get_mctgt_type() gets the page */ ent = target.ent; if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) { mc.precharge--; - /* we fixup refcnts and charges later. */ + mem_cgroup_id_get_many(mc.to, 1); + /* we fixup other refcnts and charges later. */ mc.moved_swap++; } break; @@ -7186,6 +7186,13 @@ static struct cftype memsw_files[] = { { }, /* terminate */ }; +/* + * If mem_cgroup_swap_init() is implemented as a subsys_initcall() + * instead of a core_initcall(), this could mean cgroup_memory_noswap still + * remains set to false even when memcg is disabled via "cgroup_disable=memory" + * boot parameter. This may result in premature OOPS inside + * mem_cgroup_get_nr_swap_pages() function in corner cases. 
+ */ static int __init mem_cgroup_swap_init(void) { /* No memory control -> no swap control */ @@ -7200,6 +7207,6 @@ static int __init mem_cgroup_swap_init(void) return 0; } -subsys_initcall(mem_cgroup_swap_init); +core_initcall(mem_cgroup_swap_init); #endif /* CONFIG_MEMCG_SWAP */ diff --git a/mm/memory.c b/mm/memory.c index 87ec87cdc1ff..3ecad55103ad 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1601,7 +1601,7 @@ int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr, return insert_pages(vma, addr, pages, num, vma->vm_page_prot); #else unsigned long idx = 0, pgcount = *num; - int err; + int err = -EINVAL; for (; idx < pgcount; ++idx) { err = vm_insert_page(vma, addr + (PAGE_SIZE * idx), pages[idx]); diff --git a/mm/mmap.c b/mm/mmap.c index 59a4682ebf3f..8c7ca737a19b 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2620,7 +2620,7 @@ static void unmap_region(struct mm_struct *mm, * Create a list of vma's touched by the unmap, removing them from the mm's * vma list as we go.. */ -static void +static bool detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, unsigned long end) { @@ -2645,6 +2645,17 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, /* Kill the cache */ vmacache_invalidate(mm); + + /* + * Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or + * VM_GROWSUP VMA. Such VMAs can change their size under + * down_read(mmap_lock) and collide with the VMA we are about to unmap. + */ + if (vma && (vma->vm_flags & VM_GROWSDOWN)) + return false; + if (prev && (prev->vm_flags & VM_GROWSUP)) + return false; + return true; } /* @@ -2825,7 +2836,8 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len, } /* Detach vmas from rbtree */ - detach_vmas_to_be_unmapped(mm, vma, prev, end); + if (!detach_vmas_to_be_unmapped(mm, vma, prev, end)) + downgrade = false; if (downgrade) mmap_write_downgrade(mm); diff --git a/mm/shmem.c b/mm/shmem.c index a0dbe62f8042..b2abca3f7f33 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -3178,7 +3178,7 @@ static int shmem_initxattrs(struct inode *inode, new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len, GFP_KERNEL); if (!new_xattr->name) { - kfree(new_xattr); + kvfree(new_xattr); return -ENOMEM; } diff --git a/mm/slab_common.c b/mm/slab_common.c index 37d48a56431d..fe8b68482670 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -326,6 +326,14 @@ int slab_unmergeable(struct kmem_cache *s) if (s->refcount < 0) return 1; +#ifdef CONFIG_MEMCG_KMEM + /* + * Skip the dying kmem_cache. + */ + if (s->memcg_params.dying) + return 1; +#endif + return 0; } @@ -886,12 +894,15 @@ static int shutdown_memcg_caches(struct kmem_cache *s) return 0; } -static void flush_memcg_workqueue(struct kmem_cache *s) +static void memcg_set_kmem_cache_dying(struct kmem_cache *s) { spin_lock_irq(&memcg_kmem_wq_lock); s->memcg_params.dying = true; spin_unlock_irq(&memcg_kmem_wq_lock); +} +static void flush_memcg_workqueue(struct kmem_cache *s) +{ /* * SLAB and SLUB deactivate the kmem_caches through call_rcu. Make * sure all registered rcu callbacks have been invoked. 
@@ -923,10 +934,6 @@ static inline int shutdown_memcg_caches(struct kmem_cache *s) { return 0; } - -static inline void flush_memcg_workqueue(struct kmem_cache *s) -{ -} #endif /* CONFIG_MEMCG_KMEM */ void slab_kmem_cache_release(struct kmem_cache *s) @@ -944,8 +951,6 @@ void kmem_cache_destroy(struct kmem_cache *s) if (unlikely(!s)) return; - flush_memcg_workqueue(s); - get_online_cpus(); get_online_mems(); @@ -955,6 +960,22 @@ void kmem_cache_destroy(struct kmem_cache *s) if (s->refcount) goto out_unlock; +#ifdef CONFIG_MEMCG_KMEM + memcg_set_kmem_cache_dying(s); + + mutex_unlock(&slab_mutex); + + put_online_mems(); + put_online_cpus(); + + flush_memcg_workqueue(s); + + get_online_cpus(); + get_online_mems(); + + mutex_lock(&slab_mutex); +#endif + err = shutdown_memcg_caches(s); if (!err) err = shutdown_cache(s); diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c index 13cd683a658a..12ecacf0c55f 100644 --- a/net/9p/trans_fd.c +++ b/net/9p/trans_fd.c @@ -362,6 +362,10 @@ static void p9_read_work(struct work_struct *work) if (m->rreq->status == REQ_STATUS_SENT) { list_del(&m->rreq->req_list); p9_client_cb(m->client, m->rreq, REQ_STATUS_RCVD); + } else if (m->rreq->status == REQ_STATUS_FLSHD) { + /* Ignore replies associated with a cancelled request. */ + p9_debug(P9_DEBUG_TRANS, + "Ignore replies associated with a cancelled request\n"); } else { spin_unlock(&m->client->lock); p9_debug(P9_DEBUG_ERROR, @@ -703,11 +707,20 @@ static int p9_fd_cancelled(struct p9_client *client, struct p9_req_t *req) { p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req); + spin_lock(&client->lock); + /* Ignore cancelled request if message has been received + * before lock. + */ + if (req->status == REQ_STATUS_RCVD) { + spin_unlock(&client->lock); + return 0; + } + /* we haven't received a response for oldreq, * remove it from the list. */ - spin_lock(&client->lock); list_del(&req->req_list); + req->status = REQ_STATUS_FLSHD; spin_unlock(&client->lock); p9_req_put(req); @@ -803,20 +816,28 @@ static int p9_fd_open(struct p9_client *client, int rfd, int wfd) return -ENOMEM; ts->rd = fget(rfd); + if (!ts->rd) + goto out_free_ts; + if (!(ts->rd->f_mode & FMODE_READ)) + goto out_put_rd; ts->wr = fget(wfd); - if (!ts->rd || !ts->wr) { - if (ts->rd) - fput(ts->rd); - if (ts->wr) - fput(ts->wr); - kfree(ts); - return -EIO; - } + if (!ts->wr) + goto out_put_rd; + if (!(ts->wr->f_mode & FMODE_WRITE)) + goto out_put_wr; client->trans = ts; client->status = Connected; return 0; + +out_put_wr: + fput(ts->wr); +out_put_rd: + fput(ts->rd); +out_free_ts: + kfree(ts); + return -EIO; } static int p9_socket_open(struct p9_client *client, struct socket *csocket) diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index fd91cd34f25e..dec3f35467c9 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -1187,7 +1187,10 @@ static int __must_check ax25_connect(struct socket *sock, if (addr_len > sizeof(struct sockaddr_ax25) && fsa->fsa_ax25.sax25_ndigis != 0) { /* Valid number of digipeaters ? */ - if (fsa->fsa_ax25.sax25_ndigis < 1 || fsa->fsa_ax25.sax25_ndigis > AX25_MAX_DIGIS) { + if (fsa->fsa_ax25.sax25_ndigis < 1 || + fsa->fsa_ax25.sax25_ndigis > AX25_MAX_DIGIS || + addr_len < sizeof(struct sockaddr_ax25) + + sizeof(ax25_address) * fsa->fsa_ax25.sax25_ndigis) { err = -EINVAL; goto out_release; } @@ -1507,7 +1510,10 @@ static int ax25_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)usax; /* Valid number of digipeaters ? 
*/ - if (usax->sax25_ndigis < 1 || usax->sax25_ndigis > AX25_MAX_DIGIS) { + if (usax->sax25_ndigis < 1 || + usax->sax25_ndigis > AX25_MAX_DIGIS || + addr_len < sizeof(struct sockaddr_ax25) + + sizeof(ax25_address) * usax->sax25_ndigis) { err = -EINVAL; goto out; } diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index cfeaee347db3..af9d7f2ff8ba 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -1338,6 +1338,9 @@ static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr, { struct discovery_state *d = &hdev->discovery; + if (len > HCI_MAX_AD_LENGTH) + return; + bacpy(&d->last_adv_addr, bdaddr); d->last_adv_addr_type = bdaddr_type; d->last_adv_rssi = rssi; @@ -5355,7 +5358,8 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev, static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, u8 bdaddr_type, bdaddr_t *direct_addr, - u8 direct_addr_type, s8 rssi, u8 *data, u8 len) + u8 direct_addr_type, s8 rssi, u8 *data, u8 len, + bool ext_adv) { struct discovery_state *d = &hdev->discovery; struct smp_irk *irk; @@ -5377,6 +5381,11 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, return; } + if (!ext_adv && len > HCI_MAX_AD_LENGTH) { + bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes"); + return; + } + /* Find the end of the data in case the report contains padded zero * bytes at the end causing an invalid length value. * @@ -5437,7 +5446,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, */ conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type, direct_addr); - if (conn && type == LE_ADV_IND) { + if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) { /* Store report for later inclusion by * mgmt_device_connected */ @@ -5491,7 +5500,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, * event or send an immediate device found event if the data * should not be stored for later. */ - if (!has_pending_adv_report(hdev)) { + if (!ext_adv && !has_pending_adv_report(hdev)) { /* If the report will trigger a SCAN_REQ store it for * later merging. */ @@ -5526,7 +5535,8 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, /* If the new report will trigger a SCAN_REQ store it for * later merging. 
*/ - if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) { + if (!ext_adv && (type == LE_ADV_IND || + type == LE_ADV_SCAN_IND)) { store_pending_adv_report(hdev, bdaddr, bdaddr_type, rssi, flags, data, len); return; @@ -5566,7 +5576,7 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) rssi = ev->data[ev->length]; process_adv_report(hdev, ev->evt_type, &ev->bdaddr, ev->bdaddr_type, NULL, 0, rssi, - ev->data, ev->length); + ev->data, ev->length, false); } else { bt_dev_err(hdev, "Dropping invalid advertising data"); } @@ -5638,7 +5648,8 @@ static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) if (legacy_evt_type != LE_ADV_INVALID) { process_adv_report(hdev, legacy_evt_type, &ev->bdaddr, ev->bdaddr_type, NULL, 0, ev->rssi, - ev->data, ev->length); + ev->data, ev->length, + !(evt_type & LE_EXT_ADV_LEGACY_PDU)); } ptr += sizeof(*ev) + ev->length; @@ -5836,7 +5847,8 @@ static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, process_adv_report(hdev, ev->evt_type, &ev->bdaddr, ev->bdaddr_type, &ev->direct_addr, - ev->direct_addr_type, ev->rssi, NULL, 0); + ev->direct_addr_type, ev->rssi, NULL, 0, + false); ptr += sizeof(*ev); } diff --git a/net/bpfilter/bpfilter_kern.c b/net/bpfilter/bpfilter_kern.c index 1905e01c3aa9..4494ea6056cd 100644 --- a/net/bpfilter/bpfilter_kern.c +++ b/net/bpfilter/bpfilter_kern.c @@ -39,7 +39,7 @@ static int __bpfilter_process_sockopt(struct sock *sk, int optname, { struct mbox_request req; struct mbox_reply reply; - loff_t pos; + loff_t pos = 0; ssize_t n; int ret = -EFAULT; diff --git a/net/compat.c b/net/compat.c index 5e3041a2c37d..434838bef5f8 100644 --- a/net/compat.c +++ b/net/compat.c @@ -202,7 +202,7 @@ int cmsghdr_from_user_compat_to_kern(struct msghdr *kmsg, struct sock *sk, /* Advance. 
*/ kcmsg = (struct cmsghdr *)((char *)kcmsg + tmp); - ucmsg = cmsg_compat_nxthdr(kmsg, ucmsg, ucmlen); + ucmsg = cmsg_compat_nxthdr(kmsg, ucmsg, cmsg.cmsg_len); } /* diff --git a/net/core/dev.c b/net/core/dev.c index 90b59fc50dc9..7a774ebf64e2 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -5601,7 +5601,7 @@ static void flush_backlog(struct work_struct *work) skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { if (skb->dev->reg_state == NETREG_UNREGISTERING) { __skb_unlink(skb, &sd->input_pkt_queue); - kfree_skb(skb); + dev_kfree_skb_irq(skb); input_queue_head_incr(sd); } } diff --git a/net/core/devlink.c b/net/core/devlink.c index 2cafbc808b09..47f14a2f25fb 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -1065,7 +1065,9 @@ static int devlink_nl_cmd_sb_pool_get_dumpit(struct sk_buff *msg, devlink_sb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq); - if (err && err != -EOPNOTSUPP) { + if (err == -EOPNOTSUPP) { + err = 0; + } else if (err) { mutex_unlock(&devlink->lock); goto out; } @@ -1266,7 +1268,9 @@ static int devlink_nl_cmd_sb_port_pool_get_dumpit(struct sk_buff *msg, devlink, devlink_sb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq); - if (err && err != -EOPNOTSUPP) { + if (err == -EOPNOTSUPP) { + err = 0; + } else if (err) { mutex_unlock(&devlink->lock); goto out; } @@ -1498,7 +1502,9 @@ devlink_nl_cmd_sb_tc_pool_bind_get_dumpit(struct sk_buff *msg, devlink_sb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq); - if (err && err != -EOPNOTSUPP) { + if (err == -EOPNOTSUPP) { + err = 0; + } else if (err) { mutex_unlock(&devlink->lock); goto out; } @@ -3299,7 +3305,9 @@ static int devlink_nl_cmd_param_get_dumpit(struct sk_buff *msg, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI); - if (err && err != -EOPNOTSUPP) { + if (err == -EOPNOTSUPP) { + err = 0; + } else if (err) { mutex_unlock(&devlink->lock); goto out; } @@ -3569,7 +3577,9 @@ static int devlink_nl_cmd_port_param_get_dumpit(struct sk_buff *msg, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI); - if (err && err != -EOPNOTSUPP) { + if (err == -EOPNOTSUPP) { + err = 0; + } else if (err) { mutex_unlock(&devlink->lock); goto out; } @@ -4518,7 +4528,9 @@ static int devlink_nl_cmd_info_get_dumpit(struct sk_buff *msg, cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->extack); mutex_unlock(&devlink->lock); - if (err && err != -EOPNOTSUPP) + if (err == -EOPNOTSUPP) + err = 0; + else if (err) break; idx++; } @@ -8567,6 +8579,7 @@ static const struct devlink_trap_group devlink_trap_group_generic[] = { DEVLINK_TRAP_GROUP(PIM), DEVLINK_TRAP_GROUP(UC_LB), DEVLINK_TRAP_GROUP(LOCAL_DELIVERY), + DEVLINK_TRAP_GROUP(EXTERNAL_DELIVERY), DEVLINK_TRAP_GROUP(IPV6), DEVLINK_TRAP_GROUP(PTP_EVENT), DEVLINK_TRAP_GROUP(PTP_GENERAL), diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c index b739cfab796e..2076219b8ba5 100644 --- a/net/core/flow_offload.c +++ b/net/core/flow_offload.c @@ -4,6 +4,7 @@ #include <net/flow_offload.h> #include <linux/rtnetlink.h> #include <linux/mutex.h> +#include <linux/rhashtable.h> struct flow_rule *flow_rule_alloc(unsigned int num_actions) { diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index e353b822bb15..7bd6440c63bf 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -1108,7 +1108,7 @@ static ssize_t tx_timeout_show(struct netdev_queue *queue, char *buf) trans_timeout = queue->trans_timeout; spin_unlock_irq(&queue->_xmit_lock); - return sprintf(buf, "%lu", trans_timeout); + return sprintf(buf, fmt_ulong, trans_timeout); } static unsigned int 
get_netdev_queue_index(struct netdev_queue *queue) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 9aedc15736ad..85a4b0101f76 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -3343,7 +3343,8 @@ replay: */ if (err < 0) { /* If device is not registered at all, free it now */ - if (dev->reg_state == NETREG_UNINITIALIZED) + if (dev->reg_state == NETREG_UNINITIALIZED || + dev->reg_state == NETREG_UNREGISTERED) free_netdev(dev); goto out; } diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c index adcb3aea576d..bbdd3c7b6cb5 100644 --- a/net/core/sock_reuseport.c +++ b/net/core/sock_reuseport.c @@ -101,6 +101,7 @@ static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse) more_reuse->prog = reuse->prog; more_reuse->reuseport_id = reuse->reuseport_id; more_reuse->bind_inany = reuse->bind_inany; + more_reuse->has_conns = reuse->has_conns; memcpy(more_reuse->socks, reuse->socks, reuse->num_socks * sizeof(struct sock *)); diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c index ed13760463de..1ea17752fffc 100644 --- a/net/hsr/hsr_forward.c +++ b/net/hsr/hsr_forward.c @@ -120,13 +120,18 @@ static struct sk_buff *frame_get_stripped_skb(struct hsr_frame_info *frame, return skb_clone(frame->skb_std, GFP_ATOMIC); } -static void hsr_fill_tag(struct sk_buff *skb, struct hsr_frame_info *frame, - struct hsr_port *port, u8 proto_version) +static struct sk_buff *hsr_fill_tag(struct sk_buff *skb, + struct hsr_frame_info *frame, + struct hsr_port *port, u8 proto_version) { struct hsr_ethhdr *hsr_ethhdr; int lane_id; int lsdu_size; + /* pad to minimum packet size which is 60 + 6 (HSR tag) */ + if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN)) + return NULL; + if (port->type == HSR_PT_SLAVE_A) lane_id = 0; else @@ -144,6 +149,8 @@ static void hsr_fill_tag(struct sk_buff *skb, struct hsr_frame_info *frame, hsr_ethhdr->hsr_tag.encap_proto = hsr_ethhdr->ethhdr.h_proto; hsr_ethhdr->ethhdr.h_proto = htons(proto_version ? 
ETH_P_HSR : ETH_P_PRP); + + return skb; } static struct sk_buff *create_tagged_skb(struct sk_buff *skb_o, @@ -172,9 +179,10 @@ static struct sk_buff *create_tagged_skb(struct sk_buff *skb_o, memmove(dst, src, movelen); skb_reset_mac_header(skb); - hsr_fill_tag(skb, frame, port, port->hsr->prot_version); - - return skb; + /* skb_put_padto free skb on error and hsr_fill_tag returns NULL in + * that case + */ + return hsr_fill_tag(skb, frame, port, port->hsr->prot_version); } /* If the original frame was an HSR tagged frame, just clone it to be sent diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c index 03b891904314..530de24b1fb5 100644 --- a/net/hsr/hsr_framereg.c +++ b/net/hsr/hsr_framereg.c @@ -325,7 +325,8 @@ void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb, if (port->type != node_dst->addr_B_port) return; - ether_addr_copy(eth_hdr(skb)->h_dest, node_dst->macaddress_B); + if (is_valid_ether_addr(node_dst->macaddress_B)) + ether_addr_copy(eth_hdr(skb)->h_dest, node_dst->macaddress_B); } void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port, diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 248f1c1959a6..3c65f71d0e82 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -1864,7 +1864,7 @@ struct fib_table *fib_trie_unmerge(struct fib_table *oldtb) while ((l = leaf_walk_rcu(&tp, key)) != NULL) { struct key_vector *local_l = NULL, *local_tp; - hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) { + hlist_for_each_entry(fa, &l->leaf, fa_list) { struct fib_alias *new_fa; if (local_tb->tb_id != fa->tb_id) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 9615e72656d1..518f04355fbf 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -3488,10 +3488,8 @@ static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) } } -/* This routine deals with acks during a TLP episode. - * We mark the end of a TLP episode on receiving TLP dupack or when - * ack is after tlp_high_seq. - * Ref: loss detection algorithm in draft-dukkipati-tcpm-tcp-loss-probe. +/* This routine deals with acks during a TLP episode and ends an episode by + * resetting tlp_high_seq. Ref: TLP algorithm in draft-ietf-tcpm-rack */ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag) { @@ -3500,7 +3498,10 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag) if (before(ack, tp->tlp_high_seq)) return; - if (flag & FLAG_DSACKING_ACK) { + if (!tp->tlp_retrans) { + /* TLP of new data has been acknowledged */ + tp->tlp_high_seq = 0; + } else if (flag & FLAG_DSACKING_ACK) { /* This DSACK means original and TLP probe arrived; no loss */ tp->tlp_high_seq = 0; } else if (after(ack, tp->tlp_high_seq)) { diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 5f5b2f0b0e60..0bc05d68cd74 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2624,6 +2624,11 @@ void tcp_send_loss_probe(struct sock *sk) int pcount; int mss = tcp_current_mss(sk); + /* At most one outstanding TLP */ + if (tp->tlp_high_seq) + goto rearm_timer; + + tp->tlp_retrans = 0; skb = tcp_send_head(sk); if (skb && tcp_snd_wnd_test(tp, skb, mss)) { pcount = tp->packets_out; @@ -2641,10 +2646,6 @@ void tcp_send_loss_probe(struct sock *sk) return; } - /* At most one outstanding TLP retransmission. 
*/ - if (tp->tlp_high_seq) - goto rearm_timer; - if (skb_still_in_host_queue(sk, skb)) goto rearm_timer; @@ -2666,10 +2667,12 @@ void tcp_send_loss_probe(struct sock *sk) if (__tcp_retransmit_skb(sk, skb, 1)) goto rearm_timer; + tp->tlp_retrans = 1; + +probe_sent: /* Record snd_nxt for loss detection. */ tp->tlp_high_seq = tp->snd_nxt; -probe_sent: NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES); /* Reset s.t. tcp_rearm_rto will restart timer from now */ inet_csk(sk)->icsk_pending = 0; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 1b7ebbcae497..4077d589b72e 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -416,7 +416,7 @@ static struct sock *udp4_lib_lookup2(struct net *net, struct udp_hslot *hslot2, struct sk_buff *skb) { - struct sock *sk, *result; + struct sock *sk, *result, *reuseport_result; int score, badness; u32 hash = 0; @@ -426,17 +426,20 @@ static struct sock *udp4_lib_lookup2(struct net *net, score = compute_score(sk, net, saddr, sport, daddr, hnum, dif, sdif); if (score > badness) { + reuseport_result = NULL; + if (sk->sk_reuseport && sk->sk_state != TCP_ESTABLISHED) { hash = udp_ehashfn(net, daddr, hnum, saddr, sport); - result = reuseport_select_sock(sk, hash, skb, - sizeof(struct udphdr)); - if (result && !reuseport_has_conns(sk, false)) - return result; + reuseport_result = reuseport_select_sock(sk, hash, skb, + sizeof(struct udphdr)); + if (reuseport_result && !reuseport_has_conns(sk, false)) + return reuseport_result; } + + result = reuseport_result ? : sk; badness = score; - result = sk; } } return result; @@ -2051,7 +2054,7 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb) /* * UDP-Lite specific tests, ignored on UDP sockets */ - if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { + if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { /* * MIB statistics other than incrementing the error count are diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c index 893261230ffc..dacdea7fcb62 100644 --- a/net/ipv6/anycast.c +++ b/net/ipv6/anycast.c @@ -183,7 +183,7 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) return 0; } -void ipv6_sock_ac_close(struct sock *sk) +void __ipv6_sock_ac_close(struct sock *sk) { struct ipv6_pinfo *np = inet6_sk(sk); struct net_device *dev = NULL; @@ -191,10 +191,7 @@ void ipv6_sock_ac_close(struct sock *sk) struct net *net = sock_net(sk); int prev_index; - if (!np->ipv6_ac_list) - return; - - rtnl_lock(); + ASSERT_RTNL(); pac = np->ipv6_ac_list; np->ipv6_ac_list = NULL; @@ -211,6 +208,16 @@ void ipv6_sock_ac_close(struct sock *sk) sock_kfree_s(sk, pac, sizeof(*pac)); pac = next; } +} + +void ipv6_sock_ac_close(struct sock *sk) +{ + struct ipv6_pinfo *np = inet6_sk(sk); + + if (!np->ipv6_ac_list) + return; + rtnl_lock(); + __ipv6_sock_ac_close(sk); rtnl_unlock(); } diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index c43592771126..52c2f063529f 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -805,10 +805,17 @@ int esp6_input_done2(struct sk_buff *skb, int err) if (x->encap) { const struct ipv6hdr *ip6h = ipv6_hdr(skb); + int offset = skb_network_offset(skb) + sizeof(*ip6h); struct xfrm_encap_tmpl *encap = x->encap; - struct udphdr *uh = (void *)(skb_network_header(skb) + hdr_len); - struct tcphdr *th = (void *)(skb_network_header(skb) + hdr_len); - __be16 source; + u8 nexthdr = ip6h->nexthdr; + __be16 frag_off, source; + struct udphdr *uh; + struct tcphdr *th; + + offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off); + uh = (void 
*)(skb->data + offset); + th = (void *)(skb->data + offset); + hdr_len += offset; switch (x->encap->encap_type) { case TCP_ENCAP_ESPINTCP: diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 6532bde82b40..3a57fb9ce049 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -1562,17 +1562,18 @@ static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head) static int __net_init ip6gre_init_net(struct net *net) { struct ip6gre_net *ign = net_generic(net, ip6gre_net_id); + struct net_device *ndev; int err; if (!net_has_fallback_tunnels(net)) return 0; - ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0", - NET_NAME_UNKNOWN, - ip6gre_tunnel_setup); - if (!ign->fb_tunnel_dev) { + ndev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0", + NET_NAME_UNKNOWN, ip6gre_tunnel_setup); + if (!ndev) { err = -ENOMEM; goto err_alloc_dev; } + ign->fb_tunnel_dev = ndev; dev_net_set(ign->fb_tunnel_dev, net); /* FB netdevice is special: we have one, and only one per netns. * Allowing to move it to another netns is clearly unsafe. @@ -1592,7 +1593,7 @@ static int __net_init ip6gre_init_net(struct net *net) return 0; err_reg_dev: - free_netdev(ign->fb_tunnel_dev); + free_netdev(ndev); err_alloc_dev: return err; } diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 20576e87a5f7..76f9e41859a2 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -240,6 +240,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, fl6_free_socklist(sk); __ipv6_sock_mc_close(sk); + __ipv6_sock_ac_close(sk); /* * Sock is moving from IPv6 to IPv4 (sk_prot), so diff --git a/net/ipv6/route.c b/net/ipv6/route.c index f3279810d765..4c36bd0c7930 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -3685,14 +3685,14 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg, rt->fib6_src.plen = cfg->fc_src_len; #endif if (nh) { - if (!nexthop_get(nh)) { - NL_SET_ERR_MSG(extack, "Nexthop has been deleted"); - goto out; - } if (rt->fib6_src.plen) { NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing"); goto out; } + if (!nexthop_get(nh)) { + NL_SET_ERR_MSG(extack, "Nexthop has been deleted"); + goto out; + } rt->nh = nh; fib6_nh = nexthop_fib6_nh(rt->nh); } else { diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 7d4151747340..a8d74f44056a 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -148,7 +148,7 @@ static struct sock *udp6_lib_lookup2(struct net *net, int dif, int sdif, struct udp_hslot *hslot2, struct sk_buff *skb) { - struct sock *sk, *result; + struct sock *sk, *result, *reuseport_result; int score, badness; u32 hash = 0; @@ -158,17 +158,20 @@ static struct sock *udp6_lib_lookup2(struct net *net, score = compute_score(sk, net, saddr, sport, daddr, hnum, dif, sdif); if (score > badness) { + reuseport_result = NULL; + if (sk->sk_reuseport && sk->sk_state != TCP_ESTABLISHED) { hash = udp6_ehashfn(net, daddr, hnum, saddr, sport); - result = reuseport_select_sock(sk, hash, skb, - sizeof(struct udphdr)); - if (result && !reuseport_has_conns(sk, false)) - return result; + reuseport_result = reuseport_select_sock(sk, hash, skb, + sizeof(struct udphdr)); + if (reuseport_result && !reuseport_has_conns(sk, false)) + return reuseport_result; } - result = sk; + + result = reuseport_result ? : sk; badness = score; } } @@ -643,7 +646,7 @@ static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb) /* * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c). 
*/ - if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { + if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { if (up->pcrlen == 0) { /* full coverage was set */ net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n", diff --git a/net/key/af_key.c b/net/key/af_key.c index b67ed3a8486c..a915bc86620a 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -1849,6 +1849,13 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms if (ext_hdrs[SADB_X_EXT_FILTER - 1]) { struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1]; + if ((xfilter->sadb_x_filter_splen >= + (sizeof(xfrm_address_t) << 3)) || + (xfilter->sadb_x_filter_dplen >= + (sizeof(xfrm_address_t) << 3))) { + mutex_unlock(&pfk->dump_lock); + return -EINVAL; + } filter = kmalloc(sizeof(*filter), GFP_KERNEL); if (filter == NULL) { mutex_unlock(&pfk->dump_lock); @@ -2400,7 +2407,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa return err; } - xp = xfrm_policy_bysel_ctx(net, DUMMY_MARK, 0, XFRM_POLICY_TYPE_MAIN, + xp = xfrm_policy_bysel_ctx(net, &dummy_mark, 0, XFRM_POLICY_TYPE_MAIN, pol->sadb_x_policy_dir - 1, &sel, pol_ctx, 1, &err); security_xfrm_policy_free(pol_ctx); @@ -2651,7 +2658,7 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, const struct sadb_ return -EINVAL; delete = (hdr->sadb_msg_type == SADB_X_SPDDELETE2); - xp = xfrm_policy_byid(net, DUMMY_MARK, 0, XFRM_POLICY_TYPE_MAIN, + xp = xfrm_policy_byid(net, &dummy_mark, 0, XFRM_POLICY_TYPE_MAIN, dir, pol->sadb_x_policy_id, delete, &err); if (xp == NULL) return -ENOENT; diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 9b360544ad6f..1079a07e43e4 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -2166,6 +2166,7 @@ static int ieee80211_leave_mesh(struct wiphy *wiphy, struct net_device *dev) ieee80211_stop_mesh(sdata); mutex_lock(&sdata->local->mtx); ieee80211_vif_release_channel(sdata); + kfree(sdata->u.mesh.ie); mutex_unlock(&sdata->local->mtx); return 0; diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 5f1ca25b6c97..e88beb3ff6db 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -617,6 +617,19 @@ int mesh_add_he_oper_ie(struct ieee80211_sub_if_data *sdata, int mesh_add_he_6ghz_cap_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { + struct ieee80211_supported_band *sband; + const struct ieee80211_sband_iftype_data *iftd; + + sband = ieee80211_get_sband(sdata); + if (!sband) + return -EINVAL; + + iftd = ieee80211_get_sband_iftype_data(sband, + NL80211_IFTYPE_MESH_POINT); + /* The device doesn't support HE in mesh mode or at all */ + if (!iftd) + return 0; + ieee80211_ie_build_he_6ghz_cap(sdata, skb); return 0; } diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index 117519bf33d6..aca608ae313f 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c @@ -521,6 +521,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl, del_timer_sync(&mpath->timer); atomic_dec(&sdata->u.mesh.mpaths); atomic_dec(&tbl->entries); + mesh_path_flush_pending(mpath); kfree_rcu(mpath, rcu); } diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index cd8487bc6fc2..af4cc5fb678e 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -1923,9 +1923,7 @@ void ieee80211_sta_update_pending_airtime(struct ieee80211_local *local, if (sta) { tx_pending = atomic_sub_return(tx_airtime, &sta->airtime[ac].aql_tx_pending); - if 
(WARN_ONCE(tx_pending < 0, - "STA %pM AC %d txq pending airtime underflow: %u, %u", - sta->addr, ac, tx_pending, tx_airtime)) + if (tx_pending < 0) atomic_cmpxchg(&sta->airtime[ac].aql_tx_pending, tx_pending, 0); } diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 1a2941e5244f..3529d1368068 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -4230,11 +4230,12 @@ static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata, test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state)) goto out_free; + memset(info, 0, sizeof(*info)); + if (unlikely(!multicast && skb->sk && skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS)) - ieee80211_store_ack_skb(local, skb, &info->flags, NULL); - - memset(info, 0, sizeof(*info)); + info->ack_frame_id = ieee80211_store_ack_skb(local, skb, + &info->flags, NULL); if (unlikely(sdata->control_port_protocol == ehdr->h_proto)) { if (sdata->control_port_no_encrypt) diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 21c94094a699..dd9f5c7a1ade 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -2878,6 +2878,10 @@ void ieee80211_ie_build_he_6ghz_cap(struct ieee80211_sub_if_data *sdata, if (WARN_ON(!iftd)) return; + /* Check for device HE 6 GHz capability before adding element */ + if (!iftd->he_6ghz_capa.capa) + return; + cap = le16_to_cpu(iftd->he_6ghz_capa.capa); cap &= ~IEEE80211_HE_6GHZ_CAP_SM_PS; diff --git a/net/mptcp/crypto.c b/net/mptcp/crypto.c index 3d980713a9e2..82bd2b54d741 100644 --- a/net/mptcp/crypto.c +++ b/net/mptcp/crypto.c @@ -32,11 +32,8 @@ void mptcp_crypto_key_sha(u64 key, u32 *token, u64 *idsn) { __be32 mptcp_hashed_key[SHA256_DIGEST_WORDS]; __be64 input = cpu_to_be64(key); - struct sha256_state state; - sha256_init(&state); - sha256_update(&state, (__force u8 *)&input, sizeof(input)); - sha256_final(&state, (u8 *)mptcp_hashed_key); + sha256((__force u8 *)&input, sizeof(input), (u8 *)mptcp_hashed_key); if (token) *token = be32_to_cpu(mptcp_hashed_key[0]); @@ -47,7 +44,6 @@ void mptcp_crypto_key_sha(u64 key, u32 *token, u64 *idsn) void mptcp_crypto_hmac_sha(u64 key1, u64 key2, u8 *msg, int len, void *hmac) { u8 input[SHA256_BLOCK_SIZE + SHA256_DIGEST_SIZE]; - struct sha256_state state; u8 key1be[8]; u8 key2be[8]; int i; @@ -67,13 +63,10 @@ void mptcp_crypto_hmac_sha(u64 key1, u64 key2, u8 *msg, int len, void *hmac) memcpy(&input[SHA256_BLOCK_SIZE], msg, len); - sha256_init(&state); - sha256_update(&state, input, SHA256_BLOCK_SIZE + len); - /* emit sha256(K1 || msg) on the second input block, so we can * reuse 'input' for the last hashing */ - sha256_final(&state, &input[SHA256_BLOCK_SIZE]); + sha256(input, SHA256_BLOCK_SIZE + len, &input[SHA256_BLOCK_SIZE]); /* Prepare second part of hmac */ memset(input, 0x5C, SHA256_BLOCK_SIZE); @@ -82,9 +75,7 @@ void mptcp_crypto_hmac_sha(u64 key1, u64 key2, u8 *msg, int len, void *hmac) for (i = 0; i < 8; i++) input[i + 8] ^= key2be[i]; - sha256_init(&state); - sha256_update(&state, input, SHA256_BLOCK_SIZE + SHA256_DIGEST_SIZE); - sha256_final(&state, (u8 *)hmac); + sha256(input, SHA256_BLOCK_SIZE + SHA256_DIGEST_SIZE, hmac); } #ifdef CONFIG_MPTCP_HMAC_TEST diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 3980fbb6f31e..c0abe738e7d3 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -1833,7 +1833,7 @@ do_connect: /* on successful connect, the msk state will be moved to established by * subflow_finish_connect() */ - if (!err || err == EINPROGRESS) + if (!err || err == -EINPROGRESS) mptcp_copy_inaddrs(sock->sk, ssock->sk); else inet_sk_state_store(sock->sk, 
inet_sk_state_load(ssock->sk)); diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index 605e0f68f8bd..2b8abbfe018c 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c @@ -1717,6 +1717,8 @@ static int sync_thread_backup(void *data) { struct ip_vs_sync_thread_data *tinfo = data; struct netns_ipvs *ipvs = tinfo->ipvs; + struct sock *sk = tinfo->sock->sk; + struct udp_sock *up = udp_sk(sk); int len; pr_info("sync thread started: state = BACKUP, mcast_ifn = %s, " @@ -1724,12 +1726,14 @@ static int sync_thread_backup(void *data) ipvs->bcfg.mcast_ifn, ipvs->bcfg.syncid, tinfo->id); while (!kthread_should_stop()) { - wait_event_interruptible(*sk_sleep(tinfo->sock->sk), - !skb_queue_empty(&tinfo->sock->sk->sk_receive_queue) - || kthread_should_stop()); + wait_event_interruptible(*sk_sleep(sk), + !skb_queue_empty_lockless(&sk->sk_receive_queue) || + !skb_queue_empty_lockless(&up->reader_queue) || + kthread_should_stop()); /* do we have data now? */ - while (!skb_queue_empty(&(tinfo->sock->sk->sk_receive_queue))) { + while (!skb_queue_empty_lockless(&sk->sk_receive_queue) || + !skb_queue_empty_lockless(&up->reader_queue)) { len = ip_vs_receive(tinfo->sock, tinfo->buf, ipvs->bcfg.sync_maxlen); if (len <= 0) { diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 7647ecfa0d40..88325b264737 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -188,24 +188,6 @@ static void nft_netdev_unregister_hooks(struct net *net, nf_unregister_net_hook(net, &hook->ops); } -static int nft_register_basechain_hooks(struct net *net, int family, - struct nft_base_chain *basechain) -{ - if (family == NFPROTO_NETDEV) - return nft_netdev_register_hooks(net, &basechain->hook_list); - - return nf_register_net_hook(net, &basechain->ops); -} - -static void nft_unregister_basechain_hooks(struct net *net, int family, - struct nft_base_chain *basechain) -{ - if (family == NFPROTO_NETDEV) - nft_netdev_unregister_hooks(net, &basechain->hook_list); - else - nf_unregister_net_hook(net, &basechain->ops); -} - static int nf_tables_register_hook(struct net *net, const struct nft_table *table, struct nft_chain *chain) @@ -223,7 +205,10 @@ static int nf_tables_register_hook(struct net *net, if (basechain->type->ops_register) return basechain->type->ops_register(net, ops); - return nft_register_basechain_hooks(net, table->family, basechain); + if (table->family == NFPROTO_NETDEV) + return nft_netdev_register_hooks(net, &basechain->hook_list); + + return nf_register_net_hook(net, &basechain->ops); } static void nf_tables_unregister_hook(struct net *net, @@ -242,7 +227,10 @@ static void nf_tables_unregister_hook(struct net *net, if (basechain->type->ops_unregister) return basechain->type->ops_unregister(net, ops); - nft_unregister_basechain_hooks(net, table->family, basechain); + if (table->family == NFPROTO_NETDEV) + nft_netdev_unregister_hooks(net, &basechain->hook_list); + else + nf_unregister_net_hook(net, &basechain->ops); } static int nft_trans_table_add(struct nft_ctx *ctx, int msg_type) @@ -832,8 +820,7 @@ static void nft_table_disable(struct net *net, struct nft_table *table, u32 cnt) if (cnt && i++ == cnt) break; - nft_unregister_basechain_hooks(net, table->family, - nft_base_chain(chain)); + nf_tables_unregister_hook(net, table, chain); } } @@ -848,8 +835,7 @@ static int nf_tables_table_enable(struct net *net, struct nft_table *table) if (!nft_is_base_chain(chain)) continue; - err = nft_register_basechain_hooks(net, 
table->family, - nft_base_chain(chain)); + err = nf_tables_register_hook(net, table, chain); if (err < 0) goto err_register_hooks; @@ -894,11 +880,12 @@ static int nf_tables_updtable(struct nft_ctx *ctx) nft_trans_table_enable(trans) = false; } else if (!(flags & NFT_TABLE_F_DORMANT) && ctx->table->flags & NFT_TABLE_F_DORMANT) { + ctx->table->flags &= ~NFT_TABLE_F_DORMANT; ret = nf_tables_table_enable(ctx->net, ctx->table); - if (ret >= 0) { - ctx->table->flags &= ~NFT_TABLE_F_DORMANT; + if (ret >= 0) nft_trans_table_enable(trans) = true; - } + else + ctx->table->flags |= NFT_TABLE_F_DORMANT; } if (ret < 0) goto err; diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c index 7cd524884304..78ea8c94dcba 100644 --- a/net/nfc/nci/core.c +++ b/net/nfc/nci/core.c @@ -1228,10 +1228,13 @@ int nci_register_device(struct nci_dev *ndev) rc = nfc_register_device(ndev->nfc_dev); if (rc) - goto destroy_rx_wq_exit; + goto destroy_tx_wq_exit; goto exit; +destroy_tx_wq_exit: + destroy_workqueue(ndev->tx_wq); + destroy_rx_wq_exit: destroy_workqueue(ndev->rx_wq); diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c index 24a8c3c6da0d..300a104b9a0f 100644 --- a/net/qrtr/qrtr.c +++ b/net/qrtr/qrtr.c @@ -1180,6 +1180,7 @@ static int qrtr_release(struct socket *sock) sk->sk_state_change(sk); sock_set_flag(sk, SOCK_DEAD); + sock_orphan(sk); sock->sk = NULL; if (!sock_flag(sk, SOCK_ZAPPED)) diff --git a/net/rds/recv.c b/net/rds/recv.c index c8404971d5ab..aba4afe4dfed 100644 --- a/net/rds/recv.c +++ b/net/rds/recv.c @@ -450,12 +450,13 @@ static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc, int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr) { struct rds_notifier *notifier; - struct rds_rdma_notify cmsg = { 0 }; /* fill holes with zero */ + struct rds_rdma_notify cmsg; unsigned int count = 0, max_messages = ~0U; unsigned long flags; LIST_HEAD(copy); int err = 0; + memset(&cmsg, 0, sizeof(cmsg)); /* fill holes with zero */ /* put_cmsg copies to user space and thus may sleep. We can't do this * with rs_lock held, so first grab as many notifications as we can stuff diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index f07970207b54..38a46167523f 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c @@ -288,7 +288,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, */ ret = rxrpc_connect_call(rx, call, cp, srx, gfp); if (ret < 0) - goto error; + goto error_attached_to_socket; trace_rxrpc_call(call->debug_id, rxrpc_call_connected, atomic_read(&call->usage), here, NULL); @@ -308,18 +308,29 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, error_dup_user_ID: write_unlock(&rx->call_lock); release_sock(&rx->sk); - ret = -EEXIST; - -error: __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, - RX_CALL_DEAD, ret); + RX_CALL_DEAD, -EEXIST); trace_rxrpc_call(call->debug_id, rxrpc_call_error, - atomic_read(&call->usage), here, ERR_PTR(ret)); + atomic_read(&call->usage), here, ERR_PTR(-EEXIST)); rxrpc_release_call(rx, call); mutex_unlock(&call->user_mutex); rxrpc_put_call(call, rxrpc_call_put); - _leave(" = %d", ret); - return ERR_PTR(ret); + _leave(" = -EEXIST"); + return ERR_PTR(-EEXIST); + + /* We got an error, but the call is attached to the socket and is in + * need of release. However, we might now race with recvmsg() when + * completing the call queues it. Return 0 from sys_sendmsg() and + * leave the error to recvmsg() to deal with. 
+ */ +error_attached_to_socket: + trace_rxrpc_call(call->debug_id, rxrpc_call_error, + atomic_read(&call->usage), here, ERR_PTR(ret)); + set_bit(RXRPC_CALL_DISCONNECTED, &call->flags); + __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, + RX_CALL_DEAD, ret); + _leave(" = c=%08x [err]", call->debug_id); + return call; } /* diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c index 19e141eeed17..8cbe0bf20ed5 100644 --- a/net/rxrpc/conn_object.c +++ b/net/rxrpc/conn_object.c @@ -212,9 +212,11 @@ void rxrpc_disconnect_call(struct rxrpc_call *call) call->peer->cong_cwnd = call->cong_cwnd; - spin_lock_bh(&conn->params.peer->lock); - hlist_del_rcu(&call->error_link); - spin_unlock_bh(&conn->params.peer->lock); + if (!hlist_unhashed(&call->error_link)) { + spin_lock_bh(&call->peer->lock); + hlist_del_rcu(&call->error_link); + spin_unlock_bh(&call->peer->lock); + } if (rxrpc_is_client_call(call)) return rxrpc_disconnect_client_call(call); diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index 2989742a4aa1..efecc5a8f67d 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c @@ -543,7 +543,7 @@ try_again: list_empty(&rx->recvmsg_q) && rx->sk.sk_state != RXRPC_SERVER_LISTENING) { release_sock(&rx->sk); - return -ENODATA; + return -EAGAIN; } if (list_empty(&rx->recvmsg_q)) { @@ -620,7 +620,7 @@ try_again: goto error_unlock_call; } - if (msg->msg_name) { + if (msg->msg_name && call->peer) { struct sockaddr_rxrpc *srx = msg->msg_name; size_t len = sizeof(call->peer->srx); diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c index 1304b8608f56..f3f6da6e4ad2 100644 --- a/net/rxrpc/sendmsg.c +++ b/net/rxrpc/sendmsg.c @@ -304,7 +304,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, /* this should be in poll */ sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); - if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) + if (sk->sk_shutdown & SEND_SHUTDOWN) return -EPIPE; more = msg->msg_flags & MSG_MORE; @@ -681,6 +681,9 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) if (IS_ERR(call)) return PTR_ERR(call); /* ... and we have the call lock. 
*/ + ret = 0; + if (READ_ONCE(call->state) == RXRPC_CALL_COMPLETE) + goto out_put_unlock; } else { switch (READ_ONCE(call->state)) { case RXRPC_CALL_UNINITIALISED: diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c index 67504aece9ae..6ed1652d1e26 100644 --- a/net/sched/act_ct.c +++ b/net/sched/act_ct.c @@ -673,9 +673,10 @@ static int tcf_ct_ipv6_is_fragment(struct sk_buff *skb, bool *frag) } static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb, - u8 family, u16 zone) + u8 family, u16 zone, bool *defrag) { enum ip_conntrack_info ctinfo; + struct qdisc_skb_cb cb; struct nf_conn *ct; int err = 0; bool frag; @@ -693,6 +694,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb, return err; skb_get(skb); + cb = *qdisc_skb_cb(skb); if (family == NFPROTO_IPV4) { enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone; @@ -703,6 +705,9 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb, local_bh_enable(); if (err && err != -EINPROGRESS) goto out_free; + + if (!err) + *defrag = true; } else { /* NFPROTO_IPV6 */ #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone; @@ -711,12 +716,16 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb, err = nf_ct_frag6_gather(net, skb, user); if (err && err != -EINPROGRESS) goto out_free; + + if (!err) + *defrag = true; #else err = -EOPNOTSUPP; goto out_free; #endif } + *qdisc_skb_cb(skb) = cb; skb_clear_hash(skb); skb->ignore_df = 1; return err; @@ -914,6 +923,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a, int nh_ofs, err, retval; struct tcf_ct_params *p; bool skip_add = false; + bool defrag = false; struct nf_conn *ct; u8 family; @@ -946,7 +956,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a, */ nh_ofs = skb_network_offset(skb); skb_pull_rcsum(skb, nh_ofs); - err = tcf_ct_handle_fragments(net, skb, family, p->zone); + err = tcf_ct_handle_fragments(net, skb, family, p->zone, &defrag); if (err == -EINPROGRESS) { retval = TC_ACT_STOLEN; goto out; @@ -1014,6 +1024,8 @@ out_push: out: tcf_action_update_bstats(&c->common, skb); + if (defrag) + qdisc_skb_cb(skb)->pkt_len = skb->len; return retval; drop: @@ -1531,10 +1543,10 @@ static int __init ct_init_module(void) return 0; -err_tbl_init: - destroy_workqueue(act_ct_wq); err_register: tcf_ct_flow_tables_uninit(); +err_tbl_init: + destroy_workqueue(act_ct_wq); return err; } diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index e62beec0d844..4619cb3cb0a8 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -20,7 +20,6 @@ #include <linux/kmod.h> #include <linux/slab.h> #include <linux/idr.h> -#include <linux/rhashtable.h> #include <linux/jhash.h> #include <linux/rculist.h> #include <net/net_namespace.h> diff --git a/net/sctp/stream.c b/net/sctp/stream.c index 67f7e71f9129..bda2536dd740 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c @@ -22,17 +22,11 @@ #include <net/sctp/sm.h> #include <net/sctp/stream_sched.h> -/* Migrates chunks from stream queues to new stream queues if needed, - * but not across associations. Also, removes those chunks to streams - * higher than the new max. 
- */ -static void sctp_stream_outq_migrate(struct sctp_stream *stream, - struct sctp_stream *new, __u16 outcnt) +static void sctp_stream_shrink_out(struct sctp_stream *stream, __u16 outcnt) { struct sctp_association *asoc; struct sctp_chunk *ch, *temp; struct sctp_outq *outq; - int i; asoc = container_of(stream, struct sctp_association, stream); outq = &asoc->outqueue; @@ -56,6 +50,19 @@ static void sctp_stream_outq_migrate(struct sctp_stream *stream, sctp_chunk_free(ch); } +} + +/* Migrates chunks from stream queues to new stream queues if needed, + * but not across associations. Also, removes those chunks to streams + * higher than the new max. + */ +static void sctp_stream_outq_migrate(struct sctp_stream *stream, + struct sctp_stream *new, __u16 outcnt) +{ + int i; + + if (stream->outcnt > outcnt) + sctp_stream_shrink_out(stream, outcnt); if (new) { /* Here we actually move the old ext stuff into the new @@ -1037,11 +1044,13 @@ struct sctp_chunk *sctp_process_strreset_resp( nums = ntohs(addstrm->number_of_streams); number = stream->outcnt - nums; - if (result == SCTP_STRRESET_PERFORMED) + if (result == SCTP_STRRESET_PERFORMED) { for (i = number; i < stream->outcnt; i++) SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN; - else + } else { + sctp_stream_shrink_out(stream, number); stream->outcnt = number; + } *evp = sctp_ulpevent_make_stream_change_event(asoc, flags, 0, nums, GFP_ATOMIC); diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 903321543838..1163d51196da 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -126,8 +126,10 @@ EXPORT_SYMBOL_GPL(smc_proto6); static void smc_restore_fallback_changes(struct smc_sock *smc) { - smc->clcsock->file->private_data = smc->sk.sk_socket; - smc->clcsock->file = NULL; + if (smc->clcsock->file) { /* non-accepted sockets have no file yet */ + smc->clcsock->file->private_data = smc->sk.sk_socket; + smc->clcsock->file = NULL; + } } static int __smc_release(struct smc_sock *smc) @@ -352,7 +354,7 @@ static int smcr_lgr_reg_rmbs(struct smc_link *link, */ mutex_lock(&lgr->llc_conf_mutex); for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { - if (lgr->lnk[i].state != SMC_LNK_ACTIVE) + if (!smc_link_active(&lgr->lnk[i])) continue; rc = smcr_link_reg_rmb(&lgr->lnk[i], rmb_desc); if (rc) @@ -632,7 +634,9 @@ static int smc_connect_rdma(struct smc_sock *smc, for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { struct smc_link *l = &smc->conn.lgr->lnk[i]; - if (l->peer_qpn == ntoh24(aclc->qpn)) { + if (l->peer_qpn == ntoh24(aclc->qpn) && + !memcmp(l->peer_gid, &aclc->lcl.gid, SMC_GID_SIZE) && + !memcmp(l->peer_mac, &aclc->lcl.mac, sizeof(l->peer_mac))) { link = l; break; } diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c index a47e8855e045..ce468ff62a19 100644 --- a/net/smc/smc_cdc.c +++ b/net/smc/smc_cdc.c @@ -66,9 +66,13 @@ int smc_cdc_get_free_slot(struct smc_connection *conn, rc = smc_wr_tx_get_free_slot(link, smc_cdc_tx_handler, wr_buf, wr_rdma_buf, (struct smc_wr_tx_pend_priv **)pend); - if (conn->killed) + if (conn->killed) { /* abnormal termination */ + if (!rc) + smc_wr_tx_put_slot(link, + (struct smc_wr_tx_pend_priv *)pend); rc = -EPIPE; + } return rc; } diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index f69d205b3e11..f82a2e599917 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -45,18 +45,10 @@ static struct smc_lgr_list smc_lgr_list = { /* established link groups */ static atomic_t lgr_cnt = ATOMIC_INIT(0); /* number of existing link groups */ static DECLARE_WAIT_QUEUE_HEAD(lgrs_deleted); -struct smc_ib_up_work { - struct work_struct 
work; - struct smc_link_group *lgr; - struct smc_ib_device *smcibdev; - u8 ibport; -}; - static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb, struct smc_buf_desc *buf_desc); static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft); -static void smc_link_up_work(struct work_struct *work); static void smc_link_down_work(struct work_struct *work); /* return head of link group list and its lock for a given link group */ @@ -326,7 +318,6 @@ int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk, get_device(&ini->ib_dev->ibdev->dev); atomic_inc(&ini->ib_dev->lnk_cnt); - lnk->state = SMC_LNK_ACTIVATING; lnk->link_id = smcr_next_link_id(lgr); lnk->lgr = lgr; lnk->link_idx = link_idx; @@ -362,6 +353,7 @@ int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk, rc = smc_wr_create_link(lnk); if (rc) goto destroy_qp; + lnk->state = SMC_LNK_ACTIVATING; return 0; destroy_qp: @@ -452,7 +444,7 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini) } smc->conn.lgr = lgr; spin_lock_bh(lgr_lock); - list_add(&lgr->list, lgr_list); + list_add_tail(&lgr->list, lgr_list); spin_unlock_bh(lgr_lock); return 0; @@ -550,8 +542,7 @@ struct smc_link *smc_switch_conns(struct smc_link_group *lgr, smc_wr_wakeup_tx_wait(from_lnk); for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { - if (lgr->lnk[i].state != SMC_LNK_ACTIVE || - i == from_lnk->link_idx) + if (!smc_link_active(&lgr->lnk[i]) || i == from_lnk->link_idx) continue; if (is_dev_err && from_lnk->smcibdev == lgr->lnk[i].smcibdev && from_lnk->ibport == lgr->lnk[i].ibport) { @@ -1106,67 +1097,23 @@ static void smc_conn_abort_work(struct work_struct *work) sock_put(&smc->sk); /* sock_hold done by schedulers of abort_work */ } -/* link is up - establish alternate link if applicable */ -static void smcr_link_up(struct smc_link_group *lgr, - struct smc_ib_device *smcibdev, u8 ibport) -{ - struct smc_link *link = NULL; - - if (list_empty(&lgr->list) || - lgr->type == SMC_LGR_SYMMETRIC || - lgr->type == SMC_LGR_ASYMMETRIC_PEER) - return; - - if (lgr->role == SMC_SERV) { - /* trigger local add link processing */ - link = smc_llc_usable_link(lgr); - if (!link) - return; - smc_llc_srv_add_link_local(link); - } else { - /* invite server to start add link processing */ - u8 gid[SMC_GID_SIZE]; - - if (smc_ib_determine_gid(smcibdev, ibport, lgr->vlan_id, gid, - NULL)) - return; - if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) { - /* some other llc task is ongoing */ - wait_event_timeout(lgr->llc_flow_waiter, - (list_empty(&lgr->list) || - lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE), - SMC_LLC_WAIT_TIME); - } - /* lgr or device no longer active? 
*/ - if (!list_empty(&lgr->list) && - smc_ib_port_active(smcibdev, ibport)) - link = smc_llc_usable_link(lgr); - if (link) - smc_llc_send_add_link(link, smcibdev->mac[ibport - 1], - gid, NULL, SMC_LLC_REQ); - wake_up(&lgr->llc_flow_waiter); /* wake up next waiter */ - } -} - void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport) { - struct smc_ib_up_work *ib_work; struct smc_link_group *lgr, *n; list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) { + struct smc_link *link; + if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id, SMC_MAX_PNETID_LEN) || lgr->type == SMC_LGR_SYMMETRIC || lgr->type == SMC_LGR_ASYMMETRIC_PEER) continue; - ib_work = kmalloc(sizeof(*ib_work), GFP_KERNEL); - if (!ib_work) - continue; - INIT_WORK(&ib_work->work, smc_link_up_work); - ib_work->lgr = lgr; - ib_work->smcibdev = smcibdev; - ib_work->ibport = ibport; - schedule_work(&ib_work->work); + + /* trigger local add link processing */ + link = smc_llc_usable_link(lgr); + if (link) + smc_llc_add_link_local(link); } } @@ -1204,10 +1151,12 @@ static void smcr_link_down(struct smc_link *lnk) SMC_LLC_WAIT_TIME); mutex_lock(&lgr->llc_conf_mutex); } - if (!list_empty(&lgr->list)) + if (!list_empty(&lgr->list)) { smc_llc_send_delete_link(to_lnk, del_link_id, SMC_LLC_REQ, true, SMC_LLC_DEL_LOST_PATH); + smcr_link_clear(lnk, true); + } wake_up(&lgr->llc_flow_waiter); /* wake up next waiter */ } } @@ -1247,20 +1196,6 @@ void smcr_port_err(struct smc_ib_device *smcibdev, u8 ibport) } } -static void smc_link_up_work(struct work_struct *work) -{ - struct smc_ib_up_work *ib_work = container_of(work, - struct smc_ib_up_work, - work); - struct smc_link_group *lgr = ib_work->lgr; - - if (list_empty(&lgr->list)) - goto out; - smcr_link_up(lgr, ib_work->smcibdev, ib_work->ibport); -out: - kfree(ib_work); -} - static void smc_link_down_work(struct work_struct *work) { struct smc_link *link = container_of(work, struct smc_link, @@ -1333,7 +1268,7 @@ static bool smcr_lgr_match(struct smc_link_group *lgr, return false; for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { - if (lgr->lnk[i].state != SMC_LNK_ACTIVE) + if (!smc_link_active(&lgr->lnk[i])) continue; if ((lgr->role == SMC_SERV || lgr->lnk[i].peer_qpn == clcqpn) && !memcmp(lgr->lnk[i].peer_gid, &lcl->gid, SMC_GID_SIZE) && @@ -1376,7 +1311,7 @@ int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini) smcr_lgr_match(lgr, ini->ib_lcl, role, ini->ib_clcqpn)) && !lgr->sync_err && lgr->vlan_id == ini->vlan_id && - (role == SMC_CLNT || + (role == SMC_CLNT || ini->is_smcd || lgr->conns_num < SMC_RMBS_PER_LGR_MAX)) { /* link group found */ ini->cln_first_contact = SMC_REUSE_CONTACT; @@ -1781,14 +1716,14 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb) void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn) { - if (!conn->lgr || conn->lgr->is_smcd || !smc_link_usable(conn->lnk)) + if (!conn->lgr || conn->lgr->is_smcd || !smc_link_active(conn->lnk)) return; smc_ib_sync_sg_for_cpu(conn->lnk, conn->sndbuf_desc, DMA_TO_DEVICE); } void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn) { - if (!conn->lgr || conn->lgr->is_smcd || !smc_link_usable(conn->lnk)) + if (!conn->lgr || conn->lgr->is_smcd || !smc_link_active(conn->lnk)) return; smc_ib_sync_sg_for_device(conn->lnk, conn->sndbuf_desc, DMA_TO_DEVICE); } @@ -1800,7 +1735,7 @@ void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn) if (!conn->lgr || conn->lgr->is_smcd) return; for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { - if (!smc_link_usable(&conn->lgr->lnk[i])) + if 
(!smc_link_active(&conn->lgr->lnk[i])) continue; smc_ib_sync_sg_for_cpu(&conn->lgr->lnk[i], conn->rmb_desc, DMA_FROM_DEVICE); @@ -1814,7 +1749,7 @@ void smc_rmb_sync_sg_for_device(struct smc_connection *conn) if (!conn->lgr || conn->lgr->is_smcd) return; for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { - if (!smc_link_usable(&conn->lgr->lnk[i])) + if (!smc_link_active(&conn->lgr->lnk[i])) continue; smc_ib_sync_sg_for_device(&conn->lgr->lnk[i], conn->rmb_desc, DMA_FROM_DEVICE); @@ -1837,8 +1772,12 @@ int smc_buf_create(struct smc_sock *smc, bool is_smcd) return rc; /* create rmb */ rc = __smc_buf_create(smc, is_smcd, true); - if (rc) + if (rc) { + mutex_lock(&smc->conn.lgr->sndbufs_lock); + list_del(&smc->conn.sndbuf_desc->list); + mutex_unlock(&smc->conn.lgr->sndbufs_lock); smc_buf_free(smc->conn.lgr, false, smc->conn.sndbuf_desc); + } return rc; } diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h index c3ff512fd891..1c4d5439d0ff 100644 --- a/net/smc/smc_core.h +++ b/net/smc/smc_core.h @@ -349,6 +349,11 @@ static inline bool smc_link_usable(struct smc_link *lnk) return true; } +static inline bool smc_link_active(struct smc_link *lnk) +{ + return lnk->state == SMC_LNK_ACTIVE; +} + struct smc_sock; struct smc_clc_msg_accept_confirm; struct smc_clc_msg_local; diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c index 7637fdebbb78..1c314dbdc7fa 100644 --- a/net/smc/smc_ib.c +++ b/net/smc/smc_ib.c @@ -506,6 +506,10 @@ long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev) int cqe_size_order, smc_order; long rc; + mutex_lock(&smcibdev->mutex); + rc = 0; + if (smcibdev->initialized) + goto out; /* the calculated number of cq entries fits to mlx5 cq allocation */ cqe_size_order = cache_line_size() == 128 ? 7 : 6; smc_order = MAX_ORDER - cqe_size_order - 1; @@ -517,7 +521,7 @@ long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev) rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_send); if (IS_ERR(smcibdev->roce_cq_send)) { smcibdev->roce_cq_send = NULL; - return rc; + goto out; } smcibdev->roce_cq_recv = ib_create_cq(smcibdev->ibdev, smc_wr_rx_cq_handler, NULL, @@ -529,21 +533,26 @@ long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev) } smc_wr_add_dev(smcibdev); smcibdev->initialized = 1; - return rc; + goto out; err: ib_destroy_cq(smcibdev->roce_cq_send); +out: + mutex_unlock(&smcibdev->mutex); return rc; } static void smc_ib_cleanup_per_ibdev(struct smc_ib_device *smcibdev) { + mutex_lock(&smcibdev->mutex); if (!smcibdev->initialized) - return; + goto out; smcibdev->initialized = 0; ib_destroy_cq(smcibdev->roce_cq_recv); ib_destroy_cq(smcibdev->roce_cq_send); smc_wr_remove_dev(smcibdev); +out: + mutex_unlock(&smcibdev->mutex); } static struct ib_client smc_ib_client; @@ -566,6 +575,7 @@ static int smc_ib_add_dev(struct ib_device *ibdev) INIT_WORK(&smcibdev->port_event_work, smc_ib_port_event_work); atomic_set(&smcibdev->lnk_cnt, 0); init_waitqueue_head(&smcibdev->lnks_deleted); + mutex_init(&smcibdev->mutex); mutex_lock(&smc_ib_devices.mutex); list_add_tail(&smcibdev->list, &smc_ib_devices.list); mutex_unlock(&smc_ib_devices.mutex); diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h index ae6776e1e726..2ce481187dd0 100644 --- a/net/smc/smc_ib.h +++ b/net/smc/smc_ib.h @@ -52,6 +52,7 @@ struct smc_ib_device { /* ib-device infos for smc */ DECLARE_BITMAP(ports_going_away, SMC_MAX_PORTS); atomic_t lnk_cnt; /* number of links on ibdev */ wait_queue_head_t lnks_deleted; /* wait 4 removal of all links*/ + struct mutex mutex; /* protect dev setup+cleanup */ }; struct smc_buf_desc; diff --git 
a/net/smc/smc_llc.c b/net/smc/smc_llc.c index c1a038689c63..df5b0a6ea848 100644 --- a/net/smc/smc_llc.c +++ b/net/smc/smc_llc.c @@ -428,7 +428,7 @@ static int smc_llc_send_confirm_rkey(struct smc_link *send_link, rtok_ix = 1; for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { link = &send_link->lgr->lnk[i]; - if (link->state == SMC_LNK_ACTIVE && link != send_link) { + if (smc_link_active(link) && link != send_link) { rkeyllc->rtoken[rtok_ix].link_id = link->link_id; rkeyllc->rtoken[rtok_ix].rmb_key = htonl(rmb_desc->mr_rx[link->link_idx]->rkey); @@ -895,6 +895,36 @@ out: return rc; } +/* as an SMC client, invite server to start the add_link processing */ +static void smc_llc_cli_add_link_invite(struct smc_link *link, + struct smc_llc_qentry *qentry) +{ + struct smc_link_group *lgr = smc_get_lgr(link); + struct smc_init_info ini; + + if (lgr->type == SMC_LGR_SYMMETRIC || + lgr->type == SMC_LGR_ASYMMETRIC_PEER) + goto out; + + ini.vlan_id = lgr->vlan_id; + smc_pnet_find_alt_roce(lgr, &ini, link->smcibdev); + if (!ini.ib_dev) + goto out; + + smc_llc_send_add_link(link, ini.ib_dev->mac[ini.ib_port - 1], + ini.ib_gid, NULL, SMC_LLC_REQ); +out: + kfree(qentry); +} + +static bool smc_llc_is_local_add_link(union smc_llc_msg *llc) +{ + if (llc->raw.hdr.common.type == SMC_LLC_ADD_LINK && + !llc->add_link.qp_mtu && !llc->add_link.link_num) + return true; + return false; +} + static void smc_llc_process_cli_add_link(struct smc_link_group *lgr) { struct smc_llc_qentry *qentry; @@ -902,7 +932,10 @@ static void smc_llc_process_cli_add_link(struct smc_link_group *lgr) qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl); mutex_lock(&lgr->llc_conf_mutex); - smc_llc_cli_add_link(qentry->link, qentry); + if (smc_llc_is_local_add_link(&qentry->msg)) + smc_llc_cli_add_link_invite(qentry->link, qentry); + else + smc_llc_cli_add_link(qentry->link, qentry); mutex_unlock(&lgr->llc_conf_mutex); } @@ -911,7 +944,7 @@ static int smc_llc_active_link_count(struct smc_link_group *lgr) int i, link_count = 0; for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { - if (!smc_link_usable(&lgr->lnk[i])) + if (!smc_link_active(&lgr->lnk[i])) continue; link_count++; } @@ -1051,12 +1084,14 @@ static int smc_llc_srv_conf_link(struct smc_link *link, if (rc) return -ENOLINK; /* receive CONFIRM LINK response over the RoCE fabric */ - qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_FIRST_TIME, - SMC_LLC_CONFIRM_LINK); - if (!qentry) { + qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_FIRST_TIME, 0); + if (!qentry || + qentry->msg.raw.hdr.common.type != SMC_LLC_CONFIRM_LINK) { /* send DELETE LINK */ smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ, false, SMC_LLC_DEL_LOST_PATH); + if (qentry) + smc_llc_flow_qentry_del(&lgr->llc_flow_lcl); return -ENOLINK; } smc_llc_save_peer_uid(qentry); @@ -1158,14 +1193,14 @@ static void smc_llc_process_srv_add_link(struct smc_link_group *lgr) mutex_unlock(&lgr->llc_conf_mutex); } -/* enqueue a local add_link req to trigger a new add_link flow, only as SERV */ -void smc_llc_srv_add_link_local(struct smc_link *link) +/* enqueue a local add_link req to trigger a new add_link flow */ +void smc_llc_add_link_local(struct smc_link *link) { struct smc_llc_msg_add_link add_llc = {0}; add_llc.hd.length = sizeof(add_llc); add_llc.hd.common.type = SMC_LLC_ADD_LINK; - /* no dev and port needed, we as server ignore client data anyway */ + /* no dev and port needed */ smc_llc_enqueue(link, (union smc_llc_msg *)&add_llc); } @@ -1345,7 +1380,7 @@ static void smc_llc_process_srv_delete_link(struct smc_link_group *lgr) 
if (lgr->type == SMC_LGR_SINGLE && !list_empty(&lgr->list)) { /* trigger setup of asymm alt link */ - smc_llc_srv_add_link_local(lnk); + smc_llc_add_link_local(lnk); } out: mutex_unlock(&lgr->llc_conf_mutex); @@ -1474,7 +1509,18 @@ static void smc_llc_event_handler(struct smc_llc_qentry *qentry) if (list_empty(&lgr->list)) goto out; /* lgr is terminating */ if (lgr->role == SMC_CLNT) { - if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK) { + if (smc_llc_is_local_add_link(llc)) { + if (lgr->llc_flow_lcl.type == + SMC_LLC_FLOW_ADD_LINK) + break; /* add_link in progress */ + if (smc_llc_flow_start(&lgr->llc_flow_lcl, + qentry)) { + schedule_work(&lgr->llc_add_link_work); + } + return; + } + if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK && + !lgr->llc_flow_lcl.qentry) { /* a flow is waiting for this message */ smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, qentry); @@ -1498,28 +1544,13 @@ static void smc_llc_event_handler(struct smc_llc_qentry *qentry) } break; case SMC_LLC_DELETE_LINK: - if (lgr->role == SMC_CLNT) { - /* server requests to delete this link, send response */ - if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) { - /* DEL LINK REQ during ADD LINK SEQ */ - smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, - qentry); - wake_up(&lgr->llc_msg_waiter); - } else if (smc_llc_flow_start(&lgr->llc_flow_lcl, - qentry)) { - schedule_work(&lgr->llc_del_link_work); - } - } else { - if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK && - !lgr->llc_flow_lcl.qentry) { - /* DEL LINK REQ during ADD LINK SEQ */ - smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, - qentry); - wake_up(&lgr->llc_msg_waiter); - } else if (smc_llc_flow_start(&lgr->llc_flow_lcl, - qentry)) { - schedule_work(&lgr->llc_del_link_work); - } + if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK && + !lgr->llc_flow_lcl.qentry) { + /* DEL LINK REQ during ADD LINK SEQ */ + smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, qentry); + wake_up(&lgr->llc_msg_waiter); + } else if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) { + schedule_work(&lgr->llc_del_link_work); } return; case SMC_LLC_CONFIRM_RKEY: @@ -1585,23 +1616,30 @@ again: static void smc_llc_rx_response(struct smc_link *link, struct smc_llc_qentry *qentry) { + enum smc_llc_flowtype flowtype = link->lgr->llc_flow_lcl.type; + struct smc_llc_flow *flow = &link->lgr->llc_flow_lcl; u8 llc_type = qentry->msg.raw.hdr.common.type; switch (llc_type) { case SMC_LLC_TEST_LINK: - if (link->state == SMC_LNK_ACTIVE) + if (smc_link_active(link)) complete(&link->llc_testlink_resp); break; case SMC_LLC_ADD_LINK: - case SMC_LLC_DELETE_LINK: - case SMC_LLC_CONFIRM_LINK: case SMC_LLC_ADD_LINK_CONT: + case SMC_LLC_CONFIRM_LINK: + if (flowtype != SMC_LLC_FLOW_ADD_LINK || flow->qentry) + break; /* drop out-of-flow response */ + goto assign; + case SMC_LLC_DELETE_LINK: + if (flowtype != SMC_LLC_FLOW_DEL_LINK || flow->qentry) + break; /* drop out-of-flow response */ + goto assign; case SMC_LLC_CONFIRM_RKEY: case SMC_LLC_DELETE_RKEY: - /* assign responses to the local flow, we requested them */ - smc_llc_flow_qentry_set(&link->lgr->llc_flow_lcl, qentry); - wake_up(&link->lgr->llc_msg_waiter); - return; + if (flowtype != SMC_LLC_FLOW_RKEY || flow->qentry) + break; /* drop out-of-flow response */ + goto assign; case SMC_LLC_CONFIRM_RKEY_CONT: /* not used because max links is 3 */ break; @@ -1610,6 +1648,11 @@ static void smc_llc_rx_response(struct smc_link *link, break; } kfree(qentry); + return; +assign: + /* assign responses to the local flow, we requested them */ + 
smc_llc_flow_qentry_set(&link->lgr->llc_flow_lcl, qentry); + wake_up(&link->lgr->llc_msg_waiter); } static void smc_llc_enqueue(struct smc_link *link, union smc_llc_msg *llc) @@ -1663,7 +1706,7 @@ static void smc_llc_testlink_work(struct work_struct *work) u8 user_data[16] = { 0 }; int rc; - if (link->state != SMC_LNK_ACTIVE) + if (!smc_link_active(link)) return; /* don't reschedule worker */ expire_time = link->wr_rx_tstamp + link->llc_testlink_time; if (time_is_after_jiffies(expire_time)) { @@ -1675,7 +1718,7 @@ static void smc_llc_testlink_work(struct work_struct *work) /* receive TEST LINK response over RoCE fabric */ rc = wait_for_completion_interruptible_timeout(&link->llc_testlink_resp, SMC_LLC_WAIT_TIME); - if (link->state != SMC_LNK_ACTIVE) + if (!smc_link_active(link)) return; /* link state changed */ if (rc <= 0) { smcr_link_down_cond_sched(link); diff --git a/net/smc/smc_llc.h b/net/smc/smc_llc.h index a5d2fe3eea61..cc00a2ec4e92 100644 --- a/net/smc/smc_llc.h +++ b/net/smc/smc_llc.h @@ -103,7 +103,7 @@ void smc_llc_send_link_delete_all(struct smc_link_group *lgr, bool ord, u32 rsn); int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry); int smc_llc_srv_add_link(struct smc_link *link); -void smc_llc_srv_add_link_local(struct smc_link *link); +void smc_llc_add_link_local(struct smc_link *link); int smc_llc_init(void) __init; #endif /* SMC_LLC_H */ diff --git a/net/tipc/link.c b/net/tipc/link.c index 263d950e70e9..d40f8e5b7683 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -827,11 +827,11 @@ int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq) state |= l->bc_rcvlink->rcv_unacked; state |= l->rcv_unacked; state |= !skb_queue_empty(&l->transmq); - state |= !skb_queue_empty(&l->deferdq); probe = mstate->probing; probe |= l->silent_intv_cnt; if (probe || mstate->monitoring) l->silent_intv_cnt++; + probe |= !skb_queue_empty(&l->deferdq); if (l->snd_nxt == l->checkpoint) { tipc_link_update_cwin(l, 0, 0); probe = true; diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c index dfbaf6bd8b1c..2700a63ab095 100644 --- a/net/vmw_vsock/virtio_transport.c +++ b/net/vmw_vsock/virtio_transport.c @@ -22,7 +22,7 @@ #include <net/af_vsock.h> static struct workqueue_struct *virtio_vsock_workqueue; -static struct virtio_vsock *the_virtio_vsock; +static struct virtio_vsock __rcu *the_virtio_vsock; static DEFINE_MUTEX(the_virtio_vsock_mutex); /* protects the_virtio_vsock */ struct virtio_vsock { diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 0e07fb8585fb..7fbca0854265 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -13266,13 +13266,13 @@ static int nl80211_vendor_cmd(struct sk_buff *skb, struct genl_info *info) if (!wdev_running(wdev)) return -ENETDOWN; } - - if (!vcmd->doit) - return -EOPNOTSUPP; } else { wdev = NULL; } + if (!vcmd->doit) + return -EOPNOTSUPP; + if (info->attrs[NL80211_ATTR_VENDOR_DATA]) { data = nla_data(info->attrs[NL80211_ATTR_VENDOR_DATA]); len = nla_len(info->attrs[NL80211_ATTR_VENDOR_DATA]); diff --git a/net/xfrm/espintcp.c b/net/xfrm/espintcp.c index 100e29682b48..827ccdf2db57 100644 --- a/net/xfrm/espintcp.c +++ b/net/xfrm/espintcp.c @@ -15,6 +15,7 @@ static void handle_nonesp(struct espintcp_ctx *ctx, struct sk_buff *skb, { if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf || !sk_rmem_schedule(sk, skb, skb->truesize)) { + XFRM_INC_STATS(sock_net(sk), LINUX_MIB_XFRMINERROR); kfree_skb(skb); return; } @@ -49,23 +50,51 @@ static void espintcp_rcv(struct 
strparser *strp, struct sk_buff *skb) struct espintcp_ctx *ctx = container_of(strp, struct espintcp_ctx, strp); struct strp_msg *rxm = strp_msg(skb); + int len = rxm->full_len - 2; u32 nonesp_marker; int err; + /* keepalive packet? */ + if (unlikely(len == 1)) { + u8 data; + + err = skb_copy_bits(skb, rxm->offset + 2, &data, 1); + if (err < 0) { + XFRM_INC_STATS(sock_net(strp->sk), LINUX_MIB_XFRMINHDRERROR); + kfree_skb(skb); + return; + } + + if (data == 0xff) { + kfree_skb(skb); + return; + } + } + + /* drop other short messages */ + if (unlikely(len <= sizeof(nonesp_marker))) { + XFRM_INC_STATS(sock_net(strp->sk), LINUX_MIB_XFRMINHDRERROR); + kfree_skb(skb); + return; + } + err = skb_copy_bits(skb, rxm->offset + 2, &nonesp_marker, sizeof(nonesp_marker)); if (err < 0) { + XFRM_INC_STATS(sock_net(strp->sk), LINUX_MIB_XFRMINHDRERROR); kfree_skb(skb); return; } /* remove header, leave non-ESP marker/SPI */ if (!__pskb_pull(skb, rxm->offset + 2)) { + XFRM_INC_STATS(sock_net(strp->sk), LINUX_MIB_XFRMINERROR); kfree_skb(skb); return; } if (pskb_trim(skb, rxm->full_len - 2) != 0) { + XFRM_INC_STATS(sock_net(strp->sk), LINUX_MIB_XFRMINERROR); kfree_skb(skb); return; } @@ -91,7 +120,7 @@ static int espintcp_parse(struct strparser *strp, struct sk_buff *skb) return err; len = be16_to_cpu(blen); - if (len < 6) + if (len < 2) return -EINVAL; return len; @@ -109,8 +138,11 @@ static int espintcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, flags |= nonblock ? MSG_DONTWAIT : 0; skb = __skb_recv_datagram(sk, &ctx->ike_queue, flags, &off, &err); - if (!skb) + if (!skb) { + if (err == -EAGAIN && sk->sk_shutdown & RCV_SHUTDOWN) + return 0; return err; + } copied = len; if (copied > skb->len) @@ -213,7 +245,7 @@ retry: return 0; } -static int espintcp_push_msgs(struct sock *sk) +static int espintcp_push_msgs(struct sock *sk, int flags) { struct espintcp_ctx *ctx = espintcp_getctx(sk); struct espintcp_msg *emsg = &ctx->partial; @@ -227,12 +259,12 @@ static int espintcp_push_msgs(struct sock *sk) ctx->tx_running = 1; if (emsg->skb) - err = espintcp_sendskb_locked(sk, emsg, 0); + err = espintcp_sendskb_locked(sk, emsg, flags); else - err = espintcp_sendskmsg_locked(sk, emsg, 0); + err = espintcp_sendskmsg_locked(sk, emsg, flags); if (err == -EAGAIN) { ctx->tx_running = 0; - return 0; + return flags & MSG_DONTWAIT ? 
-EAGAIN : 0; } if (!err) memset(emsg, 0, sizeof(*emsg)); @@ -257,7 +289,7 @@ int espintcp_push_skb(struct sock *sk, struct sk_buff *skb) offset = skb_transport_offset(skb); len = skb->len - offset; - espintcp_push_msgs(sk); + espintcp_push_msgs(sk, 0); if (emsg->len) { kfree_skb(skb); @@ -270,7 +302,7 @@ int espintcp_push_skb(struct sock *sk, struct sk_buff *skb) emsg->len = len; emsg->skb = skb; - espintcp_push_msgs(sk); + espintcp_push_msgs(sk, 0); return 0; } @@ -287,7 +319,7 @@ static int espintcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) char buf[2] = {0}; int err, end; - if (msg->msg_flags) + if (msg->msg_flags & ~MSG_DONTWAIT) return -EOPNOTSUPP; if (size > MAX_ESPINTCP_MSG) @@ -298,9 +330,10 @@ static int espintcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) lock_sock(sk); - err = espintcp_push_msgs(sk); + err = espintcp_push_msgs(sk, msg->msg_flags & MSG_DONTWAIT); if (err < 0) { - err = -ENOBUFS; + if (err != -EAGAIN || !(msg->msg_flags & MSG_DONTWAIT)) + err = -ENOBUFS; goto unlock; } @@ -337,10 +370,9 @@ static int espintcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) tcp_rate_check_app_limited(sk); - err = espintcp_push_msgs(sk); + err = espintcp_push_msgs(sk, msg->msg_flags & MSG_DONTWAIT); /* this message could be partially sent, keep it */ - if (err < 0) - goto unlock; + release_sock(sk); return size; @@ -374,7 +406,7 @@ static void espintcp_tx_work(struct work_struct *work) lock_sock(sk); if (!ctx->tx_running) - espintcp_push_msgs(sk); + espintcp_push_msgs(sk, 0); release_sock(sk); } diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 564aa6492e7c..19c5e0fa3f44 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -39,7 +39,7 @@ #ifdef CONFIG_XFRM_STATISTICS #include <net/snmp.h> #endif -#ifdef CONFIG_INET_ESPINTCP +#ifdef CONFIG_XFRM_ESPINTCP #include <net/espintcp.h> #endif @@ -1433,14 +1433,10 @@ static void xfrm_policy_requeue(struct xfrm_policy *old, spin_unlock_bh(&pq->hold_queue.lock); } -static bool xfrm_policy_mark_match(struct xfrm_policy *policy, - struct xfrm_policy *pol) +static inline bool xfrm_policy_mark_match(const struct xfrm_mark *mark, + struct xfrm_policy *pol) { - if (policy->mark.v == pol->mark.v && - policy->priority == pol->priority) - return true; - - return false; + return mark->v == pol->mark.v && mark->m == pol->mark.m; } static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed) @@ -1503,7 +1499,7 @@ static void xfrm_policy_insert_inexact_list(struct hlist_head *chain, if (pol->type == policy->type && pol->if_id == policy->if_id && !selector_cmp(&pol->selector, &policy->selector) && - xfrm_policy_mark_match(policy, pol) && + xfrm_policy_mark_match(&policy->mark, pol) && xfrm_sec_ctx_match(pol->security, policy->security) && !WARN_ON(delpol)) { delpol = pol; @@ -1538,7 +1534,7 @@ static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain, if (pol->type == policy->type && pol->if_id == policy->if_id && !selector_cmp(&pol->selector, &policy->selector) && - xfrm_policy_mark_match(policy, pol) && + xfrm_policy_mark_match(&policy->mark, pol) && xfrm_sec_ctx_match(pol->security, policy->security) && !WARN_ON(delpol)) { if (excl) @@ -1610,9 +1606,8 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) EXPORT_SYMBOL(xfrm_policy_insert); static struct xfrm_policy * -__xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id, - u8 type, int dir, - struct xfrm_selector *sel, +__xfrm_policy_bysel_ctx(struct hlist_head *chain, const 
struct xfrm_mark *mark, + u32 if_id, u8 type, int dir, struct xfrm_selector *sel, struct xfrm_sec_ctx *ctx) { struct xfrm_policy *pol; @@ -1623,7 +1618,7 @@ __xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id, hlist_for_each_entry(pol, chain, bydst) { if (pol->type == type && pol->if_id == if_id && - (mark & pol->mark.m) == pol->mark.v && + xfrm_policy_mark_match(mark, pol) && !selector_cmp(sel, &pol->selector) && xfrm_sec_ctx_match(ctx, pol->security)) return pol; @@ -1632,11 +1627,10 @@ __xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id, return NULL; } -struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id, - u8 type, int dir, - struct xfrm_selector *sel, - struct xfrm_sec_ctx *ctx, int delete, - int *err) +struct xfrm_policy * +xfrm_policy_bysel_ctx(struct net *net, const struct xfrm_mark *mark, u32 if_id, + u8 type, int dir, struct xfrm_selector *sel, + struct xfrm_sec_ctx *ctx, int delete, int *err) { struct xfrm_pol_inexact_bin *bin = NULL; struct xfrm_policy *pol, *ret = NULL; @@ -1703,9 +1697,9 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id, } EXPORT_SYMBOL(xfrm_policy_bysel_ctx); -struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id, - u8 type, int dir, u32 id, int delete, - int *err) +struct xfrm_policy * +xfrm_policy_byid(struct net *net, const struct xfrm_mark *mark, u32 if_id, + u8 type, int dir, u32 id, int delete, int *err) { struct xfrm_policy *pol, *ret; struct hlist_head *chain; @@ -1720,8 +1714,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id, ret = NULL; hlist_for_each_entry(pol, chain, byidx) { if (pol->type == type && pol->index == id && - pol->if_id == if_id && - (mark & pol->mark.m) == pol->mark.v) { + pol->if_id == if_id && xfrm_policy_mark_match(mark, pol)) { xfrm_pol_hold(pol); if (delete) { *err = security_xfrm_policy_delete( @@ -4156,7 +4149,7 @@ void __init xfrm_init(void) seqcount_init(&xfrm_policy_hash_generation); xfrm_input_init(); -#ifdef CONFIG_INET_ESPINTCP +#ifdef CONFIG_XFRM_ESPINTCP espintcp_init(); #endif diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index e6cfaa680ef3..fbb7d9d06478 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -1863,7 +1863,6 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, struct km_event c; int delete; struct xfrm_mark m; - u32 mark = xfrm_mark_get(attrs, &m); u32 if_id = 0; p = nlmsg_data(nlh); @@ -1880,8 +1879,11 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, if (attrs[XFRMA_IF_ID]) if_id = nla_get_u32(attrs[XFRMA_IF_ID]); + xfrm_mark_get(attrs, &m); + if (p->index) - xp = xfrm_policy_byid(net, mark, if_id, type, p->dir, p->index, delete, &err); + xp = xfrm_policy_byid(net, &m, if_id, type, p->dir, + p->index, delete, &err); else { struct nlattr *rt = attrs[XFRMA_SEC_CTX]; struct xfrm_sec_ctx *ctx; @@ -1898,8 +1900,8 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, if (err) return err; } - xp = xfrm_policy_bysel_ctx(net, mark, if_id, type, p->dir, &p->sel, - ctx, delete, &err); + xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir, + &p->sel, ctx, delete, &err); security_xfrm_policy_free(ctx); } if (xp == NULL) @@ -2166,7 +2168,6 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, u8 type = XFRM_POLICY_TYPE_MAIN; int err = -ENOENT; struct xfrm_mark m; - u32 mark = xfrm_mark_get(attrs, &m); u32 if_id = 0; err = copy_from_user_policy_type(&type, attrs); 
@@ -2180,8 +2181,11 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, if (attrs[XFRMA_IF_ID]) if_id = nla_get_u32(attrs[XFRMA_IF_ID]); + xfrm_mark_get(attrs, &m); + if (p->index) - xp = xfrm_policy_byid(net, mark, if_id, type, p->dir, p->index, 0, &err); + xp = xfrm_policy_byid(net, &m, if_id, type, p->dir, p->index, + 0, &err); else { struct nlattr *rt = attrs[XFRMA_SEC_CTX]; struct xfrm_sec_ctx *ctx; @@ -2198,7 +2202,7 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, if (err) return err; } - xp = xfrm_policy_bysel_ctx(net, mark, if_id, type, p->dir, + xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir, &p->sel, ctx, 0, &err); security_xfrm_policy_free(ctx); } diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost index 3651cbf6ad49..f54b6ac37ac2 100644 --- a/scripts/Makefile.modpost +++ b/scripts/Makefile.modpost @@ -124,9 +124,6 @@ existing-targets := $(wildcard $(sort $(targets))) -include $(foreach f,$(existing-targets),$(dir $(f)).$(notdir $(f)).cmd) -PHONY += FORCE -FORCE: - endif .PHONY: $(PHONY) diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh index 66a6d511b524..0869def435ee 100755 --- a/scripts/decode_stacktrace.sh +++ b/scripts/decode_stacktrace.sh @@ -87,8 +87,8 @@ parse_symbol() { return fi - # Strip out the base of the path - code=${code#$basepath/} + # Strip out the base of the path on each line + code=$(while read -r line; do echo "${line#$basepath/}"; done <<< "$code") # In the case of inlines, move everything to same line code=${code//$'\n'/' '} diff --git a/scripts/gdb/linux/symbols.py b/scripts/gdb/linux/symbols.py index be984aa29b75..1be9763cf8bb 100644 --- a/scripts/gdb/linux/symbols.py +++ b/scripts/gdb/linux/symbols.py @@ -96,7 +96,7 @@ lx-symbols command.""" return "" attrs = sect_attrs['attrs'] section_name_to_address = { - attrs[n]['name'].string(): attrs[n]['address'] + attrs[n]['battr']['attr']['name'].string(): attrs[n]['address'] for n in range(int(sect_attrs['nsections']))} args = [] for section_name in [".data", ".data..read_mostly", ".rodata", ".bss", diff --git a/scripts/kconfig/.gitignore b/scripts/kconfig/.gitignore index 12a67fdab541..c3d537cd0275 100644 --- a/scripts/kconfig/.gitignore +++ b/scripts/kconfig/.gitignore @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only -*.moc +/qconf-moc.cc *conf-cfg # diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile index 426881ea954f..52b59bf9efe4 100644 --- a/scripts/kconfig/Makefile +++ b/scripts/kconfig/Makefile @@ -181,19 +181,22 @@ $(addprefix $(obj)/, mconf.o $(lxdialog)): $(obj)/mconf-cfg # qconf: Used for the xconfig target based on Qt hostprogs += qconf -qconf-cxxobjs := qconf.o +qconf-cxxobjs := qconf.o qconf-moc.o qconf-objs := images.o $(common-objs) HOSTLDLIBS_qconf = $(shell . $(obj)/qconf-cfg && echo $$libs) HOSTCXXFLAGS_qconf.o = $(shell . $(obj)/qconf-cfg && echo $$cflags) +HOSTCXXFLAGS_qconf-moc.o = $(shell . $(obj)/qconf-cfg && echo $$cflags) -$(obj)/qconf.o: $(obj)/qconf-cfg $(obj)/qconf.moc +$(obj)/qconf.o: $(obj)/qconf-cfg quiet_cmd_moc = MOC $@ - cmd_moc = $(shell . $(obj)/qconf-cfg && echo $$moc) -i $< -o $@ + cmd_moc = $(shell . 
$(obj)/qconf-cfg && echo $$moc) $< -o $@ -$(obj)/%.moc: $(src)/%.h $(obj)/qconf-cfg - $(call cmd,moc) +$(obj)/qconf-moc.cc: $(src)/qconf.h $(obj)/qconf-cfg FORCE + $(call if_changed,moc) + +targets += qconf-moc.cc # gconf: Used for the gconfig target based on GTK+ hostprogs += gconf diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc index 4a616128a154..23d1cb01a41a 100644 --- a/scripts/kconfig/qconf.cc +++ b/scripts/kconfig/qconf.cc @@ -23,7 +23,6 @@ #include "lkc.h" #include "qconf.h" -#include "qconf.moc" #include "images.h" @@ -308,10 +307,7 @@ ConfigList::ConfigList(ConfigView* p, const char *name) setVerticalScrollMode(ScrollPerPixel); setHorizontalScrollMode(ScrollPerPixel); - if (mode == symbolMode) - setHeaderLabels(QStringList() << "Item" << "Name" << "N" << "M" << "Y" << "Value"); - else - setHeaderLabels(QStringList() << "Option" << "Name" << "N" << "M" << "Y" << "Value"); + setHeaderLabels(QStringList() << "Option" << "Name" << "N" << "M" << "Y" << "Value"); connect(this, SIGNAL(itemSelectionChanged(void)), SLOT(updateSelection(void))); @@ -392,11 +388,6 @@ void ConfigList::updateSelection(void) struct menu *menu; enum prop_type type; - if (mode == symbolMode) - setHeaderLabels(QStringList() << "Item" << "Name" << "N" << "M" << "Y" << "Value"); - else - setHeaderLabels(QStringList() << "Option" << "Name" << "N" << "M" << "Y" << "Value"); - if (selectedItems().count() == 0) return; @@ -437,14 +428,13 @@ void ConfigList::updateList(ConfigItem* item) if (rootEntry != &rootmenu && (mode == singleMode || (mode == symbolMode && rootEntry->parent != &rootmenu))) { item = (ConfigItem *)topLevelItem(0); - if (!item && mode != symbolMode) { + if (!item) item = new ConfigItem(this, 0, true); - last = item; - } + last = item; } if ((mode == singleMode || (mode == symbolMode && !(rootEntry->flags & MENU_ROOT))) && rootEntry->sym && rootEntry->prompt) { - item = last ? last->nextSibling() : firstChild(); + item = last ? 
last->nextSibling() : nullptr; if (!item) item = new ConfigItem(this, last, rootEntry, true); else @@ -1239,7 +1229,7 @@ void ConfigInfoView::clicked(const QUrl &url) if (count < 1) { qInfo() << "Clicked link is empty"; - delete data; + delete[] data; return; } @@ -1252,7 +1242,7 @@ void ConfigInfoView::clicked(const QUrl &url) result = sym_re_search(data); if (!result) { qInfo() << "Clicked symbol is invalid:" << data; - delete data; + delete[] data; return; } @@ -1735,7 +1725,6 @@ void ConfigMainWindow::listFocusChanged(void) void ConfigMainWindow::goBack(void) { -qInfo() << __FUNCTION__; if (configList->rootEntry == &rootmenu) return; diff --git a/scripts/kconfig/qconf.h b/scripts/kconfig/qconf.h index fb9e9729266f..5eeab4a8bb43 100644 --- a/scripts/kconfig/qconf.h +++ b/scripts/kconfig/qconf.h @@ -92,10 +92,6 @@ public: { return this; } - ConfigItem* firstChild() const - { - return (ConfigItem *)children().first(); - } void addColumn(colIdx idx) { showColumn(idx); diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index 6aea65c65745..69341b36f271 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c @@ -138,11 +138,20 @@ char *read_text_file(const char *filename) char *get_line(char **stringp) { + char *orig = *stringp, *next; + /* do not return the unwanted extra line at EOF */ - if (*stringp && **stringp == '\0') + if (!orig || *orig == '\0') return NULL; - return strsep(stringp, "\n"); + /* don't use strsep here, it is not available everywhere */ + next = strchr(orig, '\n'); + if (next) + *next++ = '\0'; + + *stringp = next; + + return orig; } /* A list of all modules we processed */ diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 7e3ae4534df9..803978d69e3c 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -2935,6 +2935,10 @@ static int hda_codec_runtime_suspend(struct device *dev) struct hda_codec *codec = dev_to_hda_codec(dev); unsigned int state; + /* Nothing to do if card registration fails and the component driver never probes */ + if (!codec->card) + return 0; + cancel_delayed_work_sync(&codec->jackpoll_work); state = hda_call_codec_suspend(codec); if (codec->link_down_at_suspend || @@ -2949,6 +2953,10 @@ static int hda_codec_runtime_resume(struct device *dev) { struct hda_codec *codec = dev_to_hda_codec(dev); + /* Nothing to do if card registration fails and the component driver never probes */ + if (!codec->card) + return 0; + codec_display_power(codec, true); snd_hdac_codec_link_up(&codec->core); hda_call_codec_resume(codec); diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h index 82e26442724b..a356fb0e5773 100644 --- a/sound/pci/hda/hda_controller.h +++ b/sound/pci/hda/hda_controller.h @@ -41,7 +41,7 @@ /* 24 unused */ #define AZX_DCAPS_COUNT_LPIB_DELAY (1 << 25) /* Take LPIB as delay */ #define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */ -/* 27 unused */ +#define AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP (1 << 27) /* Workaround for spurious wakeups after suspend */ #define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28) /* CORBRP clears itself after reset */ #define AZX_DCAPS_NO_MSI64 (1 << 29) /* Stick to 32-bit MSIs */ #define AZX_DCAPS_SEPARATE_STREAM_TAG (1 << 30) /* capture and playback use separate stream tag */ diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 3565e2ab0965..3fbba2e51e36 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -298,7 +298,8 @@ enum { /* PCH for HSW/BDW; with runtime PM */ /* no i915 binding for this as 
HSW/BDW has another controller for HDMI */ #define AZX_DCAPS_INTEL_PCH \ - (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME) + (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME |\ + AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP) /* HSW HDMI */ #define AZX_DCAPS_INTEL_HASWELL \ @@ -1028,7 +1029,14 @@ static int azx_suspend(struct device *dev) chip = card->private_data; bus = azx_bus(chip); snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); - pm_runtime_force_suspend(dev); + /* An ugly workaround: direct call of __azx_runtime_suspend() and + * __azx_runtime_resume() for old Intel platforms that suffer from + * spurious wakeups after S3 suspend + */ + if (chip->driver_caps & AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP) + __azx_runtime_suspend(chip); + else + pm_runtime_force_suspend(dev); if (bus->irq >= 0) { free_irq(bus->irq, chip); bus->irq = -1; @@ -1057,7 +1065,10 @@ static int azx_resume(struct device *dev) if (azx_acquire_irq(chip, 1) < 0) return -EIO; - pm_runtime_force_resume(dev); + if (chip->driver_caps & AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP) + __azx_runtime_resume(chip, false); + else + pm_runtime_force_resume(dev); snd_power_change_state(card, SNDRV_CTL_POWER_D0); trace_azx_resume(chip); diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 41eaa89660c3..cd46247988e4 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -2440,6 +2440,7 @@ static void generic_acomp_notifier_set(struct drm_audio_component *acomp, mutex_lock(&spec->bind_lock); spec->use_acomp_notifier = use_acomp; spec->codec->relaxed_resume = use_acomp; + spec->codec->bus->keep_power = 0; /* reprogram each jack detection logic depending on the notifier */ for (i = 0; i < spec->num_pins; i++) reprogram_jack_detect(spec->codec, @@ -2534,7 +2535,6 @@ static void generic_acomp_init(struct hda_codec *codec, if (!snd_hdac_acomp_init(&codec->bus->core, &spec->drm_audio_ops, match_bound_vga, 0)) { spec->acomp_registered = true; - codec->bus->keep_power = 0; } } diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 1b2d8e56390a..29f5878f0c50 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -5975,6 +5975,16 @@ static void alc_fixup_disable_mic_vref(struct hda_codec *codec, snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREFHIZ); } +static void alc285_fixup_hp_gpio_amp_init(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + if (action != HDA_FIXUP_ACT_INIT) + return; + + msleep(100); + alc_write_coef_idx(codec, 0x65, 0x0); +} + /* for hda_fixup_thinkpad_acpi() */ #include "thinkpad_helper.c" @@ -6152,8 +6162,10 @@ enum { ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS, ALC269VC_FIXUP_ACER_HEADSET_MIC, ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE, - ALC289_FIXUP_ASUS_G401, + ALC289_FIXUP_ASUS_GA401, + ALC289_FIXUP_ASUS_GA502, ALC256_FIXUP_ACER_MIC_NO_PRESENCE, + ALC285_FIXUP_HP_GPIO_AMP_INIT, }; static const struct hda_fixup alc269_fixups[] = { @@ -7363,7 +7375,14 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC269_FIXUP_HEADSET_MIC }, - [ALC289_FIXUP_ASUS_G401] = { + [ALC289_FIXUP_ASUS_GA401] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x19, 0x03a11020 }, /* headset mic with jack detect */ + { } + }, + }, + [ALC289_FIXUP_ASUS_GA502] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x19, 0x03a11020 }, /* headset mic with jack detect */ @@ -7379,6 +7398,12 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = 
ALC256_FIXUP_ASUS_HEADSET_MODE }, + [ALC285_FIXUP_HP_GPIO_AMP_INIT] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc285_fixup_hp_gpio_amp_init, + .chained = true, + .chain_id = ALC285_FIXUP_HP_GPIO_LED + }, }; static const struct snd_pci_quirk alc269_fixup_tbl[] = { @@ -7529,7 +7554,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3), SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED), - SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x877d, "HP", ALC236_FIXUP_HP_MUTE_LED), SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), @@ -7561,7 +7586,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC), - SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_G401), + SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502), + SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2), SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC), @@ -7581,7 +7607,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC), SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC), SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE), - SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC225_FIXUP_HEADSET_JACK), + SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK), SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE), SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC), SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), diff --git a/sound/soc/codecs/cros_ec_codec.c b/sound/soc/codecs/cros_ec_codec.c index 8d45c628e988..ab009c7dfdf4 100644 --- a/sound/soc/codecs/cros_ec_codec.c +++ b/sound/soc/codecs/cros_ec_codec.c @@ -103,28 +103,6 @@ error: return ret; } -static int calculate_sha256(struct cros_ec_codec_priv *priv, - uint8_t *buf, uint32_t size, uint8_t *digest) -{ - struct sha256_state sctx; - - sha256_init(&sctx); - sha256_update(&sctx, buf, size); - sha256_final(&sctx, digest); - -#ifdef DEBUG - { - char digest_str[65]; - - bin2hex(digest_str, digest, 32); - digest_str[64] = 0; - dev_dbg(priv->dev, "hash=%s\n", digest_str); - } -#endif - - return 0; -} - static int dmic_get_gain(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { @@ -782,9 +760,8 @@ static int wov_hotword_model_put(struct snd_kcontrol *kcontrol, if (IS_ERR(buf)) return PTR_ERR(buf); - ret = calculate_sha256(priv, buf, size, digest); - if (ret) - goto leave; + sha256(buf, size, digest); + dev_dbg(priv->dev, "hash=%*phN\n", SHA256_DIGEST_SIZE, digest); p.cmd = EC_CODEC_WOV_GET_LANG; ret = send_ec_host_command(priv->ec_device, 
EC_CMD_EC_CODEC_WOV, diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index 40b7cd13fed9..a69d9e75f66f 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c @@ -367,6 +367,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs, ifnum = 0; goto add_sync_ep_from_ifnum; case USB_ID(0x07fd, 0x0008): /* MOTU M Series */ + case USB_ID(0x31e9, 0x0001): /* Solid State Logic SSL2 */ case USB_ID(0x31e9, 0x0002): /* Solid State Logic SSL2+ */ case USB_ID(0x0d9a, 0x00df): /* RTX6001 */ ep = 0x81; diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c index 5b36c589a029..ba4f33804af1 100644 --- a/tools/lib/traceevent/event-parse.c +++ b/tools/lib/traceevent/event-parse.c @@ -2861,6 +2861,7 @@ process_dynamic_array_len(struct tep_event *event, struct tep_print_arg *arg, if (read_expected(TEP_EVENT_DELIM, ")") < 0) goto out_err; + free_token(token); type = read_token(&token); *tok = token; diff --git a/tools/lib/traceevent/plugins/Makefile b/tools/lib/traceevent/plugins/Makefile index 349bb81482ab..680d883efe05 100644 --- a/tools/lib/traceevent/plugins/Makefile +++ b/tools/lib/traceevent/plugins/Makefile @@ -197,7 +197,7 @@ define do_generate_dynamic_list_file xargs echo "U w W" | tr 'w ' 'W\n' | sort -u | xargs echo`;\ if [ "$$symbol_type" = "U W" ];then \ (echo '{'; \ - $(NM) -u -D $1 | awk 'NF>1 {print "\t"$$2";"}' | sort -u;\ + $(NM) -u -D $1 | awk 'NF>1 {sub("@.*", "", $$2); print "\t"$$2";"}' | sort -u;\ echo '};'; \ ) > $2; \ else \ diff --git a/tools/perf/arch/arm/util/auxtrace.c b/tools/perf/arch/arm/util/auxtrace.c index 0a6e75b8777a..28a5d0c18b1d 100644 --- a/tools/perf/arch/arm/util/auxtrace.c +++ b/tools/perf/arch/arm/util/auxtrace.c @@ -56,7 +56,7 @@ struct auxtrace_record struct perf_pmu *cs_etm_pmu; struct evsel *evsel; bool found_etm = false; - bool found_spe = false; + struct perf_pmu *found_spe = NULL; static struct perf_pmu **arm_spe_pmus = NULL; static int nr_spes = 0; int i = 0; @@ -74,12 +74,12 @@ struct auxtrace_record evsel->core.attr.type == cs_etm_pmu->type) found_etm = true; - if (!nr_spes) + if (!nr_spes || found_spe) continue; for (i = 0; i < nr_spes; i++) { if (evsel->core.attr.type == arm_spe_pmus[i]->type) { - found_spe = true; + found_spe = arm_spe_pmus[i]; break; } } @@ -96,7 +96,7 @@ struct auxtrace_record #if defined(__aarch64__) if (found_spe) - return arm_spe_recording_init(err, arm_spe_pmus[i]); + return arm_spe_recording_init(err, found_spe); #endif /* diff --git a/tools/perf/pmu-events/arch/s390/cf_z15/extended.json b/tools/perf/pmu-events/arch/s390/cf_z15/extended.json index 2df2e231e9ee..24c4ba2a9ae5 100644 --- a/tools/perf/pmu-events/arch/s390/cf_z15/extended.json +++ b/tools/perf/pmu-events/arch/s390/cf_z15/extended.json @@ -380,7 +380,7 @@ { "Unit": "CPU-M-CF", "EventCode": "265", - "EventName": "DFLT_CCERROR", + "EventName": "DFLT_CCFINISH", "BriefDescription": "Increments by one for every DEFLATE CONVERSION CALL instruction executed that ended in Condition Codes 0, 1 or 2", "PublicDescription": "Increments by one for every DEFLATE CONVERSION CALL instruction executed that ended in Condition Codes 0, 1 or 2" }, diff --git a/tools/perf/tests/shell/record+zstd_comp_decomp.sh b/tools/perf/tests/shell/record+zstd_comp_decomp.sh index 63a91ec473bb..045723b3d992 100755 --- a/tools/perf/tests/shell/record+zstd_comp_decomp.sh +++ b/tools/perf/tests/shell/record+zstd_comp_decomp.sh @@ -12,7 +12,8 @@ skip_if_no_z_record() { collect_z_record() { echo "Collecting compressed record file:" - $perf_tool record -o $trace_file 
-g -z -F 5000 -- \ + [[ "$(uname -m)" != s390x ]] && gflag='-g' + $perf_tool record -o $trace_file $gflag -z -F 5000 -- \ dd count=500 if=/dev/urandom of=/dev/null } diff --git a/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c b/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c index f7ee8fa377ad..6ccecbd39476 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c @@ -5,10 +5,60 @@ #include "test_btf_map_in_map.skel.h" +static int duration; + +static __u32 bpf_map_id(struct bpf_map *map) +{ + struct bpf_map_info info; + __u32 info_len = sizeof(info); + int err; + + memset(&info, 0, info_len); + err = bpf_obj_get_info_by_fd(bpf_map__fd(map), &info, &info_len); + if (err) + return 0; + return info.id; +} + +/* + * Trigger synchronize_rcu() in kernel. + * + * ARRAY_OF_MAPS/HASH_OF_MAPS lookup/update operations trigger synchronize_rcu() + * if looking up an existing non-NULL element or updating the map with a valid + * inner map FD. Use this fact to trigger synchronize_rcu(): create map-in-map, + * create a trivial ARRAY map, update map-in-map with ARRAY inner map. Then + * cleanup. At the end, at least one synchronize_rcu() would be called. + */ +static int kern_sync_rcu(void) +{ + int inner_map_fd, outer_map_fd, err, zero = 0; + + inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 4, 1, 0); + if (CHECK(inner_map_fd < 0, "inner_map_create", "failed %d\n", -errno)) + return -1; + + outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL, + sizeof(int), inner_map_fd, 1, 0); + if (CHECK(outer_map_fd < 0, "outer_map_create", "failed %d\n", -errno)) { + close(inner_map_fd); + return -1; + } + + err = bpf_map_update_elem(outer_map_fd, &zero, &inner_map_fd, 0); + if (err) + err = -errno; + CHECK(err, "outer_map_update", "failed %d\n", err); + close(inner_map_fd); + close(outer_map_fd); + return err; +} + void test_btf_map_in_map(void) { - int duration = 0, err, key = 0, val; - struct test_btf_map_in_map* skel; + int err, key = 0, val, i; + struct test_btf_map_in_map *skel; + int outer_arr_fd, outer_hash_fd; + int fd, map1_fd, map2_fd, map1_id, map2_id; skel = test_btf_map_in_map__open_and_load(); if (CHECK(!skel, "skel_open", "failed to open&load skeleton\n")) @@ -18,32 +68,78 @@ void test_btf_map_in_map(void) if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err)) goto cleanup; + map1_fd = bpf_map__fd(skel->maps.inner_map1); + map2_fd = bpf_map__fd(skel->maps.inner_map2); + outer_arr_fd = bpf_map__fd(skel->maps.outer_arr); + outer_hash_fd = bpf_map__fd(skel->maps.outer_hash); + /* inner1 = input, inner2 = input + 1 */ - val = bpf_map__fd(skel->maps.inner_map1); - bpf_map_update_elem(bpf_map__fd(skel->maps.outer_arr), &key, &val, 0); - val = bpf_map__fd(skel->maps.inner_map2); - bpf_map_update_elem(bpf_map__fd(skel->maps.outer_hash), &key, &val, 0); + map1_fd = bpf_map__fd(skel->maps.inner_map1); + bpf_map_update_elem(outer_arr_fd, &key, &map1_fd, 0); + map2_fd = bpf_map__fd(skel->maps.inner_map2); + bpf_map_update_elem(outer_hash_fd, &key, &map2_fd, 0); skel->bss->input = 1; usleep(1); - bpf_map_lookup_elem(bpf_map__fd(skel->maps.inner_map1), &key, &val); + bpf_map_lookup_elem(map1_fd, &key, &val); CHECK(val != 1, "inner1", "got %d != exp %d\n", val, 1); - bpf_map_lookup_elem(bpf_map__fd(skel->maps.inner_map2), &key, &val); + bpf_map_lookup_elem(map2_fd, &key, &val); CHECK(val != 2, "inner2", "got %d != exp %d\n", val, 2); /* inner1 = input + 1, inner2 = input */ - val = 
bpf_map__fd(skel->maps.inner_map2); - bpf_map_update_elem(bpf_map__fd(skel->maps.outer_arr), &key, &val, 0); - val = bpf_map__fd(skel->maps.inner_map1); - bpf_map_update_elem(bpf_map__fd(skel->maps.outer_hash), &key, &val, 0); + bpf_map_update_elem(outer_arr_fd, &key, &map2_fd, 0); + bpf_map_update_elem(outer_hash_fd, &key, &map1_fd, 0); skel->bss->input = 3; usleep(1); - bpf_map_lookup_elem(bpf_map__fd(skel->maps.inner_map1), &key, &val); + bpf_map_lookup_elem(map1_fd, &key, &val); CHECK(val != 4, "inner1", "got %d != exp %d\n", val, 4); - bpf_map_lookup_elem(bpf_map__fd(skel->maps.inner_map2), &key, &val); + bpf_map_lookup_elem(map2_fd, &key, &val); CHECK(val != 3, "inner2", "got %d != exp %d\n", val, 3); + for (i = 0; i < 5; i++) { + val = i % 2 ? map1_fd : map2_fd; + err = bpf_map_update_elem(outer_hash_fd, &key, &val, 0); + if (CHECK_FAIL(err)) { + printf("failed to update hash_of_maps on iter #%d\n", i); + goto cleanup; + } + err = bpf_map_update_elem(outer_arr_fd, &key, &val, 0); + if (CHECK_FAIL(err)) { + printf("failed to update hash_of_maps on iter #%d\n", i); + goto cleanup; + } + } + + map1_id = bpf_map_id(skel->maps.inner_map1); + map2_id = bpf_map_id(skel->maps.inner_map2); + CHECK(map1_id == 0, "map1_id", "failed to get ID 1\n"); + CHECK(map2_id == 0, "map2_id", "failed to get ID 2\n"); + + test_btf_map_in_map__destroy(skel); + skel = NULL; + + /* we need to either wait for or force synchronize_rcu(), before + * checking for "still exists" condition, otherwise map could still be + * resolvable by ID, causing false positives. + * + * Older kernels (5.8 and earlier) freed map only after two + * synchronize_rcu()s, so trigger two, to be entirely sure. + */ + CHECK(kern_sync_rcu(), "sync_rcu", "failed\n"); + CHECK(kern_sync_rcu(), "sync_rcu", "failed\n"); + + fd = bpf_map_get_fd_by_id(map1_id); + if (CHECK(fd >= 0, "map1_leak", "inner_map1 leaked!\n")) { + close(fd); + goto cleanup; + } + fd = bpf_map_get_fd_by_id(map2_id); + if (CHECK(fd >= 0, "map2_leak", "inner_map2 leaked!\n")) { + close(fd); + goto cleanup; + } + cleanup: test_btf_map_in_map__destroy(skel); } diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py index 8294ae3ffb3c..43c9cda199b8 100755 --- a/tools/testing/selftests/bpf/test_offload.py +++ b/tools/testing/selftests/bpf/test_offload.py @@ -318,6 +318,9 @@ class DebugfsDir: continue if os.path.isfile(p): + # We need to init trap_flow_action_cookie before read it + if f == "trap_flow_action_cookie": + cmd('echo deadbeef > %s/%s' % (path, f)) _, out = cmd('cat %s/%s' % (path, f)) dfs[f] = out.strip() elif os.path.isdir(p): diff --git a/tools/testing/selftests/bpf/verifier/event_output.c b/tools/testing/selftests/bpf/verifier/event_output.c index 99f8f582c02b..c5e805980409 100644 --- a/tools/testing/selftests/bpf/verifier/event_output.c +++ b/tools/testing/selftests/bpf/verifier/event_output.c @@ -112,6 +112,7 @@ "perfevent for cgroup sockopt", .insns = { __PERF_EVENT_INSNS__ }, .prog_type = BPF_PROG_TYPE_CGROUP_SOCKOPT, + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, .fixup_map_event_output = { 4 }, .result = ACCEPT, .retval = 1, diff --git a/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c b/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c index 54cdefdfb49d..d59f3eb67c8f 100644 --- a/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c +++ b/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c @@ -76,10 +76,8 @@ void set_default_state(struct kvm_nested_state 
*state) void set_default_vmx_state(struct kvm_nested_state *state, int size) { memset(state, 0, size); - state->flags = KVM_STATE_NESTED_GUEST_MODE | - KVM_STATE_NESTED_RUN_PENDING; if (have_evmcs) - state->flags |= KVM_STATE_NESTED_EVMCS; + state->flags = KVM_STATE_NESTED_EVMCS; state->format = 0; state->size = size; state->hdr.vmx.vmxon_pa = 0x1000; @@ -148,6 +146,11 @@ void test_vmx_nested_state(struct kvm_vm *vm) state->hdr.vmx.smm.flags = 1; test_nested_state_expect_einval(vm, state); + /* Invalid flags are rejected. */ + set_default_vmx_state(state, state_sz); + state->hdr.vmx.flags = ~0; + test_nested_state_expect_einval(vm, state); + /* It is invalid to have vmxon_pa == -1ull and vmcs_pa != -1ull. */ set_default_vmx_state(state, state_sz); state->hdr.vmx.vmxon_pa = -1ull; @@ -185,20 +188,41 @@ void test_vmx_nested_state(struct kvm_vm *vm) state->hdr.vmx.smm.flags = KVM_STATE_NESTED_SMM_GUEST_MODE; test_nested_state_expect_einval(vm, state); - /* Size must be large enough to fit kvm_nested_state and vmcs12. */ + /* + * Size must be large enough to fit kvm_nested_state and vmcs12 + * if VMCS12 physical address is set + */ set_default_vmx_state(state, state_sz); state->size = sizeof(*state); + state->flags = 0; + test_nested_state_expect_einval(vm, state); + + set_default_vmx_state(state, state_sz); + state->size = sizeof(*state); + state->flags = 0; + state->hdr.vmx.vmcs12_pa = -1; test_nested_state(vm, state); - /* vmxon_pa cannot be the same address as vmcs_pa. */ + /* + * KVM_SET_NESTED_STATE succeeds with invalid VMCS + * contents but L2 not running. + */ set_default_vmx_state(state, state_sz); - state->hdr.vmx.vmxon_pa = 0; - state->hdr.vmx.vmcs12_pa = 0; + state->flags = 0; + test_nested_state(vm, state); + + /* Invalid flags are rejected, even if no VMCS loaded. */ + set_default_vmx_state(state, state_sz); + state->size = sizeof(*state); + state->flags = 0; + state->hdr.vmx.vmcs12_pa = -1; + state->hdr.vmx.flags = ~0; test_nested_state_expect_einval(vm, state); - /* The revision id for vmcs12 must be VMCS12_REVISION. */ + /* vmxon_pa cannot be the same address as vmcs_pa. */ set_default_vmx_state(state, state_sz); - set_revision_id_for_vmcs12(state, 0); + state->hdr.vmx.vmxon_pa = 0; + state->hdr.vmx.vmcs12_pa = 0; test_nested_state_expect_einval(vm, state); /* diff --git a/tools/testing/selftests/net/fib_nexthop_multiprefix.sh b/tools/testing/selftests/net/fib_nexthop_multiprefix.sh index 9dc35a16e415..51df5e305855 100755 --- a/tools/testing/selftests/net/fib_nexthop_multiprefix.sh +++ b/tools/testing/selftests/net/fib_nexthop_multiprefix.sh @@ -144,7 +144,7 @@ setup() cleanup() { - for n in h1 r1 h2 h3 h4 + for n in h0 r1 h1 h2 h3 do ip netns del ${n} 2>/dev/null done diff --git a/tools/testing/selftests/net/forwarding/ethtool.sh b/tools/testing/selftests/net/forwarding/ethtool.sh index eb8e2a23bbb4..43a948feed26 100755 --- a/tools/testing/selftests/net/forwarding/ethtool.sh +++ b/tools/testing/selftests/net/forwarding/ethtool.sh @@ -252,8 +252,6 @@ check_highest_speed_is_chosen() fi local -a speeds_arr=($(common_speeds_get $h1 $h2 0 1)) - # Remove the first speed, h1 does not advertise this speed. 
- unset speeds_arr[0] max_speed=${speeds_arr[0]} for current in ${speeds_arr[@]}; do diff --git a/tools/testing/selftests/net/ip_defrag.sh b/tools/testing/selftests/net/ip_defrag.sh index 15d3489ecd9c..ceb7ad4dbd94 100755 --- a/tools/testing/selftests/net/ip_defrag.sh +++ b/tools/testing/selftests/net/ip_defrag.sh @@ -6,6 +6,8 @@ set +x set -e +modprobe -q nf_defrag_ipv6 + readonly NETNS="ns-$(mktemp -u XXXXXX)" setup() { diff --git a/tools/testing/selftests/net/psock_fanout.c b/tools/testing/selftests/net/psock_fanout.c index 8c8c7d79c38d..2c522f7a0aec 100644 --- a/tools/testing/selftests/net/psock_fanout.c +++ b/tools/testing/selftests/net/psock_fanout.c @@ -350,7 +350,8 @@ static int test_datapath(uint16_t typeflags, int port_off, int fds[2], fds_udp[2][2], ret; fprintf(stderr, "\ntest: datapath 0x%hx ports %hu,%hu\n", - typeflags, PORT_BASE, PORT_BASE + port_off); + typeflags, (uint16_t)PORT_BASE, + (uint16_t)(PORT_BASE + port_off)); fds[0] = sock_fanout_open(typeflags, 0); fds[1] = sock_fanout_open(typeflags, 0); diff --git a/tools/testing/selftests/net/rxtimestamp.c b/tools/testing/selftests/net/rxtimestamp.c index 422e7761254d..bcb79ba1f214 100644 --- a/tools/testing/selftests/net/rxtimestamp.c +++ b/tools/testing/selftests/net/rxtimestamp.c @@ -329,8 +329,7 @@ int main(int argc, char **argv) bool all_tests = true; int arg_index = 0; int failures = 0; - int s, t; - char opt; + int s, t, opt; while ((opt = getopt_long(argc, argv, "", long_options, &arg_index)) != -1) { diff --git a/tools/testing/selftests/net/so_txtime.c b/tools/testing/selftests/net/so_txtime.c index ceaad78e9667..3155fbbf644b 100644 --- a/tools/testing/selftests/net/so_txtime.c +++ b/tools/testing/selftests/net/so_txtime.c @@ -121,7 +121,7 @@ static bool do_recv_one(int fdr, struct timed_send *ts) if (rbuf[0] != ts->data) error(1, 0, "payload mismatch. expected %c", ts->data); - if (labs(tstop - texpect) > cfg_variance_us) + if (llabs(tstop - texpect) > cfg_variance_us) error(1, 0, "exceeds variance (%d us)", cfg_variance_us); return false; diff --git a/tools/testing/selftests/net/tcp_mmap.c b/tools/testing/selftests/net/tcp_mmap.c index 4555f88252ba..a61b7b3da549 100644 --- a/tools/testing/selftests/net/tcp_mmap.c +++ b/tools/testing/selftests/net/tcp_mmap.c @@ -344,7 +344,7 @@ int main(int argc, char *argv[]) { struct sockaddr_storage listenaddr, addr; unsigned int max_pacing_rate = 0; - size_t total = 0; + uint64_t total = 0; char *host = NULL; int fd, c, on = 1; char *buffer; @@ -473,12 +473,12 @@ int main(int argc, char *argv[]) zflg = 0; } while (total < FILE_SZ) { - ssize_t wr = FILE_SZ - total; + int64_t wr = FILE_SZ - total; if (wr > chunk_size) wr = chunk_size; /* Note : we just want to fill the pipe with 0 bytes */ - wr = send(fd, buffer, wr, zflg ? MSG_ZEROCOPY : 0); + wr = send(fd, buffer, (size_t)wr, zflg ? MSG_ZEROCOPY : 0); if (wr <= 0) break; total += wr; diff --git a/tools/testing/selftests/net/txtimestamp.sh b/tools/testing/selftests/net/txtimestamp.sh index eea6f5193693..31637769f59f 100755 --- a/tools/testing/selftests/net/txtimestamp.sh +++ b/tools/testing/selftests/net/txtimestamp.sh @@ -75,7 +75,7 @@ main() { fi } -if [[ "$(ip netns identify)" == "root" ]]; then +if [[ -z "$(ip netns identify)" ]]; then ./in_netns.sh $0 $@ else main $@ |