1069 files changed, 12027 insertions, 7499 deletions
@@ -10,6 +10,8 @@
 # Please keep this list dictionary sorted.
 #
 Aaron Durbin <adurbin@google.com>
+Abel Vesa <abelvesa@kernel.org> <abel.vesa@nxp.com>
+Abel Vesa <abelvesa@kernel.org> <abelvesa@gmail.com>
 Abhinav Kumar <quic_abhinavk@quicinc.com> <abhinavk@codeaurora.org>
 Adam Oldham <oldhamca@gmail.com>
 Adam Radford <aradford@gmail.com>
@@ -85,6 +87,7 @@ Christian Borntraeger <borntraeger@linux.ibm.com> <borntrae@de.ibm.com>
 Christian Brauner <brauner@kernel.org> <christian@brauner.io>
 Christian Brauner <brauner@kernel.org> <christian.brauner@canonical.com>
 Christian Brauner <brauner@kernel.org> <christian.brauner@ubuntu.com>
+Christian Marangi <ansuelsmth@gmail.com>
 Christophe Ricard <christophe.ricard@gmail.com>
 Christoph Hellwig <hch@lst.de>
 Colin Ian King <colin.king@intel.com> <colin.king@canonical.com>
@@ -165,6 +168,7 @@ Jan Glauber <jan.glauber@gmail.com> <jang@de.ibm.com>
 Jan Glauber <jan.glauber@gmail.com> <jang@linux.vnet.ibm.com>
 Jan Glauber <jan.glauber@gmail.com> <jglauber@cavium.com>
 Jarkko Sakkinen <jarkko@kernel.org> <jarkko.sakkinen@linux.intel.com>
+Jarkko Sakkinen <jarkko@kernel.org> <jarkko@profian.com>
 Jason Gunthorpe <jgg@ziepe.ca> <jgg@mellanox.com>
 Jason Gunthorpe <jgg@ziepe.ca> <jgg@nvidia.com>
 Jason Gunthorpe <jgg@ziepe.ca> <jgunthorpe@obsidianresearch.com>
diff --git a/Documentation/ABI/testing/sysfs-ata b/Documentation/ABI/testing/sysfs-ata
index 2f726c914752..3daecac48964 100644
--- a/Documentation/ABI/testing/sysfs-ata
+++ b/Documentation/ABI/testing/sysfs-ata
@@ -107,13 +107,14 @@ Description:
 		described in ATA8 7.16 and 7.17. Only valid if the device is
 		not a PM.
 
-	pio_mode:	(RO) Transfer modes supported by the device when
-			in PIO mode. Mostly used by PATA device.
+	pio_mode:	(RO) PIO transfer mode used by the device.
+			Mostly used by PATA devices.
 
-	xfer_mode:	(RO) Current transfer mode
+	xfer_mode:	(RO) Current transfer mode. Mostly used by
+			PATA devices.
 
-	dma_mode:	(RO) Transfer modes supported by the device when
-			in DMA mode. Mostly used by PATA device.
+	dma_mode:	(RO) DMA transfer mode used by the device.
+			Mostly used by PATA devices.
 
 	class:		(RO) Device class.
Can be "ata" for disk, "atapi" for packet device, "pmp" for PM, or diff --git a/Documentation/ABI/testing/sysfs-bus-iio-vf610 b/Documentation/ABI/testing/sysfs-bus-iio-vf610 index 308a6756d3bf..491ead804488 100644 --- a/Documentation/ABI/testing/sysfs-bus-iio-vf610 +++ b/Documentation/ABI/testing/sysfs-bus-iio-vf610 @@ -1,4 +1,4 @@ -What: /sys/bus/iio/devices/iio:deviceX/conversion_mode +What: /sys/bus/iio/devices/iio:deviceX/in_conversion_mode KernelVersion: 4.2 Contact: linux-iio@vger.kernel.org Description: diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu index 2ad01cad7f1c..bcc974d276dc 100644 --- a/Documentation/ABI/testing/sysfs-devices-system-cpu +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu @@ -526,6 +526,7 @@ What: /sys/devices/system/cpu/vulnerabilities /sys/devices/system/cpu/vulnerabilities/srbds /sys/devices/system/cpu/vulnerabilities/tsx_async_abort /sys/devices/system/cpu/vulnerabilities/itlb_multihit + /sys/devices/system/cpu/vulnerabilities/mmio_stale_data Date: January 2018 Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org> Description: Information about CPU vulnerabilities diff --git a/Documentation/ABI/testing/sysfs-driver-bd9571mwv-regulator b/Documentation/ABI/testing/sysfs-driver-bd9571mwv-regulator index 42214b4ff14a..90596d8bb51c 100644 --- a/Documentation/ABI/testing/sysfs-driver-bd9571mwv-regulator +++ b/Documentation/ABI/testing/sysfs-driver-bd9571mwv-regulator @@ -26,6 +26,6 @@ Description: Read/write the current state of DDR Backup Mode, which controls DDR Backup Mode must be explicitly enabled by the user, to invoke step 1. - See also Documentation/devicetree/bindings/mfd/bd9571mwv.txt. + See also Documentation/devicetree/bindings/mfd/rohm,bd9571mwv.yaml. Users: User space applications for embedded boards equipped with a BD9571MWV PMIC. diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst index 8cbc711cda93..4df436e7c417 100644 --- a/Documentation/admin-guide/hw-vuln/index.rst +++ b/Documentation/admin-guide/hw-vuln/index.rst @@ -17,3 +17,4 @@ are configurable at compile, boot or run time. special-register-buffer-data-sampling.rst core-scheduling.rst l1d_flush.rst + processor_mmio_stale_data.rst diff --git a/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst new file mode 100644 index 000000000000..9393c50b5afc --- /dev/null +++ b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst @@ -0,0 +1,246 @@ +========================================= +Processor MMIO Stale Data Vulnerabilities +========================================= + +Processor MMIO Stale Data Vulnerabilities are a class of memory-mapped I/O +(MMIO) vulnerabilities that can expose data. The sequences of operations for +exposing data range from simple to very complex. Because most of the +vulnerabilities require the attacker to have access to MMIO, many environments +are not affected. System environments using virtualization where MMIO access is +provided to untrusted guests may need mitigation. These vulnerabilities are +not transient execution attacks. However, these vulnerabilities may propagate +stale data into core fill buffers where the data can subsequently be inferred +by an unmitigated transient execution attack. Mitigation for these +vulnerabilities includes a combination of microcode update and software +changes, depending on the platform and usage model. 
+changes, depending on the platform and usage model. Some of these mitigations
+are similar to those used to mitigate Microarchitectural Data Sampling (MDS) or
+those used to mitigate Special Register Buffer Data Sampling (SRBDS).
+
+Data Propagators
+================
+Propagators are operations that result in stale data being copied or moved from
+one microarchitectural buffer or register to another. Processor MMIO Stale Data
+Vulnerabilities are operations that may result in stale data being directly
+read into an architectural, software-visible state or sampled from a buffer or
+register.
+
+Fill Buffer Stale Data Propagator (FBSDP)
+-----------------------------------------
+Stale data may propagate from fill buffers (FB) into the non-coherent portion
+of the uncore on some non-coherent writes. Fill buffer propagation by itself
+does not make stale data architecturally visible. Stale data must be propagated
+to a location where it is subject to reading or sampling.
+
+Sideband Stale Data Propagator (SSDP)
+-------------------------------------
+The sideband stale data propagator (SSDP) is limited to the client (including
+Intel Xeon server E3) uncore implementation. The sideband response buffer is
+shared by all client cores. For non-coherent reads that go to sideband
+destinations, the uncore logic returns 64 bytes of data to the core, including
+both requested data and unrequested stale data, from a transaction buffer and
+the sideband response buffer. As a result, stale data from the sideband
+response and transaction buffers may now reside in a core fill buffer.
+
+Primary Stale Data Propagator (PSDP)
+------------------------------------
+The primary stale data propagator (PSDP) is limited to the client (including
+Intel Xeon server E3) uncore implementation. Similar to the sideband response
+buffer, the primary response buffer is shared by all client cores. For some
+processors, MMIO primary reads will return 64 bytes of data to the core fill
+buffer including both requested data and unrequested stale data. This is
+similar to the sideband stale data propagator.
+
+Vulnerabilities
+===============
+Device Register Partial Write (DRPW) (CVE-2022-21166)
+-----------------------------------------------------
+Some endpoint MMIO registers incorrectly handle writes that are smaller than
+the register size. Instead of aborting the write or only copying the correct
+subset of bytes (for example, 2 bytes for a 2-byte write), more bytes than
+specified by the write transaction may be written to the register. On
+processors affected by FBSDP, this may expose stale data from the fill buffers
+of the core that created the write transaction.
+
+Shared Buffers Data Sampling (SBDS) (CVE-2022-21125)
+----------------------------------------------------
+After propagators may have moved data around the uncore and copied stale data
+into client core fill buffers, processors affected by MFBDS can leak data from
+the fill buffer. It is limited to the client (including Intel Xeon server E3)
+uncore implementation.
+
+Shared Buffers Data Read (SBDR) (CVE-2022-21123)
+------------------------------------------------
+It is similar to Shared Buffer Data Sampling (SBDS) except that the data is
+directly read into the architectural software-visible state. It is limited to
+the client (including Intel Xeon server E3) uncore implementation.
+
+Affected Processors
+===================
+Not all CPUs are affected by all the variants. For instance, most
+processors for the server market (excluding Intel Xeon E3 processors) are
+impacted by only Device Register Partial Write (DRPW).
+
+Below is the list of affected Intel processors [#f1]_:
+
+   ===================  ============  =========
+   Common name          Family_Model  Steppings
+   ===================  ============  =========
+   HASWELL_X            06_3FH        2,4
+   SKYLAKE_L            06_4EH        3
+   BROADWELL_X          06_4FH        All
+   SKYLAKE_X            06_55H        3,4,6,7,11
+   BROADWELL_D          06_56H        3,4,5
+   SKYLAKE              06_5EH        3
+   ICELAKE_X            06_6AH        4,5,6
+   ICELAKE_D            06_6CH        1
+   ICELAKE_L            06_7EH        5
+   ATOM_TREMONT_D       06_86H        All
+   LAKEFIELD            06_8AH        1
+   KABYLAKE_L           06_8EH        9 to 12
+   ATOM_TREMONT         06_96H        1
+   ATOM_TREMONT_L       06_9CH        0
+   KABYLAKE             06_9EH        9 to 13
+   COMETLAKE            06_A5H        2,3,5
+   COMETLAKE_L          06_A6H        0,1
+   ROCKETLAKE           06_A7H        1
+   ===================  ============  =========
+
+If a CPU is in the affected processor list, but not affected by a variant, it
+is indicated by new bits in MSR IA32_ARCH_CAPABILITIES. As described in a later
+section, mitigation largely remains the same for all the variants, i.e. to
+clear the CPU fill buffers via the VERW instruction.
+
+New bits in MSRs
+================
+Newer processors and microcode updates on existing affected processors added
+new bits to the IA32_ARCH_CAPABILITIES MSR. These bits can be used to enumerate
+specific variants of Processor MMIO Stale Data vulnerabilities and mitigation
+capability.
+
+MSR IA32_ARCH_CAPABILITIES
+--------------------------
+Bit 13 - SBDR_SSDP_NO - When set, processor is not affected by either the
+         Shared Buffers Data Read (SBDR) vulnerability or the sideband stale
+         data propagator (SSDP).
+Bit 14 - FBSDP_NO - When set, processor is not affected by the Fill Buffer
+         Stale Data Propagator (FBSDP).
+Bit 15 - PSDP_NO - When set, processor is not affected by the Primary Stale
+         Data Propagator (PSDP).
+Bit 17 - FB_CLEAR - When set, the VERW instruction will overwrite CPU fill
+         buffer values as part of MD_CLEAR operations. Processors that do not
+         enumerate MDS_NO (meaning they are affected by MDS) but that do
+         enumerate support for both L1D_FLUSH and MD_CLEAR implicitly enumerate
+         FB_CLEAR as part of their MD_CLEAR support.
+Bit 18 - FB_CLEAR_CTRL - Processor supports read and write to MSR
+         IA32_MCU_OPT_CTRL[FB_CLEAR_DIS]. On such processors, the FB_CLEAR_DIS
+         bit can be set to cause the VERW instruction to not perform the
+         FB_CLEAR action. Not all processors that support FB_CLEAR will support
+         FB_CLEAR_CTRL.
+
+MSR IA32_MCU_OPT_CTRL
+---------------------
+Bit 3 - FB_CLEAR_DIS - When set, the VERW instruction does not perform the
+FB_CLEAR action. This may be useful to reduce the performance impact of
+FB_CLEAR in cases where system software deems it warranted (for example, when
+performance is more critical, or the untrusted software has no MMIO access).
+Note that FB_CLEAR_DIS has no impact on enumeration (for example, it does not
+change FB_CLEAR or MD_CLEAR enumeration) and it may not be supported on all
+processors that enumerate FB_CLEAR.
+
+Mitigation
+==========
+Like MDS, all variants of Processor MMIO Stale Data vulnerabilities have the
+same mitigation strategy: force the CPU to clear the affected buffers before
+an attacker can extract the secrets.
+
+This is achieved by using the otherwise unused and obsolete VERW instruction in
+combination with a microcode update. The microcode clears the affected CPU
+buffers when the VERW instruction is executed.
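As a rough illustration of what such a VERW sequence looks like, here is a
minimal sketch modeled on the kernel's mds_clear_cpu_buffers() rather than
quoting it; __KERNEL_DS is the kernel's data-segment selector, and any valid
readable selector would serve the same purpose::

    /* Hedged sketch: execute VERW with a memory operand so the microcode
     * side effect overwrites the CPU fill buffers. The architectural
     * result of VERW (the ZF flag) is irrelevant here. */
    static inline void clear_cpu_buffers(void)
    {
            static const unsigned short ds = __KERNEL_DS;

            asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
    }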
+
+The kernel reuses the MDS function to invoke the buffer clearing:
+
+  mds_clear_cpu_buffers()
+
+On MDS-affected CPUs, the kernel already invokes CPU buffer clearing on
+kernel/userspace, hypervisor/guest and C-state (idle) transitions. No
+additional mitigation is needed on such CPUs.
+
+For CPUs not affected by MDS or TAA, mitigation is needed only against an
+attacker with MMIO capability. Therefore, VERW is not required for
+kernel/userspace transitions. For the virtualization case, VERW is only needed
+at VMENTER for a guest with MMIO capability.
+
+Mitigation points
+-----------------
+Return to user space
+^^^^^^^^^^^^^^^^^^^^
+Same mitigation as MDS when affected by MDS/TAA, otherwise no mitigation
+needed.
+
+C-State transition
+^^^^^^^^^^^^^^^^^^
+Control register writes by the CPU during a C-state transition can propagate
+data from the fill buffers to uncore buffers. Execute VERW before the C-state
+transition to clear the CPU fill buffers.
+
+Guest entry point
+^^^^^^^^^^^^^^^^^
+Same mitigation as MDS when the processor is also affected by MDS/TAA,
+otherwise execute VERW at VMENTER only for MMIO-capable guests. On CPUs not
+affected by MDS/TAA, a guest without MMIO access cannot extract secrets using
+Processor MMIO Stale Data vulnerabilities, so there is no need to execute VERW
+for such guests.
+
+Mitigation control on the kernel command line
+---------------------------------------------
+The kernel command line allows control of the Processor MMIO Stale Data
+mitigations at boot time with the option "mmio_stale_data=". The valid
+arguments for this option are:
+
+  ==========  =================================================================
+  full        If the CPU is vulnerable, enable mitigation; CPU buffer clearing
+              on exit to userspace and when entering a VM. Idle transitions are
+              protected as well. It does not automatically disable SMT.
+  full,nosmt  Same as full, with SMT disabled on vulnerable CPUs. This is the
+              complete mitigation.
+  off         Disables mitigation completely.
+  ==========  =================================================================
+
+If the CPU is affected and mmio_stale_data=off is not supplied on the kernel
+command line, then the kernel selects the appropriate mitigation.
+
+Mitigation status information
+-----------------------------
+The Linux kernel provides a sysfs interface to enumerate the current
+vulnerability status of the system: whether the system is vulnerable, and
+which mitigations are active. The relevant sysfs file is:
+
+  /sys/devices/system/cpu/vulnerabilities/mmio_stale_data
+
+The possible values in this file are:
+
+  .. list-table::
+
+     * - 'Not affected'
+       - The processor is not vulnerable
+     * - 'Vulnerable'
+       - The processor is vulnerable, but no mitigation enabled
+     * - 'Vulnerable: Clear CPU buffers attempted, no microcode'
+       - The processor is vulnerable, but microcode is not updated. The
+         mitigation is enabled on a best effort basis.
+     * - 'Mitigation: Clear CPU buffers'
+       - The processor is vulnerable and the CPU buffer clearing mitigation is
+         enabled.
+
+If the processor is vulnerable then the following information is appended to
+the above information:
+
+  ========================  ===========================================
+  'SMT vulnerable'          SMT is enabled
+  'SMT disabled'            SMT is disabled
+  'SMT Host state unknown'  Kernel runs in a VM, Host SMT state unknown
+  ========================  ===========================================
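The status above can also be read programmatically; a minimal user-space
sketch in plain POSIX C, assuming nothing beyond the sysfs path quoted above::

    #include <stdio.h>

    int main(void)
    {
            char buf[256];
            FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/"
                            "mmio_stale_data", "r");

            if (!f) {
                    perror("fopen"); /* file absent on kernels without this report */
                    return 1;
            }
            if (fgets(buf, sizeof(buf), f))
                    fputs(buf, stdout); /* e.g. "Mitigation: Clear CPU buffers; SMT disabled" */
            fclose(f);
            return 0;
    }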
+
+References
+----------
+.. [#f1] Affected Processors
+   https://www.intel.com/content/www/us/en/developer/topic-technology/software-security-guidance/processors-affected-consolidated-product-cpu-model.html
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 8090130b544b..2522b11e593f 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -2469,7 +2469,6 @@
 			protected: nVHE-based mode with support for guests whose
 				   state is kept private from the host.
-				   Not valid if the kernel is running in EL2.
 
 			Defaults to VHE/nVHE based on hardware support. Setting
 			mode to "protected" will disable kexec and hibernation
@@ -3176,6 +3175,7 @@
 			srbds=off [X86,INTEL]
 			no_entry_flush [PPC]
 			no_uaccess_flush [PPC]
+			mmio_stale_data=off [X86]
 
 			Exceptions:
 			This does not have any effect on
@@ -3197,6 +3197,7 @@
 			Equivalent to: l1tf=flush,nosmt [X86]
 				       mds=full,nosmt [X86]
 				       tsx_async_abort=full,nosmt [X86]
+				       mmio_stale_data=full,nosmt [X86]
 
 	mminit_loglevel=
 			[KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this
@@ -3206,6 +3207,40 @@
 			log everything. Information is printed at KERN_DEBUG
 			so loglevel=8 may also need to be specified.
 
+	mmio_stale_data=
+			[X86,INTEL] Control mitigation for the Processor
+			MMIO Stale Data vulnerabilities.
+
+			Processor MMIO Stale Data is a class of
+			vulnerabilities that may expose data after an MMIO
+			operation. Exposed data could originate or end in
+			the same CPU buffers as affected by MDS and TAA.
+			Therefore, similar to MDS and TAA, the mitigation
+			is to clear the affected CPU buffers.
+
+			This parameter controls the mitigation. The
+			options are:
+
+			full       - Enable mitigation on vulnerable CPUs
+
+			full,nosmt - Enable mitigation and disable SMT on
+				     vulnerable CPUs.
+
+			off        - Unconditionally disable mitigation
+
+			On MDS or TAA affected machines, mmio_stale_data=off
+			can be prevented by an active MDS or TAA mitigation,
+			as these vulnerabilities are mitigated with the same
+			mechanism. To disable this mitigation, you need to
+			specify mds=off and tsx_async_abort=off too.
+
+			Not specifying this option is equivalent to
+			mmio_stale_data=full.
+
+			For details see:
+			Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst
+
 	module.sig_enforce
 			[KNL] When CONFIG_MODULE_SIG is set, this means that
 			modules without (valid) signatures will fail to load.
diff --git a/Documentation/arm/tcm.rst b/Documentation/arm/tcm.rst
index b256f9783883..1dc6c39220f9 100644
--- a/Documentation/arm/tcm.rst
+++ b/Documentation/arm/tcm.rst
@@ -34,7 +34,7 @@ CPU so it is usually wise not to overlap any physical
 RAM with the TCM.
 
 The TCM memory can then be remapped to another address again using
-the MMU, but notice that the TCM if often used in situations where
+the MMU, but notice that the TCM is often used in situations where
 the MMU is turned off. To avoid confusion the current Linux
 implementation will map the TCM 1 to 1 from physical to virtual
 memory in the location specified by the kernel. Currently Linux
diff --git a/Documentation/arm64/sme.rst b/Documentation/arm64/sme.rst
index 8ba677b87e90..937147f58cc5 100644
--- a/Documentation/arm64/sme.rst
+++ b/Documentation/arm64/sme.rst
@@ -371,7 +371,7 @@ The regset data starts with struct user_za_header, containing:
 
 Appendix A.  SME programmer's model (informative)
 =================================================
 
-This section provides a minimal description of the additions made by SVE to the
+This section provides a minimal description of the additions made by SME to the
 ARMv8-A programmer's model that are relevant to this document.
 
 Note: This section is for information only and not intended to be complete or
diff --git a/Documentation/devicetree/bindings/clock/idt,versaclock5.yaml b/Documentation/devicetree/bindings/clock/idt,versaclock5.yaml
index be66f1e8b547..7c331bfbe370 100644
--- a/Documentation/devicetree/bindings/clock/idt,versaclock5.yaml
+++ b/Documentation/devicetree/bindings/clock/idt,versaclock5.yaml
@@ -45,7 +45,7 @@ description: |
   The case where SH and SP are both 1 is likely not very interesting.
 
 maintainers:
-  - Luca Ceresoli <luca@lucaceresoli.net>
+  - Luca Ceresoli <luca.ceresoli@bootlin.com>
 
 properties:
   compatible:
diff --git a/Documentation/devicetree/bindings/cpufreq/brcm,stb-avs-cpu-freq.txt b/Documentation/devicetree/bindings/cpufreq/brcm,stb-avs-cpu-freq.txt
index 73470ecd1f12..ce91a9197697 100644
--- a/Documentation/devicetree/bindings/cpufreq/brcm,stb-avs-cpu-freq.txt
+++ b/Documentation/devicetree/bindings/cpufreq/brcm,stb-avs-cpu-freq.txt
@@ -16,7 +16,7 @@ has been processed. See [2] for more information on the brcm,l2-intc node.
 firmware. On some SoCs, this firmware supports DFS and DVFS in addition to
 Adaptive Voltage Scaling.
 
-[2] Documentation/devicetree/bindings/interrupt-controller/brcm,l2-intc.txt
+[2] Documentation/devicetree/bindings/interrupt-controller/brcm,l2-intc.yaml
 
 Node brcm,avs-cpu-data-mem
diff --git a/Documentation/devicetree/bindings/display/arm,malidp.yaml b/Documentation/devicetree/bindings/display/arm,malidp.yaml
index 795a08ac9f12..2a17ec6fc97c 100644
--- a/Documentation/devicetree/bindings/display/arm,malidp.yaml
+++ b/Documentation/devicetree/bindings/display/arm,malidp.yaml
@@ -71,11 +71,6 @@ properties:
       - description: number of output lines for the green channel (G)
       - description: number of output lines for the blue channel (B)
 
-  arm,malidp-arqos-high-level:
-    $ref: /schemas/types.yaml#/definitions/uint32
-    description:
-      integer describing the ARQoS levels of DP500's QoS signaling
-
   arm,malidp-arqos-value:
     $ref: /schemas/types.yaml#/definitions/uint32
     description:
@@ -113,7 +108,7 @@ examples:
         clocks = <&oscclk2>, <&fpgaosc0>, <&fpgaosc1>, <&fpgaosc1>;
         clock-names = "pxlclk", "mclk", "aclk", "pclk";
         arm,malidp-output-port-lines = /bits/ 8 <8 8 8>;
-        arm,malidp-arqos-high-level = <0xd000d000>;
+        arm,malidp-arqos-value = <0xd000d000>;
 
         port {
             dp0_output: endpoint {
diff --git a/Documentation/devicetree/bindings/display/msm/dpu-sc7180.yaml b/Documentation/devicetree/bindings/display/msm/dpu-sc7180.yaml
index b41991eaa454..d3c3e4b07897 100644
--- a/Documentation/devicetree/bindings/display/msm/dpu-sc7180.yaml
+++ b/Documentation/devicetree/bindings/display/msm/dpu-sc7180.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Display DPU dt properties for SC7180 target
 
 maintainers:
-  - Krishna Manikandan <mkrishn@codeaurora.org>
+  - Krishna Manikandan <quic_mkrishn@quicinc.com>
 
 description: |
   Device tree bindings for MSM Mobile Display Subsystem(MDSS) that encapsulates
diff --git a/Documentation/devicetree/bindings/display/msm/dpu-sc7280.yaml b/Documentation/devicetree/bindings/display/msm/dpu-sc7280.yaml
index 6e417d06fc79..f427eec3d3a4 100644
--- a/Documentation/devicetree/bindings/display/msm/dpu-sc7280.yaml
+++ b/Documentation/devicetree/bindings/display/msm/dpu-sc7280.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Display DPU dt properties for SC7280
 
 maintainers:
-  - Krishna Manikandan <mkrishn@codeaurora.org>
+  - Krishna Manikandan <quic_mkrishn@quicinc.com>
 
 description: |
   Device tree bindings for MSM Mobile Display Subsystem (MDSS) that encapsulates
diff --git a/Documentation/devicetree/bindings/display/msm/dpu-sdm845.yaml b/Documentation/devicetree/bindings/display/msm/dpu-sdm845.yaml
index 1a42491efdbc..2bb8896beffc 100644
--- a/Documentation/devicetree/bindings/display/msm/dpu-sdm845.yaml
+++ b/Documentation/devicetree/bindings/display/msm/dpu-sdm845.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Display DPU dt properties for SDM845 target
 
 maintainers:
-  - Krishna Manikandan <mkrishn@codeaurora.org>
+  - Krishna Manikandan <quic_mkrishn@quicinc.com>
 
 description: |
   Device tree bindings for MSM Mobile Display Subsystem(MDSS) that encapsulates
diff --git a/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml b/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml
index 7095ec3c890d..880bfe930830 100644
--- a/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml
+++ b/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Display DSI controller
 
 maintainers:
-  - Krishna Manikandan <mkrishn@codeaurora.org>
+  - Krishna Manikandan <quic_mkrishn@quicinc.com>
 
 allOf:
   - $ref: "../dsi-controller.yaml#"
diff --git a/Documentation/devicetree/bindings/display/msm/dsi-phy-10nm.yaml b/Documentation/devicetree/bindings/display/msm/dsi-phy-10nm.yaml
index 2d5a766d028f..716f921e3532 100644
--- a/Documentation/devicetree/bindings/display/msm/dsi-phy-10nm.yaml
+++ b/Documentation/devicetree/bindings/display/msm/dsi-phy-10nm.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Display DSI 10nm PHY
 
 maintainers:
-  - Krishna Manikandan <mkrishn@codeaurora.org>
+  - Krishna Manikandan <quic_mkrishn@quicinc.com>
 
 allOf:
   - $ref: dsi-phy-common.yaml#
diff --git a/Documentation/devicetree/bindings/display/msm/dsi-phy-14nm.yaml b/Documentation/devicetree/bindings/display/msm/dsi-phy-14nm.yaml
index 81dbee4803c0..1342d74ecfe0 100644
--- a/Documentation/devicetree/bindings/display/msm/dsi-phy-14nm.yaml
+++ b/Documentation/devicetree/bindings/display/msm/dsi-phy-14nm.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Display DSI 14nm PHY
 
 maintainers:
-  - Krishna Manikandan <mkrishn@codeaurora.org>
+  - Krishna Manikandan <quic_mkrishn@quicinc.com>
 
 allOf:
   - $ref: dsi-phy-common.yaml#
diff --git a/Documentation/devicetree/bindings/display/msm/dsi-phy-20nm.yaml b/Documentation/devicetree/bindings/display/msm/dsi-phy-20nm.yaml
index b8de785ce815..9c1f9140c731 100644
--- a/Documentation/devicetree/bindings/display/msm/dsi-phy-20nm.yaml
+++ b/Documentation/devicetree/bindings/display/msm/dsi-phy-20nm.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Display DSI 20nm PHY
 
 maintainers:
-  - Krishna Manikandan <mkrishn@codeaurora.org>
+  - Krishna Manikandan <quic_mkrishn@quicinc.com>
 
 allOf:
   - $ref: dsi-phy-common.yaml#
diff --git a/Documentation/devicetree/bindings/display/msm/dsi-phy-28nm.yaml b/Documentation/devicetree/bindings/display/msm/dsi-phy-28nm.yaml
index 69eecaa64b18..3d8540a06fe2 100644
--- a/Documentation/devicetree/bindings/display/msm/dsi-phy-28nm.yaml
+++ b/Documentation/devicetree/bindings/display/msm/dsi-phy-28nm.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Display DSI 28nm PHY
 
 maintainers:
-  - Krishna Manikandan <mkrishn@codeaurora.org>
+  - Krishna Manikandan <quic_mkrishn@quicinc.com>
 
 allOf:
   - $ref: dsi-phy-common.yaml#
diff --git a/Documentation/devicetree/bindings/display/msm/dsi-phy-common.yaml b/Documentation/devicetree/bindings/display/msm/dsi-phy-common.yaml
index 502bdda90235..76d40f7933dd 100644
--- a/Documentation/devicetree/bindings/display/msm/dsi-phy-common.yaml
+++ b/Documentation/devicetree/bindings/display/msm/dsi-phy-common.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Description of Qualcomm Display DSI PHY common dt properties
 
 maintainers:
-  - Krishna Manikandan <mkrishn@codeaurora.org>
+  - Krishna Manikandan <quic_mkrishn@quicinc.com>
 
 description: |
   This defines the DSI PHY dt properties which are common for all
diff --git a/Documentation/devicetree/bindings/hwmon/ti,tmp401.yaml b/Documentation/devicetree/bindings/hwmon/ti,tmp401.yaml
index fe0ac08faa1a..0e8ddf0ad789 100644
--- a/Documentation/devicetree/bindings/hwmon/ti,tmp401.yaml
+++ b/Documentation/devicetree/bindings/hwmon/ti,tmp401.yaml
@@ -40,9 +40,8 @@ properties:
       value to be used for converting remote channel measurements to
       temperature.
     $ref: /schemas/types.yaml#/definitions/int32
-    items:
-      minimum: -128
-      maximum: 127
+    minimum: -128
+    maximum: 127
 
   ti,beta-compensation:
     description:
diff --git a/Documentation/devicetree/bindings/hwmon/vexpress.txt b/Documentation/devicetree/bindings/hwmon/vexpress.txt
index 9c27ed694bbb..4a4df4ffc460 100644
--- a/Documentation/devicetree/bindings/hwmon/vexpress.txt
+++ b/Documentation/devicetree/bindings/hwmon/vexpress.txt
@@ -9,7 +9,7 @@ Requires node properties:
 	"arm,vexpress-power"
 	"arm,vexpress-energy"
 - "arm,vexpress-sysreg,func" when controlled via vexpress-sysreg
-  (see Documentation/devicetree/bindings/arm/vexpress-sysreg.txt
+  (see Documentation/devicetree/bindings/arm/vexpress-config.yaml
   for more details)
 
 Optional node properties:
diff --git a/Documentation/devicetree/bindings/interrupt-controller/socionext,uniphier-aidet.yaml b/Documentation/devicetree/bindings/interrupt-controller/socionext,uniphier-aidet.yaml
index f89ebde76dab..de7c5e59bae1 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/socionext,uniphier-aidet.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/socionext,uniphier-aidet.yaml
@@ -30,6 +30,7 @@ properties:
       - socionext,uniphier-ld11-aidet
       - socionext,uniphier-ld20-aidet
       - socionext,uniphier-pxs3-aidet
+      - socionext,uniphier-nx1-aidet
 
   reg:
     maxItems: 1
diff --git a/Documentation/devicetree/bindings/iommu/xen,grant-dma.yaml b/Documentation/devicetree/bindings/iommu/xen,grant-dma.yaml
new file mode 100644
index 000000000000..be1539d234f9
--- /dev/null
+++ b/Documentation/devicetree/bindings/iommu/xen,grant-dma.yaml
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: (GPL-2.0-only or BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iommu/xen,grant-dma.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Xen specific IOMMU for virtualized devices (e.g. virtio)
+
+maintainers:
+  - Stefano Stabellini <sstabellini@kernel.org>
+
+description:
+  The Xen IOMMU represents the Xen grant table interface. Grant mappings
+  are to be used with devices connected to the Xen IOMMU using the "iommus"
+  property, which also specifies the ID of the backend domain.
+  The binding is required to restrict memory access using Xen grant mappings.
+
+properties:
+  compatible:
+    const: xen,grant-dma
+
+  '#iommu-cells':
+    const: 1
+    description:
+      The single cell is the domid (domain ID) of the domain where the backend
+      is running.
+
+required:
+  - compatible
+  - "#iommu-cells"
+
+additionalProperties: false
+
+examples:
+  - |
+    iommu {
+        compatible = "xen,grant-dma";
+        #iommu-cells = <1>;
+    };
diff --git a/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra186-mc.yaml b/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra186-mc.yaml
index c7cfa6c2cd81..935d63d181d9 100644
--- a/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra186-mc.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra186-mc.yaml
@@ -150,7 +150,6 @@ allOf:
           description: 5 memory controller channels and 1 for stream-id registers
 
         reg-names:
-          maxItems: 6
           items:
             - const: sid
             - const: broadcast
@@ -170,7 +169,6 @@
           description: 17 memory controller channels and 1 for stream-id registers
 
         reg-names:
-          minItems: 18
           items:
             - const: sid
             - const: broadcast
@@ -202,7 +200,6 @@
           description: 17 memory controller channels and 1 for stream-id registers
 
         reg-names:
-          minItems: 18
           items:
             - const: sid
             - const: broadcast
diff --git a/Documentation/devicetree/bindings/mfd/maxim,max77714.yaml b/Documentation/devicetree/bindings/mfd/maxim,max77714.yaml
index 74a6867d3c82..edac14af101e 100644
--- a/Documentation/devicetree/bindings/mfd/maxim,max77714.yaml
+++ b/Documentation/devicetree/bindings/mfd/maxim,max77714.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: MAX77714 PMIC with GPIO, RTC and watchdog from Maxim Integrated.
 
 maintainers:
-  - Luca Ceresoli <luca@lucaceresoli.net>
+  - Luca Ceresoli <luca.ceresoli@bootlin.com>
 
 description: |
   MAX77714 is a Power Management IC with 4 buck regulators, 9
diff --git a/Documentation/devicetree/bindings/mmc/brcm,sdhci-brcmstb.yaml b/Documentation/devicetree/bindings/mmc/brcm,sdhci-brcmstb.yaml
index b672202fff4e..5ecdac9de484 100644
--- a/Documentation/devicetree/bindings/mmc/brcm,sdhci-brcmstb.yaml
+++ b/Documentation/devicetree/bindings/mmc/brcm,sdhci-brcmstb.yaml
@@ -75,7 +75,6 @@ examples:
         sd-uhs-sdr104;
         sdhci,auto-cmd12;
         interrupts = <0x0 0x26 0x4>;
-        interrupt-names = "sdio0_0";
         clocks = <&scmi_clk 245>;
         clock-names = "sw_sdio";
     };
@@ -94,7 +93,6 @@
         non-removable;
         bus-width = <0x8>;
         interrupts = <0x0 0x27 0x4>;
-        interrupt-names = "sdio1_0";
         clocks = <&scmi_clk 245>;
         clock-names = "sw_sdio";
     };
diff --git a/Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.yaml b/Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.yaml
index c79639e9027e..3ee758886558 100644
--- a/Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.yaml
+++ b/Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.yaml
@@ -56,6 +56,9 @@ properties:
       - const: core
       - const: axi
 
+  interrupts:
+    maxItems: 1
+
   marvell,xenon-sdhc-id:
     $ref: /schemas/types.yaml#/definitions/uint32
     minimum: 0
@@ -145,7 +148,6 @@
         items:
           - description: Xenon IP registers
           - description: Armada 3700 SoC PHY PAD Voltage Control register
-        minItems: 2
 
       marvell,pad-type:
         $ref: /schemas/types.yaml#/definitions/string
diff --git a/Documentation/devicetree/bindings/nvme/apple,nvme-ans.yaml b/Documentation/devicetree/bindings/nvme/apple,nvme-ans.yaml
index ddff9233b159..34dd1cc67124 100644
--- a/Documentation/devicetree/bindings/nvme/apple,nvme-ans.yaml
+++ b/Documentation/devicetree/bindings/nvme/apple,nvme-ans.yaml
@@ -55,7 +55,6 @@ properties:
     maxItems: 1
 
   apple,sart:
-    maxItems: 1
     $ref: /schemas/types.yaml#/definitions/phandle
     description: |
       Reference to the SART address filter.
diff --git a/Documentation/devicetree/bindings/phy/phy-stih407-usb.txt b/Documentation/devicetree/bindings/phy/phy-stih407-usb.txt
index de6a706abcdb..35f03df00130 100644
--- a/Documentation/devicetree/bindings/phy/phy-stih407-usb.txt
+++ b/Documentation/devicetree/bindings/phy/phy-stih407-usb.txt
@@ -9,7 +9,7 @@ Required properties:
 - resets	: list of phandle and reset specifier pairs. There should be two entries, one
 		  for the whole phy and one for the port
 - reset-names	: list of reset signal names. Should be "global" and "port"
-See: Documentation/devicetree/bindings/reset/st,sti-powerdown.txt
+See: Documentation/devicetree/bindings/reset/st,stih407-powerdown.yaml
 See: Documentation/devicetree/bindings/reset/reset.txt
 
 Example:
diff --git a/Documentation/devicetree/bindings/phy/qcom,qmp-usb3-dp-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,qmp-usb3-dp-phy.yaml
index 60dc27834e1d..b078009ed509 100644
--- a/Documentation/devicetree/bindings/phy/qcom,qmp-usb3-dp-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,qmp-usb3-dp-phy.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Qualcomm QMP USB3 DP PHY controller
 
 maintainers:
-  - Manu Gautam <mgautam@codeaurora.org>
+  - Wesley Cheng <quic_wcheng@quicinc.com>
 
 properties:
   compatible:
diff --git a/Documentation/devicetree/bindings/phy/qcom,qusb2-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,qusb2-phy.yaml
index 0ab3dad3f121..d68ab49345b8 100644
--- a/Documentation/devicetree/bindings/phy/qcom,qusb2-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,qusb2-phy.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Qualcomm QUSB2 phy controller
 
 maintainers:
-  - Manu Gautam <mgautam@codeaurora.org>
+  - Wesley Cheng <quic_wcheng@quicinc.com>
 
 description:
   QUSB2 controller supports LS/FS/HS usb connectivity on Qualcomm chipsets.
diff --git a/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml b/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml
index 1ce251de0855..7a0e6a9854da 100644
--- a/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml
@@ -7,7 +7,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Qualcomm Synopsys Femto High-Speed USB PHY V2
 
 maintainers:
-  - Wesley Cheng <wcheng@codeaurora.org>
+  - Wesley Cheng <quic_wcheng@quicinc.com>
 
 description: |
   Qualcomm High-Speed USB PHY
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-rk805.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-rk805.txt
index cbcbd31e3ce8..939cb5b6ffea 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-rk805.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-rk805.txt
@@ -27,7 +27,7 @@ Required properties:
 - pins: List of pins. Valid values of pins properties are: gpio0, gpio1.
 
 First 2 properties must be added in the RK805 PMIC node, documented in
-Documentation/devicetree/bindings/mfd/rk808.txt
+Documentation/devicetree/bindings/mfd/rockchip,rk808.yaml
 
 Optional properties:
 -------------------
diff --git a/Documentation/devicetree/bindings/pinctrl/ralink,mt7620-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/ralink,mt7620-pinctrl.yaml
index 4d820df24b89..6f17f3991640 100644
--- a/Documentation/devicetree/bindings/pinctrl/ralink,mt7620-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/ralink,mt7620-pinctrl.yaml
@@ -32,31 +32,37 @@ patternProperties:
         groups:
           description: The pin group to select.
          enum: [
+            # common
+            i2c, spi, wdt,
+
             # For MT7620 SoC
-            ephy, i2c, mdio, nd_sd, pa, pcie, rgmii1, rgmii2, spi, spi refclk,
-            uartf, uartlite, wdt, wled,
+            ephy, mdio, nd_sd, pa, pcie, rgmii1, rgmii2, spi refclk,
+            uartf, uartlite, wled,
 
             # For MT7628 and MT7688 SoCs
-            gpio, i2c, i2s, p0led_an, p0led_kn, p1led_an, p1led_kn, p2led_an,
+            gpio, i2s, p0led_an, p0led_kn, p1led_an, p1led_kn, p2led_an,
             p2led_kn, p3led_an, p3led_kn, p4led_an, p4led_kn, perst, pwm0,
-            pwm1, refclk, sdmode, spi, spi cs1, spis, uart0, uart1, uart2,
-            wdt, wled_an, wled_kn,
+            pwm1, refclk, sdmode, spi cs1, spis, uart0, uart1, uart2,
+            wled_an, wled_kn,
           ]
 
         function:
           description: The mux function to select.
          enum: [
+            # common
+            gpio, i2c, refclk, spi,
+
             # For MT7620 SoC
-            ephy, gpio, gpio i2s, gpio uartf, i2c, i2s uartf, mdio, nand, pa,
-            pcie refclk, pcie rst, pcm gpio, pcm i2s, pcm uartf, refclk,
-            rgmii1, rgmii2, sd, spi, spi refclk, uartf, uartlite, wdt refclk,
+            ephy, gpio i2s, gpio uartf, i2s uartf, mdio, nand, pa,
+            pcie refclk, pcie rst, pcm gpio, pcm i2s, pcm uartf,
+            rgmii1, rgmii2, sd, spi refclk, uartf, uartlite, wdt refclk,
             wdt rst, wled,
 
             # For MT7628 and MT7688 SoCs
-            antenna, debug, gpio, i2c, i2s, jtag, p0led_an, p0led_kn,
+            antenna, debug, i2s, jtag, p0led_an, p0led_kn,
             p1led_an, p1led_kn, p2led_an, p2led_kn, p3led_an, p3led_kn,
             p4led_an, p4led_kn, pcie, pcm, perst, pwm, pwm0, pwm1, pwm_uart2,
-            refclk, rsvd, sdxc, sdxc d5 d4, sdxc d6, sdxc d7, spi, spi cs1,
+            rsvd, sdxc, sdxc d5 d4, sdxc d6, sdxc d7, spi cs1,
             spis, sw_r, uart0, uart1, uart2, utif, wdt, wled_an, wled_kn,
             -,
           ]
diff --git a/Documentation/devicetree/bindings/pinctrl/ralink,rt305x-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/ralink,rt305x-pinctrl.yaml
index 425401c54269..f602a5d6e13a 100644
--- a/Documentation/devicetree/bindings/pinctrl/ralink,rt305x-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/ralink,rt305x-pinctrl.yaml
@@ -33,32 +33,29 @@ patternProperties:
         groups:
           description: The pin group to select.
          enum: [
+            # common
+            i2c, jtag, led, mdio, rgmii, spi, spi_cs1, uartf, uartlite,
+
             # For RT3050, RT3052 and RT3350 SoCs
-            i2c, jtag, mdio, rgmii, sdram, spi, uartf, uartlite,
+            sdram,
 
             # For RT3352 SoC
-            i2c, jtag, led, lna, mdio, pa, rgmii, spi, spi_cs1, uartf,
-            uartlite,
-
-            # For RT5350 SoC
-            i2c, jtag, led, spi, spi_cs1, uartf, uartlite,
+            lna, pa
           ]
 
         function:
          description: The mux function to select.
          enum: [
+            # common
+            gpio, gpio i2s, gpio uartf, i2c, i2s uartf, jtag, led, mdio,
+            pcm gpio, pcm i2s, pcm uartf, rgmii, spi, spi_cs1, uartf,
+            uartlite, wdg_cs1,
+
             # For RT3050, RT3052 and RT3350 SoCs
-            gpio, gpio i2s, gpio uartf, i2c, i2s uartf, jtag, mdio, pcm gpio,
-            pcm i2s, pcm uartf, rgmii, sdram, spi, uartf, uartlite,
+            sdram,
 
             # For RT3352 SoC
-            gpio, gpio i2s, gpio uartf, i2c, i2s uartf, jtag, led, lna, mdio,
-            pa, pcm gpio, pcm i2s, pcm uartf, rgmii, spi, spi_cs1, uartf,
-            uartlite, wdg_cs1,
-
-            # For RT5350 SoC
-            gpio, gpio i2s, gpio uartf, i2c, i2s uartf, jtag, led, pcm gpio,
-            pcm i2s, pcm uartf, spi, spi_cs1, uartf, uartlite, wdg_cs1,
+            lna, pa
           ]
 
 required:
diff --git a/Documentation/devicetree/bindings/power/supply/maxim,max77976.yaml b/Documentation/devicetree/bindings/power/supply/maxim,max77976.yaml
index 675b9b26d233..f23dcc50793e 100644
--- a/Documentation/devicetree/bindings/power/supply/maxim,max77976.yaml
+++ b/Documentation/devicetree/bindings/power/supply/maxim,max77976.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Maxim Integrated MAX77976 Battery charger
 
 maintainers:
-  - Luca Ceresoli <luca@lucaceresoli.net>
+  - Luca Ceresoli <luca.ceresoli@bootlin.com>
 
 description: |
   The Maxim MAX77976 is a 19Vin / 5.5A, 1-Cell Li+ battery charger
diff --git a/Documentation/devicetree/bindings/regulator/qcom,usb-vbus-regulator.yaml b/Documentation/devicetree/bindings/regulator/qcom,usb-vbus-regulator.yaml
index 12ed98c28aaa..dbe78cd4adba 100644
--- a/Documentation/devicetree/bindings/regulator/qcom,usb-vbus-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/qcom,usb-vbus-regulator.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: The Qualcomm PMIC VBUS output regulator driver
 
 maintainers:
-  - Wesley Cheng <wcheng@codeaurora.org>
+  - Wesley Cheng <quic_wcheng@quicinc.com>
 
 description: |
   This regulator driver controls the VBUS output by the Qualcomm PMIC. This
diff --git a/Documentation/devicetree/bindings/regulator/vexpress.txt b/Documentation/devicetree/bindings/regulator/vexpress.txt
index d775f72487aa..1c2e92c7831e 100644
--- a/Documentation/devicetree/bindings/regulator/vexpress.txt
+++ b/Documentation/devicetree/bindings/regulator/vexpress.txt
@@ -4,7 +4,7 @@ Versatile Express voltage regulators
 Requires node properties:
 - "compatible" value: "arm,vexpress-volt"
 - "arm,vexpress-sysreg,func" when controlled via vexpress-sysreg
-  (see Documentation/devicetree/bindings/arm/vexpress-sysreg.txt
+  (see Documentation/devicetree/bindings/arm/vexpress-config.yaml
   for more details)
 
 Required regulator properties:
diff --git a/Documentation/devicetree/bindings/spi/microchip,mpfs-spi.yaml b/Documentation/devicetree/bindings/spi/microchip,mpfs-spi.yaml
index ece261b8e963..7326c0a28d16 100644
--- a/Documentation/devicetree/bindings/spi/microchip,mpfs-spi.yaml
+++ b/Documentation/devicetree/bindings/spi/microchip,mpfs-spi.yaml
@@ -47,6 +47,5 @@ examples:
         clocks = <&clkcfg CLK_SPI0>;
         interrupt-parent = <&plic>;
         interrupts = <54>;
-        spi-max-frequency = <25000000>;
     };
...
diff --git a/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.yaml b/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.yaml
index e2c7b934c50d..78ceb9d67754 100644
--- a/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.yaml
+++ b/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.yaml
@@ -110,7 +110,6 @@ examples:
         pinctrl-names = "default";
         pinctrl-0 = <&qup_spi1_default>;
         interrupts = <GIC_SPI 602 IRQ_TYPE_LEVEL_HIGH>;
-        spi-max-frequency = <50000000>;
         #address-cells = <1>;
         #size-cells = <0>;
     };
diff --git a/Documentation/devicetree/bindings/usb/dwc3-st.txt b/Documentation/devicetree/bindings/usb/dwc3-st.txt
index bf73de0d5b4a..4aa368447b1e 100644
--- a/Documentation/devicetree/bindings/usb/dwc3-st.txt
+++ b/Documentation/devicetree/bindings/usb/dwc3-st.txt
@@ -13,7 +13,7 @@ Required properties:
 - resets	: list of phandle and reset specifier pairs. There should be two entries, one
 		  for the powerdown and softreset lines of the usb3 IP
 - reset-names	: list of reset signal names. Names should be "powerdown" and "softreset"
-See: Documentation/devicetree/bindings/reset/st,sti-powerdown.txt
+See: Documentation/devicetree/bindings/reset/st,stih407-powerdown.yaml
 See: Documentation/devicetree/bindings/reset/reset.txt
 
 - #address-cells, #size-cells : should be '1' if the device has sub-nodes
diff --git a/Documentation/devicetree/bindings/usb/ehci-st.txt b/Documentation/devicetree/bindings/usb/ehci-st.txt
index 065c91d955ad..d6f2bdee20fc 100644
--- a/Documentation/devicetree/bindings/usb/ehci-st.txt
+++ b/Documentation/devicetree/bindings/usb/ehci-st.txt
@@ -17,7 +17,7 @@ See: Documentation/devicetree/bindings/clock/clock-bindings.txt
 - resets	: phandle + reset specifier pairs to the powerdown and softreset lines
 		  of the USB IP
 - reset-names	: should be "power" and "softreset"
-See: Documentation/devicetree/bindings/reset/st,sti-powerdown.txt
+See: Documentation/devicetree/bindings/reset/st,stih407-powerdown.yaml
 See: Documentation/devicetree/bindings/reset/reset.txt
 
 Example:
diff --git a/Documentation/devicetree/bindings/usb/generic-ehci.yaml b/Documentation/devicetree/bindings/usb/generic-ehci.yaml
index 0b4524b6409e..1e84e1b7ab27 100644
--- a/Documentation/devicetree/bindings/usb/generic-ehci.yaml
+++ b/Documentation/devicetree/bindings/usb/generic-ehci.yaml
@@ -136,7 +136,8 @@ properties:
       Phandle of a companion.
 
   phys:
-    maxItems: 1
+    minItems: 1
+    maxItems: 3
 
   phy-names:
     const: usb
diff --git a/Documentation/devicetree/bindings/usb/generic-ohci.yaml b/Documentation/devicetree/bindings/usb/generic-ohci.yaml
index e2ac84665316..bb6bbd5f129d 100644
--- a/Documentation/devicetree/bindings/usb/generic-ohci.yaml
+++ b/Documentation/devicetree/bindings/usb/generic-ohci.yaml
@@ -103,7 +103,8 @@ properties:
       Overrides the detected port count
 
   phys:
-    maxItems: 1
+    minItems: 1
+    maxItems: 3
 
   phy-names:
     const: usb
diff --git a/Documentation/devicetree/bindings/usb/ohci-st.txt b/Documentation/devicetree/bindings/usb/ohci-st.txt
index 44c998c16f85..1c735573abc0 100644
--- a/Documentation/devicetree/bindings/usb/ohci-st.txt
+++ b/Documentation/devicetree/bindings/usb/ohci-st.txt
@@ -15,7 +15,7 @@ See: Documentation/devicetree/bindings/clock/clock-bindings.txt
 - resets	: phandle to the powerdown and reset controller for the USB IP
 - reset-names	: should be "power" and "softreset".
-See: Documentation/devicetree/bindings/reset/st,sti-powerdown.txt
+See: Documentation/devicetree/bindings/reset/st,stih407-powerdown.yaml
 See: Documentation/devicetree/bindings/reset/reset.txt
 
 Example:
diff --git a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
index e336fe2e03cc..749e1963ddbb 100644
--- a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm SuperSpeed DWC3 USB SoC controller
 
 maintainers:
-  - Manu Gautam <mgautam@codeaurora.org>
+  - Wesley Cheng <quic_wcheng@quicinc.com>
 
 properties:
   compatible:
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index 6bb20b4554d7..0496773a3c4d 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -143,6 +143,9 @@ patternProperties:
     description: ASPEED Technology Inc.
   "^asus,.*":
     description: AsusTek Computer Inc.
+  "^atheros,.*":
+    description: Qualcomm Atheros, Inc. (deprecated, use qca)
+    deprecated: true
   "^atlas,.*":
     description: Atlas Scientific LLC
   "^atmel,.*":
diff --git a/Documentation/devicetree/bindings/watchdog/allwinner,sun4i-a10-wdt.yaml b/Documentation/devicetree/bindings/watchdog/allwinner,sun4i-a10-wdt.yaml
index cbcf19f51411..ed6c1ca80dcc 100644
--- a/Documentation/devicetree/bindings/watchdog/allwinner,sun4i-a10-wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/allwinner,sun4i-a10-wdt.yaml
@@ -64,7 +64,6 @@ if:
 then:
   properties:
     clocks:
-      minItems: 2
       items:
         - description: High-frequency oscillator input, divided internally
        - description: Low-frequency oscillator input
diff --git a/Documentation/driver-api/gpio/board.rst b/Documentation/driver-api/gpio/board.rst
index 4e3adf31c8d1..b33aa04f213f 100644
--- a/Documentation/driver-api/gpio/board.rst
+++ b/Documentation/driver-api/gpio/board.rst
@@ -6,7 +6,7 @@
 This document explains how GPIOs can be assigned to given devices and functions.
 Note that it only applies to the new descriptor-based interface. For a
 description of the deprecated integer-based GPIO interface please refer to
-gpio-legacy.txt (actually, there is no real mapping possible with the old
+legacy.rst (actually, there is no real mapping possible with the old
 interface; you just fetch an integer from somewhere and request the
 corresponding GPIO).
diff --git a/Documentation/driver-api/gpio/consumer.rst b/Documentation/driver-api/gpio/consumer.rst
index 47869ca8ccf0..72bcf5f5e3a2 100644
--- a/Documentation/driver-api/gpio/consumer.rst
+++ b/Documentation/driver-api/gpio/consumer.rst
@@ -4,7 +4,7 @@ GPIO Descriptor Consumer Interface
 
 This document describes the consumer interface of the GPIO framework. Note that
 it describes the new descriptor-based interface. For a description of the
-deprecated integer-based GPIO interface please refer to gpio-legacy.txt.
+deprecated integer-based GPIO interface please refer to legacy.rst.
 
 
 Guidelines for GPIOs consumers
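For orientation, here is a minimal sketch of the descriptor-based consumer
interface this document describes; the "reset" consumer name and the probe
context are hypothetical, and error handling is abbreviated::

    #include <linux/err.h>
    #include <linux/gpio/consumer.h>

    static int demo_probe(struct device *dev)
    {
            struct gpio_desc *reset;

            /* Look up the GPIO mapped to this device under the name
             * "reset"; request it as an output, initially de-asserted. */
            reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
            if (IS_ERR(reset))
                    return PTR_ERR(reset);

            /* Values are logical: 1 = asserted; any active-low polarity
             * from the mapping is handled by the framework. */
            gpiod_set_value(reset, 1);
            return 0;
    }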
@@ -78,7 +78,7 @@ whether the line is configured active high or active low (see
 
 The two last flags are used for use cases where open drain is mandatory, such
 as I2C: if the line is not already configured as open drain in the mappings
-(see board.txt), then open drain will be enforced anyway and a warning will be
+(see board.rst), then open drain will be enforced anyway and a warning will be
 printed that the board configuration needs to be updated to match the use case.
 
 Both functions return either a valid GPIO descriptor, or an error code checkable
@@ -270,7 +270,7 @@ driven.
 The same is applicable for open drain or open source output lines: those do not
 actively drive their output high (open drain) or low (open source), they just
 switch their output to a high impedance value. The consumer should not need to
-care. (For details read about open drain in driver.txt.)
+care. (For details read about open drain in driver.rst.)
 
 With this, all the gpiod_set_(array)_value_xxx() functions interpret the
 parameter "value" as "asserted" ("1") or "de-asserted" ("0"). The physical line
diff --git a/Documentation/driver-api/gpio/intro.rst b/Documentation/driver-api/gpio/intro.rst
index 2e924fb5b3d5..c9c19243b97f 100644
--- a/Documentation/driver-api/gpio/intro.rst
+++ b/Documentation/driver-api/gpio/intro.rst
@@ -14,12 +14,12 @@ Due to the history of GPIO interfaces in the kernel, there are two different
 ways to obtain and use GPIOs:
 
 - The descriptor-based interface is the preferred way to manipulate GPIOs,
-  and is described by all the files in this directory excepted gpio-legacy.txt.
+  and is described by all the files in this directory excepted legacy.rst.
 - The legacy integer-based interface which is considered deprecated (but still
-  usable for compatibility reasons) is documented in gpio-legacy.txt.
+  usable for compatibility reasons) is documented in legacy.rst.
 
 The remainder of this document applies to the new descriptor-based interface.
-gpio-legacy.txt contains the same information applied to the legacy
+legacy.rst contains the same information applied to the legacy
 integer-based interface.
diff --git a/Documentation/hte/hte.rst b/Documentation/driver-api/hte/hte.rst
index 153f3233c100..153f3233c100 100644
--- a/Documentation/hte/hte.rst
+++ b/Documentation/driver-api/hte/hte.rst
diff --git a/Documentation/hte/index.rst b/Documentation/driver-api/hte/index.rst
index 9f43301c05dc..9f43301c05dc 100644
--- a/Documentation/hte/index.rst
+++ b/Documentation/driver-api/hte/index.rst
diff --git a/Documentation/hte/tegra194-hte.rst b/Documentation/driver-api/hte/tegra194-hte.rst
index 41983e04d2a0..41983e04d2a0 100644
--- a/Documentation/hte/tegra194-hte.rst
+++ b/Documentation/driver-api/hte/tegra194-hte.rst
diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst
index d76a60d95b58..a6d525cd9fc4 100644
--- a/Documentation/driver-api/index.rst
+++ b/Documentation/driver-api/index.rst
@@ -108,6 +108,7 @@ available subsections can be seen below.
    xilinx/index
    xillybus
    zorro
+   hte/index
 
 .. only:: subproject and html
diff --git a/Documentation/features/core/cBPF-JIT/arch-support.txt b/Documentation/features/core/cBPF-JIT/arch-support.txt
index 10482dee8703..a053667a7a8c 100644
--- a/Documentation/features/core/cBPF-JIT/arch-support.txt
+++ b/Documentation/features/core/cBPF-JIT/arch-support.txt
@@ -13,6 +13,7 @@
     |        csky: | TODO |
     |     hexagon: | TODO |
     |        ia64: | TODO |
+    |       loong: | TODO |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: |  ok  |
diff --git a/Documentation/features/core/eBPF-JIT/arch-support.txt b/Documentation/features/core/eBPF-JIT/arch-support.txt
index bcefb5afc7d6..c0bb9c92937f 100644
--- a/Documentation/features/core/eBPF-JIT/arch-support.txt
+++ b/Documentation/features/core/eBPF-JIT/arch-support.txt
@@ -13,6 +13,7 @@
     |        csky: | TODO |
     |     hexagon: | TODO |
     |        ia64: | TODO |
+    |       loong: | TODO |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: |  ok  |
diff --git a/Documentation/features/core/generic-idle-thread/arch-support.txt b/Documentation/features/core/generic-idle-thread/arch-support.txt
index d80d99449ac1..c9bfff292816 100644
--- a/Documentation/features/core/generic-idle-thread/arch-support.txt
+++ b/Documentation/features/core/generic-idle-thread/arch-support.txt
@@ -13,6 +13,7 @@
     |        csky: |  ok  |
     |     hexagon: |  ok  |
     |        ia64: |  ok  |
+    |       loong: |  ok  |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: |  ok  |
diff --git a/Documentation/features/core/jump-labels/arch-support.txt b/Documentation/features/core/jump-labels/arch-support.txt
index 53eab154925d..35e2a44b1448 100644
--- a/Documentation/features/core/jump-labels/arch-support.txt
+++ b/Documentation/features/core/jump-labels/arch-support.txt
@@ -13,6 +13,7 @@
     |        csky: | TODO |
     |     hexagon: | TODO |
     |        ia64: | TODO |
+    |       loong: | TODO |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: |  ok  |
diff --git a/Documentation/features/core/thread-info-in-task/arch-support.txt b/Documentation/features/core/thread-info-in-task/arch-support.txt
index 94926451afb9..9b3e2ce12b44 100644
--- a/Documentation/features/core/thread-info-in-task/arch-support.txt
+++ b/Documentation/features/core/thread-info-in-task/arch-support.txt
@@ -13,6 +13,7 @@
     |        csky: | TODO |
     |     hexagon: | TODO |
     |        ia64: | TODO |
+    |       loong: | TODO |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: | TODO |
diff --git a/Documentation/features/core/tracehook/arch-support.txt b/Documentation/features/core/tracehook/arch-support.txt
index b4274b8141b6..9c7ffec5d51d 100644
--- a/Documentation/features/core/tracehook/arch-support.txt
+++ b/Documentation/features/core/tracehook/arch-support.txt
@@ -13,6 +13,7 @@
     |        csky: |  ok  |
     |     hexagon: |  ok  |
     |        ia64: |  ok  |
+    |       loong: |  ok  |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: |  ok  |
diff --git a/Documentation/features/debug/KASAN/arch-support.txt b/Documentation/features/debug/KASAN/arch-support.txt
index c15bb4b21b6f..2fd5fb6f5f23 100644
--- a/Documentation/features/debug/KASAN/arch-support.txt
+++ b/Documentation/features/debug/KASAN/arch-support.txt
@@ -13,6 +13,7 @@
     |        csky: | TODO |
     |     hexagon: | TODO |
     |        ia64: | TODO |
+    |       loong: | TODO |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: | TODO |
diff --git a/Documentation/features/debug/debug-vm-pgtable/arch-support.txt b/Documentation/features/debug/debug-vm-pgtable/arch-support.txt
index 4c31fc92a312..c45711e55c7b 100644
--- a/Documentation/features/debug/debug-vm-pgtable/arch-support.txt
+++ b/Documentation/features/debug/debug-vm-pgtable/arch-support.txt
@@ -13,12 +13,13 @@
     |        csky: | TODO |
     |     hexagon: | TODO |
     |        ia64: | TODO |
+    |       loong: | TODO |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: | TODO |
     |       nios2: | TODO |
     |    openrisc: | TODO |
-    |      parisc: | TODO |
+    |      parisc: |  ok  |
     |     powerpc: |  ok  |
     |       riscv: |  ok  |
     |        s390: |  ok  |
diff --git a/Documentation/features/debug/gcov-profile-all/arch-support.txt b/Documentation/features/debug/gcov-profile-all/arch-support.txt
index d7a5ac4bc1fe..502c1d409648 100644
--- a/Documentation/features/debug/gcov-profile-all/arch-support.txt
+++ b/Documentation/features/debug/gcov-profile-all/arch-support.txt
@@ -13,6 +13,7 @@
     |        csky: |  ok  |
     |     hexagon: | TODO |
     |        ia64: | TODO |
+    |       loong: | TODO |
     |        m68k: | TODO |
     |  microblaze: |  ok  |
     |        mips: |  ok  |
@@ -24,7 +25,7 @@
     |        s390: |  ok  |
     |          sh: |  ok  |
     |       sparc: | TODO |
-    |          um: | TODO |
+    |          um: |  ok  |
     |         x86: |  ok  |
     |      xtensa: | TODO |
     -----------------------
diff --git a/Documentation/features/debug/kcov/arch-support.txt b/Documentation/features/debug/kcov/arch-support.txt
index 136e14c2b603..afb90bebded2 100644
--- a/Documentation/features/debug/kcov/arch-support.txt
+++ b/Documentation/features/debug/kcov/arch-support.txt
@@ -13,6 +13,7 @@
     |        csky: | TODO |
     |     hexagon: | TODO |
     |        ia64: | TODO |
+    |       loong: | TODO |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: |  ok  |
diff --git a/Documentation/features/debug/kgdb/arch-support.txt b/Documentation/features/debug/kgdb/arch-support.txt
index 5b3f3d8ae462..04120d278c22 100644
--- a/Documentation/features/debug/kgdb/arch-support.txt
+++ b/Documentation/features/debug/kgdb/arch-support.txt
@@ -13,6 +13,7 @@
     |        csky: | TODO |
     |     hexagon: |  ok  |
     |        ia64: | TODO |
+    |       loong: | TODO |
     |        m68k: | TODO |
     |  microblaze: |  ok  |
     |        mips: |  ok  |
diff --git a/Documentation/features/debug/kmemleak/arch-support.txt b/Documentation/features/debug/kmemleak/arch-support.txt
index 7a2eab4fdf9d..e487c356ab20 100644
--- a/Documentation/features/debug/kmemleak/arch-support.txt
+++ b/Documentation/features/debug/kmemleak/arch-support.txt
@@ -13,6 +13,7 @@
     |        csky: |  ok  |
     |     hexagon: | TODO |
     |        ia64: | TODO |
+    |       loong: | TODO |
     |        m68k: | TODO |
     |  microblaze: |  ok  |
     |        mips: |  ok  |
diff --git a/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt b/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
index db02ab194138..b3697f4c806e 100644
--- a/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
+++ b/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
@@ -13,6 +13,7 @@
     |        csky: |  ok  |
     |     hexagon: | TODO |
     |        ia64: | TODO |
+    |       loong: | TODO |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: | TODO |
diff --git a/Documentation/features/debug/kprobes/arch-support.txt b/Documentation/features/debug/kprobes/arch-support.txt
index ec186e7deebc..452385ac9e06 100644
--- a/Documentation/features/debug/kprobes/arch-support.txt
+++ b/Documentation/features/debug/kprobes/arch-support.txt
@@ -13,6 +13,7 @@
     |        csky: |  ok  |
     |     hexagon: | TODO |
     |        ia64: |  ok  |
+    |       loong: | TODO |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: |  ok  |
diff --git a/Documentation/features/debug/kretprobes/arch-support.txt b/Documentation/features/debug/kretprobes/arch-support.txt
index 4b7865e693f6..daecf046e72b 100644
--- a/Documentation/features/debug/kretprobes/arch-support.txt
+++ b/Documentation/features/debug/kretprobes/arch-support.txt
@@ -13,6 +13,7 @@
     |        csky: |  ok  |
     |     hexagon: | TODO |
     |        ia64: |  ok  |
+    |       loong: | TODO |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: |  ok  |
diff --git a/Documentation/features/debug/optprobes/arch-support.txt b/Documentation/features/debug/optprobes/arch-support.txt
index 5d9befa041c7..adb1bd055bfd 100644
a/Documentation/features/debug/optprobes/arch-support.txt +++ b/Documentation/features/debug/optprobes/arch-support.txt @@ -13,6 +13,7 @@ | csky: | TODO | | hexagon: | TODO | | ia64: | TODO | + | loong: | TODO | | m68k: | TODO | | microblaze: | TODO | | mips: | TODO | diff --git a/Documentation/features/debug/stackprotector/arch-support.txt b/Documentation/features/debug/stackprotector/arch-support.txt index d97fd38460e6..ddcd7161d14c 100644 --- a/Documentation/features/debug/stackprotector/arch-support.txt +++ b/Documentation/features/debug/stackprotector/arch-support.txt @@ -13,6 +13,7 @@ | csky: | ok | | hexagon: | TODO | | ia64: | TODO | + | loong: | TODO | | m68k: | TODO | | microblaze: | TODO | | mips: | ok | diff --git a/Documentation/features/debug/uprobes/arch-support.txt b/Documentation/features/debug/uprobes/arch-support.txt index d30e3475904e..25121200f9f9 100644 --- a/Documentation/features/debug/uprobes/arch-support.txt +++ b/Documentation/features/debug/uprobes/arch-support.txt @@ -13,6 +13,7 @@ | csky: | ok | | hexagon: | TODO | | ia64: | TODO | + | loong: | TODO | | m68k: | TODO | | microblaze: | TODO | | mips: | ok | diff --git a/Documentation/features/debug/user-ret-profiler/arch-support.txt b/Documentation/features/debug/user-ret-profiler/arch-support.txt index 9ae1fa2eb27c..f2fcff8e77b7 100644 --- a/Documentation/features/debug/user-ret-profiler/arch-support.txt +++ b/Documentation/features/debug/user-ret-profiler/arch-support.txt @@ -13,6 +13,7 @@ | csky: | TODO | | hexagon: | TODO | | ia64: | TODO | + | loong: | TODO | | m68k: | TODO | | microblaze: | TODO | | mips: | TODO | diff --git a/Documentation/features/io/dma-contiguous/arch-support.txt b/Documentation/features/io/dma-contiguous/arch-support.txt index 9e09988eb654..95e485c87e36 100644 --- a/Documentation/features/io/dma-contiguous/arch-support.txt +++ b/Documentation/features/io/dma-contiguous/arch-support.txt @@ -13,6 +13,7 @@ | csky: | ok | | hexagon: | TODO | | ia64: | TODO | + | loong: | ok | | m68k: | TODO | | microblaze: | ok | | mips: | ok | diff --git a/Documentation/features/locking/cmpxchg-local/arch-support.txt b/Documentation/features/locking/cmpxchg-local/arch-support.txt index 5c4ec316dbac..8b1a8d9e1c79 100644 --- a/Documentation/features/locking/cmpxchg-local/arch-support.txt +++ b/Documentation/features/locking/cmpxchg-local/arch-support.txt @@ -13,6 +13,7 @@ | csky: | TODO | | hexagon: | TODO | | ia64: | TODO | + | loong: | TODO | | m68k: | TODO | | microblaze: | TODO | | mips: | TODO | diff --git a/Documentation/features/locking/lockdep/arch-support.txt b/Documentation/features/locking/lockdep/arch-support.txt index 65007c1ac44f..ab69e8f56a37 100644 --- a/Documentation/features/locking/lockdep/arch-support.txt +++ b/Documentation/features/locking/lockdep/arch-support.txt @@ -13,6 +13,7 @@ | csky: | ok | | hexagon: | ok | | ia64: | TODO | + | loong: | ok | | m68k: | TODO | | microblaze: | ok | | mips: | ok | diff --git a/Documentation/features/locking/queued-rwlocks/arch-support.txt b/Documentation/features/locking/queued-rwlocks/arch-support.txt index 20056670fb09..0bfb72a08d82 100644 --- a/Documentation/features/locking/queued-rwlocks/arch-support.txt +++ b/Documentation/features/locking/queued-rwlocks/arch-support.txt @@ -13,6 +13,7 @@ | csky: | ok | | hexagon: | TODO | | ia64: | TODO | + | loong: | ok | | m68k: | TODO | | microblaze: | TODO | | mips: | ok | @@ -20,7 +21,7 @@ | openrisc: | ok | | parisc: | TODO | | powerpc: | ok | - | riscv: | TODO | + | riscv: | ok | | s390: | TODO | | 
sh: | TODO | | sparc: | ok | diff --git a/Documentation/features/locking/queued-spinlocks/arch-support.txt b/Documentation/features/locking/queued-spinlocks/arch-support.txt index 707514faac7b..d2f2201febc8 100644 --- a/Documentation/features/locking/queued-spinlocks/arch-support.txt +++ b/Documentation/features/locking/queued-spinlocks/arch-support.txt @@ -13,6 +13,7 @@ | csky: | TODO | | hexagon: | TODO | | ia64: | TODO | + | loong: | TODO | | m68k: | TODO | | microblaze: | TODO | | mips: | ok | diff --git a/Documentation/features/perf/kprobes-event/arch-support.txt b/Documentation/features/perf/kprobes-event/arch-support.txt index 9f31ce9b9f2a..0d0647b06762 100644 --- a/Documentation/features/perf/kprobes-event/arch-support.txt +++ b/Documentation/features/perf/kprobes-event/arch-support.txt @@ -7,12 +7,13 @@ | arch |status| ----------------------- | alpha: | TODO | - | arc: | TODO | + | arc: | ok | | arm: | ok | | arm64: | ok | | csky: | ok | | hexagon: | ok | | ia64: | TODO | + | loong: | ok | | m68k: | TODO | | microblaze: | TODO | | mips: | ok | diff --git a/Documentation/features/perf/perf-regs/arch-support.txt b/Documentation/features/perf/perf-regs/arch-support.txt index f148c4329c7a..13c297bbf05c 100644 --- a/Documentation/features/perf/perf-regs/arch-support.txt +++ b/Documentation/features/perf/perf-regs/arch-support.txt @@ -13,6 +13,7 @@ | csky: | ok | | hexagon: | TODO | | ia64: | TODO | + | loong: | TODO | | m68k: | TODO | | microblaze: | TODO | | mips: | ok | diff --git a/Documentation/features/perf/perf-stackdump/arch-support.txt b/Documentation/features/perf/perf-stackdump/arch-support.txt index 32c88b6a910c..931687eec671 100644 --- a/Documentation/features/perf/perf-stackdump/arch-support.txt +++ b/Documentation/features/perf/perf-stackdump/arch-support.txt @@ -13,6 +13,7 @@ | csky: | ok | | hexagon: | TODO | | ia64: | TODO | + | loong: | TODO | | m68k: | TODO | | microblaze: | TODO | | mips: | ok | diff --git a/Documentation/features/sched/membarrier-sync-core/arch-support.txt b/Documentation/features/sched/membarrier-sync-core/arch-support.txt index d82a1f0cdc91..336d728b8a45 100644 --- a/Documentation/features/sched/membarrier-sync-core/arch-support.txt +++ b/Documentation/features/sched/membarrier-sync-core/arch-support.txt @@ -36,6 +36,7 @@ | csky: | TODO | | hexagon: | TODO | | ia64: | TODO | + | loong: | TODO | | m68k: | TODO | | microblaze: | TODO | | mips: | TODO | diff --git a/Documentation/features/sched/numa-balancing/arch-support.txt b/Documentation/features/sched/numa-balancing/arch-support.txt index 2687564e5fa8..76d012118372 100644 --- a/Documentation/features/sched/numa-balancing/arch-support.txt +++ b/Documentation/features/sched/numa-balancing/arch-support.txt @@ -13,6 +13,7 @@ | csky: | .. | | hexagon: | .. | | ia64: | TODO | + | loong: | ok | | m68k: | .. | | microblaze: | .. 
| | mips: | TODO | diff --git a/Documentation/features/seccomp/seccomp-filter/arch-support.txt b/Documentation/features/seccomp/seccomp-filter/arch-support.txt index 1b4109199e9d..a86b8b1f3d10 100644 --- a/Documentation/features/seccomp/seccomp-filter/arch-support.txt +++ b/Documentation/features/seccomp/seccomp-filter/arch-support.txt @@ -13,6 +13,7 @@ | csky: | ok | | hexagon: | TODO | | ia64: | TODO | + | loong: | ok | | m68k: | TODO | | microblaze: | TODO | | mips: | ok | diff --git a/Documentation/features/time/arch-tick-broadcast/arch-support.txt b/Documentation/features/time/arch-tick-broadcast/arch-support.txt index 27327256bd05..364169f00ee2 100644 --- a/Documentation/features/time/arch-tick-broadcast/arch-support.txt +++ b/Documentation/features/time/arch-tick-broadcast/arch-support.txt @@ -13,6 +13,7 @@ | csky: | TODO | | hexagon: | TODO | | ia64: | TODO | + | loong: | ok | | m68k: | TODO | | microblaze: | TODO | | mips: | ok | diff --git a/Documentation/features/time/clockevents/arch-support.txt b/Documentation/features/time/clockevents/arch-support.txt index b9a4bda2c8f5..6ea274790e47 100644 --- a/Documentation/features/time/clockevents/arch-support.txt +++ b/Documentation/features/time/clockevents/arch-support.txt @@ -13,6 +13,7 @@ | csky: | ok | | hexagon: | ok | | ia64: | TODO | + | loong: | ok | | m68k: | TODO | | microblaze: | ok | | mips: | ok | diff --git a/Documentation/features/time/context-tracking/arch-support.txt b/Documentation/features/time/context-tracking/arch-support.txt index 4aa51c9fa32b..c9e0a16290e6 100644 --- a/Documentation/features/time/context-tracking/arch-support.txt +++ b/Documentation/features/time/context-tracking/arch-support.txt @@ -13,6 +13,7 @@ | csky: | ok | | hexagon: | TODO | | ia64: | TODO | + | loong: | ok | | m68k: | TODO | | microblaze: | TODO | | mips: | ok | diff --git a/Documentation/features/time/irq-time-acct/arch-support.txt b/Documentation/features/time/irq-time-acct/arch-support.txt index 0306ece41faa..fd17d8de5ef1 100644 --- a/Documentation/features/time/irq-time-acct/arch-support.txt +++ b/Documentation/features/time/irq-time-acct/arch-support.txt @@ -13,6 +13,7 @@ | csky: | TODO | | hexagon: | TODO | | ia64: | .. 
| + | loong: | ok | | m68k: | TODO | | microblaze: | TODO | | mips: | ok | diff --git a/Documentation/features/time/virt-cpuacct/arch-support.txt b/Documentation/features/time/virt-cpuacct/arch-support.txt index 5d64e40c0092..1a859ac05e9e 100644 --- a/Documentation/features/time/virt-cpuacct/arch-support.txt +++ b/Documentation/features/time/virt-cpuacct/arch-support.txt @@ -13,6 +13,7 @@ | csky: | ok | | hexagon: | TODO | | ia64: | ok | + | loong: | ok | | m68k: | TODO | | microblaze: | TODO | | mips: | ok | diff --git a/Documentation/features/vm/ELF-ASLR/arch-support.txt b/Documentation/features/vm/ELF-ASLR/arch-support.txt index 92c9db24a6a3..b1229953391b 100644 --- a/Documentation/features/vm/ELF-ASLR/arch-support.txt +++ b/Documentation/features/vm/ELF-ASLR/arch-support.txt @@ -13,6 +13,7 @@ | csky: | TODO | | hexagon: | TODO | | ia64: | TODO | + | loong: | TODO | | m68k: | TODO | | microblaze: | TODO | | mips: | ok | diff --git a/Documentation/features/vm/PG_uncached/arch-support.txt b/Documentation/features/vm/PG_uncached/arch-support.txt index 7424fea37614..02f325fbfcd0 100644 --- a/Documentation/features/vm/PG_uncached/arch-support.txt +++ b/Documentation/features/vm/PG_uncached/arch-support.txt @@ -13,6 +13,7 @@ | csky: | TODO | | hexagon: | TODO | | ia64: | ok | + | loong: | TODO | | m68k: | TODO | | microblaze: | TODO | | mips: | TODO | diff --git a/Documentation/features/vm/THP/arch-support.txt b/Documentation/features/vm/THP/arch-support.txt index 60985067626b..9bfff977ef55 100644 --- a/Documentation/features/vm/THP/arch-support.txt +++ b/Documentation/features/vm/THP/arch-support.txt @@ -13,6 +13,7 @@ | csky: | .. | | hexagon: | .. | | ia64: | TODO | + | loong: | ok | | m68k: | .. | | microblaze: | .. | | mips: | ok | diff --git a/Documentation/features/vm/TLB/arch-support.txt b/Documentation/features/vm/TLB/arch-support.txt index f2dcbec6020e..039e4e91ada3 100644 --- a/Documentation/features/vm/TLB/arch-support.txt +++ b/Documentation/features/vm/TLB/arch-support.txt @@ -13,6 +13,7 @@ | csky: | TODO | | hexagon: | TODO | | ia64: | TODO | + | loong: | TODO | | m68k: | .. | | microblaze: | .. 
| | mips: | TODO | diff --git a/Documentation/features/vm/huge-vmap/arch-support.txt b/Documentation/features/vm/huge-vmap/arch-support.txt index 680090df03e1..13b4940e0c3a 100644 --- a/Documentation/features/vm/huge-vmap/arch-support.txt +++ b/Documentation/features/vm/huge-vmap/arch-support.txt @@ -13,6 +13,7 @@ | csky: | TODO | | hexagon: | TODO | | ia64: | TODO | + | loong: | TODO | | m68k: | TODO | | microblaze: | TODO | | mips: | TODO | diff --git a/Documentation/features/vm/ioremap_prot/arch-support.txt b/Documentation/features/vm/ioremap_prot/arch-support.txt index 205a90e82050..b01bf7bca3e6 100644 --- a/Documentation/features/vm/ioremap_prot/arch-support.txt +++ b/Documentation/features/vm/ioremap_prot/arch-support.txt @@ -13,6 +13,7 @@ | csky: | TODO | | hexagon: | TODO | | ia64: | TODO | + | loong: | ok | | m68k: | TODO | | microblaze: | TODO | | mips: | ok | diff --git a/Documentation/features/vm/pte_special/arch-support.txt b/Documentation/features/vm/pte_special/arch-support.txt index 9f16d6e4e11e..fc3687b5e89b 100644 --- a/Documentation/features/vm/pte_special/arch-support.txt +++ b/Documentation/features/vm/pte_special/arch-support.txt @@ -13,12 +13,13 @@ | csky: | TODO | | hexagon: | TODO | | ia64: | TODO | + | loong: | ok | | m68k: | TODO | | microblaze: | TODO | | mips: | ok | | nios2: | TODO | | openrisc: | TODO | - | parisc: | TODO | + | parisc: | ok | | powerpc: | ok | | riscv: | ok | | s390: | ok | diff --git a/Documentation/filesystems/btrfs.rst b/Documentation/filesystems/btrfs.rst index d0904f602819..992eddb0e11b 100644 --- a/Documentation/filesystems/btrfs.rst +++ b/Documentation/filesystems/btrfs.rst @@ -19,13 +19,23 @@ The main Btrfs features include: * Subvolumes (separate internal filesystem roots) * Object level mirroring and striping * Checksums on data and metadata (multiple algorithms available) - * Compression + * Compression (multiple algorithms available) + * Reflink, deduplication + * Scrub (on-line checksum verification) + * Hierarchical quota groups (subvolume and snapshot support) * Integrated multiple device support, with several raid algorithms * Offline filesystem check - * Efficient incremental backup and FS mirroring + * Efficient incremental backup and FS mirroring (send/receive) + * Trim/discard * Online filesystem defragmentation + * Swapfile support + * Zoned mode + * Read/write metadata verification + * Online resize (shrink, grow) -For more information please refer to the wiki +For more information please refer to the documentation site or wiki + + https://btrfs.readthedocs.io https://btrfs.wiki.kernel.org diff --git a/Documentation/filesystems/ext4/attributes.rst b/Documentation/filesystems/ext4/attributes.rst index 871d2da7a0a9..87814696a65b 100644 --- a/Documentation/filesystems/ext4/attributes.rst +++ b/Documentation/filesystems/ext4/attributes.rst @@ -13,8 +13,8 @@ disappeared as of Linux 3.0. There are two places where extended attributes can be found. The first place is between the end of each inode entry and the beginning of the -next inode entry. For example, if inode.i\_extra\_isize = 28 and -sb.inode\_size = 256, then there are 256 - (128 + 28) = 100 bytes +next inode entry. For example, if inode.i_extra_isize = 28 and +sb.inode_size = 256, then there are 256 - (128 + 28) = 100 bytes available for in-inode extended attribute storage. The second place where extended attributes can be found is in the block pointed to by ``inode.i_file_acl``. 
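The in-inode space arithmetic above is easy to check mechanically. A minimal C sketch, assuming the 128-byte base inode size (the constant name matches the kernel's ``EXT4_GOOD_OLD_INODE_SIZE``; the helper function itself is hypothetical, for illustration only):

.. code-block:: c

    #include <stdio.h>

    #define EXT4_GOOD_OLD_INODE_SIZE 128 /* size of the original ext2 inode */

    /* Bytes left for in-inode xattrs between the end of the inode
     * fields and the start of the next inode table entry. */
    static unsigned int in_inode_xattr_space(unsigned int s_inode_size,
                                             unsigned int i_extra_isize)
    {
            return s_inode_size - (EXT4_GOOD_OLD_INODE_SIZE + i_extra_isize);
    }

    int main(void)
    {
            /* The example from the text: 256 - (128 + 28) = 100 bytes. */
            printf("%u bytes\n", in_inode_xattr_space(256, 28));
            return 0;
    }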
As of Linux 3.11, it is not possible for this @@ -38,8 +38,8 @@ Extended attributes, when stored after the inode, have a header - Name - Description * - 0x0 - - \_\_le32 - - h\_magic + - __le32 + - h_magic - Magic number for identification, 0xEA020000. This value is set by the Linux driver, though e2fsprogs doesn't seem to check it(?) @@ -55,28 +55,28 @@ The beginning of an extended attribute block is in - Name - Description * - 0x0 - - \_\_le32 - - h\_magic + - __le32 + - h_magic - Magic number for identification, 0xEA020000. * - 0x4 - - \_\_le32 - - h\_refcount + - __le32 + - h_refcount - Reference count. * - 0x8 - - \_\_le32 - - h\_blocks + - __le32 + - h_blocks - Number of disk blocks used. * - 0xC - - \_\_le32 - - h\_hash + - __le32 + - h_hash - Hash value of all attributes. * - 0x10 - - \_\_le32 - - h\_checksum + - __le32 + - h_checksum - Checksum of the extended attribute block. * - 0x14 - - \_\_u32 - - h\_reserved[3] + - __u32 + - h_reserved[3] - Zero. The checksum is calculated against the FS UUID, the 64-bit block number @@ -100,46 +100,46 @@ Attributes stored inside an inode do not need to be stored in sorted order. - Name - Description * - 0x0 - - \_\_u8 - - e\_name\_len + - __u8 + - e_name_len - Length of name. * - 0x1 - - \_\_u8 - - e\_name\_index + - __u8 + - e_name_index - Attribute name index. There is a discussion of this below. * - 0x2 - - \_\_le16 - - e\_value\_offs + - __le16 + - e_value_offs - Location of this attribute's value on the disk block where it is stored. Multiple attributes can share the same value. For an inode attribute this value is relative to the start of the first entry; for a block this value is relative to the start of the block (i.e. the header). * - 0x4 - - \_\_le32 - - e\_value\_inum + - __le32 + - e_value_inum - The inode where the value is stored. Zero indicates the value is in the same block as this entry. This field is only used if the - INCOMPAT\_EA\_INODE feature is enabled. + INCOMPAT_EA_INODE feature is enabled. * - 0x8 - - \_\_le32 - - e\_value\_size + - __le32 + - e_value_size - Length of attribute value. * - 0xC - - \_\_le32 - - e\_hash + - __le32 + - e_hash - Hash value of attribute name and attribute value. The kernel doesn't update the hash for in-inode attributes, so for that case this value must be zero, because e2fsck validates any non-zero hash regardless of where the xattr lives. * - 0x10 - char - - e\_name[e\_name\_len] + - e_name[e_name_len] - Attribute name. Does not include trailing NULL. Attribute values can follow the end of the entry table. There appears to be a requirement that they be aligned to 4-byte boundaries. The values are stored starting at the end of the block and grow towards the -xattr\_header/xattr\_entry table. When the two collide, the overflow is +xattr_header/xattr_entry table. When the two collide, the overflow is put into a separate disk block. If the disk block fills up, the filesystem returns -ENOSPC. @@ -167,15 +167,15 @@ the key name. Here is a map of name index values to key prefixes: * - 1 - “user.” * - 2 - - “system.posix\_acl\_access” + - “system.posix_acl_access” * - 3 - - “system.posix\_acl\_default” + - “system.posix_acl_default” * - 4 - “trusted.” * - 6 - “security.” * - 7 - - “system.” (inline\_data only?) + - “system.” (inline_data only?) * - 8 - “system.richacl” (SuSE kernels only?)
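The name-index table above lends itself to a tiny decoder. An illustrative sketch only, not the kernel's implementation; it simply mirrors the mapping in the table, so a raw entry with ``e_name_index`` of 1 and ``e_name`` of ``foo`` names the attribute ``user.foo``:

.. code-block:: c

    #include <stdio.h>

    /* Map e_name_index values to their key prefixes, per the table above. */
    static const char *ea_prefix(unsigned char e_name_index)
    {
            switch (e_name_index) {
            case 1: return "user.";
            case 2: return "system.posix_acl_access";
            case 3: return "system.posix_acl_default";
            case 4: return "trusted.";
            case 6: return "security.";
            case 7: return "system.";
            case 8: return "system.richacl";
            default: return NULL;
            }
    }

    int main(void)
    {
            /* e_name_index = 1 plus e_name = "foo" -> "user.foo" */
            printf("%s%s\n", ea_prefix(1), "foo");
            return 0;
    }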
diff --git a/Documentation/filesystems/ext4/bigalloc.rst b/Documentation/filesystems/ext4/bigalloc.rst index 72075aa608e4..976a180b209c 100644 --- a/Documentation/filesystems/ext4/bigalloc.rst +++ b/Documentation/filesystems/ext4/bigalloc.rst @@ -23,7 +23,7 @@ means that a block group addresses 32 gigabytes instead of 128 megabytes, also shrinking the amount of file system overhead for metadata. The administrator can set a block cluster size at mkfs time (which is -stored in the s\_log\_cluster\_size field in the superblock); from then +stored in the s_log_cluster_size field in the superblock); from then on, the block bitmaps track clusters, not individual blocks. This means that block groups can be several gigabytes in size (instead of just 128MiB); however, the minimum allocation unit becomes a cluster, not a diff --git a/Documentation/filesystems/ext4/bitmaps.rst b/Documentation/filesystems/ext4/bitmaps.rst index c7546dbc197a..91c45d86e9bb 100644 --- a/Documentation/filesystems/ext4/bitmaps.rst +++ b/Documentation/filesystems/ext4/bitmaps.rst @@ -9,15 +9,15 @@ group. The inode bitmap records which entries in the inode table are in use. As with most bitmaps, one bit represents the usage status of one data -block or inode table entry. This implies a block group size of 8 \* -number\_of\_bytes\_in\_a\_logical\_block. +block or inode table entry. This implies a block group size of 8 * +number_of_bytes_in_a_logical_block. NOTE: If ``BLOCK_UNINIT`` is set for a given block group, various parts of the kernel and e2fsprogs code pretend that the block bitmap contains zeros (i.e. all blocks in the group are free). However, it is not necessarily the case that no blocks are in use -- if ``meta_bg`` is set, the bitmaps and group descriptor live inside the group. Unfortunately, -ext2fs\_test\_block\_bitmap2() will return '0' for those locations, +ext2fs_test_block_bitmap2() will return '0' for those locations, which produces confusing debugfs output. Inode Table diff --git a/Documentation/filesystems/ext4/blockgroup.rst b/Documentation/filesystems/ext4/blockgroup.rst index d5d652addce5..46d78f860623 100644 --- a/Documentation/filesystems/ext4/blockgroup.rst +++ b/Documentation/filesystems/ext4/blockgroup.rst @@ -56,39 +56,39 @@ established that the super block and the group descriptor table, if present, will be at the beginning of the block group. The bitmaps and the inode table can be anywhere, and it is quite possible for the bitmaps to come after the inode table, or for both to be in different -groups (flex\_bg). Leftover space is used for file data blocks, indirect +groups (flex_bg). Leftover space is used for file data blocks, indirect block maps, extent tree blocks, and extended attributes. Flexible Block Groups --------------------- Starting in ext4, there is a new feature called flexible block groups -(flex\_bg). In a flex\_bg, several block groups are tied together as one +(flex_bg). In a flex_bg, several block groups are tied together as one logical block group; the bitmap spaces and the inode table space in the -first block group of the flex\_bg are expanded to include the bitmaps -and inode tables of all other block groups in the flex\_bg. For example, -if the flex\_bg size is 4, then group 0 will contain (in order) the +first block group of the flex_bg are expanded to include the bitmaps +and inode tables of all other block groups in the flex_bg.
For example, +if the flex_bg size is 4, then group 0 will contain (in order) the superblock, group descriptors, data block bitmaps for groups 0-3, inode bitmaps for groups 0-3, inode tables for groups 0-3, and the remaining space in group 0 is for file data. The effect of this is to group the block group metadata close together for faster loading, and to enable large files to be continuous on disk. Backup copies of the superblock and group descriptors are always at the beginning of block groups, even -if flex\_bg is enabled. The number of block groups that make up a -flex\_bg is given by 2 ^ ``sb.s_log_groups_per_flex``. +if flex_bg is enabled. The number of block groups that make up a +flex_bg is given by 2 ^ ``sb.s_log_groups_per_flex``. Meta Block Groups ----------------- -Without the option META\_BG, for safety concerns, all block group +Without the option META_BG, for safety concerns, all block group descriptor copies are kept in the first block group. Given the default 128 MiB (2^27 bytes) block group size and 64-byte group descriptors, ext4 can have at most 2^27/64 = 2^21 block groups. This limits the entire filesystem size to 2^21 * 2^27 = 2^48 bytes or 256 TiB. The solution to this problem is to use the metablock group feature -(META\_BG), which is already in ext3 for all 2.6 releases. With the -META\_BG feature, ext4 filesystems are partitioned into many metablock +(META_BG), which is already in ext3 for all 2.6 releases. With the +META_BG feature, ext4 filesystems are partitioned into many metablock groups. Each metablock group is a cluster of block groups whose group descriptor structures can be stored in a single disk block. For ext4 filesystems with 4 KB block size, a single metablock group partition @@ -110,7 +110,7 @@ bytes, a meta-block group contains 32 block groups for filesystems with a 1KB block size, and 128 block groups for filesystems with a 4KB blocksize. Filesystems can either be created using this new block group descriptor layout, or existing filesystems can be resized on-line, and -the field s\_first\_meta\_bg in the superblock will indicate the first +the field s_first_meta_bg in the superblock will indicate the first block group using this new layout. Please see an important note about ``BLOCK_UNINIT`` in the section about @@ -121,15 +121,15 @@ Lazy Block Group Initialization A new feature for ext4 is three block group descriptor flags that enable mkfs to skip initializing other parts of the block group -metadata. Specifically, the INODE\_UNINIT and BLOCK\_UNINIT flags mean +metadata. Specifically, the INODE_UNINIT and BLOCK_UNINIT flags mean that the inode and block bitmaps for that group can be calculated and therefore the on-disk bitmap blocks are not initialized. This is generally the case for an empty block group or a block group containing -only fixed-location block group metadata. The INODE\_ZEROED flag means +only fixed-location block group metadata. The INODE_ZEROED flag means that the inode table has been initialized; mkfs will unset this flag and rely on the kernel to initialize the inode tables in the background. By not writing zeroes to the bitmaps and inode table, mkfs time is -reduced considerably. Note the feature flag is RO\_COMPAT\_GDT\_CSUM, -but the dumpe2fs output prints this as “uninit\_bg”. They are the same +reduced considerably. Note the feature flag is RO_COMPAT_GDT_CSUM, +but the dumpe2fs output prints this as “uninit_bg”. They are the same thing.
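A quick sketch of the size arithmetic above (plain C, illustrative only): the 2^21 group limit falls out of fitting every 64-byte descriptor into one 128 MiB block group, and the flex_bg factor is just ``1 << sb.s_log_groups_per_flex``; the example value of 4 is arbitrary.

.. code-block:: c

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t group_size = 1ULL << 27;             /* 128 MiB block group */
            uint64_t desc_size = 64;                      /* one group descriptor */
            uint64_t max_groups = group_size / desc_size; /* 2^27 / 64 = 2^21 */
            uint64_t max_fs_bytes = max_groups * group_size; /* 2^48 = 256 TiB */

            unsigned int s_log_groups_per_flex = 4;       /* example value */
            uint64_t groups_per_flex = 1ULL << s_log_groups_per_flex;

            printf("max block groups without meta_bg: %llu\n",
                   (unsigned long long)max_groups);
            printf("max filesystem size: %llu bytes\n",
                   (unsigned long long)max_fs_bytes);
            printf("block groups per flex_bg: %llu\n",
                   (unsigned long long)groups_per_flex);
            return 0;
    }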
diff --git a/Documentation/filesystems/ext4/blockmap.rst b/Documentation/filesystems/ext4/blockmap.rst index 30e25750d88a..2bd990402a5c 100644 --- a/Documentation/filesystems/ext4/blockmap.rst +++ b/Documentation/filesystems/ext4/blockmap.rst @@ -1,7 +1,7 @@ .. SPDX-License-Identifier: GPL-2.0 +---------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| i.i\_block Offset | Where It Points | +| i.i_block Offset | Where It Points | +=====================+==============================================================================================================================================================================================================================+ | 0 to 11 | Direct map to file blocks 0 to 11. | +---------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ diff --git a/Documentation/filesystems/ext4/checksums.rst b/Documentation/filesystems/ext4/checksums.rst index 5519e253810d..e232749daf5f 100644 --- a/Documentation/filesystems/ext4/checksums.rst +++ b/Documentation/filesystems/ext4/checksums.rst @@ -4,7 +4,7 @@ Checksums --------- Starting in early 2012, metadata checksums were added to all major ext4 -and jbd2 data structures. The associated feature flag is metadata\_csum. +and jbd2 data structures. The associated feature flag is metadata_csum. The desired checksum algorithm is indicated in the superblock, though as of October 2012 the only supported algorithm is crc32c. Some data structures did not have space to fit a full 32-bit checksum, so only the @@ -20,7 +20,7 @@ encounters directory blocks that lack sufficient empty space to add a checksum, it will request that you run ``e2fsck -D`` to have the directories rebuilt with checksums. This has the added benefit of removing slack space from the directory files and rebalancing the htree -indexes. If you \_ignore\_ this step, your directories will not be +indexes. If you _ignore_ this step, your directories will not be protected by a checksum! The following table describes the data elements that go into each type @@ -35,39 +35,39 @@ of checksum. The checksum function is whatever the superblock describes - Length - Ingredients * - Superblock - - \_\_le32 + - __le32 - The entire superblock up to the checksum field. The UUID lives inside the superblock. * - MMP - - \_\_le32 + - __le32 - UUID + the entire MMP block up to the checksum field. * - Extended Attributes - - \_\_le32 + - __le32 - UUID + the entire extended attribute block. The checksum field is set to zero. * - Directory Entries - - \_\_le32 + - __le32 - UUID + inode number + inode generation + the directory block up to the fake entry enclosing the checksum field. * - HTREE Nodes - - \_\_le32 + - __le32 - UUID + inode number + inode generation + all valid extents + HTREE tail. The checksum field is set to zero. * - Extents - - \_\_le32 + - __le32 - UUID + inode number + inode generation + the entire extent block up to the checksum field. * - Bitmaps - - \_\_le32 or \_\_le16 + - __le32 or __le16 - UUID + the entire bitmap. Checksums are stored in the group descriptor, and truncated if the group descriptor size is 32 bytes (i.e. 
^64bit) * - Inodes - - \_\_le32 + - __le32 - UUID + inode number + inode generation + the entire inode. The checksum field is set to zero. Each inode has its own checksum. * - Group Descriptors - - \_\_le16 - - If metadata\_csum, then UUID + group number + the entire descriptor; - else if gdt\_csum, then crc16(UUID + group number + the entire + - __le16 + - If metadata_csum, then UUID + group number + the entire descriptor; + else if gdt_csum, then crc16(UUID + group number + the entire descriptor). In all cases, only the lower 16 bits are stored. diff --git a/Documentation/filesystems/ext4/directory.rst b/Documentation/filesystems/ext4/directory.rst index 55f618b37144..6eece8e31df8 100644 --- a/Documentation/filesystems/ext4/directory.rst +++ b/Documentation/filesystems/ext4/directory.rst @@ -42,24 +42,24 @@ is at most 263 bytes long, though on disk you'll need to reference - Name - Description * - 0x0 - - \_\_le32 + - __le32 - inode - Number of the inode that this directory entry points to. * - 0x4 - - \_\_le16 - - rec\_len + - __le16 + - rec_len - Length of this directory entry. Must be a multiple of 4. * - 0x6 - - \_\_le16 - - name\_len + - __le16 + - name_len - Length of the file name. * - 0x8 - char - - name[EXT4\_NAME\_LEN] + - name[EXT4_NAME_LEN] - File name. Since file names cannot be longer than 255 bytes, the new directory -entry format shortens the name\_len field and uses the space for a file +entry format shortens the name_len field and uses the space for a file type flag, probably to avoid having to load every inode during directory tree traversal. This format is ``ext4_dir_entry_2``, which is at most 263 bytes long, though on disk you'll need to reference @@ -74,24 +74,24 @@ tree traversal. This format is ``ext4_dir_entry_2``, which is at most - Name - Description * - 0x0 - - \_\_le32 + - __le32 - inode - Number of the inode that this directory entry points to. * - 0x4 - - \_\_le16 - - rec\_len + - __le16 + - rec_len - Length of this directory entry. * - 0x6 - - \_\_u8 - - name\_len + - __u8 + - name_len - Length of the file name. * - 0x7 - - \_\_u8 - - file\_type + - __u8 + - file_type - File type code, see ftype_ table below. * - 0x8 - char - - name[EXT4\_NAME\_LEN] + - name[EXT4_NAME_LEN] - File name. .. _ftype: @@ -137,19 +137,19 @@ entry uses this extension, it may be up to 271 bytes. - Name - Description * - 0x0 - - \_\_le32 + - __le32 - hash - The hash of the directory name * - 0x4 - - \_\_le32 - - minor\_hash + - __le32 + - minor_hash - The minor hash of the directory name In order to add checksums to these classic directory blocks, a phony ``struct ext4_dir_entry`` is placed at the end of each leaf block to hold the checksum. The directory entry is 12 bytes long. The inode -number and name\_len fields are set to zero to fool old software into +number and name_len fields are set to zero to fool old software into ignoring an apparently empty directory entry, and the checksum is stored in the place where the name normally goes. The structure is ``struct ext4_dir_entry_tail``: @@ -163,24 +163,24 @@ in the place where the name normally goes. The structure is - Name - Description * - 0x0 - - \_\_le32 - - det\_reserved\_zero1 + - __le32 + - det_reserved_zero1 - Inode number, which must be zero. * - 0x4 - - \_\_le16 - - det\_rec\_len + - __le16 + - det_rec_len - Length of this directory entry, which must be 12. * - 0x6 - - \_\_u8 - - det\_reserved\_zero2 + - __u8 + - det_reserved_zero2 - Length of the file name, which must be zero. 
* - 0x7 - - \_\_u8 - - det\_reserved\_ft + - __u8 + - det_reserved_ft - File type, which must be 0xDE. * - 0x8 - - \_\_le32 - - det\_checksum + - __le32 + - det_checksum - Directory leaf block checksum. The leaf directory block checksum is calculated against the FS UUID, the @@ -194,7 +194,7 @@ Hash Tree Directories A linear array of directory entries isn't great for performance, so a new feature was added to ext3 to provide a faster (but peculiar) balanced tree keyed off a hash of the directory entry name. If the -EXT4\_INDEX\_FL (0x1000) flag is set in the inode, this directory uses a +EXT4_INDEX_FL (0x1000) flag is set in the inode, this directory uses a hashed btree (htree) to organize and find directory entries. For backwards read-only compatibility with ext2, this tree is actually hidden inside the directory file, masquerading as “empty” directory data @@ -206,14 +206,14 @@ rest of the directory block is empty so that it moves on. The root of the tree always lives in the first data block of the directory. By ext2 custom, the '.' and '..' entries must appear at the beginning of this first block, so they are put here as two -``struct ext4_dir_entry_2``\ s and not stored in the tree. The rest of +``struct ext4_dir_entry_2`` s and not stored in the tree. The rest of the root node contains metadata about the tree and finally a hash->block map to find nodes that are lower in the htree. If ``dx_root.info.indirect_levels`` is non-zero then the htree has two levels; the data block pointed to by the root node's map is an interior node, which is indexed by a minor hash. Interior nodes in this tree contain a zeroed out ``struct ext4_dir_entry_2`` followed by a -minor\_hash->block map to find leaf nodes. Leaf nodes contain a linear +minor_hash->block map to find leaf nodes. Leaf nodes contain a linear array of all ``struct ext4_dir_entry_2``; all of these entries (presumably) hash to the same value. If there is an overflow, the entries simply overflow into the next leaf node, and the @@ -245,83 +245,83 @@ of a data block: - Name - Description * - 0x0 - - \_\_le32 + - __le32 - dot.inode - inode number of this directory. * - 0x4 - - \_\_le16 - - dot.rec\_len + - __le16 + - dot.rec_len - Length of this record, 12. * - 0x6 - u8 - - dot.name\_len + - dot.name_len - Length of the name, 1. * - 0x7 - u8 - - dot.file\_type + - dot.file_type - File type of this entry, 0x2 (directory) (if the feature flag is set). * - 0x8 - char - dot.name[4] - - “.\\0\\0\\0” + - “.\0\0\0” * - 0xC - - \_\_le32 + - __le32 - dotdot.inode - inode number of parent directory. * - 0x10 - - \_\_le16 - - dotdot.rec\_len - - block\_size - 12. The record length is long enough to cover all htree + - __le16 + - dotdot.rec_len + - block_size - 12. The record length is long enough to cover all htree data. * - 0x12 - u8 - - dotdot.name\_len + - dotdot.name_len - Length of the name, 2. * - 0x13 - u8 - - dotdot.file\_type + - dotdot.file_type - File type of this entry, 0x2 (directory) (if the feature flag is set). * - 0x14 - char - - dotdot\_name[4] - - “..\\0\\0” + - dotdot_name[4] + - “..\0\0” * - 0x18 - - \_\_le32 - - struct dx\_root\_info.reserved\_zero + - __le32 + - struct dx_root_info.reserved_zero - Zero. * - 0x1C - u8 - - struct dx\_root\_info.hash\_version + - struct dx_root_info.hash_version - Hash type, see dirhash_ table below. * - 0x1D - u8 - - struct dx\_root\_info.info\_length + - struct dx_root_info.info_length - Length of the tree information, 0x8.
* - 0x1E - u8 - - struct dx\_root\_info.indirect\_levels - - Depth of the htree. Cannot be larger than 3 if the INCOMPAT\_LARGEDIR + - struct dx_root_info.indirect_levels + - Depth of the htree. Cannot be larger than 3 if the INCOMPAT_LARGEDIR feature is set; cannot be larger than 2 otherwise. * - 0x1F - u8 - - struct dx\_root\_info.unused\_flags + - struct dx_root_info.unused_flags - * - 0x20 - - \_\_le16 + - __le16 - limit - - Maximum number of dx\_entries that can follow this header, plus 1 for + - Maximum number of dx_entries that can follow this header, plus 1 for the header itself. * - 0x22 - - \_\_le16 + - __le16 - count - - Actual number of dx\_entries that follow this header, plus 1 for the + - Actual number of dx_entries that follow this header, plus 1 for the header itself. * - 0x24 - - \_\_le32 + - __le32 - block - The block number (within the directory file) that goes with hash=0. * - 0x28 - - struct dx\_entry + - struct dx_entry - entries[0] - As many 8-byte ``struct dx_entry`` as fits in the rest of the data block. @@ -362,38 +362,38 @@ also the full length of a data block: - Name - Description * - 0x0 - - \_\_le32 + - __le32 - fake.inode - Zero, to make it look like this entry is not in use. * - 0x4 - - \_\_le16 - - fake.rec\_len - - The size of the block, in order to hide all of the dx\_node data. + - __le16 + - fake.rec_len + - The size of the block, in order to hide all of the dx_node data. * - 0x6 - u8 - - name\_len + - name_len - Zero. There is no name for this “unused” directory entry. * - 0x7 - u8 - - file\_type + - file_type - Zero. There is no file type for this “unused” directory entry. * - 0x8 - - \_\_le16 + - __le16 - limit - - Maximum number of dx\_entries that can follow this header, plus 1 for + - Maximum number of dx_entries that can follow this header, plus 1 for the header itself. * - 0xA - - \_\_le16 + - __le16 - count - - Actual number of dx\_entries that follow this header, plus 1 for the + - Actual number of dx_entries that follow this header, plus 1 for the header itself. * - 0xE - - \_\_le32 + - __le32 - block - The block number (within the directory file) that goes with the lowest hash value of this block. This value is stored in the parent block. * - 0x12 - - struct dx\_entry + - struct dx_entry - entries[0] - As many 8-byte ``struct dx_entry`` as fits in the rest of the data block. @@ -410,11 +410,11 @@ long: - Name - Description * - 0x0 - - \_\_le32 + - __le32 - hash - Hash code. * - 0x4 - - \_\_le32 + - __le32 - block - Block number (within the directory file, not filesystem blocks) of the next node in the htree. @@ -423,13 +423,13 @@ long: author.) If metadata checksums are enabled, the last 8 bytes of the directory -block (precisely the length of one dx\_entry) are used to store a +block (precisely the length of one dx_entry) are used to store a ``struct dx_tail``, which contains the checksum. The ``limit`` and -``count`` entries in the dx\_root/dx\_node structures are adjusted as -necessary to fit the dx\_tail into the block. If there is no space for -the dx\_tail, the user is notified to run e2fsck -D to rebuild the +``count`` entries in the dx_root/dx_node structures are adjusted as +necessary to fit the dx_tail into the block. If there is no space for +the dx_tail, the user is notified to run e2fsck -D to rebuild the directory index (which will ensure that there's space for the checksum). -The dx\_tail structure is 8 bytes long and looks like this: +The dx_tail structure is 8 bytes long and looks like this: ..
list-table:: :widths: 8 8 24 40 @@ -441,13 +441,13 @@ The dx\_tail structure is 8 bytes long and looks like this: - Description * - 0x0 - u32 - - dt\_reserved + - dt_reserved - Zero. * - 0x4 - - \_\_le32 - - dt\_checksum + - __le32 + - dt_checksum - Checksum of the htree directory block. The checksum is calculated against the FS UUID, the htree index header -(dx\_root or dx\_node), all of the htree indices (dx\_entry) that are in -use, and the tail block (dx\_tail). +(dx_root or dx_node), all of the htree indices (dx_entry) that are in +use, and the tail block (dx_tail). diff --git a/Documentation/filesystems/ext4/eainode.rst b/Documentation/filesystems/ext4/eainode.rst index ecc0d01a0a72..7a2ef26b064a 100644 --- a/Documentation/filesystems/ext4/eainode.rst +++ b/Documentation/filesystems/ext4/eainode.rst @@ -5,14 +5,14 @@ Large Extended Attribute Values To enable ext4 to store extended attribute values that do not fit in the inode or in the single extended attribute block attached to an inode, -the EA\_INODE feature allows us to store the value in the data blocks of +the EA_INODE feature allows us to store the value in the data blocks of a regular file inode. This “EA inode” is linked only from the extended attribute name index and must not appear in a directory entry. The -inode's i\_atime field is used to store a checksum of the xattr value; -and i\_ctime/i\_version store a 64-bit reference count, which enables +inode's i_atime field is used to store a checksum of the xattr value; +and i_ctime/i_version store a 64-bit reference count, which enables sharing of large xattr values between multiple owning inodes. For backward compatibility with older versions of this feature, the -i\_mtime/i\_generation *may* store a back-reference to the inode number -and i\_generation of the **one** owning inode (in cases where the EA +i_mtime/i_generation *may* store a back-reference to the inode number +and i_generation of the **one** owning inode (in cases where the EA inode is not referenced by multiple inodes) to verify that the EA inode is the correct one being accessed. diff --git a/Documentation/filesystems/ext4/group_descr.rst b/Documentation/filesystems/ext4/group_descr.rst index 7ba6114e7f5c..392ec44f8fb0 100644 --- a/Documentation/filesystems/ext4/group_descr.rst +++ b/Documentation/filesystems/ext4/group_descr.rst @@ -7,34 +7,34 @@ Each block group on the filesystem has one of these descriptors associated with it. As noted in the Layout section above, the group descriptors (if present) are the second item in the block group. The standard configuration is for each block group to contain a full copy of -the block group descriptor table unless the sparse\_super feature flag +the block group descriptor table unless the sparse_super feature flag is set. Notice how the group descriptor records the location of both bitmaps and the inode table (i.e. they can float). This means that within a block group, the only data structures with fixed locations are the superblock -and the group descriptor table. The flex\_bg mechanism uses this +and the group descriptor table. The flex_bg mechanism uses this property to group several block groups into a flex group and lay out all of the groups' bitmaps and inode tables into one long run in the first group of the flex group. -If the meta\_bg feature flag is set, then several block groups are -grouped together into a meta group. Note that in the meta\_bg case, +If the meta_bg feature flag is set, then several block groups are +grouped together into a meta group.
Note that in the meta_bg case, however, the first and last two block groups within the larger meta group contain only group descriptors for the groups inside the meta group. -flex\_bg and meta\_bg do not appear to be mutually exclusive features. +flex_bg and meta_bg do not appear to be mutually exclusive features. In ext2, ext3, and ext4 (when the 64bit feature is not enabled), the block group descriptor was only 32 bytes long and therefore ends at -bg\_checksum. On an ext4 filesystem with the 64bit feature enabled, the +bg_checksum. On an ext4 filesystem with the 64bit feature enabled, the block group descriptor expands to at least the 64 bytes described below; the size is stored in the superblock. -If gdt\_csum is set and metadata\_csum is not set, the block group +If gdt_csum is set and metadata_csum is not set, the block group checksum is the crc16 of the FS UUID, the group number, and the group -descriptor structure. If metadata\_csum is set, then the block group +descriptor structure. If metadata_csum is set, then the block group checksum is the lower 16 bits of the checksum of the FS UUID, the group number, and the group descriptor structure. Both block and inode bitmap checksums are calculated against the FS UUID, the group number, and the @@ -51,59 +51,59 @@ The block group descriptor is laid out in ``struct ext4_group_desc``. - Name - Description * - 0x0 - - \_\_le32 - - bg\_block\_bitmap\_lo + - __le32 + - bg_block_bitmap_lo - Lower 32-bits of location of block bitmap. * - 0x4 - - \_\_le32 - - bg\_inode\_bitmap\_lo + - __le32 + - bg_inode_bitmap_lo - Lower 32-bits of location of inode bitmap. * - 0x8 - - \_\_le32 - - bg\_inode\_table\_lo + - __le32 + - bg_inode_table_lo - Lower 32-bits of location of inode table. * - 0xC - - \_\_le16 - - bg\_free\_blocks\_count\_lo + - __le16 + - bg_free_blocks_count_lo - Lower 16-bits of free block count. * - 0xE - - \_\_le16 - - bg\_free\_inodes\_count\_lo + - __le16 + - bg_free_inodes_count_lo - Lower 16-bits of free inode count. * - 0x10 - - \_\_le16 - - bg\_used\_dirs\_count\_lo + - __le16 + - bg_used_dirs_count_lo - Lower 16-bits of directory count. * - 0x12 - - \_\_le16 - - bg\_flags + - __le16 + - bg_flags - Block group flags. See the bgflags_ table below. * - 0x14 - - \_\_le32 - - bg\_exclude\_bitmap\_lo + - __le32 + - bg_exclude_bitmap_lo - Lower 32-bits of location of snapshot exclusion bitmap. * - 0x18 - - \_\_le16 - - bg\_block\_bitmap\_csum\_lo + - __le16 + - bg_block_bitmap_csum_lo - Lower 16-bits of the block bitmap checksum. * - 0x1A - - \_\_le16 - - bg\_inode\_bitmap\_csum\_lo + - __le16 + - bg_inode_bitmap_csum_lo - Lower 16-bits of the inode bitmap checksum. * - 0x1C - - \_\_le16 - - bg\_itable\_unused\_lo + - __le16 + - bg_itable_unused_lo - Lower 16-bits of unused inode count. If set, we needn't scan past the - ``(sb.s_inodes_per_group - gdt.bg_itable_unused)``\ th entry in the + ``(sb.s_inodes_per_group - gdt.bg_itable_unused)`` th entry in the inode table for this group. * - 0x1E - - \_\_le16 - - bg\_checksum - - Group descriptor checksum; crc16(sb\_uuid+group\_num+bg\_desc) if the - RO\_COMPAT\_GDT\_CSUM feature is set, or - crc32c(sb\_uuid+group\_num+bg\_desc) & 0xFFFF if the - RO\_COMPAT\_METADATA\_CSUM feature is set. 
The bg\_checksum - field in bg\_desc is skipped when calculating crc16 checksum, + - __le16 + - bg_checksum + - Group descriptor checksum; crc16(sb_uuid+group_num+bg_desc) if the + RO_COMPAT_GDT_CSUM feature is set, or + crc32c(sb_uuid+group_num+bg_desc) & 0xFFFF if the + RO_COMPAT_METADATA_CSUM feature is set. The bg_checksum + field in bg_desc is skipped when calculating crc16 checksum, and set to zero if crc32c checksum is used. * - - @@ -111,48 +111,48 @@ The block group descriptor is laid out in ``struct ext4_group_desc``. - These fields only exist if the 64bit feature is enabled and s_desc_size > 32. * - 0x20 - - \_\_le32 - - bg\_block\_bitmap\_hi + - __le32 + - bg_block_bitmap_hi - Upper 32-bits of location of block bitmap. * - 0x24 - - \_\_le32 - - bg\_inode\_bitmap\_hi + - __le32 + - bg_inode_bitmap_hi - Upper 32-bits of location of inodes bitmap. * - 0x28 - - \_\_le32 - - bg\_inode\_table\_hi + - __le32 + - bg_inode_table_hi - Upper 32-bits of location of inodes table. * - 0x2C - - \_\_le16 - - bg\_free\_blocks\_count\_hi + - __le16 + - bg_free_blocks_count_hi - Upper 16-bits of free block count. * - 0x2E - - \_\_le16 - - bg\_free\_inodes\_count\_hi + - __le16 + - bg_free_inodes_count_hi - Upper 16-bits of free inode count. * - 0x30 - - \_\_le16 - - bg\_used\_dirs\_count\_hi + - __le16 + - bg_used_dirs_count_hi - Upper 16-bits of directory count. * - 0x32 - - \_\_le16 - - bg\_itable\_unused\_hi + - __le16 + - bg_itable_unused_hi - Upper 16-bits of unused inode count. * - 0x34 - - \_\_le32 - - bg\_exclude\_bitmap\_hi + - __le32 + - bg_exclude_bitmap_hi - Upper 32-bits of location of snapshot exclusion bitmap. * - 0x38 - - \_\_le16 - - bg\_block\_bitmap\_csum\_hi + - __le16 + - bg_block_bitmap_csum_hi - Upper 16-bits of the block bitmap checksum. * - 0x3A - - \_\_le16 - - bg\_inode\_bitmap\_csum\_hi + - __le16 + - bg_inode_bitmap_csum_hi - Upper 16-bits of the inode bitmap checksum. * - 0x3C - - \_\_u32 - - bg\_reserved + - __u32 + - bg_reserved - Padding to 64 bytes. .. _bgflags: @@ -166,8 +166,8 @@ Block group flags can be any combination of the following: * - Value - Description * - 0x1 - - inode table and bitmap are not initialized (EXT4\_BG\_INODE\_UNINIT). + - inode table and bitmap are not initialized (EXT4_BG_INODE_UNINIT). * - 0x2 - - block bitmap is not initialized (EXT4\_BG\_BLOCK\_UNINIT). + - block bitmap is not initialized (EXT4_BG_BLOCK_UNINIT). * - 0x4 - - inode table is zeroed (EXT4\_BG\_INODE\_ZEROED). + - inode table is zeroed (EXT4_BG_INODE_ZEROED). diff --git a/Documentation/filesystems/ext4/ifork.rst b/Documentation/filesystems/ext4/ifork.rst index b9816d5a896b..dc31f505e6c8 100644 --- a/Documentation/filesystems/ext4/ifork.rst +++ b/Documentation/filesystems/ext4/ifork.rst @@ -1,6 +1,6 @@ .. SPDX-License-Identifier: GPL-2.0 -The Contents of inode.i\_block +The Contents of inode.i_block ------------------------------ Depending on the type of file an inode describes, the 60 bytes of @@ -47,7 +47,7 @@ In ext4, the file to logical block map has been replaced with an extent tree. Under the old scheme, allocating a contiguous run of 1,000 blocks requires an indirect block to map all 1,000 entries; with extents, the mapping is reduced to a single ``struct ext4_extent`` with -``ee_len = 1000``. If flex\_bg is enabled, it is possible to allocate +``ee_len = 1000``. If flex_bg is enabled, it is possible to allocate very large files with a single extent, at a considerable reduction in metadata block use, and some improvement in disk efficiency. 
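To make the 1,000-block example above concrete, here is a sketch of that single leaf extent. The field names and widths follow the tables later in this section; host-order integer types stand in for the on-disk little-endian ``__le32``/``__le16`` fields, and the physical starting block is an arbitrary example value.

.. code-block:: c

    #include <stdint.h>
    #include <stdio.h>

    struct ext4_extent {
            uint32_t ee_block;    /* first logical block the extent covers */
            uint16_t ee_len;      /* number of blocks covered */
            uint16_t ee_start_hi; /* upper 16 bits of the physical block */
            uint32_t ee_start_lo; /* lower 32 bits of the physical block */
    };

    int main(void)
    {
            /* One extent mapping logical blocks 0..999 to a contiguous
             * physical run starting at (arbitrary) block 34816. */
            struct ext4_extent ex = {
                    .ee_block = 0,
                    .ee_len = 1000,
                    .ee_start_hi = 0,
                    .ee_start_lo = 34816,
            };
            uint64_t start = ((uint64_t)ex.ee_start_hi << 32) | ex.ee_start_lo;

            printf("logical %u..%u -> physical %llu..%llu\n",
                   (unsigned)ex.ee_block,
                   (unsigned)(ex.ee_block + ex.ee_len - 1),
                   (unsigned long long)start,
                   (unsigned long long)(start + ex.ee_len - 1));
            return 0;
    }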
The inode must have the extents flag (0x80000) set for this feature to be in @@ -76,28 +76,28 @@ which is 12 bytes long: - Name - Description * - 0x0 - - \_\_le16 - - eh\_magic + - __le16 + - eh_magic - Magic number, 0xF30A. * - 0x2 - - \_\_le16 - - eh\_entries + - __le16 + - eh_entries - Number of valid entries following the header. * - 0x4 - - \_\_le16 - - eh\_max + - __le16 + - eh_max - Maximum number of entries that could follow the header. * - 0x6 - - \_\_le16 - - eh\_depth + - __le16 + - eh_depth - Depth of this extent node in the extent tree. 0 = this extent node points to data blocks; otherwise, this extent node points to other extent nodes. The extent tree can be at most 5 levels deep: a logical block number can be at most ``2^32``, and the smallest ``n`` that satisfies ``4*(((blocksize - 12)/12)^n) >= 2^32`` is 5. * - 0x8 - - \_\_le32 - - eh\_generation + - __le32 + - eh_generation - Generation of the tree. (Used by Lustre, but not standard ext4). Internal nodes of the extent tree, also known as index nodes, are @@ -112,22 +112,22 @@ recorded as ``struct ext4_extent_idx``, and are 12 bytes long: - Name - Description * - 0x0 - - \_\_le32 - - ei\_block + - __le32 + - ei_block - This index node covers file blocks from 'block' onward. * - 0x4 - - \_\_le32 - - ei\_leaf\_lo + - __le32 + - ei_leaf_lo - Lower 32-bits of the block number of the extent node that is the next level lower in the tree. The tree node pointed to can be either another internal node or a leaf node, described below. * - 0x8 - - \_\_le16 - - ei\_leaf\_hi + - __le16 + - ei_leaf_hi - Upper 16-bits of the previous field. * - 0xA - - \_\_u16 - - ei\_unused + - __u16 + - ei_unused - Leaf nodes of the extent tree are recorded as ``struct ext4_extent``, @@ -142,24 +142,24 @@ and are also 12 bytes long: - Name - Description * - 0x0 - - \_\_le32 - - ee\_block + - __le32 + - ee_block - First file block number that this extent covers. * - 0x4 - - \_\_le16 - - ee\_len + - __le16 + - ee_len - Number of blocks covered by extent. If the value of this field is <= 32768, the extent is initialized. If the value of the field is > 32768, the extent is uninitialized and the actual extent length is ``ee_len`` - 32768. Therefore, the maximum length of an initialized extent is 32768 blocks, and the maximum length of an uninitialized extent is 32767. * - 0x6 - - \_\_le16 - - ee\_start\_hi + - __le16 + - ee_start_hi - Upper 16-bits of the block number to which this extent points. * - 0x8 - - \_\_le32 - - ee\_start\_lo + - __le32 + - ee_start_lo - Lower 32-bits of the block number to which this extent points. Prior to the introduction of metadata checksums, the extent header + @@ -182,8 +182,8 @@ including) the checksum itself. - Name - Description * - 0x0 - - \_\_le32 - - eb\_checksum + - __le32 + - eb_checksum - Checksum of the extent block, crc32c(uuid+inum+igeneration+extentblock) Inline Data diff --git a/Documentation/filesystems/ext4/inlinedata.rst b/Documentation/filesystems/ext4/inlinedata.rst index d1075178ce0b..a728af0d2fd0 100644 --- a/Documentation/filesystems/ext4/inlinedata.rst +++ b/Documentation/filesystems/ext4/inlinedata.rst @@ -11,12 +11,12 @@ file is smaller than 60 bytes, then the data are stored inline in attribute space, then it might be found as an extended attribute “system.data” within the inode body (“ibody EA”). This of course constrains the amount of extended attributes one can attach to an inode.
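The inline-data rule just described can be stated in a few lines of C. An illustrative sketch, not kernel code: ``EXT4_N_BLOCKS`` is the real 15-element size of ``i_block``; the helper function is hypothetical.

.. code-block:: c

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define EXT4_N_BLOCKS 15 /* i_block[] has 15 32-bit slots = 60 bytes */

    /* Data strictly smaller than 60 bytes fits entirely in i_block;
     * anything larger spills into the "system.data" ibody xattr. */
    static bool fits_in_i_block(size_t file_size)
    {
            return file_size < EXT4_N_BLOCKS * sizeof(uint32_t);
    }

    int main(void)
    {
            printf("59 bytes inline in i_block? %s\n",
                   fits_in_i_block(59) ? "yes" : "no");
            printf("60 bytes inline in i_block? %s\n",
                   fits_in_i_block(60) ? "yes" : "no");
            return 0;
    }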
-If the data size increases beyond i\_block + ibody EA, a regular block +If the data size increases beyond i_block + ibody EA, a regular block is allocated and the contents moved to that block. Pending a change to compact the extended attribute key used to store inline data, one ought to be able to store 160 bytes of data in a -256-byte inode (as of June 2015, when i\_extra\_isize is 28). Prior to +256-byte inode (as of June 2015, when i_extra_isize is 28). Prior to that, the limit was 156 bytes due to inefficient use of inode space. The inline data feature requires the presence of an extended attribute @@ -25,12 +25,12 @@ for “system.data”, even if the attribute value is zero length. Inline Directories ~~~~~~~~~~~~~~~~~~ -The first four bytes of i\_block are the inode number of the parent +The first four bytes of i_block are the inode number of the parent directory. Following that is a 56-byte space for an array of directory entries; see ``struct ext4_dir_entry``. If there is a “system.data” attribute in the inode body, the EA value is an array of ``struct ext4_dir_entry`` as well. Note that for inline directories, the -i\_block and EA space are treated as separate dirent blocks; directory +i_block and EA space are treated as separate dirent blocks; directory entries cannot span the two. Inline directory entries are not checksummed, as the inode checksum diff --git a/Documentation/filesystems/ext4/inodes.rst b/Documentation/filesystems/ext4/inodes.rst index 6c5ce666e63f..cfc6c1659931 100644 --- a/Documentation/filesystems/ext4/inodes.rst +++ b/Documentation/filesystems/ext4/inodes.rst @@ -38,138 +38,138 @@ The inode table entry is laid out in ``struct ext4_inode``. - Name - Description * - 0x0 - - \_\_le16 - - i\_mode + - __le16 + - i_mode - File mode. See the table i_mode_ below. * - 0x2 - - \_\_le16 - - i\_uid + - __le16 + - i_uid - Lower 16-bits of Owner UID. * - 0x4 - - \_\_le32 - - i\_size\_lo + - __le32 + - i_size_lo - Lower 32-bits of size in bytes. * - 0x8 - - \_\_le32 - - i\_atime - - Last access time, in seconds since the epoch. However, if the EA\_INODE + - __le32 + - i_atime + - Last access time, in seconds since the epoch. However, if the EA_INODE inode flag is set, this inode stores an extended attribute value and this field contains the checksum of the value. * - 0xC - - \_\_le32 - - i\_ctime + - __le32 + - i_ctime - Last inode change time, in seconds since the epoch. However, if the - EA\_INODE inode flag is set, this inode stores an extended attribute + EA_INODE inode flag is set, this inode stores an extended attribute value and this field contains the lower 32 bits of the attribute value's reference count. * - 0x10 - - \_\_le32 - - i\_mtime + - __le32 + - i_mtime - Last data modification time, in seconds since the epoch. However, if the - EA\_INODE inode flag is set, this inode stores an extended attribute + EA_INODE inode flag is set, this inode stores an extended attribute value and this field contains the number of the inode that owns the extended attribute. * - 0x14 - - \_\_le32 - - i\_dtime + - __le32 + - i_dtime - Deletion Time, in seconds since the epoch. * - 0x18 - - \_\_le16 - - i\_gid + - __le16 + - i_gid - Lower 16-bits of GID. * - 0x1A - - \_\_le16 - - i\_links\_count + - __le16 + - i_links_count - Hard link count. Normally, ext4 does not permit an inode to have more than 65,000 hard links. This applies to files as well as directories, which means that there cannot be more than 64,998 subdirectories in a directory (each subdirectory's '..'
entry counts as a hard link, as does - the '.' entry in the directory itself). With the DIR\_NLINK feature + the '.' entry in the directory itself). With the DIR_NLINK feature enabled, ext4 supports more than 64,998 subdirectories by setting this field to 1 to indicate that the number of hard links is not known. * - 0x1C - - \_\_le32 - - i\_blocks\_lo - - Lower 32-bits of “block” count. If the huge\_file feature flag is not + - __le32 + - i_blocks_lo + - Lower 32-bits of “block” count. If the huge_file feature flag is not set on the filesystem, the file consumes ``i_blocks_lo`` 512-byte blocks - on disk. If huge\_file is set and EXT4\_HUGE\_FILE\_FL is NOT set in + on disk. If huge_file is set and EXT4_HUGE_FILE_FL is NOT set in ``inode.i_flags``, then the file consumes ``i_blocks_lo + (i_blocks_hi - << 32)`` 512-byte blocks on disk. If huge\_file is set and - EXT4\_HUGE\_FILE\_FL IS set in ``inode.i_flags``, then this file + << 32)`` 512-byte blocks on disk. If huge_file is set and + EXT4_HUGE_FILE_FL IS set in ``inode.i_flags``, then this file consumes (``i_blocks_lo + i_blocks_hi`` << 32) filesystem blocks on disk. * - 0x20 - - \_\_le32 - - i\_flags + - __le32 + - i_flags - Inode flags. See the table i_flags_ below. * - 0x24 - 4 bytes - - i\_osd1 + - i_osd1 - See the table i_osd1_ for more details. * - 0x28 - 60 bytes - - i\_block[EXT4\_N\_BLOCKS=15] - - Block map or extent tree. See the section “The Contents of inode.i\_block”. + - i_block[EXT4_N_BLOCKS=15] + - Block map or extent tree. See the section “The Contents of inode.i_block”. * - 0x64 - - \_\_le32 - - i\_generation + - __le32 + - i_generation - File version (for NFS). * - 0x68 - - \_\_le32 - - i\_file\_acl\_lo + - __le32 + - i_file_acl_lo - Lower 32-bits of extended attribute block. ACLs are of course one of many possible extended attributes; I think the name of this field is a result of the first use of extended attributes being for ACLs. * - 0x6C - - \_\_le32 - - i\_size\_high / i\_dir\_acl + - __le32 + - i_size_high / i_dir_acl - Upper 32-bits of file/directory size. In ext2/3 this field was named - i\_dir\_acl, though it was usually set to zero and never used. + i_dir_acl, though it was usually set to zero and never used. * - 0x70 - - \_\_le32 - - i\_obso\_faddr + - __le32 + - i_obso_faddr - (Obsolete) fragment address. * - 0x74 - 12 bytes - - i\_osd2 + - i_osd2 - See the table i_osd2_ for more details. * - 0x80 - - \_\_le16 - - i\_extra\_isize + - __le16 + - i_extra_isize - Size of this inode - 128. Alternately, the size of the extended inode fields beyond the original ext2 inode, including this field. * - 0x82 - - \_\_le16 - - i\_checksum\_hi + - __le16 + - i_checksum_hi - Upper 16-bits of the inode checksum. * - 0x84 - - \_\_le32 - - i\_ctime\_extra + - __le32 + - i_ctime_extra - Extra change time bits. This provides sub-second precision. See Inode Timestamps section. * - 0x88 - - \_\_le32 - - i\_mtime\_extra + - __le32 + - i_mtime_extra - Extra modification time bits. This provides sub-second precision. * - 0x8C - - \_\_le32 - - i\_atime\_extra + - __le32 + - i_atime_extra - Extra access time bits. This provides sub-second precision. * - 0x90 - - \_\_le32 - - i\_crtime + - __le32 + - i_crtime - File creation time, in seconds since the epoch. * - 0x94 - - \_\_le32 - - i\_crtime\_extra + - __le32 + - i_crtime_extra - Extra file creation time bits. This provides sub-second precision. * - 0x98 - - \_\_le32 - - i\_version\_hi + - __le32 + - i_version_hi - Upper 32-bits for version number.
* - 0x9C - - \_\_le32 - - i\_projid + - __le32 + - i_projid - Project ID. .. _i_mode: @@ -183,45 +183,45 @@ The ``i_mode`` value is a combination of the following flags: * - Value - Description * - 0x1 - - S\_IXOTH (Others may execute) + - S_IXOTH (Others may execute) * - 0x2 - - S\_IWOTH (Others may write) + - S_IWOTH (Others may write) * - 0x4 - - S\_IROTH (Others may read) + - S_IROTH (Others may read) * - 0x8 - - S\_IXGRP (Group members may execute) + - S_IXGRP (Group members may execute) * - 0x10 - - S\_IWGRP (Group members may write) + - S_IWGRP (Group members may write) * - 0x20 - - S\_IRGRP (Group members may read) + - S_IRGRP (Group members may read) * - 0x40 - - S\_IXUSR (Owner may execute) + - S_IXUSR (Owner may execute) * - 0x80 - - S\_IWUSR (Owner may write) + - S_IWUSR (Owner may write) * - 0x100 - - S\_IRUSR (Owner may read) + - S_IRUSR (Owner may read) * - 0x200 - - S\_ISVTX (Sticky bit) + - S_ISVTX (Sticky bit) * - 0x400 - - S\_ISGID (Set GID) + - S_ISGID (Set GID) * - 0x800 - - S\_ISUID (Set UID) + - S_ISUID (Set UID) * - - These are mutually-exclusive file types: * - 0x1000 - - S\_IFIFO (FIFO) + - S_IFIFO (FIFO) * - 0x2000 - - S\_IFCHR (Character device) + - S_IFCHR (Character device) * - 0x4000 - - S\_IFDIR (Directory) + - S_IFDIR (Directory) * - 0x6000 - - S\_IFBLK (Block device) + - S_IFBLK (Block device) * - 0x8000 - - S\_IFREG (Regular file) + - S_IFREG (Regular file) * - 0xA000 - - S\_IFLNK (Symbolic link) + - S_IFLNK (Symbolic link) * - 0xC000 - - S\_IFSOCK (Socket) + - S_IFSOCK (Socket) .. _i_flags: @@ -234,56 +234,56 @@ The ``i_flags`` field is a combination of these values: * - Value - Description * - 0x1 - - This file requires secure deletion (EXT4\_SECRM\_FL). (not implemented) + - This file requires secure deletion (EXT4_SECRM_FL). (not implemented) * - 0x2 - This file should be preserved, should undeletion be desired - (EXT4\_UNRM\_FL). (not implemented) + (EXT4_UNRM_FL). (not implemented) * - 0x4 - - File is compressed (EXT4\_COMPR\_FL). (not really implemented) + - File is compressed (EXT4_COMPR_FL). (not really implemented) * - 0x8 - - All writes to the file must be synchronous (EXT4\_SYNC\_FL). + - All writes to the file must be synchronous (EXT4_SYNC_FL). * - 0x10 - - File is immutable (EXT4\_IMMUTABLE\_FL). + - File is immutable (EXT4_IMMUTABLE_FL). * - 0x20 - - File can only be appended (EXT4\_APPEND\_FL). + - File can only be appended (EXT4_APPEND_FL). * - 0x40 - - The dump(1) utility should not dump this file (EXT4\_NODUMP\_FL). + - The dump(1) utility should not dump this file (EXT4_NODUMP_FL). * - 0x80 - - Do not update access time (EXT4\_NOATIME\_FL). + - Do not update access time (EXT4_NOATIME_FL). * - 0x100 - - Dirty compressed file (EXT4\_DIRTY\_FL). (not used) + - Dirty compressed file (EXT4_DIRTY_FL). (not used) * - 0x200 - - File has one or more compressed clusters (EXT4\_COMPRBLK\_FL). (not used) + - File has one or more compressed clusters (EXT4_COMPRBLK_FL). (not used) * - 0x400 - - Do not compress file (EXT4\_NOCOMPR\_FL). (not used) + - Do not compress file (EXT4_NOCOMPR_FL). (not used) * - 0x800 - - Encrypted inode (EXT4\_ENCRYPT\_FL). This bit value previously was - EXT4\_ECOMPR\_FL (compression error), which was never used. + - Encrypted inode (EXT4_ENCRYPT_FL). This bit value previously was + EXT4_ECOMPR_FL (compression error), which was never used. * - 0x1000 - - Directory has hashed indexes (EXT4\_INDEX\_FL). + - Directory has hashed indexes (EXT4_INDEX_FL). * - 0x2000 - - AFS magic directory (EXT4\_IMAGIC\_FL). 
+ - AFS magic directory (EXT4_IMAGIC_FL). * - 0x4000 - File data must always be written through the journal - (EXT4\_JOURNAL\_DATA\_FL). + (EXT4_JOURNAL_DATA_FL). * - 0x8000 - - File tail should not be merged (EXT4\_NOTAIL\_FL). (not used by ext4) + - File tail should not be merged (EXT4_NOTAIL_FL). (not used by ext4) * - 0x10000 - All directory entry data should be written synchronously (see - ``dirsync``) (EXT4\_DIRSYNC\_FL). + ``dirsync``) (EXT4_DIRSYNC_FL). * - 0x20000 - - Top of directory hierarchy (EXT4\_TOPDIR\_FL). + - Top of directory hierarchy (EXT4_TOPDIR_FL). * - 0x40000 - - This is a huge file (EXT4\_HUGE\_FILE\_FL). + - This is a huge file (EXT4_HUGE_FILE_FL). * - 0x80000 - - Inode uses extents (EXT4\_EXTENTS\_FL). + - Inode uses extents (EXT4_EXTENTS_FL). * - 0x100000 - - Verity protected file (EXT4\_VERITY\_FL). + - Verity protected file (EXT4_VERITY_FL). * - 0x200000 - Inode stores a large extended attribute value in its data blocks - (EXT4\_EA\_INODE\_FL). + (EXT4_EA_INODE_FL). * - 0x400000 - - This file has blocks allocated past EOF (EXT4\_EOFBLOCKS\_FL). + - This file has blocks allocated past EOF (EXT4_EOFBLOCKS_FL). (deprecated) * - 0x01000000 - Inode is a snapshot (``EXT4_SNAPFILE_FL``). (not in mainline) @@ -294,21 +294,21 @@ The ``i_flags`` field is a combination of these values: - Snapshot shrink has completed (``EXT4_SNAPFILE_SHRUNK_FL``). (not in mainline) * - 0x10000000 - - Inode has inline data (EXT4\_INLINE\_DATA\_FL). + - Inode has inline data (EXT4_INLINE_DATA_FL). * - 0x20000000 - - Create children with the same project ID (EXT4\_PROJINHERIT\_FL). + - Create children with the same project ID (EXT4_PROJINHERIT_FL). * - 0x80000000 - - Reserved for ext4 library (EXT4\_RESERVED\_FL). + - Reserved for ext4 library (EXT4_RESERVED_FL). * - - Aggregate flags: * - 0x705BDFFF - User-visible flags. * - 0x604BC0FF - - User-modifiable flags. Note that while EXT4\_JOURNAL\_DATA\_FL and - EXT4\_EXTENTS\_FL can be set with setattr, they are not in the kernel's - EXT4\_FL\_USER\_MODIFIABLE mask, since it needs to handle the setting of + - User-modifiable flags. Note that while EXT4_JOURNAL_DATA_FL and + EXT4_EXTENTS_FL can be set with setattr, they are not in the kernel's + EXT4_FL_USER_MODIFIABLE mask, since it needs to handle the setting of these flags in a special manner and they are masked out of the set of - flags that are saved directly to i\_flags. + flags that are saved directly to i_flags. .. _i_osd1: @@ -325,9 +325,9 @@ Linux: - Name - Description * - 0x0 - - \_\_le32 - - l\_i\_version - - Inode version. However, if the EA\_INODE inode flag is set, this inode + - __le32 + - l_i_version + - Inode version. However, if the EA_INODE inode flag is set, this inode stores an extended attribute value and this field contains the upper 32 bits of the attribute value's reference count. @@ -342,8 +342,8 @@ Hurd: - Name - Description * - 0x0 - - \_\_le32 - - h\_i\_translator + - __le32 + - h_i_translator - ?? Masix: @@ -357,8 +357,8 @@ Masix: - Name - Description * - 0x0 - - \_\_le32 - - m\_i\_reserved + - __le32 + - m_i_reserved - ?? .. _i_osd2: @@ -376,30 +376,30 @@ Linux: - Name - Description * - 0x0 - - \_\_le16 - - l\_i\_blocks\_high + - __le16 + - l_i_blocks_high - Upper 16-bits of the block count. Please see the note attached to - i\_blocks\_lo. + i_blocks_lo. * - 0x2 - - \_\_le16 - - l\_i\_file\_acl\_high + - __le16 + - l_i_file_acl_high - Upper 16-bits of the extended attribute block (historically, the file ACL location). See the Extended Attributes section below. 
* - 0x4 - - \_\_le16 - - l\_i\_uid\_high + - __le16 + - l_i_uid_high - Upper 16-bits of the Owner UID. * - 0x6 - - \_\_le16 - - l\_i\_gid\_high + - __le16 + - l_i_gid_high - Upper 16-bits of the GID. * - 0x8 - - \_\_le16 - - l\_i\_checksum\_lo + - __le16 + - l_i_checksum_lo - Lower 16-bits of the inode checksum. * - 0xA - - \_\_le16 - - l\_i\_reserved + - __le16 + - l_i_reserved - Unused. Hurd: @@ -413,24 +413,24 @@ Hurd: - Name - Description * - 0x0 - - \_\_le16 - - h\_i\_reserved1 + - __le16 + - h_i_reserved1 - ?? * - 0x2 - - \_\_u16 - - h\_i\_mode\_high + - __u16 + - h_i_mode_high - Upper 16-bits of the file mode. * - 0x4 - - \_\_le16 - - h\_i\_uid\_high + - __le16 + - h_i_uid_high - Upper 16-bits of the Owner UID. * - 0x6 - - \_\_le16 - - h\_i\_gid\_high + - __le16 + - h_i_gid_high - Upper 16-bits of the GID. * - 0x8 - - \_\_u32 - - h\_i\_author + - __u32 + - h_i_author - Author code? Masix: @@ -444,17 +444,17 @@ Masix: - Name - Description * - 0x0 - - \_\_le16 - - h\_i\_reserved1 + - __le16 + - h_i_reserved1 - ?? * - 0x2 - - \_\_u16 - - m\_i\_file\_acl\_high + - __u16 + - m_i_file_acl_high - Upper 16-bits of the extended attribute block (historically, the file ACL location). * - 0x4 - - \_\_u32 - - m\_i\_reserved2[2] + - __u32 + - m_i_reserved2[2] - ?? Inode Size @@ -466,11 +466,11 @@ In ext2 and ext3, the inode structure size was fixed at 128 bytes on-disk inode at format time for all inodes in the filesystem to provide space beyond the end of the original ext2 inode. The on-disk inode record size is recorded in the superblock as ``s_inode_size``. The -number of bytes actually used by struct ext4\_inode beyond the original +number of bytes actually used by struct ext4_inode beyond the original 128-byte ext2 inode is recorded in the ``i_extra_isize`` field for each -inode, which allows struct ext4\_inode to grow for a new kernel without +inode, which allows struct ext4_inode to grow for a new kernel without having to upgrade all of the on-disk inodes. Access to fields beyond -EXT2\_GOOD\_OLD\_INODE\_SIZE should be verified to be within +EXT2_GOOD_OLD_INODE_SIZE should be verified to be within ``i_extra_isize``. By default, ext4 inode records are 256 bytes, and (as of August 2019) the inode structure is 160 bytes (``i_extra_isize = 32``). The extra space between the end of the inode @@ -516,7 +516,7 @@ creation time (crtime); this field is 64-bits wide and decoded in the same manner as 64-bit [cma]time. Neither crtime nor dtime are accessible through the regular stat() interface, though debugfs will report them. -We use the 32-bit signed time value plus (2^32 \* (extra epoch bits)). +We use the 32-bit signed time value plus (2^32 * (extra epoch bits)). In other words: .. 
list-table::
@@ -525,8 +525,8 @@ In other words:

* - Extra epoch bits
- MSB of 32-bit time
- - Adjustment for signed 32-bit to 64-bit tv\_sec
- - Decoded 64-bit tv\_sec
+ - Adjustment for signed 32-bit to 64-bit tv_sec
+ - Decoded 64-bit tv_sec
- valid time range
* - 0 0
- 1
diff --git a/Documentation/filesystems/ext4/journal.rst b/Documentation/filesystems/ext4/journal.rst
index 5fad38860f17..a6bef5293a60 100644
--- a/Documentation/filesystems/ext4/journal.rst
+++ b/Documentation/filesystems/ext4/journal.rst
@@ -63,8 +63,8 @@ Generally speaking, the journal has this format:
:header-rows: 1

* - Superblock
- - descriptor\_block (data\_blocks or revocation\_block) [more data or
- revocations] commmit\_block
+ - descriptor_block (data_blocks or revocation_block) [more data or
+ revocations] commit_block
- [more transactions...]
* -
- One transaction
@@ -93,8 +93,8 @@ superblock.
* - 1024 bytes of padding
- ext4 Superblock
- Journal Superblock
- - descriptor\_block (data\_blocks or revocation\_block) [more data or
- revocations] commmit\_block
+ - descriptor_block (data_blocks or revocation_block) [more data or
+ revocations] commit_block
- [more transactions...]
* -
-
@@ -117,17 +117,17 @@ Every block in the journal starts with a common 12-byte header
- Name
- Description
* - 0x0
- - \_\_be32
- - h\_magic
+ - __be32
+ - h_magic
- jbd2 magic number, 0xC03B3998.
* - 0x4
- - \_\_be32
- - h\_blocktype
+ - __be32
+ - h_blocktype
- Description of what this block contains. See the jbd2_blocktype_ table
below.
* - 0x8
- - \_\_be32
- - h\_sequence
+ - __be32
+ - h_sequence
- The transaction ID that goes with this block.

.. _jbd2_blocktype:
@@ -177,99 +177,99 @@ which is 1024 bytes long:
-
- Static information describing the journal.
* - 0x0
- - journal\_header\_t (12 bytes)
- - s\_header
+ - journal_header_t (12 bytes)
+ - s_header
- Common header identifying this as a superblock.
* - 0xC
- - \_\_be32
- - s\_blocksize
+ - __be32
+ - s_blocksize
- Journal device block size.
* - 0x10
- - \_\_be32
- - s\_maxlen
+ - __be32
+ - s_maxlen
- Total number of blocks in this journal.
* - 0x14
- - \_\_be32
- - s\_first
+ - __be32
+ - s_first
- First block of log information.
* -
-
-
- Dynamic information describing the current state of the log.
* - 0x18
- - \_\_be32
- - s\_sequence
+ - __be32
+ - s_sequence
- First commit ID expected in log.
* - 0x1C
- - \_\_be32
- - s\_start
+ - __be32
+ - s_start
- Block number of the start of log. Contrary to the comments, this field
being zero does not imply that the journal is clean!
* - 0x20
- - \_\_be32
- - s\_errno
- - Error value, as set by jbd2\_journal\_abort().
+ - __be32
+ - s_errno
+ - Error value, as set by jbd2_journal_abort().
* -
-
-
- The remaining fields are only valid in a v2 superblock.
* - 0x24
- - \_\_be32
- - s\_feature\_compat;
+ - __be32
+ - s_feature_compat
- Compatible feature set. See the table jbd2_compat_ below.
* - 0x28
- - \_\_be32
- - s\_feature\_incompat
+ - __be32
+ - s_feature_incompat
- Incompatible feature set. See the table jbd2_incompat_ below.
* - 0x2C
- - \_\_be32
- - s\_feature\_ro\_compat
+ - __be32
+ - s_feature_ro_compat
- Read-only compatible feature set. There aren't any of these currently.
* - 0x30
- - \_\_u8
- - s\_uuid[16]
+ - __u8
+ - s_uuid[16]
- 128-bit uuid for journal. This is compared against the copy in the ext4
super block at mount time.
* - 0x40
- - \_\_be32
- - s\_nr\_users
+ - __be32
+ - s_nr_users
- Number of file systems sharing this journal.
* - 0x44 - - \_\_be32 - - s\_dynsuper + - __be32 + - s_dynsuper - Location of dynamic super block copy. (Not used?) * - 0x48 - - \_\_be32 - - s\_max\_transaction + - __be32 + - s_max_transaction - Limit of journal blocks per transaction. (Not used?) * - 0x4C - - \_\_be32 - - s\_max\_trans\_data + - __be32 + - s_max_trans_data - Limit of data blocks per transaction. (Not used?) * - 0x50 - - \_\_u8 - - s\_checksum\_type + - __u8 + - s_checksum_type - Checksum algorithm used for the journal. See jbd2_checksum_type_ for more info. * - 0x51 - - \_\_u8[3] - - s\_padding2 + - __u8[3] + - s_padding2 - * - 0x54 - - \_\_be32 - - s\_num\_fc\_blocks + - __be32 + - s_num_fc_blocks - Number of fast commit blocks in the journal. * - 0x58 - - \_\_u32 - - s\_padding[42] + - __u32 + - s_padding[42] - * - 0xFC - - \_\_be32 - - s\_checksum + - __be32 + - s_checksum - Checksum of the entire superblock, with this field set to zero. * - 0x100 - - \_\_u8 - - s\_users[16\*48] + - __u8 + - s_users[16*48] - ids of all file systems sharing the log. e2fsprogs/Linux don't allow shared external journals, but I imagine Lustre (or ocfs2?), which use the jbd2 code, might. @@ -286,7 +286,7 @@ The journal compat features are any combination of the following: - Description * - 0x1 - Journal maintains checksums on the data blocks. - (JBD2\_FEATURE\_COMPAT\_CHECKSUM) + (JBD2_FEATURE_COMPAT_CHECKSUM) .. _jbd2_incompat: @@ -299,23 +299,23 @@ The journal incompat features are any combination of the following: * - Value - Description * - 0x1 - - Journal has block revocation records. (JBD2\_FEATURE\_INCOMPAT\_REVOKE) + - Journal has block revocation records. (JBD2_FEATURE_INCOMPAT_REVOKE) * - 0x2 - Journal can deal with 64-bit block numbers. - (JBD2\_FEATURE\_INCOMPAT\_64BIT) + (JBD2_FEATURE_INCOMPAT_64BIT) * - 0x4 - - Journal commits asynchronously. (JBD2\_FEATURE\_INCOMPAT\_ASYNC\_COMMIT) + - Journal commits asynchronously. (JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT) * - 0x8 - This journal uses v2 of the checksum on-disk format. Each journal metadata block gets its own checksum, and the block tags in the descriptor table contain checksums for each of the data blocks in the - journal. (JBD2\_FEATURE\_INCOMPAT\_CSUM\_V2) + journal. (JBD2_FEATURE_INCOMPAT_CSUM_V2) * - 0x10 - This journal uses v3 of the checksum on-disk format. This is the same as v2, but the journal block tag size is fixed regardless of the size of - block numbers. (JBD2\_FEATURE\_INCOMPAT\_CSUM\_V3) + block numbers. (JBD2_FEATURE_INCOMPAT_CSUM_V3) * - 0x20 - - Journal has fast commit blocks. (JBD2\_FEATURE\_INCOMPAT\_FAST\_COMMIT) + - Journal has fast commit blocks. (JBD2_FEATURE_INCOMPAT_FAST_COMMIT) .. _jbd2_checksum_type: @@ -355,11 +355,11 @@ Descriptor blocks consume at least 36 bytes, but use a full block: - Name - Descriptor * - 0x0 - - journal\_header\_t + - journal_header_t - (open coded) - Common block header. * - 0xC - - struct journal\_block\_tag\_s + - struct journal_block_tag_s - open coded array[] - Enough tags either to fill up the block or to describe all the data blocks that follow this descriptor block. @@ -367,7 +367,7 @@ Descriptor blocks consume at least 36 bytes, but use a full block: Journal block tags have any of the following formats, depending on which journal feature and block tag flags are set. -If JBD2\_FEATURE\_INCOMPAT\_CSUM\_V3 is set, the journal block tag is +If JBD2_FEATURE_INCOMPAT_CSUM_V3 is set, the journal block tag is defined as ``struct journal_block_tag3_s``, which looks like the following. The size is 16 or 32 bytes. 
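As a reader's aid, here is a minimal C rendering of the v3 tag layout that the
table in the next hunk spells out field by field. It mirrors
``struct journal_block_tag3_s`` from the jbd2 headers; the comments paraphrase
the table, and the optional 16-byte journal UUID that can follow a tag on disk
(when the "same UUID as previous" tag flag is clear) is noted rather than
modeled::

    /* Sketch of the on-disk v3 journal block tag; all fields are
     * big-endian.  A 16-byte journal UUID may follow the tag on disk
     * unless the "same UUID" tag flag is set.
     */
    struct journal_block_tag3_s {
            __be32 t_blocknr;       /* lower 32 bits of the target block number */
            __be32 t_flags;         /* see the journal tag flags table */
            __be32 t_blocknr_high;  /* upper 32 bits; zero without INCOMPAT_64BIT */
            __be32 t_checksum;      /* checksum of journal UUID + sequence + data block */
    };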
@@ -380,24 +380,24 @@ following. The size is 16 or 32 bytes. - Name - Descriptor * - 0x0 - - \_\_be32 - - t\_blocknr + - __be32 + - t_blocknr - Lower 32-bits of the location of where the corresponding data block should end up on disk. * - 0x4 - - \_\_be32 - - t\_flags + - __be32 + - t_flags - Flags that go with the descriptor. See the table jbd2_tag_flags_ for more info. * - 0x8 - - \_\_be32 - - t\_blocknr\_high + - __be32 + - t_blocknr_high - Upper 32-bits of the location of where the corresponding data block - should end up on disk. This is zero if JBD2\_FEATURE\_INCOMPAT\_64BIT is + should end up on disk. This is zero if JBD2_FEATURE_INCOMPAT_64BIT is not enabled. * - 0xC - - \_\_be32 - - t\_checksum + - __be32 + - t_checksum - Checksum of the journal UUID, the sequence number, and the data block. * - - @@ -433,7 +433,7 @@ The journal tag flags are any combination of the following: * - 0x8 - This is the last tag in this descriptor block. -If JBD2\_FEATURE\_INCOMPAT\_CSUM\_V3 is NOT set, the journal block tag +If JBD2_FEATURE_INCOMPAT_CSUM_V3 is NOT set, the journal block tag is defined as ``struct journal_block_tag_s``, which looks like the following. The size is 8, 12, 24, or 28 bytes: @@ -446,18 +446,18 @@ following. The size is 8, 12, 24, or 28 bytes: - Name - Descriptor * - 0x0 - - \_\_be32 - - t\_blocknr + - __be32 + - t_blocknr - Lower 32-bits of the location of where the corresponding data block should end up on disk. * - 0x4 - - \_\_be16 - - t\_checksum + - __be16 + - t_checksum - Checksum of the journal UUID, the sequence number, and the data block. Note that only the lower 16 bits are stored. * - 0x6 - - \_\_be16 - - t\_flags + - __be16 + - t_flags - Flags that go with the descriptor. See the table jbd2_tag_flags_ for more info. * - @@ -466,8 +466,8 @@ following. The size is 8, 12, 24, or 28 bytes: - This next field is only present if the super block indicates support for 64-bit block numbers. * - 0x8 - - \_\_be32 - - t\_blocknr\_high + - __be32 + - t_blocknr_high - Upper 32-bits of the location of where the corresponding data block should end up on disk. * - @@ -483,8 +483,8 @@ following. The size is 8, 12, 24, or 28 bytes: ``j_uuid`` field in ``struct journal_s``, but only tune2fs touches that field. -If JBD2\_FEATURE\_INCOMPAT\_CSUM\_V2 or -JBD2\_FEATURE\_INCOMPAT\_CSUM\_V3 are set, the end of the block is a +If JBD2_FEATURE_INCOMPAT_CSUM_V2 or +JBD2_FEATURE_INCOMPAT_CSUM_V3 are set, the end of the block is a ``struct jbd2_journal_block_tail``, which looks like this: .. list-table:: @@ -496,8 +496,8 @@ JBD2\_FEATURE\_INCOMPAT\_CSUM\_V3 are set, the end of the block is a - Name - Descriptor * - 0x0 - - \_\_be32 - - t\_checksum + - __be32 + - t_checksum - Checksum of the journal UUID + the descriptor block, with this field set to zero. @@ -538,25 +538,25 @@ length, but use a full block: - Name - Description * - 0x0 - - journal\_header\_t - - r\_header + - journal_header_t + - r_header - Common block header. * - 0xC - - \_\_be32 - - r\_count + - __be32 + - r_count - Number of bytes used in this block. * - 0x10 - - \_\_be32 or \_\_be64 + - __be32 or __be64 - blocks[0] - Blocks to revoke. -After r\_count is a linear array of block numbers that are effectively +After r_count is a linear array of block numbers that are effectively revoked by this transaction. The size of each block number is 8 bytes if the superblock advertises 64-bit block number support, or 4 bytes otherwise. 
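To make that layout concrete, a hedged userspace sketch of walking the
revocation records follows. ``walk_revoke_records()``, ``buf``, and
``is_64bit`` are illustrative names, not kernel API; the buffer is assumed to
hold one complete, readable revocation block::

    /* Illustrative only: iterate the revoked block numbers that follow
     * r_count.  r_count counts bytes used in the block, header included,
     * so the records span offsets 0x10 .. r_count.
     */
    #include <stdint.h>
    #include <stddef.h>

    static void walk_revoke_records(const uint8_t *buf, int is_64bit)
    {
            /* r_count lives at offset 0xC and is big-endian on disk */
            uint32_t r_count = ((uint32_t)buf[0xC] << 24) | ((uint32_t)buf[0xD] << 16) |
                               ((uint32_t)buf[0xE] << 8) | buf[0xF];
            size_t step = is_64bit ? 8 : 4;  /* record size per the superblock */
            size_t off;

            for (off = 0x10; off + step <= r_count; off += step) {
                    uint64_t blocknr = 0;
                    size_t i;

                    for (i = 0; i < step; i++)  /* big-endian decode */
                            blocknr = (blocknr << 8) | buf[off + i];
                    /* blocknr is one block revoked by this transaction */
                    (void)blocknr;
            }
    }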
-If JBD2\_FEATURE\_INCOMPAT\_CSUM\_V2 or
-JBD2\_FEATURE\_INCOMPAT\_CSUM\_V3 are set, the end of the revocation
+If JBD2_FEATURE_INCOMPAT_CSUM_V2 or
+JBD2_FEATURE_INCOMPAT_CSUM_V3 are set, the end of the revocation
block is a ``struct jbd2_journal_revoke_tail``, which has this format:

.. list-table::
@@ -568,8 +568,8 @@ block is a ``struct jbd2_journal_revoke_tail``, which has this format:
- Name
- Description
* - 0x0
- - \_\_be32
- - r\_checksum
+ - __be32
+ - r_checksum
- Checksum of the journal UUID + revocation block

Commit Block
@@ -592,38 +592,38 @@ bytes long (but uses a full block):
- Name
- Descriptor
* - 0x0
- - journal\_header\_s
+ - journal_header_s
- (open coded)
- Common block header.
* - 0xC
- unsigned char
- - h\_chksum\_type
+ - h_chksum_type
- The type of checksum to use to verify the integrity of the data blocks
in the transaction. See jbd2_checksum_type_ for more info.
* - 0xD
- unsigned char
- - h\_chksum\_size
+ - h_chksum_size
- The number of bytes used by the checksum. Most likely 4.
* - 0xE
- unsigned char
- - h\_padding[2]
+ - h_padding[2]
-
* - 0x10
- - \_\_be32
- - h\_chksum[JBD2\_CHECKSUM\_BYTES]
+ - __be32
+ - h_chksum[JBD2_CHECKSUM_BYTES]
- 32 bytes of space to store checksums. If
- JBD2\_FEATURE\_INCOMPAT\_CSUM\_V2 or JBD2\_FEATURE\_INCOMPAT\_CSUM\_V3
+ JBD2_FEATURE_INCOMPAT_CSUM_V2 or JBD2_FEATURE_INCOMPAT_CSUM_V3
are set, the first ``__be32`` is the checksum of the journal UUID and
the entire commit block, with this field zeroed. If
- JBD2\_FEATURE\_COMPAT\_CHECKSUM is set, the first ``__be32`` is the
+ JBD2_FEATURE_COMPAT_CHECKSUM is set, the first ``__be32`` is the
crc32 of all the blocks already written to the transaction.
* - 0x30
- - \_\_be64
- - h\_commit\_sec
+ - __be64
+ - h_commit_sec
- The time that the transaction was committed, in seconds since the epoch.
* - 0x38
- - \_\_be32
- - h\_commit\_nsec
+ - __be32
+ - h_commit_nsec
- Nanoseconds component of the above timestamp.

Fast commits
diff --git a/Documentation/filesystems/ext4/mmp.rst b/Documentation/filesystems/ext4/mmp.rst
index 25660981d93c..174dd6538737 100644
--- a/Documentation/filesystems/ext4/mmp.rst
+++ b/Documentation/filesystems/ext4/mmp.rst
@@ -7,8 +7,8 @@ Multiple mount protection (MMP) is a feature that protects the
filesystem against multiple hosts trying to use the filesystem
simultaneously. When a filesystem is opened (for mounting, or fsck,
etc.), the MMP code running on the node (call it node A) checks a
-sequence number. If the sequence number is EXT4\_MMP\_SEQ\_CLEAN, the
-open continues. If the sequence number is EXT4\_MMP\_SEQ\_FSCK, then
+sequence number. If the sequence number is EXT4_MMP_SEQ_CLEAN, the
+open continues. If the sequence number is EXT4_MMP_SEQ_FSCK, then
fsck is (hopefully) running, and open fails immediately. Otherwise, the
open code will wait for twice the specified MMP check interval and check
the sequence number again. If the sequence number has changed, then the
@@ -40,38 +40,38 @@ The MMP structure (``struct mmp_struct``) is as follows:
- Name
- Description
* - 0x0
- - \_\_le32
- - mmp\_magic
+ - __le32
+ - mmp_magic
- Magic number for MMP, 0x004D4D50 (“MMP”).
* - 0x4
- - \_\_le32
- - mmp\_seq
+ - __le32
+ - mmp_seq
- Sequence number, updated periodically.
* - 0x8
- - \_\_le64
- - mmp\_time
+ - __le64
+ - mmp_time
- Time that the MMP block was last updated.
* - 0x10
- char[64]
- - mmp\_nodename
+ - mmp_nodename
- Hostname of the node that opened the filesystem.
* - 0x50
- char[32]
- - mmp\_bdevname
+ - mmp_bdevname
- Block device name of the filesystem.
* - 0x70
- - \_\_le16
- - mmp\_check\_interval
+ - __le16
+ - mmp_check_interval
- The MMP re-check interval, in seconds.
* - 0x72
- - \_\_le16
- - mmp\_pad1
+ - __le16
+ - mmp_pad1
- Zero.
* - 0x74
- - \_\_le32[226]
- - mmp\_pad2
+ - __le32[226]
+ - mmp_pad2
- Zero.
* - 0x3FC
- - \_\_le32
- - mmp\_checksum
+ - __le32
+ - mmp_checksum
- Checksum of the MMP block.
diff --git a/Documentation/filesystems/ext4/overview.rst b/Documentation/filesystems/ext4/overview.rst
index 123ebfde47ee..0fad6eda6e15 100644
--- a/Documentation/filesystems/ext4/overview.rst
+++ b/Documentation/filesystems/ext4/overview.rst
@@ -7,7 +7,7 @@ An ext4 file system is split into a series of block groups. To reduce
performance difficulties due to fragmentation, the block allocator tries
very hard to keep each file's blocks within the same group, thereby
reducing seek times. The size of a block group is specified in
-``sb.s_blocks_per_group`` blocks, though it can also calculated as 8 \*
+``sb.s_blocks_per_group`` blocks, though it can also be calculated as 8 *
``block_size_in_bytes``. With the default block size of 4KiB, each group
will contain 32,768 blocks, for a length of 128MiB. The number of block
groups is the size of the device divided by the size of a block group.
diff --git a/Documentation/filesystems/ext4/special_inodes.rst b/Documentation/filesystems/ext4/special_inodes.rst
index 94f304e3a0a7..fc0636901fa0 100644
--- a/Documentation/filesystems/ext4/special_inodes.rst
+++ b/Documentation/filesystems/ext4/special_inodes.rst
@@ -34,7 +34,7 @@ ext4 reserves some inodes for special features, as follows:
* - 10
- Replica inode, used for some non-upstream feature?
* - 11
- - Traditional first non-reserved inode. Usually this is the lost+found directory. See s\_first\_ino in the superblock.
+ - Traditional first non-reserved inode. Usually this is the lost+found directory. See s_first_ino in the superblock.

Note that there are also some inodes allocated from non-reserved inode numbers
for other filesystem features which are not referenced from standard directory
@@ -47,9 +47,9 @@ hierarchy. These are generally referenced from the superblock. They are:

* - Superblock field
- Description
- * - s\_lpf\_ino
+ * - s_lpf_ino
- Inode number of lost+found directory.
- * - s\_prj\_quota\_inum
+ * - s_prj_quota_inum
- Inode number of quota file tracking project quotas
- * - s\_orphan\_file\_inum
+ * - s_orphan_file_inum
- Inode number of file tracking orphan inodes.
diff --git a/Documentation/filesystems/ext4/super.rst b/Documentation/filesystems/ext4/super.rst
index f6a548e957bb..268888522e35 100644
--- a/Documentation/filesystems/ext4/super.rst
+++ b/Documentation/filesystems/ext4/super.rst
@@ -7,7 +7,7 @@ The superblock records various information about the enclosing
filesystem, such as block counts, inode counts, supported features,
maintenance information, and more.

-If the sparse\_super feature flag is set, redundant copies of the
+If the sparse_super feature flag is set, redundant copies of the
superblock and group descriptors are kept only in the groups whose group
number is either 0 or a power of 3, 5, or 7. If the flag is not set,
redundant copies are kept in all groups.
@@ -27,107 +27,107 @@ The ext4 superblock is laid out as follows in
- Name
- Description
* - 0x0
- - \_\_le32
- - s\_inodes\_count
+ - __le32
+ - s_inodes_count
- Total inode count.
* - 0x4 - - \_\_le32 - - s\_blocks\_count\_lo + - __le32 + - s_blocks_count_lo - Total block count. * - 0x8 - - \_\_le32 - - s\_r\_blocks\_count\_lo + - __le32 + - s_r_blocks_count_lo - This number of blocks can only be allocated by the super-user. * - 0xC - - \_\_le32 - - s\_free\_blocks\_count\_lo + - __le32 + - s_free_blocks_count_lo - Free block count. * - 0x10 - - \_\_le32 - - s\_free\_inodes\_count + - __le32 + - s_free_inodes_count - Free inode count. * - 0x14 - - \_\_le32 - - s\_first\_data\_block + - __le32 + - s_first_data_block - First data block. This must be at least 1 for 1k-block filesystems and is typically 0 for all other block sizes. * - 0x18 - - \_\_le32 - - s\_log\_block\_size - - Block size is 2 ^ (10 + s\_log\_block\_size). + - __le32 + - s_log_block_size + - Block size is 2 ^ (10 + s_log_block_size). * - 0x1C - - \_\_le32 - - s\_log\_cluster\_size - - Cluster size is 2 ^ (10 + s\_log\_cluster\_size) blocks if bigalloc is - enabled. Otherwise s\_log\_cluster\_size must equal s\_log\_block\_size. + - __le32 + - s_log_cluster_size + - Cluster size is 2 ^ (10 + s_log_cluster_size) blocks if bigalloc is + enabled. Otherwise s_log_cluster_size must equal s_log_block_size. * - 0x20 - - \_\_le32 - - s\_blocks\_per\_group + - __le32 + - s_blocks_per_group - Blocks per group. * - 0x24 - - \_\_le32 - - s\_clusters\_per\_group + - __le32 + - s_clusters_per_group - Clusters per group, if bigalloc is enabled. Otherwise - s\_clusters\_per\_group must equal s\_blocks\_per\_group. + s_clusters_per_group must equal s_blocks_per_group. * - 0x28 - - \_\_le32 - - s\_inodes\_per\_group + - __le32 + - s_inodes_per_group - Inodes per group. * - 0x2C - - \_\_le32 - - s\_mtime + - __le32 + - s_mtime - Mount time, in seconds since the epoch. * - 0x30 - - \_\_le32 - - s\_wtime + - __le32 + - s_wtime - Write time, in seconds since the epoch. * - 0x34 - - \_\_le16 - - s\_mnt\_count + - __le16 + - s_mnt_count - Number of mounts since the last fsck. * - 0x36 - - \_\_le16 - - s\_max\_mnt\_count + - __le16 + - s_max_mnt_count - Number of mounts beyond which a fsck is needed. * - 0x38 - - \_\_le16 - - s\_magic + - __le16 + - s_magic - Magic signature, 0xEF53 * - 0x3A - - \_\_le16 - - s\_state + - __le16 + - s_state - File system state. See super_state_ for more info. * - 0x3C - - \_\_le16 - - s\_errors + - __le16 + - s_errors - Behaviour when detecting errors. See super_errors_ for more info. * - 0x3E - - \_\_le16 - - s\_minor\_rev\_level + - __le16 + - s_minor_rev_level - Minor revision level. * - 0x40 - - \_\_le32 - - s\_lastcheck + - __le32 + - s_lastcheck - Time of last check, in seconds since the epoch. * - 0x44 - - \_\_le32 - - s\_checkinterval + - __le32 + - s_checkinterval - Maximum time between checks, in seconds. * - 0x48 - - \_\_le32 - - s\_creator\_os + - __le32 + - s_creator_os - Creator OS. See the table super_creator_ for more info. * - 0x4C - - \_\_le32 - - s\_rev\_level + - __le32 + - s_rev_level - Revision level. See the table super_revision_ for more info. * - 0x50 - - \_\_le16 - - s\_def\_resuid + - __le16 + - s_def_resuid - Default uid for reserved blocks. * - 0x52 - - \_\_le16 - - s\_def\_resgid + - __le16 + - s_def_resgid - Default gid for reserved blocks. * - - @@ -143,50 +143,50 @@ The ext4 superblock is laid out as follows in about a feature in either the compatible or incompatible feature set, it must abort and not try to meddle with things it doesn't understand... * - 0x54 - - \_\_le32 - - s\_first\_ino + - __le32 + - s_first_ino - First non-reserved inode. 
* - 0x58 - - \_\_le16 - - s\_inode\_size + - __le16 + - s_inode_size - Size of inode structure, in bytes. * - 0x5A - - \_\_le16 - - s\_block\_group\_nr + - __le16 + - s_block_group_nr - Block group # of this superblock. * - 0x5C - - \_\_le32 - - s\_feature\_compat + - __le32 + - s_feature_compat - Compatible feature set flags. Kernel can still read/write this fs even if it doesn't understand a flag; fsck should not do that. See the super_compat_ table for more info. * - 0x60 - - \_\_le32 - - s\_feature\_incompat + - __le32 + - s_feature_incompat - Incompatible feature set. If the kernel or fsck doesn't understand one of these bits, it should stop. See the super_incompat_ table for more info. * - 0x64 - - \_\_le32 - - s\_feature\_ro\_compat + - __le32 + - s_feature_ro_compat - Readonly-compatible feature set. If the kernel doesn't understand one of these bits, it can still mount read-only. See the super_rocompat_ table for more info. * - 0x68 - - \_\_u8 - - s\_uuid[16] + - __u8 + - s_uuid[16] - 128-bit UUID for volume. * - 0x78 - char - - s\_volume\_name[16] + - s_volume_name[16] - Volume label. * - 0x88 - char - - s\_last\_mounted[64] + - s_last_mounted[64] - Directory where filesystem was last mounted. * - 0xC8 - - \_\_le32 - - s\_algorithm\_usage\_bitmap + - __le32 + - s_algorithm_usage_bitmap - For compression (Not used in e2fsprogs/Linux) * - - @@ -194,18 +194,18 @@ The ext4 superblock is laid out as follows in - Performance hints. Directory preallocation should only happen if the EXT4_FEATURE_COMPAT_DIR_PREALLOC flag is on. * - 0xCC - - \_\_u8 - - s\_prealloc\_blocks + - __u8 + - s_prealloc_blocks - #. of blocks to try to preallocate for ... files? (Not used in e2fsprogs/Linux) * - 0xCD - - \_\_u8 - - s\_prealloc\_dir\_blocks + - __u8 + - s_prealloc_dir_blocks - #. of blocks to preallocate for directories. (Not used in e2fsprogs/Linux) * - 0xCE - - \_\_le16 - - s\_reserved\_gdt\_blocks + - __le16 + - s_reserved_gdt_blocks - Number of reserved GDT entries for future filesystem expansion. * - - @@ -213,281 +213,281 @@ The ext4 superblock is laid out as follows in - Journalling support is valid only if EXT4_FEATURE_COMPAT_HAS_JOURNAL is set. * - 0xD0 - - \_\_u8 - - s\_journal\_uuid[16] + - __u8 + - s_journal_uuid[16] - UUID of journal superblock * - 0xE0 - - \_\_le32 - - s\_journal\_inum + - __le32 + - s_journal_inum - inode number of journal file. * - 0xE4 - - \_\_le32 - - s\_journal\_dev + - __le32 + - s_journal_dev - Device number of journal file, if the external journal feature flag is set. * - 0xE8 - - \_\_le32 - - s\_last\_orphan + - __le32 + - s_last_orphan - Start of list of orphaned inodes to delete. * - 0xEC - - \_\_le32 - - s\_hash\_seed[4] + - __le32 + - s_hash_seed[4] - HTREE hash seed. * - 0xFC - - \_\_u8 - - s\_def\_hash\_version + - __u8 + - s_def_hash_version - Default hash algorithm to use for directory hashes. See super_def_hash_ for more info. * - 0xFD - - \_\_u8 - - s\_jnl\_backup\_type - - If this value is 0 or EXT3\_JNL\_BACKUP\_BLOCKS (1), then the + - __u8 + - s_jnl_backup_type + - If this value is 0 or EXT3_JNL_BACKUP_BLOCKS (1), then the ``s_jnl_blocks`` field contains a duplicate copy of the inode's ``i_block[]`` array and ``i_size``. * - 0xFE - - \_\_le16 - - s\_desc\_size + - __le16 + - s_desc_size - Size of group descriptors, in bytes, if the 64bit incompat feature flag is set. * - 0x100 - - \_\_le32 - - s\_default\_mount\_opts + - __le32 + - s_default_mount_opts - Default mount options. See the super_mountopts_ table for more info. 
* - 0x104 - - \_\_le32 - - s\_first\_meta\_bg - - First metablock block group, if the meta\_bg feature is enabled. + - __le32 + - s_first_meta_bg + - First metablock block group, if the meta_bg feature is enabled. * - 0x108 - - \_\_le32 - - s\_mkfs\_time + - __le32 + - s_mkfs_time - When the filesystem was created, in seconds since the epoch. * - 0x10C - - \_\_le32 - - s\_jnl\_blocks[17] + - __le32 + - s_jnl_blocks[17] - Backup copy of the journal inode's ``i_block[]`` array in the first 15 - elements and i\_size\_high and i\_size in the 16th and 17th elements, + elements and i_size_high and i_size in the 16th and 17th elements, respectively. * - - - - 64bit support is valid only if EXT4_FEATURE_COMPAT_64BIT is set. * - 0x150 - - \_\_le32 - - s\_blocks\_count\_hi + - __le32 + - s_blocks_count_hi - High 32-bits of the block count. * - 0x154 - - \_\_le32 - - s\_r\_blocks\_count\_hi + - __le32 + - s_r_blocks_count_hi - High 32-bits of the reserved block count. * - 0x158 - - \_\_le32 - - s\_free\_blocks\_count\_hi + - __le32 + - s_free_blocks_count_hi - High 32-bits of the free block count. * - 0x15C - - \_\_le16 - - s\_min\_extra\_isize + - __le16 + - s_min_extra_isize - All inodes have at least # bytes. * - 0x15E - - \_\_le16 - - s\_want\_extra\_isize + - __le16 + - s_want_extra_isize - New inodes should reserve # bytes. * - 0x160 - - \_\_le32 - - s\_flags + - __le32 + - s_flags - Miscellaneous flags. See the super_flags_ table for more info. * - 0x164 - - \_\_le16 - - s\_raid\_stride + - __le16 + - s_raid_stride - RAID stride. This is the number of logical blocks read from or written to the disk before moving to the next disk. This affects the placement of filesystem metadata, which will hopefully make RAID storage faster. * - 0x166 - - \_\_le16 - - s\_mmp\_interval + - __le16 + - s_mmp_interval - #. seconds to wait in multi-mount prevention (MMP) checking. In theory, MMP is a mechanism to record in the superblock which host and device have mounted the filesystem, in order to prevent multiple mounts. This feature does not seem to be implemented... * - 0x168 - - \_\_le64 - - s\_mmp\_block + - __le64 + - s_mmp_block - Block # for multi-mount protection data. * - 0x170 - - \_\_le32 - - s\_raid\_stripe\_width + - __le32 + - s_raid_stripe_width - RAID stripe width. This is the number of logical blocks read from or written to the disk before coming back to the current disk. This is used by the block allocator to try to reduce the number of read-modify-write operations in a RAID5/6. * - 0x174 - - \_\_u8 - - s\_log\_groups\_per\_flex + - __u8 + - s_log_groups_per_flex - Size of a flexible block group is 2 ^ ``s_log_groups_per_flex``. * - 0x175 - - \_\_u8 - - s\_checksum\_type + - __u8 + - s_checksum_type - Metadata checksum algorithm type. The only valid value is 1 (crc32c). * - 0x176 - - \_\_le16 - - s\_reserved\_pad + - __le16 + - s_reserved_pad - * - 0x178 - - \_\_le64 - - s\_kbytes\_written + - __le64 + - s_kbytes_written - Number of KiB written to this filesystem over its lifetime. * - 0x180 - - \_\_le32 - - s\_snapshot\_inum + - __le32 + - s_snapshot_inum - inode number of active snapshot. (Not used in e2fsprogs/Linux.) * - 0x184 - - \_\_le32 - - s\_snapshot\_id + - __le32 + - s_snapshot_id - Sequential ID of active snapshot. (Not used in e2fsprogs/Linux.) * - 0x188 - - \_\_le64 - - s\_snapshot\_r\_blocks\_count + - __le64 + - s_snapshot_r_blocks_count - Number of blocks reserved for active snapshot's future use. (Not used in e2fsprogs/Linux.) 
* - 0x190
- - \_\_le32
- - s\_snapshot\_list
+ - __le32
+ - s_snapshot_list
- inode number of the head of the on-disk snapshot list. (Not used in
e2fsprogs/Linux.)
* - 0x194
- - \_\_le32
- - s\_error\_count
+ - __le32
+ - s_error_count
- Number of errors seen.
* - 0x198
- - \_\_le32
- - s\_first\_error\_time
+ - __le32
+ - s_first_error_time
- First time an error happened, in seconds since the epoch.
* - 0x19C
- - \_\_le32
- - s\_first\_error\_ino
+ - __le32
+ - s_first_error_ino
- inode involved in first error.
* - 0x1A0
- - \_\_le64
- - s\_first\_error\_block
+ - __le64
+ - s_first_error_block
- Block number involved in the first error.
* - 0x1A8
- - \_\_u8
- - s\_first\_error\_func[32]
+ - __u8
+ - s_first_error_func[32]
- Name of function where the error happened.
* - 0x1C8
- - \_\_le32
- - s\_first\_error\_line
+ - __le32
+ - s_first_error_line
- Line number where error happened.
* - 0x1CC
- - \_\_le32
- - s\_last\_error\_time
+ - __le32
+ - s_last_error_time
- Time of most recent error, in seconds since the epoch.
* - 0x1D0
- - \_\_le32
- - s\_last\_error\_ino
+ - __le32
+ - s_last_error_ino
- inode involved in most recent error.
* - 0x1D4
- - \_\_le32
- - s\_last\_error\_line
+ - __le32
+ - s_last_error_line
- Line number where most recent error happened.
* - 0x1D8
- - \_\_le64
- - s\_last\_error\_block
+ - __le64
+ - s_last_error_block
- Block number involved in the most recent error.
* - 0x1E0
- - \_\_u8
- - s\_last\_error\_func[32]
+ - __u8
+ - s_last_error_func[32]
- Name of function where the most recent error happened.
* - 0x200
- - \_\_u8
- - s\_mount\_opts[64]
+ - __u8
+ - s_mount_opts[64]
- ASCIIZ string of mount options.
* - 0x240
- - \_\_le32
- - s\_usr\_quota\_inum
+ - __le32
+ - s_usr_quota_inum
- Inode number of user `quota <quota>`__ file.
* - 0x244
- - \_\_le32
- - s\_grp\_quota\_inum
+ - __le32
+ - s_grp_quota_inum
- Inode number of group `quota <quota>`__ file.
* - 0x248
- - \_\_le32
- - s\_overhead\_blocks
+ - __le32
+ - s_overhead_blocks
- Overhead blocks/clusters in fs. (Huh? This field is always zero, which
means that the kernel calculates it dynamically.)
* - 0x24C
- - \_\_le32
- - s\_backup\_bgs[2]
- - Block groups containing superblock backups (if sparse\_super2)
+ - __le32
+ - s_backup_bgs[2]
+ - Block groups containing superblock backups (if sparse_super2)
* - 0x254
- - \_\_u8
- - s\_encrypt\_algos[4]
+ - __u8
+ - s_encrypt_algos[4]
- Encryption algorithms in use. There can be up to four algorithms in use
at any time; valid algorithm codes are given in the super_encrypt_ table
below.
* - 0x258
- - \_\_u8
- - s\_encrypt\_pw\_salt[16]
+ - __u8
+ - s_encrypt_pw_salt[16]
- Salt for the string2key algorithm for encryption.
* - 0x268
- - \_\_le32
- - s\_lpf\_ino
+ - __le32
+ - s_lpf_ino
- Inode number of lost+found
* - 0x26C
- - \_\_le32
- - s\_prj\_quota\_inum
+ - __le32
+ - s_prj_quota_inum
- Inode that tracks project quotas.
* - 0x270
- - \_\_le32
- - s\_checksum\_seed
- - Checksum seed used for metadata\_csum calculations. This value is
- crc32c(~0, $orig\_fs\_uuid).
+ - __le32
+ - s_checksum_seed
+ - Checksum seed used for metadata_csum calculations. This value is
+ crc32c(~0, $orig_fs_uuid).
* - 0x274
- - \_\_u8
- - s\_wtime_hi
+ - __u8
+ - s_wtime_hi
- Upper 8 bits of the s_wtime field.
* - 0x275
- - \_\_u8
- - s\_mtime_hi
+ - __u8
+ - s_mtime_hi
- Upper 8 bits of the s_mtime field.
* - 0x276
- - \_\_u8
- - s\_mkfs_time_hi
+ - __u8
+ - s_mkfs_time_hi
- Upper 8 bits of the s_mkfs_time field.
* - 0x277
- - \_\_u8
- - s\_lastcheck_hi
+ - __u8
+ - s_lastcheck_hi
- Upper 8 bits of the s_lastcheck field.
* - 0x278
- - \_\_u8
- - s\_first_error_time_hi
+ - __u8
+ - s_first_error_time_hi
- Upper 8 bits of the s_first_error_time field.
* - 0x279
- - \_\_u8
- - s\_last_error_time_hi
+ - __u8
+ - s_last_error_time_hi
- Upper 8 bits of the s_last_error_time field.
* - 0x27A
- - \_\_u8
- - s\_pad[2]
+ - __u8
+ - s_pad[2]
- Zero padding.
* - 0x27C
- - \_\_le16
- - s\_encoding
+ - __le16
+ - s_encoding
- Filename charset encoding.
* - 0x27E
- - \_\_le16
- - s\_encoding_flags
+ - __le16
+ - s_encoding_flags
- Filename charset encoding flags.
* - 0x280
- - \_\_le32
- - s\_orphan\_file\_inum
+ - __le32
+ - s_orphan_file_inum
- Orphan file inode number.
* - 0x284
- - \_\_le32
- - s\_reserved[94]
+ - __le32
+ - s_reserved[94]
- Padding to the end of the block.
* - 0x3FC
- - \_\_le32
- - s\_checksum
+ - __le32
+ - s_checksum
- Superblock checksum.

.. _super_state:
@@ -574,44 +574,44 @@ following:

* - Value
- Description
* - 0x1
- - Directory preallocation (COMPAT\_DIR\_PREALLOC).
+ - Directory preallocation (COMPAT_DIR_PREALLOC).
* - 0x2
- “imagic inodes”. Not clear from the code what this does
- (COMPAT\_IMAGIC\_INODES).
+ (COMPAT_IMAGIC_INODES).
* - 0x4
- - Has a journal (COMPAT\_HAS\_JOURNAL).
+ - Has a journal (COMPAT_HAS_JOURNAL).
* - 0x8
- - Supports extended attributes (COMPAT\_EXT\_ATTR).
+ - Supports extended attributes (COMPAT_EXT_ATTR).
* - 0x10
- Has reserved GDT blocks for filesystem expansion
- (COMPAT\_RESIZE\_INODE). Requires RO\_COMPAT\_SPARSE\_SUPER.
+ (COMPAT_RESIZE_INODE). Requires RO_COMPAT_SPARSE_SUPER.
* - 0x20
- - Has directory indices (COMPAT\_DIR\_INDEX).
+ - Has directory indices (COMPAT_DIR_INDEX).
* - 0x40
- “Lazy BG”. Not in Linux kernel, seems to have been for uninitialized
- block groups? (COMPAT\_LAZY\_BG)
+ block groups? (COMPAT_LAZY_BG)
* - 0x80
- - “Exclude inode”. Not used. (COMPAT\_EXCLUDE\_INODE).
+ - “Exclude inode”. Not used. (COMPAT_EXCLUDE_INODE).
* - 0x100
- “Exclude bitmap”. Seems to be used to indicate the presence of
snapshot-related exclude bitmaps? Not defined in kernel or used in
- e2fsprogs (COMPAT\_EXCLUDE\_BITMAP).
+ e2fsprogs (COMPAT_EXCLUDE_BITMAP).
* - 0x200
- - Sparse Super Block, v2. If this flag is set, the SB field s\_backup\_bgs
+ - Sparse Super Block, v2. If this flag is set, the SB field s_backup_bgs
points to the two block groups that contain backup superblocks
- (COMPAT\_SPARSE\_SUPER2).
+ (COMPAT_SPARSE_SUPER2).
* - 0x400
- Fast commits supported. Although fast commit blocks are backward
incompatible, fast commit blocks are not always present in the journal.
If fast commit blocks are present in the journal, JBD2 incompat feature
- (JBD2\_FEATURE\_INCOMPAT\_FAST\_COMMIT) gets
- set (COMPAT\_FAST\_COMMIT).
+ (JBD2_FEATURE_INCOMPAT_FAST_COMMIT) gets
+ set (COMPAT_FAST_COMMIT).
* - 0x1000
- Orphan file allocated. This is the special file for more efficient
tracking of unlinked but still open inodes. When there may be any
entries in the file, we additionally set proper rocompat feature
- (RO\_COMPAT\_ORPHAN\_PRESENT).
+ (RO_COMPAT_ORPHAN_PRESENT).

.. _super_incompat:
@@ -625,45 +625,45 @@ following:

* - Value
- Description
* - 0x1
- - Compression (INCOMPAT\_COMPRESSION).
+ - Compression (INCOMPAT_COMPRESSION).
* - 0x2
- - Directory entries record the file type. See ext4\_dir\_entry\_2 below
- (INCOMPAT\_FILETYPE).
+ - Directory entries record the file type. See ext4_dir_entry_2 below
+ (INCOMPAT_FILETYPE).
* - 0x4 - - Filesystem needs recovery (INCOMPAT\_RECOVER). + - Filesystem needs recovery (INCOMPAT_RECOVER). * - 0x8 - - Filesystem has a separate journal device (INCOMPAT\_JOURNAL\_DEV). + - Filesystem has a separate journal device (INCOMPAT_JOURNAL_DEV). * - 0x10 - Meta block groups. See the earlier discussion of this feature - (INCOMPAT\_META\_BG). + (INCOMPAT_META_BG). * - 0x40 - - Files in this filesystem use extents (INCOMPAT\_EXTENTS). + - Files in this filesystem use extents (INCOMPAT_EXTENTS). * - 0x80 - - Enable a filesystem size of 2^64 blocks (INCOMPAT\_64BIT). + - Enable a filesystem size of 2^64 blocks (INCOMPAT_64BIT). * - 0x100 - - Multiple mount protection (INCOMPAT\_MMP). + - Multiple mount protection (INCOMPAT_MMP). * - 0x200 - Flexible block groups. See the earlier discussion of this feature - (INCOMPAT\_FLEX\_BG). + (INCOMPAT_FLEX_BG). * - 0x400 - Inodes can be used to store large extended attribute values - (INCOMPAT\_EA\_INODE). + (INCOMPAT_EA_INODE). * - 0x1000 - - Data in directory entry (INCOMPAT\_DIRDATA). (Not implemented?) + - Data in directory entry (INCOMPAT_DIRDATA). (Not implemented?) * - 0x2000 - Metadata checksum seed is stored in the superblock. This feature enables - the administrator to change the UUID of a metadata\_csum filesystem + the administrator to change the UUID of a metadata_csum filesystem while the filesystem is mounted; without it, the checksum definition - requires all metadata blocks to be rewritten (INCOMPAT\_CSUM\_SEED). + requires all metadata blocks to be rewritten (INCOMPAT_CSUM_SEED). * - 0x4000 - - Large directory >2GB or 3-level htree (INCOMPAT\_LARGEDIR). Prior to + - Large directory >2GB or 3-level htree (INCOMPAT_LARGEDIR). Prior to this feature, directories could not be larger than 4GiB and could not have an htree more than 2 levels deep. If this feature is enabled, directories can be larger than 4GiB and have a maximum htree depth of 3. * - 0x8000 - - Data in inode (INCOMPAT\_INLINE\_DATA). + - Data in inode (INCOMPAT_INLINE_DATA). * - 0x10000 - - Encrypted inodes are present on the filesystem. (INCOMPAT\_ENCRYPT). + - Encrypted inodes are present on the filesystem. (INCOMPAT_ENCRYPT). .. _super_rocompat: @@ -678,54 +678,54 @@ the following: - Description * - 0x1 - Sparse superblocks. See the earlier discussion of this feature - (RO\_COMPAT\_SPARSE\_SUPER). + (RO_COMPAT_SPARSE_SUPER). * - 0x2 - This filesystem has been used to store a file greater than 2GiB - (RO\_COMPAT\_LARGE\_FILE). + (RO_COMPAT_LARGE_FILE). * - 0x4 - - Not used in kernel or e2fsprogs (RO\_COMPAT\_BTREE\_DIR). + - Not used in kernel or e2fsprogs (RO_COMPAT_BTREE_DIR). * - 0x8 - This filesystem has files whose sizes are represented in units of logical blocks, not 512-byte sectors. This implies a very large file - indeed! (RO\_COMPAT\_HUGE\_FILE) + indeed! (RO_COMPAT_HUGE_FILE) * - 0x10 - Group descriptors have checksums. In addition to detecting corruption, this is useful for lazy formatting with uninitialized groups - (RO\_COMPAT\_GDT\_CSUM). + (RO_COMPAT_GDT_CSUM). * - 0x20 - Indicates that the old ext3 32,000 subdirectory limit no longer applies - (RO\_COMPAT\_DIR\_NLINK). A directory's i\_links\_count will be set to 1 + (RO_COMPAT_DIR_NLINK). A directory's i_links_count will be set to 1 if it is incremented past 64,999. * - 0x40 - Indicates that large inodes exist on this filesystem - (RO\_COMPAT\_EXTRA\_ISIZE). + (RO_COMPAT_EXTRA_ISIZE). * - 0x80 - - This filesystem has a snapshot (RO\_COMPAT\_HAS\_SNAPSHOT). 
+ - This filesystem has a snapshot (RO_COMPAT_HAS_SNAPSHOT).
* - 0x100
- - `Quota <Quota>`__ (RO\_COMPAT\_QUOTA).
+ - `Quota <Quota>`__ (RO_COMPAT_QUOTA).
* - 0x200
- This filesystem supports “bigalloc”, which means that file extents are
tracked in units of clusters (of blocks) instead of blocks
- (RO\_COMPAT\_BIGALLOC).
+ (RO_COMPAT_BIGALLOC).
* - 0x400
- This filesystem supports metadata checksumming.
- (RO\_COMPAT\_METADATA\_CSUM; implies RO\_COMPAT\_GDT\_CSUM, though
- GDT\_CSUM must not be set)
+ (RO_COMPAT_METADATA_CSUM; implies RO_COMPAT_GDT_CSUM, though
+ GDT_CSUM must not be set)
* - 0x800
- Filesystem supports replicas. This feature is neither in the kernel nor
- e2fsprogs. (RO\_COMPAT\_REPLICA)
+ e2fsprogs. (RO_COMPAT_REPLICA)
* - 0x1000
- Read-only filesystem image; the kernel will not mount this image
read-write and most tools will refuse to write to the image.
- (RO\_COMPAT\_READONLY)
+ (RO_COMPAT_READONLY)
* - 0x2000
- - Filesystem tracks project quotas. (RO\_COMPAT\_PROJECT)
+ - Filesystem tracks project quotas. (RO_COMPAT_PROJECT)
* - 0x8000
- - Verity inodes may be present on the filesystem. (RO\_COMPAT\_VERITY)
+ - Verity inodes may be present on the filesystem. (RO_COMPAT_VERITY)
* - 0x10000
- Indicates orphan file may have valid orphan entries and thus we need to
clean them up when mounting the filesystem
- (RO\_COMPAT\_ORPHAN\_PRESENT).
+ (RO_COMPAT_ORPHAN_PRESENT).

.. _super_def_hash:
@@ -761,36 +761,36 @@ The ``s_default_mount_opts`` field is any combination of the following:

* - Value
- Description
* - 0x0001
- - Print debugging info upon (re)mount. (EXT4\_DEFM\_DEBUG)
+ - Print debugging info upon (re)mount. (EXT4_DEFM_DEBUG)
* - 0x0002
- New files take the gid of the containing directory (instead of the fsgid
- of the current process). (EXT4\_DEFM\_BSDGROUPS)
+ of the current process). (EXT4_DEFM_BSDGROUPS)
* - 0x0004
- - Support userspace-provided extended attributes. (EXT4\_DEFM\_XATTR\_USER)
+ - Support userspace-provided extended attributes. (EXT4_DEFM_XATTR_USER)
* - 0x0008
- - Support POSIX access control lists (ACLs). (EXT4\_DEFM\_ACL)
+ - Support POSIX access control lists (ACLs). (EXT4_DEFM_ACL)
* - 0x0010
- - Do not support 32-bit UIDs. (EXT4\_DEFM\_UID16)
+ - Do not support 32-bit UIDs. (EXT4_DEFM_UID16)
* - 0x0020
- All data and metadata are committed to the journal.
- (EXT4\_DEFM\_JMODE\_DATA)
+ (EXT4_DEFM_JMODE_DATA)
* - 0x0040
- All data are flushed to the disk before metadata are committed to the
- journal. (EXT4\_DEFM\_JMODE\_ORDERED)
+ journal. (EXT4_DEFM_JMODE_ORDERED)
* - 0x0060
- Data ordering is not preserved; data may be written after the metadata
- has been written. (EXT4\_DEFM\_JMODE\_WBACK)
+ has been written. (EXT4_DEFM_JMODE_WBACK)
* - 0x0100
- - Disable write flushes. (EXT4\_DEFM\_NOBARRIER)
+ - Disable write flushes. (EXT4_DEFM_NOBARRIER)
* - 0x0200
- Track which blocks in a filesystem are metadata and therefore should not
be used as data blocks. This option will be enabled by default on 3.18,
- hopefully. (EXT4\_DEFM\_BLOCK\_VALIDITY)
+ hopefully. (EXT4_DEFM_BLOCK_VALIDITY)
* - 0x0400
- Enable DISCARD support, where the storage device is told about blocks
- becoming unused. (EXT4\_DEFM\_DISCARD)
+ becoming unused. (EXT4_DEFM_DISCARD)
* - 0x0800
- - Disable delayed allocation. (EXT4\_DEFM\_NODELALLOC)
+ - Disable delayed allocation. (EXT4_DEFM_NODELALLOC)

.. _super_flags:
@@ -820,12 +820,12 @@ The ``s_encrypt_algos`` list can contain any of the following:

* - Value
- Description
* - 0
- - Invalid algorithm (ENCRYPTION\_MODE\_INVALID).
+ - Invalid algorithm (ENCRYPTION_MODE_INVALID).
* - 1
- - 256-bit AES in XTS mode (ENCRYPTION\_MODE\_AES\_256\_XTS).
+ - 256-bit AES in XTS mode (ENCRYPTION_MODE_AES_256_XTS).
* - 2
- - 256-bit AES in GCM mode (ENCRYPTION\_MODE\_AES\_256\_GCM).
+ - 256-bit AES in GCM mode (ENCRYPTION_MODE_AES_256_GCM).
* - 3
- - 256-bit AES in CBC mode (ENCRYPTION\_MODE\_AES\_256\_CBC).
+ - 256-bit AES in CBC mode (ENCRYPTION_MODE_AES_256_CBC).

Total size of the superblock is 1024 bytes.
diff --git a/Documentation/filesystems/netfs_library.rst b/Documentation/filesystems/netfs_library.rst
index a80a59941d2f..4d19b19bcc08 100644
--- a/Documentation/filesystems/netfs_library.rst
+++ b/Documentation/filesystems/netfs_library.rst
@@ -37,30 +37,31 @@ The network filesystem helper library needs a place to store a bit of state for
its use on each netfs inode it is helping to manage. To this end, a context
structure is defined::

- struct netfs_i_context {
+ struct netfs_inode {
+ struct inode inode;
const struct netfs_request_ops *ops;
- struct fscache_cookie *cache;
+ struct fscache_cookie *cache;
};

-A network filesystem that wants to use netfs lib must place one of these
-directly after the VFS ``struct inode`` it allocates, usually as part of its
-own struct. This can be done in a way similar to the following::
+A network filesystem that wants to use netfs lib must place one of these in its
+inode wrapper struct instead of the VFS ``struct inode``. This can be done in
+a way similar to the following::

struct my_inode {
- struct {
- /* These must be contiguous */
- struct inode vfs_inode;
- struct netfs_i_context netfs_ctx;
- };
+ struct netfs_inode netfs; /* Netfslib context and vfs inode */
...
};

-This allows netfslib to find its state by simple offset from the inode pointer,
-thereby allowing the netfslib helper functions to be pointed to directly by the
-VFS/VM operation tables.
+This allows netfslib to find its state by using ``container_of()`` from the
+inode pointer, thereby allowing the netfslib helper functions to be pointed to
+directly by the VFS/VM operation tables.

The structure contains the following fields:

+ * ``inode``
+
+ The VFS inode structure.
+
* ``ops``

The set of operations provided by the network filesystem to netfslib.
@@ -78,19 +79,17 @@ To help deal with the per-inode context, a number of helper functions are
provided.
Firstly, a function to perform basic initialisation on a context and set the
operations table pointer::

- void netfs_i_context_init(struct inode *inode,
- const struct netfs_request_ops *ops);
+ void netfs_inode_init(struct netfs_inode *ctx,
+ const struct netfs_request_ops *ops);

-then two functions to cast between the VFS inode structure and the netfs
-context::
+then a function to cast from the VFS inode structure to the netfs context::

- struct netfs_i_context *netfs_i_context(struct inode *inode);
- struct inode *netfs_inode(struct netfs_i_context *ctx);
+ struct netfs_inode *netfs_inode(struct inode *inode);

and finally, a function to get the cache cookie pointer from the context
attached to an inode (or NULL if fscache is disabled)::

- struct fscache_cookie *netfs_i_cookie(struct inode *inode);
+ struct fscache_cookie *netfs_i_cookie(struct netfs_inode *ctx);


Buffered Read Helpers
@@ -137,8 +136,9 @@ Three read helpers are provided::

void netfs_readahead(struct readahead_control *ractl);
int netfs_read_folio(struct file *file,
- struct folio *folio);
- int netfs_write_begin(struct file *file,
+ struct folio *folio);
+ int netfs_write_begin(struct netfs_inode *ctx,
+ struct file *file,
struct address_space *mapping,
loff_t pos,
unsigned int len,
@@ -158,9 +158,10 @@ The helpers manage the read request, calling back into the network filesystem
through the supplied table of operations. Waits will be performed as
necessary before returning for helpers that are meant to be synchronous.

-If an error occurs and netfs_priv is non-NULL, ops->cleanup() will be called to
-deal with it. If some parts of the request are in progress when an error
-occurs, the request will get partially completed if sufficient data is read.
+If an error occurs, ->free_request() will be called to clean up the
+allocated netfs_io_request struct. If some parts of the request are in
+progress when an error occurs, the request will get partially completed if
+sufficient data is read.

Additionally, there is::
@@ -208,8 +209,7 @@ The above fields are the ones the netfs can use. They are:
* ``netfs_priv``

The network filesystem's private data. The value for this can be passed in
- to the helper functions or set during the request. The ->cleanup() op will
- be called if this is non-NULL at the end.
+ to the helper functions or set during the request.

* ``start``
* ``len``
@@ -294,6 +294,7 @@ through which it can issue requests and negotiate::

struct netfs_request_ops {
void (*init_request)(struct netfs_io_request *rreq, struct file *file);
+ void (*free_request)(struct netfs_io_request *rreq);
int (*begin_cache_operation)(struct netfs_io_request *rreq);
void (*expand_readahead)(struct netfs_io_request *rreq);
bool (*clamp_length)(struct netfs_io_subrequest *subreq);
@@ -302,7 +303,6 @@ through which it can issue requests and negotiate::
int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
struct folio *folio, void **_fsdata);
void (*done)(struct netfs_io_request *rreq);
- void (*cleanup)(struct address_space *mapping, void *netfs_priv);
};

The operations are as follows:
@@ -310,7 +310,12 @@ The operations are as follows:
* ``init_request()``

[Optional] This is called to initialise the request structure. It is given
- the file for reference and can modify the ->netfs_priv value.
+ the file for reference.
+
+ * ``free_request()``
+
+ [Optional] This is called as the request is being deallocated so that the
+ filesystem can clean up any state it has attached there.
 * ``begin_cache_operation()``

@@ -384,11 +389,6 @@ The operations are as follows:

   [Optional] This is called after the folios in the request have all been
   unlocked (and marked uptodate if applicable).

- * ``cleanup``
-
-   [Optional] This is called as the request is being deallocated so that the
-   filesystem can clean up ->netfs_priv.
-

 Read Helper Procedure

diff --git a/Documentation/index.rst b/Documentation/index.rst
index 8f9be0e658b4..67036a05b771 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -137,7 +137,6 @@ needed).
    scheduler/index
    mhi/index
    peci/index
-   hte/index

 Architecture-agnostic documentation
 -----------------------------------

diff --git a/Documentation/kbuild/llvm.rst b/Documentation/kbuild/llvm.rst
index b854bb413164..6b2bac8e9ce0 100644
--- a/Documentation/kbuild/llvm.rst
+++ b/Documentation/kbuild/llvm.rst
@@ -129,18 +129,24 @@ yet. Bug reports are always welcome at the issue tracker below!
    * - arm64
      - Supported
      - ``LLVM=1``
+   * - hexagon
+     - Maintained
+     - ``LLVM=1``
    * - mips
      - Maintained
-     - ``CC=clang``
+     - ``LLVM=1``
    * - powerpc
      - Maintained
      - ``CC=clang``
    * - riscv
      - Maintained
-     - ``CC=clang``
+     - ``LLVM=1``
    * - s390
      - Maintained
      - ``CC=clang``
+   * - um (User Mode)
+     - Maintained
+     - ``LLVM=1``
    * - x86
      - Supported
      - ``LLVM=1``

diff --git a/Documentation/loongarch/introduction.rst b/Documentation/loongarch/introduction.rst
index 2bf40ad370df..216b3f390e80 100644
--- a/Documentation/loongarch/introduction.rst
+++ b/Documentation/loongarch/introduction.rst
@@ -45,10 +45,12 @@ Name              Alias           Usage               Preserved
 ``$r23``-``$r31`` ``$s0``-``$s8`` Static registers    Yes
 ================= =============== =================== ============

-Note: The register ``$r21`` is reserved in the ELF psABI, but used by the Linux
-kernel for storing the percpu base address. It normally has no ABI name, but is
-called ``$u0`` in the kernel. You may also see ``$v0`` or ``$v1`` in some old code,
-however they are deprecated aliases of ``$a0`` and ``$a1`` respectively.
+.. Note::
+   The register ``$r21`` is reserved in the ELF psABI, but used by the Linux
+   kernel for storing the percpu base address. It normally has no ABI name,
+   but is called ``$u0`` in the kernel. You may also see ``$v0`` or ``$v1``
+   in some old code, however they are deprecated aliases of ``$a0`` and ``$a1``
+   respectively.

 FPRs
 ----
@@ -69,8 +71,9 @@ Name              Alias              Usage               Preserved
 ``$f24``-``$f31`` ``$fs0``-``$fs7``  Static registers    Yes
 ================= ================== =================== ============

-Note: You may see ``$fv0`` or ``$fv1`` in some old code, however they are deprecated
-aliases of ``$fa0`` and ``$fa1`` respectively.
+.. Note::
+   You may see ``$fv0`` or ``$fv1`` in some old code, however they are
+   deprecated aliases of ``$fa0`` and ``$fa1`` respectively.
 VRs
 ----

diff --git a/Documentation/loongarch/irq-chip-model.rst b/Documentation/loongarch/irq-chip-model.rst
index 8d88f7ab2e5e..7988f4192363 100644
--- a/Documentation/loongarch/irq-chip-model.rst
+++ b/Documentation/loongarch/irq-chip-model.rst
@@ -145,12 +145,16 @@ Documentation of Loongson's LS7A chipset:

   https://github.com/loongson/LoongArch-Documentation/releases/latest/download/Loongson-7A1000-usermanual-2.00-EN.pdf (in English)

-Note: CPUINTC is CSR.ECFG/CSR.ESTAT and its interrupt controller described
-in Section 7.4 of "LoongArch Reference Manual, Vol 1"; LIOINTC is "Legacy I/O
-Interrupts" described in Section 11.1 of "Loongson 3A5000 Processor Reference
-Manual"; EIOINTC is "Extended I/O Interrupts" described in Section 11.2 of
-"Loongson 3A5000 Processor Reference Manual"; HTVECINTC is "HyperTransport
-Interrupts" described in Section 14.3 of "Loongson 3A5000 Processor Reference
-Manual"; PCH-PIC/PCH-MSI is "Interrupt Controller" described in Section 5 of
-"Loongson 7A1000 Bridge User Manual"; PCH-LPC is "LPC Interrupts" described in
-Section 24.3 of "Loongson 7A1000 Bridge User Manual".
+.. Note::
+   - CPUINTC is CSR.ECFG/CSR.ESTAT and its interrupt controller described
+     in Section 7.4 of "LoongArch Reference Manual, Vol 1";
+   - LIOINTC is "Legacy I/O Interrupts" described in Section 11.1 of
+     "Loongson 3A5000 Processor Reference Manual";
+   - EIOINTC is "Extended I/O Interrupts" described in Section 11.2 of
+     "Loongson 3A5000 Processor Reference Manual";
+   - HTVECINTC is "HyperTransport Interrupts" described in Section 14.3 of
+     "Loongson 3A5000 Processor Reference Manual";
+   - PCH-PIC/PCH-MSI is "Interrupt Controller" described in Section 5 of
+     "Loongson 7A1000 Bridge User Manual";
+   - PCH-LPC is "LPC Interrupts" described in Section 24.3 of
+     "Loongson 7A1000 Bridge User Manual".

diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
index 04216564a03c..9f41961d11d5 100644
--- a/Documentation/networking/ip-sysctl.rst
+++ b/Documentation/networking/ip-sysctl.rst
@@ -2925,6 +2925,43 @@ plpmtud_probe_interval - INTEGER

 	Default: 0

+reconf_enable - BOOLEAN
+	Enable or disable extension of Stream Reconfiguration functionality
+	specified in RFC6525. This extension provides the ability to "reset"
+	a stream, and it includes the Parameters of "Outgoing/Incoming SSN
+	Reset", "SSN/TSN Reset" and "Add Outgoing/Incoming Streams".
+
+	- 1: Enable extension.
+	- 0: Disable extension.
+
+	Default: 0
+
+intl_enable - BOOLEAN
+	Enable or disable extension of User Message Interleaving functionality
+	specified in RFC8260. This extension allows the interleaving of user
+	messages sent on different streams. With this feature enabled, I-DATA
+	chunk will replace DATA chunk to carry user messages if also supported
+	by the peer. Note that to use this feature, one needs to set this option
+	to 1 and also needs to set socket options SCTP_FRAGMENT_INTERLEAVE to 2
+	and SCTP_INTERLEAVING_SUPPORTED to 1.
+
+	- 1: Enable extension.
+	- 0: Disable extension.
+
+	Default: 0
+
+ecn_enable - BOOLEAN
+	Control use of Explicit Congestion Notification (ECN) by SCTP.
+	Like in TCP, ECN is used only when both ends of the SCTP connection
+	indicate support for it. This feature is useful in avoiding losses
+	due to congestion by allowing supporting routers to signal congestion
+	before having to drop packets.
+
+	- 1: Enable ecn.
+	- 0: Disable ecn.
+
+	Default: 1
+
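(Editorial illustration, not part of the patch: the ``intl_enable`` text above
names two per-socket prerequisites for I-DATA. A user-space sketch using the
lksctp-tools ``<netinet/sctp.h>`` definitions might look as follows;
``enable_idata()`` is an invented name and ``SCTP_FUTURE_ASSOC`` assumes a
reasonably recent header.)

	#include <netinet/sctp.h>
	#include <sys/socket.h>

	/* Assumes the sysctl side is already set: net.sctp.intl_enable=1. */
	static int enable_idata(int fd)
	{
		int frag = 2;			/* SCTP_FRAGMENT_INTERLEAVE level 2 */
		struct sctp_assoc_value av = {
			.assoc_id	= SCTP_FUTURE_ASSOC,
			.assoc_value	= 1,	/* request I-DATA support */
		};

		if (setsockopt(fd, IPPROTO_SCTP, SCTP_FRAGMENT_INTERLEAVE,
			       &frag, sizeof(frag)) < 0)
			return -1;
		return setsockopt(fd, IPPROTO_SCTP, SCTP_INTERLEAVING_SUPPORTED,
				  &av, sizeof(av));
	}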
 ``/proc/sys/net/core/*``
 ========================

diff --git a/Documentation/networking/phy.rst b/Documentation/networking/phy.rst
index d43da709bf40..704f31da5167 100644
--- a/Documentation/networking/phy.rst
+++ b/Documentation/networking/phy.rst
@@ -104,7 +104,7 @@ Whenever possible, use the PHY side RGMII delay for these reasons:

 * PHY device drivers in PHYLIB being reusable by nature, being able to
   configure correctly a specified delay enables more designs with similar delay
-  requirements to be operate correctly
+  requirements to be operated correctly

 For cases where the PHY is not capable of providing this delay, but the
 Ethernet MAC driver is capable of doing so, the correct phy_interface_t value

diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst
index 34415ae1af1b..19c286c23786 100644
--- a/Documentation/process/changes.rst
+++ b/Documentation/process/changes.rst
@@ -32,6 +32,7 @@ you probably needn't concern yourself with pcmciautils.
 GNU C                  5.1              gcc --version
 Clang/LLVM (optional)  11.0.0           clang --version
 GNU make               3.81             make --version
+bash                   4.2              bash --version
 binutils               2.23             ld -v
 flex                   2.5.35           flex --version
 bison                  2.0              bison --version
@@ -84,6 +85,12 @@ Make

 You will need GNU make 3.81 or later to build the kernel.

+Bash
+----
+
+Some bash scripts are used for the kernel build.
+Bash 4.2 or newer is needed.
+
 Binutils
 --------

@@ -362,6 +369,11 @@ Make

 - <ftp://ftp.gnu.org/gnu/make/>

+Bash
+----
+
+- <ftp://ftp.gnu.org/gnu/bash/>
+
 Binutils
 --------

diff --git a/Documentation/translations/zh_CN/loongarch/introduction.rst b/Documentation/translations/zh_CN/loongarch/introduction.rst
index e31a1a928c48..11686ee0caeb 100644
--- a/Documentation/translations/zh_CN/loongarch/introduction.rst
+++ b/Documentation/translations/zh_CN/loongarch/introduction.rst
@@ -46,10 +46,11 @@ LA64中每个寄存器为64位宽。 ``$r0`` 的内容总是固定为0,而其它
 ``$r23``-``$r31`` ``$s0``-``$s8`` 静态寄存器          是
 ================= =============== =================== ==========

-注意:``$r21``寄存器在ELF psABI中保留未使用,但是在Linux内核用于保存每CPU
-变量基地址。该寄存器没有ABI命名,不过在内核中称为``$u0``。在一些遗留代码
-中有时可能见到``$v0``和``$v1``,它们是``$a0``和``$a1``的别名,属于已经废弃
-的用法。
+.. note::
+   注意: ``$r21`` 寄存器在ELF psABI中保留未使用,但是在Linux内核用于保
+   存每CPU变量基地址。该寄存器没有ABI命名,不过在内核中称为 ``$u0`` 。在
+   一些遗留代码中有时可能见到 ``$v0`` 和 ``$v1`` ,它们是 ``$a0`` 和
+   ``$a1`` 的别名,属于已经废弃的用法。

 浮点寄存器
 ----------
@@ -68,8 +69,9 @@ LA64中每个寄存器为64位宽。 ``$r0`` 的内容总是固定为0,而其它
 ``$f24``-``$f31`` ``$fs0``-``$fs7``  静态寄存器          是
 ================= ================== =================== ==========

-注意:在一些遗留代码中有时可能见到 ``$v0`` 和 ``$v1`` ,它们是 ``$a0``
-和 ``$a1`` 的别名,属于已经废弃的用法。
+.. note::
+   注意:在一些遗留代码中有时可能见到 ``$v0`` 和 ``$v1`` ,它们是
+   ``$a0`` 和 ``$a1`` 的别名,属于已经废弃的用法。

 向量寄存器

diff --git a/Documentation/translations/zh_CN/loongarch/irq-chip-model.rst b/Documentation/translations/zh_CN/loongarch/irq-chip-model.rst
index 2a4c3ad38be4..fb5d23b49ed5 100644
--- a/Documentation/translations/zh_CN/loongarch/irq-chip-model.rst
+++ b/Documentation/translations/zh_CN/loongarch/irq-chip-model.rst
@@ -147,9 +147,11 @@ PCH-LPC::

   https://github.com/loongson/LoongArch-Documentation/releases/latest/download/Loongson-7A1000-usermanual-2.00-EN.pdf (英文版)

-注:CPUINTC即《龙芯架构参考手册卷一》第7.4节所描述的CSR.ECFG/CSR.ESTAT寄存器及其中断
-控制逻辑;LIOINTC即《龙芯3A5000处理器使用手册》第11.1节所描述的“传统I/O中断”;EIOINTC
-即《龙芯3A5000处理器使用手册》第11.2节所描述的“扩展I/O中断”;HTVECINTC即《龙芯3A5000
-处理器使用手册》第14.3节所描述的“HyperTransport中断”;PCH-PIC/PCH-MSI即《龙芯7A1000桥
-片用户手册》第5章所描述的“中断控制器”;PCH-LPC即《龙芯7A1000桥片用户手册》第24.3节所
-描述的“LPC中断”。
+.. note::
+   - CPUINTC:即《龙芯架构参考手册卷一》第7.4节所描述的CSR.ECFG/CSR.ESTAT寄存器及其
+     中断控制逻辑;
+   - LIOINTC:即《龙芯3A5000处理器使用手册》第11.1节所描述的“传统I/O中断”;
+   - EIOINTC:即《龙芯3A5000处理器使用手册》第11.2节所描述的“扩展I/O中断”;
+   - HTVECINTC:即《龙芯3A5000处理器使用手册》第14.3节所描述的“HyperTransport中断”;
+   - PCH-PIC/PCH-MSI:即《龙芯7A1000桥片用户手册》第5章所描述的“中断控制器”;
+   - PCH-LPC:即《龙芯7A1000桥片用户手册》第24.3节所描述的“LPC中断”。

diff --git a/Documentation/usb/usbmon.rst b/Documentation/usb/usbmon.rst
index b0bd51080799..6d5ec1e62d09 100644
--- a/Documentation/usb/usbmon.rst
+++ b/Documentation/usb/usbmon.rst
@@ -42,7 +42,7 @@ if usbmon is built into the kernel::
 	# modprobe usbmon
 	#

-Verify that bus sockets are present:
+Verify that bus sockets are present::

 	# ls /sys/kernel/debug/usb/usbmon
 	0s 0u 1s 1t 1u 2s 2t 2u 3s 3t 3u 4s 4t 4u

diff --git a/Documentation/vm/hwpoison.rst b/Documentation/vm/hwpoison.rst
index c742de1769d1..b9d5253c1305 100644
--- a/Documentation/vm/hwpoison.rst
+++ b/Documentation/vm/hwpoison.rst
@@ -120,7 +120,8 @@ Testing

 unpoison-pfn
   Software-unpoison page at PFN echoed into this file. This way
   a page can be reused again.  This only works for Linux
-  injected failures, not for real memory failures.
+  injected failures, not for real memory failures. Once any hardware
+  memory failure happens, this feature is disabled.
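(Editorial illustration, not part of the patch: a user-space sketch of the
unpoison step described above. The PFN value is arbitrary, and the path assumes
debugfs is mounted at /sys/kernel/debug with hwpoison injection support built
in.)

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	/* Software-unpoison a PFN that was previously poisoned via the
	 * hwpoison debugfs injector; has no effect on real hardware errors. */
	int main(void)
	{
		int fd = open("/sys/kernel/debug/hwpoison/unpoison-pfn", O_WRONLY);
		int ok;

		if (fd < 0)
			return 1;
		ok = dprintf(fd, "0x1234\n") > 0;	/* PFN to unpoison */
		close(fd);
		return ok ? 0 : 1;
	}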
Note these injection interfaces are not stable and might change between kernel versions diff --git a/MAINTAINERS b/MAINTAINERS index a6d3bd9d2a8d..282b34eae750 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -427,6 +427,7 @@ ACPI VIOT DRIVER M: Jean-Philippe Brucker <jean-philippe@linaro.org> L: linux-acpi@vger.kernel.org L: iommu@lists.linux-foundation.org +L: iommu@lists.linux.dev S: Maintained F: drivers/acpi/viot.c F: include/linux/acpi_viot.h @@ -960,6 +961,7 @@ AMD IOMMU (AMD-VI) M: Joerg Roedel <joro@8bytes.org> R: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com> L: iommu@lists.linux-foundation.org +L: iommu@lists.linux.dev S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git F: drivers/iommu/amd/ @@ -1507,7 +1509,7 @@ F: drivers/clocksource/arm_arch_timer.c ARM HDLCD DRM DRIVER M: Liviu Dudau <liviu.dudau@arm.com> S: Supported -F: Documentation/devicetree/bindings/display/arm,hdlcd.txt +F: Documentation/devicetree/bindings/display/arm,hdlcd.yaml F: drivers/gpu/drm/arm/hdlcd_* ARM INTEGRATOR, VERSATILE AND REALVIEW SUPPORT @@ -1542,7 +1544,7 @@ M: Mihail Atanassov <mihail.atanassov@arm.com> L: Mali DP Maintainers <malidp@foss.arm.com> S: Supported T: git git://anongit.freedesktop.org/drm/drm-misc -F: Documentation/devicetree/bindings/display/arm,komeda.txt +F: Documentation/devicetree/bindings/display/arm,komeda.yaml F: Documentation/gpu/komeda-kms.rst F: drivers/gpu/drm/arm/display/include/ F: drivers/gpu/drm/arm/display/komeda/ @@ -1564,7 +1566,7 @@ M: Brian Starkey <brian.starkey@arm.com> L: Mali DP Maintainers <malidp@foss.arm.com> S: Supported T: git git://anongit.freedesktop.org/drm/drm-misc -F: Documentation/devicetree/bindings/display/arm,malidp.txt +F: Documentation/devicetree/bindings/display/arm,malidp.yaml F: Documentation/gpu/afbc.rst F: drivers/gpu/drm/arm/ @@ -2009,7 +2011,7 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained T: git git://github.com/ulli-kroll/linux.git F: Documentation/devicetree/bindings/arm/gemini.yaml -F: Documentation/devicetree/bindings/net/cortina,gemini-ethernet.txt +F: Documentation/devicetree/bindings/net/cortina,gemini-ethernet.yaml F: Documentation/devicetree/bindings/pinctrl/cortina,gemini-pinctrl.txt F: Documentation/devicetree/bindings/rtc/faraday,ftrtc010.yaml F: arch/arm/boot/dts/gemini* @@ -2467,6 +2469,7 @@ ARM/NXP S32G ARCHITECTURE M: Chester Lin <clin@suse.com> R: Andreas Färber <afaerber@suse.de> R: Matthias Brugger <mbrugger@suse.com> +R: NXP S32 Linux Team <s32@nxp.com> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm64/boot/dts/freescale/s32g*.dts* @@ -3662,7 +3665,7 @@ BPF JIT for ARM M: Shubham Bansal <illusionist.neo@gmail.com> L: netdev@vger.kernel.org L: bpf@vger.kernel.org -S: Maintained +S: Odd Fixes F: arch/arm/net/ BPF JIT for ARM64 @@ -3686,14 +3689,15 @@ BPF JIT for NFP NICs M: Jakub Kicinski <kuba@kernel.org> L: netdev@vger.kernel.org L: bpf@vger.kernel.org -S: Supported +S: Odd Fixes F: drivers/net/ethernet/netronome/nfp/bpf/ BPF JIT for POWERPC (32-BIT AND 64-BIT) M: Naveen N. 
Rao <naveen.n.rao@linux.ibm.com> +M: Michael Ellerman <mpe@ellerman.id.au> L: netdev@vger.kernel.org L: bpf@vger.kernel.org -S: Maintained +S: Supported F: arch/powerpc/net/ BPF JIT for RISC-V (32-bit) @@ -3719,7 +3723,7 @@ M: Heiko Carstens <hca@linux.ibm.com> M: Vasily Gorbik <gor@linux.ibm.com> L: netdev@vger.kernel.org L: bpf@vger.kernel.org -S: Maintained +S: Supported F: arch/s390/net/ X: arch/s390/net/pnet.c @@ -3727,14 +3731,14 @@ BPF JIT for SPARC (32-BIT AND 64-BIT) M: David S. Miller <davem@davemloft.net> L: netdev@vger.kernel.org L: bpf@vger.kernel.org -S: Maintained +S: Odd Fixes F: arch/sparc/net/ BPF JIT for X86 32-BIT M: Wang YanQing <udknight@gmail.com> L: netdev@vger.kernel.org L: bpf@vger.kernel.org -S: Maintained +S: Odd Fixes F: arch/x86/net/bpf_jit_comp32.c BPF JIT for X86 64-BIT @@ -3757,6 +3761,26 @@ F: include/linux/bpf_lsm.h F: kernel/bpf/bpf_lsm.c F: security/bpf/ +BPF L7 FRAMEWORK +M: John Fastabend <john.fastabend@gmail.com> +M: Jakub Sitnicki <jakub@cloudflare.com> +L: netdev@vger.kernel.org +L: bpf@vger.kernel.org +S: Maintained +F: include/linux/skmsg.h +F: net/core/skmsg.c +F: net/core/sock_map.c +F: net/ipv4/tcp_bpf.c +F: net/ipv4/udp_bpf.c +F: net/unix/unix_bpf.c + +BPFTOOL +M: Quentin Monnet <quentin@isovalent.com> +L: bpf@vger.kernel.org +S: Maintained +F: kernel/bpf/disasm.* +F: tools/bpf/bpftool/ + BROADCOM B44 10/100 ETHERNET DRIVER M: Michael Chan <michael.chan@broadcom.com> L: netdev@vger.kernel.org @@ -3789,12 +3813,12 @@ N: bcmbca N: bcm[9]?47622 BROADCOM BCM2711/BCM2835 ARM ARCHITECTURE -M: Nicolas Saenz Julienne <nsaenz@kernel.org> +M: Florian Fainelli <f.fainelli@gmail.com> R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com> L: linux-rpi-kernel@lists.infradead.org (moderated for non-subscribers) L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained -T: git git://git.kernel.org/pub/scm/linux/kernel/git/nsaenz/linux-rpi.git +T: git git://github.com/broadcom/stblinux.git F: Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml F: drivers/pci/controller/pcie-brcmstb.c F: drivers/staging/vc04_services @@ -5955,6 +5979,7 @@ M: Christoph Hellwig <hch@lst.de> M: Marek Szyprowski <m.szyprowski@samsung.com> R: Robin Murphy <robin.murphy@arm.com> L: iommu@lists.linux-foundation.org +L: iommu@lists.linux.dev S: Supported W: http://git.infradead.org/users/hch/dma-mapping.git T: git git://git.infradead.org/users/hch/dma-mapping.git @@ -5967,6 +5992,7 @@ F: kernel/dma/ DMA MAPPING BENCHMARK M: Xiang Chen <chenxiang66@hisilicon.com> L: iommu@lists.linux-foundation.org +L: iommu@lists.linux.dev F: kernel/dma/map_benchmark.c F: tools/testing/selftests/dma/ @@ -6078,7 +6104,7 @@ M: Sakari Ailus <sakari.ailus@linux.intel.com> L: linux-media@vger.kernel.org S: Maintained T: git git://linuxtv.org/media_tree.git -F: Documentation/devicetree/bindings/media/i2c/dongwoon,dw9807-vcm.txt +F: Documentation/devicetree/bindings/media/i2c/dongwoon,dw9807-vcm.yaml F: drivers/media/i2c/dw9807-vcm.c DOUBLETALK DRIVER @@ -7551,6 +7577,7 @@ F: drivers/gpu/drm/exynos/exynos_dp* EXYNOS SYSMMU (IOMMU) driver M: Marek Szyprowski <m.szyprowski@samsung.com> L: iommu@lists.linux-foundation.org +L: iommu@lists.linux.dev S: Maintained F: drivers/iommu/exynos-iommu.c @@ -7647,6 +7674,7 @@ F: include/uapi/scsi/fc/ FILE LOCKING (flock() and fcntl()/lockf()) M: Jeff Layton <jlayton@kernel.org> +M: Chuck Lever <chuck.lever@oracle.com> L: linux-fsdevel@vger.kernel.org S: Maintained F: fs/fcntl.c @@ -8471,6 +8499,7 @@ F: 
Documentation/devicetree/bindings/gpio/ F: Documentation/driver-api/gpio/ F: drivers/gpio/ F: include/asm-generic/gpio.h +F: include/dt-bindings/gpio/ F: include/linux/gpio.h F: include/linux/gpio/ F: include/linux/of_gpio.h @@ -9081,7 +9110,7 @@ HTE SUBSYSTEM M: Dipen Patel <dipenp@nvidia.com> S: Maintained F: Documentation/devicetree/bindings/timestamp/ -F: Documentation/hte/ +F: Documentation/driver-api/hte/ F: drivers/hte/ F: include/linux/hte.h @@ -9124,6 +9153,7 @@ F: drivers/media/platform/st/sti/hva HWPOISON MEMORY FAILURE HANDLING M: Naoya Horiguchi <naoya.horiguchi@nec.com> +R: Miaohe Lin <linmiaohe@huawei.com> L: linux-mm@kvack.org S: Maintained F: mm/hwpoison-inject.c @@ -9268,6 +9298,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/wsa/linux.git F: Documentation/devicetree/bindings/i2c/i2c.txt F: Documentation/i2c/ F: drivers/i2c/* +F: include/dt-bindings/i2c/i2c.h F: include/linux/i2c-dev.h F: include/linux/i2c-smbus.h F: include/linux/i2c.h @@ -9283,6 +9314,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/wsa/linux.git F: Documentation/devicetree/bindings/i2c/ F: drivers/i2c/algos/ F: drivers/i2c/busses/ +F: include/dt-bindings/i2c/ I2C-TAOS-EVM DRIVER M: Jean Delvare <jdelvare@suse.com> @@ -9967,6 +9999,7 @@ INTEL IOMMU (VT-d) M: David Woodhouse <dwmw2@infradead.org> M: Lu Baolu <baolu.lu@linux.intel.com> L: iommu@lists.linux-foundation.org +L: iommu@lists.linux.dev S: Supported T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git F: drivers/iommu/intel/ @@ -10346,6 +10379,7 @@ IOMMU DRIVERS M: Joerg Roedel <joro@8bytes.org> M: Will Deacon <will@kernel.org> L: iommu@lists.linux-foundation.org +L: iommu@lists.linux.dev S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git F: Documentation/devicetree/bindings/iommu/ @@ -10739,6 +10773,7 @@ W: http://kernelnewbies.org/KernelJanitors KERNEL NFSD, SUNRPC, AND LOCKD SERVERS M: Chuck Lever <chuck.lever@oracle.com> +M: Jeff Layton <jlayton@kernel.org> L: linux-nfs@vger.kernel.org S: Supported W: http://nfs.sourceforge.net/ @@ -10821,6 +10856,7 @@ M: Marc Zyngier <maz@kernel.org> R: James Morse <james.morse@arm.com> R: Alexandru Elisei <alexandru.elisei@arm.com> R: Suzuki K Poulose <suzuki.poulose@arm.com> +R: Oliver Upton <oliver.upton@linux.dev> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: kvmarm@lists.cs.columbia.edu (moderated for non-subscribers) S: Maintained @@ -10863,7 +10899,6 @@ F: arch/riscv/include/asm/kvm* F: arch/riscv/include/uapi/asm/kvm* F: arch/riscv/kvm/ F: tools/testing/selftests/kvm/*/riscv/ -F: tools/testing/selftests/kvm/riscv/ KERNEL VIRTUAL MACHINE for s390 (KVM/s390) M: Christian Borntraeger <borntraeger@linux.ibm.com> @@ -10888,28 +10923,51 @@ F: tools/testing/selftests/kvm/*/s390x/ F: tools/testing/selftests/kvm/s390x/ KERNEL VIRTUAL MACHINE FOR X86 (KVM/x86) +M: Sean Christopherson <seanjc@google.com> M: Paolo Bonzini <pbonzini@redhat.com> -R: Sean Christopherson <seanjc@google.com> -R: Vitaly Kuznetsov <vkuznets@redhat.com> -R: Wanpeng Li <wanpengli@tencent.com> -R: Jim Mattson <jmattson@google.com> -R: Joerg Roedel <joro@8bytes.org> L: kvm@vger.kernel.org S: Supported -W: http://www.linux-kvm.org T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git F: arch/x86/include/asm/kvm* -F: arch/x86/include/asm/pvclock-abi.h F: arch/x86/include/asm/svm.h F: arch/x86/include/asm/vmx*.h F: arch/x86/include/uapi/asm/kvm* F: arch/x86/include/uapi/asm/svm.h F: arch/x86/include/uapi/asm/vmx.h -F: arch/x86/kernel/kvm.c -F: 
arch/x86/kernel/kvmclock.c F: arch/x86/kvm/ F: arch/x86/kvm/*/ +KVM PARAVIRT (KVM/paravirt) +M: Paolo Bonzini <pbonzini@redhat.com> +R: Wanpeng Li <wanpengli@tencent.com> +R: Vitaly Kuznetsov <vkuznets@redhat.com> +L: kvm@vger.kernel.org +S: Supported +T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git +F: arch/x86/kernel/kvm.c +F: arch/x86/kernel/kvmclock.c +F: arch/x86/include/asm/pvclock-abi.h +F: include/linux/kvm_para.h +F: include/uapi/linux/kvm_para.h +F: include/uapi/asm-generic/kvm_para.h +F: include/asm-generic/kvm_para.h +F: arch/um/include/asm/kvm_para.h +F: arch/x86/include/asm/kvm_para.h +F: arch/x86/include/uapi/asm/kvm_para.h + +KVM X86 HYPER-V (KVM/hyper-v) +M: Vitaly Kuznetsov <vkuznets@redhat.com> +M: Sean Christopherson <seanjc@google.com> +M: Paolo Bonzini <pbonzini@redhat.com> +L: kvm@vger.kernel.org +S: Supported +T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git +F: arch/x86/kvm/hyperv.* +F: arch/x86/kvm/kvm_onhyperv.* +F: arch/x86/kvm/svm/hyperv.* +F: arch/x86/kvm/svm/svm_onhyperv.* +F: arch/x86/kvm/vmx/evmcs.* + KERNFS M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> M: Tejun Heo <tj@kernel.org> @@ -11088,20 +11146,6 @@ S: Maintained F: include/net/l3mdev.h F: net/l3mdev -L7 BPF FRAMEWORK -M: John Fastabend <john.fastabend@gmail.com> -M: Daniel Borkmann <daniel@iogearbox.net> -M: Jakub Sitnicki <jakub@cloudflare.com> -L: netdev@vger.kernel.org -L: bpf@vger.kernel.org -S: Maintained -F: include/linux/skmsg.h -F: net/core/skmsg.c -F: net/core/sock_map.c -F: net/ipv4/tcp_bpf.c -F: net/ipv4/udp_bpf.c -F: net/unix/unix_bpf.c - LANDLOCK SECURITY MODULE M: Mickaël Salaün <mic@digikod.net> L: linux-security-module@vger.kernel.org @@ -11257,6 +11301,7 @@ M: Damien Le Moal <damien.lemoal@opensource.wdc.com> L: linux-ide@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/dlemoal/libata.git +F: Documentation/ABI/testing/sysfs-ata F: Documentation/devicetree/bindings/ata/ F: drivers/ata/ F: include/linux/ata.h @@ -11580,6 +11625,7 @@ F: drivers/gpu/drm/bridge/lontium-lt8912b.c LOONGARCH M: Huacai Chen <chenhuacai@kernel.org> R: WANG Xuerui <kernel@xen0n.name> +L: loongarch@lists.linux.dev S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson.git F: arch/loongarch/ @@ -12493,6 +12539,7 @@ F: drivers/i2c/busses/i2c-mt65xx.c MEDIATEK IOMMU DRIVER M: Yong Wu <yong.wu@mediatek.com> L: iommu@lists.linux-foundation.org +L: iommu@lists.linux.dev L: linux-mediatek@lists.infradead.org (moderated for non-subscribers) S: Supported F: Documentation/devicetree/bindings/iommu/mediatek* @@ -12696,7 +12743,6 @@ L: netdev@vger.kernel.org S: Supported W: http://www.mellanox.com Q: https://patchwork.kernel.org/project/netdevbpf/list/ -F: drivers/net/ethernet/mellanox/mlx5/core/accel/* F: drivers/net/ethernet/mellanox/mlx5/core/en_accel/* F: drivers/net/ethernet/mellanox/mlx5/core/fpga/* F: include/linux/mlx5/mlx5_ifc_fpga.h @@ -12836,9 +12882,8 @@ M: Andrew Morton <akpm@linux-foundation.org> L: linux-mm@kvack.org S: Maintained W: http://www.linux-mm.org -T: quilt https://ozlabs.org/~akpm/mmotm/ -T: quilt https://ozlabs.org/~akpm/mmots/ -T: git git://github.com/hnaz/linux-mm.git +T: git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm +T: quilt git://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new F: include/linux/gfp.h F: include/linux/memory_hotplug.h F: include/linux/mm.h @@ -12848,6 +12893,18 @@ F: include/linux/vmalloc.h F: mm/ F: tools/testing/selftests/vm/ +MEMORY HOT(UN)PLUG +M: David Hildenbrand 
<david@redhat.com> +M: Oscar Salvador <osalvador@suse.de> +L: linux-mm@kvack.org +S: Maintained +F: Documentation/admin-guide/mm/memory-hotplug.rst +F: Documentation/core-api/memory-hotplug.rst +F: drivers/base/memory.c +F: include/linux/memory_hotplug.h +F: mm/memory_hotplug.c +F: tools/testing/selftests/memory-hotplug/ + MEMORY TECHNOLOGY DEVICES (MTD) M: Miquel Raynal <miquel.raynal@bootlin.com> M: Richard Weinberger <richard@nod.at> @@ -13792,6 +13849,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git F: Documentation/devicetree/bindings/net/ F: drivers/connector/ F: drivers/net/ +F: include/dt-bindings/net/ F: include/linux/etherdevice.h F: include/linux/fcdevice.h F: include/linux/fddidevice.h @@ -13943,7 +14001,6 @@ F: net/ipv6/tcp*.c NETWORKING [TLS] M: Boris Pismenny <borisp@nvidia.com> M: John Fastabend <john.fastabend@gmail.com> -M: Daniel Borkmann <daniel@iogearbox.net> M: Jakub Kicinski <kuba@kernel.org> L: netdev@vger.kernel.org S: Maintained @@ -14252,7 +14309,7 @@ F: drivers/iio/gyro/fxas21002c_i2c.c F: drivers/iio/gyro/fxas21002c_spi.c NXP i.MX CLOCK DRIVERS -M: Abel Vesa <abel.vesa@nxp.com> +M: Abel Vesa <abelvesa@kernel.org> L: linux-clk@vger.kernel.org L: linux-imx@nxp.com S: Maintained @@ -14860,6 +14917,7 @@ F: include/dt-bindings/ OPENCOMPUTE PTP CLOCK DRIVER M: Jonathan Lemon <jonathan.lemon@gmail.com> +M: Vadim Fedorenko <vadfed@fb.com> L: netdev@vger.kernel.org S: Maintained F: drivers/ptp/ptp_ocp.c @@ -15824,6 +15882,14 @@ S: Maintained F: Documentation/devicetree/bindings/iio/chemical/plantower,pms7003.yaml F: drivers/iio/chemical/pms7003.c +PLATFORM FEATURE INFRASTRUCTURE +M: Juergen Gross <jgross@suse.com> +S: Maintained +F: arch/*/include/asm/platform-feature.h +F: include/asm-generic/platform-feature.h +F: include/linux/platform-feature.h +F: kernel/platform-feature.c + PLDMFW LIBRARY M: Jacob Keller <jacob.e.keller@intel.com> S: Maintained @@ -16471,7 +16537,7 @@ F: Documentation/devicetree/bindings/opp/opp-v2-kryo-cpu.yaml F: drivers/cpufreq/qcom-cpufreq-nvmem.c QUALCOMM CRYPTO DRIVERS -M: Thara Gopinath <thara.gopinath@linaro.org> +M: Thara Gopinath <thara.gopinath@gmail.com> L: linux-crypto@vger.kernel.org L: linux-arm-msm@vger.kernel.org S: Maintained @@ -16526,6 +16592,7 @@ F: drivers/i2c/busses/i2c-qcom-cci.c QUALCOMM IOMMU M: Rob Clark <robdclark@gmail.com> L: iommu@lists.linux-foundation.org +L: iommu@lists.linux.dev L: linux-arm-msm@vger.kernel.org S: Maintained F: drivers/iommu/arm/arm-smmu/qcom_iommu.c @@ -16581,7 +16648,7 @@ F: include/linux/if_rmnet.h QUALCOMM TSENS THERMAL DRIVER M: Amit Kucheria <amitk@kernel.org> -M: Thara Gopinath <thara.gopinath@linaro.org> +M: Thara Gopinath <thara.gopinath@gmail.com> L: linux-pm@vger.kernel.org L: linux-arm-msm@vger.kernel.org S: Maintained @@ -18760,7 +18827,7 @@ F: drivers/pinctrl/spear/ SPI NOR SUBSYSTEM M: Tudor Ambarus <tudor.ambarus@microchip.com> -M: Pratyush Yadav <p.yadav@ti.com> +M: Pratyush Yadav <pratyush@kernel.org> R: Michael Walle <michael@walle.cc> L: linux-mtd@lists.infradead.org S: Maintained @@ -19151,6 +19218,7 @@ F: arch/x86/boot/video* SWIOTLB SUBSYSTEM M: Christoph Hellwig <hch@infradead.org> L: iommu@lists.linux-foundation.org +L: iommu@lists.linux.dev S: Supported W: http://git.infradead.org/users/hch/dma-mapping.git T: git git://git.infradead.org/users/hch/dma-mapping.git @@ -19220,7 +19288,7 @@ F: arch/arc/plat-axs10x SYNOPSYS AXS10x RESET CONTROLLER DRIVER M: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com> S: Supported -F: 
Documentation/devicetree/bindings/reset/snps,axs10x-reset.txt +F: Documentation/devicetree/bindings/reset/snps,axs10x-reset.yaml F: drivers/reset/reset-axs10x.c SYNOPSYS CREG GPIO DRIVER @@ -19288,7 +19356,7 @@ R: Andy Shevchenko <andriy.shevchenko@linux.intel.com> R: Mika Westerberg <mika.westerberg@linux.intel.com> R: Jan Dabros <jsd@semihalf.com> L: linux-i2c@vger.kernel.org -S: Maintained +S: Supported F: drivers/i2c/busses/i2c-designware-* SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER @@ -20695,6 +20763,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb.git F: Documentation/devicetree/bindings/usb/ F: Documentation/usb/ F: drivers/usb/ +F: include/dt-bindings/usb/ F: include/linux/usb.h F: include/linux/usb/ @@ -21825,6 +21894,7 @@ M: Juergen Gross <jgross@suse.com> M: Stefano Stabellini <sstabellini@kernel.org> L: xen-devel@lists.xenproject.org (moderated for non-subscribers) L: iommu@lists.linux-foundation.org +L: iommu@lists.linux.dev S: Supported F: arch/x86/xen/*swiotlb* F: drivers/xen/*swiotlb* @@ -2,7 +2,7 @@ VERSION = 5 PATCHLEVEL = 19 SUBLEVEL = 0 -EXTRAVERSION = -rc1 +EXTRAVERSION = -rc4 NAME = Superb Owl # *DOCUMENTATION* @@ -788,6 +788,7 @@ stackp-flags-$(CONFIG_STACKPROTECTOR_STRONG) := -fstack-protector-strong KBUILD_CFLAGS += $(stackp-flags-y) KBUILD_CFLAGS-$(CONFIG_WERROR) += -Werror +KBUILD_CFLAGS-$(CONFIG_CC_NO_ARRAY_BOUNDS) += -Wno-array-bounds KBUILD_CFLAGS += $(KBUILD_CFLAGS-y) $(CONFIG_CC_IMPLICIT_FALLTHROUGH) ifdef CONFIG_CC_IS_CLANG @@ -805,6 +806,9 @@ endif KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable) KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable) +# These result in bogus false positives +KBUILD_CFLAGS += $(call cc-disable-warning, dangling-pointer) + ifdef CONFIG_FRAME_POINTER KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls else @@ -1137,7 +1141,7 @@ KBUILD_MODULES := 1 autoksyms_recursive: descend modules.order $(Q)$(CONFIG_SHELL) $(srctree)/scripts/adjust_autoksyms.sh \ - "$(MAKE) -f $(srctree)/Makefile vmlinux" + "$(MAKE) -f $(srctree)/Makefile autoksyms_recursive" endif autoksyms_h := $(if $(CONFIG_TRIM_UNUSED_KSYMS), include/generated/autoksyms.h) diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile index 184899808ee7..5112f493f494 100644 --- a/arch/arm/boot/dts/Makefile +++ b/arch/arm/boot/dts/Makefile @@ -1586,7 +1586,6 @@ dtb-$(CONFIG_ARCH_ASPEED) += \ aspeed-bmc-lenovo-hr630.dtb \ aspeed-bmc-lenovo-hr855xg2.dtb \ aspeed-bmc-microsoft-olympus.dtb \ - aspeed-bmc-nuvia-dc-scm.dtb \ aspeed-bmc-opp-lanyang.dtb \ aspeed-bmc-opp-mihawk.dtb \ aspeed-bmc-opp-mowgli.dtb \ @@ -1599,6 +1598,7 @@ dtb-$(CONFIG_ARCH_ASPEED) += \ aspeed-bmc-opp-witherspoon.dtb \ aspeed-bmc-opp-zaius.dtb \ aspeed-bmc-portwell-neptune.dtb \ + aspeed-bmc-qcom-dc-scm-v1.dtb \ aspeed-bmc-quanta-q71l.dtb \ aspeed-bmc-quanta-s6q.dtb \ aspeed-bmc-supermicro-x11spi.dtb \ diff --git a/arch/arm/boot/dts/aspeed-bmc-nuvia-dc-scm.dts b/arch/arm/boot/dts/aspeed-bmc-qcom-dc-scm-v1.dts index f4a97cfb0f23..259ef3f54c5c 100644 --- a/arch/arm/boot/dts/aspeed-bmc-nuvia-dc-scm.dts +++ b/arch/arm/boot/dts/aspeed-bmc-qcom-dc-scm-v1.dts @@ -6,8 +6,8 @@ #include "aspeed-g6.dtsi" / { - model = "Nuvia DC-SCM BMC"; - compatible = "nuvia,dc-scm-bmc", "aspeed,ast2600"; + model = "Qualcomm DC-SCM V1 BMC"; + compatible = "qcom,dc-scm-v1-bmc", "aspeed,ast2600"; aliases { serial4 = &uart5; diff --git a/arch/arm/boot/dts/at91-sama5d3_ksz9477_evb.dts b/arch/arm/boot/dts/at91-sama5d3_ksz9477_evb.dts index 
443e8b022897..14af1fd6d247 100644 --- a/arch/arm/boot/dts/at91-sama5d3_ksz9477_evb.dts +++ b/arch/arm/boot/dts/at91-sama5d3_ksz9477_evb.dts @@ -120,26 +120,31 @@ port@0 { reg = <0>; label = "lan1"; + phy-mode = "internal"; }; port@1 { reg = <1>; label = "lan2"; + phy-mode = "internal"; }; port@2 { reg = <2>; label = "lan3"; + phy-mode = "internal"; }; port@3 { reg = <3>; label = "lan4"; + phy-mode = "internal"; }; port@4 { reg = <4>; label = "lan5"; + phy-mode = "internal"; }; port@5 { diff --git a/arch/arm/boot/dts/bcm2711-rpi-400.dts b/arch/arm/boot/dts/bcm2711-rpi-400.dts index f4d2fc20397c..c53d9eb0b802 100644 --- a/arch/arm/boot/dts/bcm2711-rpi-400.dts +++ b/arch/arm/boot/dts/bcm2711-rpi-400.dts @@ -28,12 +28,12 @@ &expgpio { gpio-line-names = "BT_ON", "WL_ON", - "", + "PWR_LED_OFF", "GLOBAL_RESET", "VDD_SD_IO_SEL", - "CAM_GPIO", + "GLOBAL_SHUTDOWN", "SD_PWR_ON", - "SD_OC_N"; + "SHUTDOWN_REQUEST"; }; &genet_mdio { diff --git a/arch/arm/boot/dts/imx6qdl-colibri.dtsi b/arch/arm/boot/dts/imx6qdl-colibri.dtsi index c383e0e4110c..7df270cea292 100644 --- a/arch/arm/boot/dts/imx6qdl-colibri.dtsi +++ b/arch/arm/boot/dts/imx6qdl-colibri.dtsi @@ -593,7 +593,7 @@ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_atmel_conn>; reg = <0x4a>; - reset-gpios = <&gpio1 14 GPIO_ACTIVE_HIGH>; /* SODIMM 106 */ + reset-gpios = <&gpio1 14 GPIO_ACTIVE_LOW>; /* SODIMM 106 */ status = "disabled"; }; }; diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi index d27beb47f9a3..652feff33496 100644 --- a/arch/arm/boot/dts/imx6qdl.dtsi +++ b/arch/arm/boot/dts/imx6qdl.dtsi @@ -762,7 +762,7 @@ regulator-name = "vddpu"; regulator-min-microvolt = <725000>; regulator-max-microvolt = <1450000>; - regulator-enable-ramp-delay = <150>; + regulator-enable-ramp-delay = <380>; anatop-reg-offset = <0x140>; anatop-vol-bit-shift = <9>; anatop-vol-bit-width = <5>; diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi index 008e3da460f1..039eed79d2e7 100644 --- a/arch/arm/boot/dts/imx7s.dtsi +++ b/arch/arm/boot/dts/imx7s.dtsi @@ -120,6 +120,7 @@ compatible = "usb-nop-xceiv"; clocks = <&clks IMX7D_USB_HSIC_ROOT_CLK>; clock-names = "main_clk"; + power-domains = <&pgc_hsic_phy>; #phy-cells = <0>; }; @@ -1153,7 +1154,6 @@ compatible = "fsl,imx7d-usb", "fsl,imx27-usb"; reg = <0x30b30000 0x200>; interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>; - power-domains = <&pgc_hsic_phy>; clocks = <&clks IMX7D_USB_CTRL_CLK>; fsl,usbphy = <&usbphynop3>; fsl,usbmisc = <&usbmisc3 0>; diff --git a/arch/arm/boot/dts/stm32mp15-scmi.dtsi b/arch/arm/boot/dts/stm32mp15-scmi.dtsi new file mode 100644 index 000000000000..e90cf3acd0b3 --- /dev/null +++ b/arch/arm/boot/dts/stm32mp15-scmi.dtsi @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* + * Copyright (C) STMicroelectronics 2022 - All Rights Reserved + * Author: Alexandre Torgue <alexandre.torgue@foss.st.com> for STMicroelectronics. 
+ */ + +/ { + firmware { + optee: optee { + compatible = "linaro,optee-tz"; + method = "smc"; + }; + + scmi: scmi { + compatible = "linaro,scmi-optee"; + #address-cells = <1>; + #size-cells = <0>; + linaro,optee-channel-id = <0>; + shmem = <&scmi_shm>; + + scmi_clk: protocol@14 { + reg = <0x14>; + #clock-cells = <1>; + }; + + scmi_reset: protocol@16 { + reg = <0x16>; + #reset-cells = <1>; + }; + }; + }; + + soc { + scmi_sram: sram@2ffff000 { + compatible = "mmio-sram"; + reg = <0x2ffff000 0x1000>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 0x2ffff000 0x1000>; + + scmi_shm: scmi-sram@0 { + compatible = "arm,scmi-shmem"; + reg = <0 0x80>; + }; + }; + }; +}; diff --git a/arch/arm/boot/dts/stm32mp151.dtsi b/arch/arm/boot/dts/stm32mp151.dtsi index 1b2fd3426a81..7fdc324b3cf9 100644 --- a/arch/arm/boot/dts/stm32mp151.dtsi +++ b/arch/arm/boot/dts/stm32mp151.dtsi @@ -115,33 +115,6 @@ status = "disabled"; }; - firmware { - optee: optee { - compatible = "linaro,optee-tz"; - method = "smc"; - status = "disabled"; - }; - - scmi: scmi { - compatible = "linaro,scmi-optee"; - #address-cells = <1>; - #size-cells = <0>; - linaro,optee-channel-id = <0>; - shmem = <&scmi_shm>; - status = "disabled"; - - scmi_clk: protocol@14 { - reg = <0x14>; - #clock-cells = <1>; - }; - - scmi_reset: protocol@16 { - reg = <0x16>; - #reset-cells = <1>; - }; - }; - }; - soc { compatible = "simple-bus"; #address-cells = <1>; @@ -149,20 +122,6 @@ interrupt-parent = <&intc>; ranges; - scmi_sram: sram@2ffff000 { - compatible = "mmio-sram"; - reg = <0x2ffff000 0x1000>; - #address-cells = <1>; - #size-cells = <1>; - ranges = <0 0x2ffff000 0x1000>; - - scmi_shm: scmi-sram@0 { - compatible = "arm,scmi-shmem"; - reg = <0 0x80>; - status = "disabled"; - }; - }; - timers2: timer@40000000 { #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm/boot/dts/stm32mp157a-dk1-scmi.dts b/arch/arm/boot/dts/stm32mp157a-dk1-scmi.dts index e3d3f3f30c7d..36371d6ed660 100644 --- a/arch/arm/boot/dts/stm32mp157a-dk1-scmi.dts +++ b/arch/arm/boot/dts/stm32mp157a-dk1-scmi.dts @@ -7,6 +7,7 @@ /dts-v1/; #include "stm32mp157a-dk1.dts" +#include "stm32mp15-scmi.dtsi" / { model = "STMicroelectronics STM32MP157A-DK1 SCMI Discovery Board"; @@ -54,10 +55,6 @@ resets = <&scmi_reset RST_SCMI_MCU>; }; -&optee { - status = "okay"; -}; - &rcc { compatible = "st,stm32mp1-rcc-secure", "syscon"; clock-names = "hse", "hsi", "csi", "lse", "lsi"; @@ -76,11 +73,3 @@ &rtc { clocks = <&scmi_clk CK_SCMI_RTCAPB>, <&scmi_clk CK_SCMI_RTC>; }; - -&scmi { - status = "okay"; -}; - -&scmi_shm { - status = "okay"; -}; diff --git a/arch/arm/boot/dts/stm32mp157c-dk2-scmi.dts b/arch/arm/boot/dts/stm32mp157c-dk2-scmi.dts index 45dcd299aa9e..03226a596904 100644 --- a/arch/arm/boot/dts/stm32mp157c-dk2-scmi.dts +++ b/arch/arm/boot/dts/stm32mp157c-dk2-scmi.dts @@ -7,6 +7,7 @@ /dts-v1/; #include "stm32mp157c-dk2.dts" +#include "stm32mp15-scmi.dtsi" / { model = "STMicroelectronics STM32MP157C-DK2 SCMI Discovery Board"; @@ -63,10 +64,6 @@ resets = <&scmi_reset RST_SCMI_MCU>; }; -&optee { - status = "okay"; -}; - &rcc { compatible = "st,stm32mp1-rcc-secure", "syscon"; clock-names = "hse", "hsi", "csi", "lse", "lsi"; @@ -85,11 +82,3 @@ &rtc { clocks = <&scmi_clk CK_SCMI_RTCAPB>, <&scmi_clk CK_SCMI_RTC>; }; - -&scmi { - status = "okay"; -}; - -&scmi_shm { - status = "okay"; -}; diff --git a/arch/arm/boot/dts/stm32mp157c-ed1-scmi.dts b/arch/arm/boot/dts/stm32mp157c-ed1-scmi.dts index 458e0ca3cded..c1a79272c068 100644 --- a/arch/arm/boot/dts/stm32mp157c-ed1-scmi.dts +++ 
b/arch/arm/boot/dts/stm32mp157c-ed1-scmi.dts @@ -7,6 +7,7 @@ /dts-v1/; #include "stm32mp157c-ed1.dts" +#include "stm32mp15-scmi.dtsi" / { model = "STMicroelectronics STM32MP157C-ED1 SCMI eval daughter"; @@ -59,10 +60,6 @@ resets = <&scmi_reset RST_SCMI_MCU>; }; -&optee { - status = "okay"; -}; - &rcc { compatible = "st,stm32mp1-rcc-secure", "syscon"; clock-names = "hse", "hsi", "csi", "lse", "lsi"; @@ -81,11 +78,3 @@ &rtc { clocks = <&scmi_clk CK_SCMI_RTCAPB>, <&scmi_clk CK_SCMI_RTC>; }; - -&scmi { - status = "okay"; -}; - -&scmi_shm { - status = "okay"; -}; diff --git a/arch/arm/boot/dts/stm32mp157c-ev1-scmi.dts b/arch/arm/boot/dts/stm32mp157c-ev1-scmi.dts index df9c113edb4b..7842384ddbe4 100644 --- a/arch/arm/boot/dts/stm32mp157c-ev1-scmi.dts +++ b/arch/arm/boot/dts/stm32mp157c-ev1-scmi.dts @@ -7,6 +7,7 @@ /dts-v1/; #include "stm32mp157c-ev1.dts" +#include "stm32mp15-scmi.dtsi" / { model = "STMicroelectronics STM32MP157C-EV1 SCMI eval daughter on eval mother"; @@ -68,10 +69,6 @@ resets = <&scmi_reset RST_SCMI_MCU>; }; -&optee { - status = "okay"; -}; - &rcc { compatible = "st,stm32mp1-rcc-secure", "syscon"; clock-names = "hse", "hsi", "csi", "lse", "lsi"; @@ -90,11 +87,3 @@ &rtc { clocks = <&scmi_clk CK_SCMI_RTCAPB>, <&scmi_clk CK_SCMI_RTC>; }; - -&scmi { - status = "okay"; -}; - -&scmi_shm { - status = "okay"; -}; diff --git a/arch/arm/include/asm/xen/xen-ops.h b/arch/arm/include/asm/xen/xen-ops.h new file mode 100644 index 000000000000..7ebb7eb0bd93 --- /dev/null +++ b/arch/arm/include/asm/xen/xen-ops.h @@ -0,0 +1,2 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <xen/arm/xen-ops.h> diff --git a/arch/arm/mach-axxia/platsmp.c b/arch/arm/mach-axxia/platsmp.c index 512943eae30a..2e203626eda5 100644 --- a/arch/arm/mach-axxia/platsmp.c +++ b/arch/arm/mach-axxia/platsmp.c @@ -39,6 +39,7 @@ static int axxia_boot_secondary(unsigned int cpu, struct task_struct *idle) return -ENOENT; syscon = of_iomap(syscon_np, 0); + of_node_put(syscon_np); if (!syscon) return -ENOMEM; diff --git a/arch/arm/mach-cns3xxx/core.c b/arch/arm/mach-cns3xxx/core.c index e4f4b20b83a2..3fc4ec830e3a 100644 --- a/arch/arm/mach-cns3xxx/core.c +++ b/arch/arm/mach-cns3xxx/core.c @@ -372,6 +372,7 @@ static void __init cns3xxx_init(void) /* De-Asscer SATA Reset */ cns3xxx_pwr_soft_rst(CNS3XXX_PWR_SOFTWARE_RST(SATA)); } + of_node_put(dn); dn = of_find_compatible_node(NULL, NULL, "cavium,cns3420-sdhci"); if (of_device_is_available(dn)) { @@ -385,6 +386,7 @@ static void __init cns3xxx_init(void) cns3xxx_pwr_clk_en(CNS3XXX_PWR_CLK_EN(SDIO)); cns3xxx_pwr_soft_rst(CNS3XXX_PWR_SOFTWARE_RST(SDIO)); } + of_node_put(dn); pm_power_off = cns3xxx_power_off; diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c index 8b48326be9fd..51a247ca4da8 100644 --- a/arch/arm/mach-exynos/exynos.c +++ b/arch/arm/mach-exynos/exynos.c @@ -149,6 +149,7 @@ static void exynos_map_pmu(void) np = of_find_matching_node(NULL, exynos_dt_pmu_match); if (np) pmu_base_addr = of_iomap(np, 0); + of_node_put(np); } static void __init exynos_init_irq(void) diff --git a/arch/arm/mach-spear/time.c b/arch/arm/mach-spear/time.c index d1fdb6066f7b..c7c17c0f936c 100644 --- a/arch/arm/mach-spear/time.c +++ b/arch/arm/mach-spear/time.c @@ -218,13 +218,13 @@ void __init spear_setup_of_timer(void) irq = irq_of_parse_and_map(np, 0); if (!irq) { pr_err("%s: No irq passed for timer via DT\n", __func__); - return; + goto err_put_np; } gpt_base = of_iomap(np, 0); if (!gpt_base) { pr_err("%s: of iomap failed\n", __func__); - return; + goto err_put_np; } 
gpt_clk = clk_get_sys("gpt0", NULL); @@ -239,6 +239,8 @@ void __init spear_setup_of_timer(void) goto err_prepare_enable_clk; } + of_node_put(np); + spear_clockevent_init(irq); spear_clocksource_init(); @@ -248,4 +250,6 @@ err_prepare_enable_clk: clk_put(gpt_clk); err_iomap: iounmap(gpt_base); +err_put_np: + of_node_put(np); } diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 82ffac621854..059cce018570 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -33,7 +33,7 @@ #include <asm/dma-iommu.h> #include <asm/mach/map.h> #include <asm/system_info.h> -#include <xen/swiotlb-xen.h> +#include <asm/xen/xen-ops.h> #include "dma.h" #include "mm.h" @@ -2287,10 +2287,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, set_dma_ops(dev, dma_ops); -#ifdef CONFIG_XEN - if (xen_initial_domain()) - dev->dma_ops = &xen_swiotlb_dma_ops; -#endif + xen_setup_dma_ops(dev); dev->archdata.dma_ops_setup = true; } diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c index 07eb69f9e7df..1f9c3ba32833 100644 --- a/arch/arm/xen/enlighten.c +++ b/arch/arm/xen/enlighten.c @@ -443,6 +443,8 @@ static int __init xen_guest_init(void) if (!xen_domain()) return 0; + xen_set_restricted_virtio_memory_access(); + if (!acpi_disabled) xen_acpi_guest_init(); else diff --git a/arch/arm64/boot/dts/exynos/exynos7885.dtsi b/arch/arm64/boot/dts/exynos/exynos7885.dtsi index 3170661f5b67..9c233c56558c 100644 --- a/arch/arm64/boot/dts/exynos/exynos7885.dtsi +++ b/arch/arm64/boot/dts/exynos/exynos7885.dtsi @@ -280,8 +280,8 @@ interrupts = <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>; pinctrl-names = "default"; pinctrl-0 = <&uart0_bus>; - clocks = <&cmu_peri CLK_GOUT_UART0_EXT_UCLK>, - <&cmu_peri CLK_GOUT_UART0_PCLK>; + clocks = <&cmu_peri CLK_GOUT_UART0_PCLK>, + <&cmu_peri CLK_GOUT_UART0_EXT_UCLK>; clock-names = "uart", "clk_uart_baud0"; samsung,uart-fifosize = <64>; status = "disabled"; @@ -293,8 +293,8 @@ interrupts = <GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH>; pinctrl-names = "default"; pinctrl-0 = <&uart1_bus>; - clocks = <&cmu_peri CLK_GOUT_UART1_EXT_UCLK>, - <&cmu_peri CLK_GOUT_UART1_PCLK>; + clocks = <&cmu_peri CLK_GOUT_UART1_PCLK>, + <&cmu_peri CLK_GOUT_UART1_EXT_UCLK>; clock-names = "uart", "clk_uart_baud0"; samsung,uart-fifosize = <256>; status = "disabled"; @@ -306,8 +306,8 @@ interrupts = <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>; pinctrl-names = "default"; pinctrl-0 = <&uart2_bus>; - clocks = <&cmu_peri CLK_GOUT_UART2_EXT_UCLK>, - <&cmu_peri CLK_GOUT_UART2_PCLK>; + clocks = <&cmu_peri CLK_GOUT_UART2_PCLK>, + <&cmu_peri CLK_GOUT_UART2_EXT_UCLK>; clock-names = "uart", "clk_uart_baud0"; samsung,uart-fifosize = <256>; status = "disabled"; diff --git a/arch/arm64/boot/dts/freescale/s32g2.dtsi b/arch/arm64/boot/dts/freescale/s32g2.dtsi index 59ea8a25aa4c..824d401e7a2c 100644 --- a/arch/arm64/boot/dts/freescale/s32g2.dtsi +++ b/arch/arm64/boot/dts/freescale/s32g2.dtsi @@ -79,7 +79,7 @@ }; }; - soc { + soc@0 { compatible = "simple-bus"; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm64/boot/dts/ti/k3-am64-main.dtsi b/arch/arm64/boot/dts/ti/k3-am64-main.dtsi index f64b368c6c37..cdb530597c5e 100644 --- a/arch/arm64/boot/dts/ti/k3-am64-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am64-main.dtsi @@ -456,13 +456,11 @@ clock-names = "clk_ahb", "clk_xin"; mmc-ddr-1_8v; mmc-hs200-1_8v; - mmc-hs400-1_8v; ti,trm-icp = <0x2>; ti,otap-del-sel-legacy = <0x0>; ti,otap-del-sel-mmc-hs = <0x0>; ti,otap-del-sel-ddr52 = <0x6>; ti,otap-del-sel-hs200 = <0x7>; - ti,otap-del-sel-hs400 = <0x4>; }; 
sdhci1: mmc@fa00000 { diff --git a/arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi index be7f39299894..19966f72c5b3 100644 --- a/arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi @@ -33,7 +33,7 @@ ranges; #interrupt-cells = <3>; interrupt-controller; - reg = <0x00 0x01800000 0x00 0x200000>, /* GICD */ + reg = <0x00 0x01800000 0x00 0x100000>, /* GICD */ <0x00 0x01900000 0x00 0x100000>, /* GICR */ <0x00 0x6f000000 0x00 0x2000>, /* GICC */ <0x00 0x6f010000 0x00 0x1000>, /* GICH */ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 47a1e25e25bb..de32152cea04 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -363,11 +363,6 @@ struct kvm_vcpu_arch { struct kvm_pmu pmu; /* - * Anything that is not used directly from assembly code goes - * here. - */ - - /* * Guest registers we preserve during guest debugging. * * These shadow registers are updated by the kvm_handle_sys_reg diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 55f998c3dc28..42ff95dba6da 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -843,9 +843,9 @@ #define ID_AA64SMFR0_F32F32_SHIFT 32 #define ID_AA64SMFR0_FA64 0x1 -#define ID_AA64SMFR0_I16I64 0x4 +#define ID_AA64SMFR0_I16I64 0xf #define ID_AA64SMFR0_F64F64 0x1 -#define ID_AA64SMFR0_I8I32 0x4 +#define ID_AA64SMFR0_I8I32 0xf #define ID_AA64SMFR0_F16F32 0x1 #define ID_AA64SMFR0_B16F32 0x1 #define ID_AA64SMFR0_F32F32 0x1 diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h index 3c8af033a997..0e80db4327b6 100644 --- a/arch/arm64/include/asm/virt.h +++ b/arch/arm64/include/asm/virt.h @@ -113,6 +113,9 @@ static __always_inline bool has_vhe(void) /* * Code only run in VHE/NVHE hyp context can assume VHE is present or * absent. Otherwise fall back to caps. + * This allows the compiler to discard VHE-specific code from the + * nVHE object, reducing the number of external symbol references + * needed to link. 
*/ if (is_vhe_hyp_code()) return true; diff --git a/arch/arm64/include/asm/xen/xen-ops.h b/arch/arm64/include/asm/xen/xen-ops.h new file mode 100644 index 000000000000..7ebb7eb0bd93 --- /dev/null +++ b/arch/arm64/include/asm/xen/xen-ops.h @@ -0,0 +1,2 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <xen/arm/xen-ops.h> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 42ea2bd856c6..8d88433de81d 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1974,15 +1974,7 @@ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap) #ifdef CONFIG_KVM static bool is_kvm_protected_mode(const struct arm64_cpu_capabilities *entry, int __unused) { - if (kvm_get_mode() != KVM_MODE_PROTECTED) - return false; - - if (is_kernel_in_hyp_mode()) { - pr_warn("Protected KVM not available with VHE\n"); - return false; - } - - return true; + return kvm_get_mode() == KVM_MODE_PROTECTED; } #endif /* CONFIG_KVM */ @@ -3109,7 +3101,6 @@ void cpu_set_feature(unsigned int num) WARN_ON(num >= MAX_CPU_FEATURES); elf_hwcap |= BIT(num); } -EXPORT_SYMBOL_GPL(cpu_set_feature); bool cpu_have_feature(unsigned int num) { diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S index d42a205ef625..bd5df50e4643 100644 --- a/arch/arm64/kernel/entry-ftrace.S +++ b/arch/arm64/kernel/entry-ftrace.S @@ -102,7 +102,6 @@ SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL) * x19-x29 per the AAPCS, and we created frame records upon entry, so we need * to restore x0-x8, x29, and x30. */ -ftrace_common_return: /* Restore function arguments */ ldp x0, x1, [sp] ldp x2, x3, [sp, #S_X2] diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 819979398127..aecf3071efdd 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -331,7 +331,7 @@ void task_set_vl_onexec(struct task_struct *task, enum vec_type type, * trapping to the kernel. * * When stored, Z0-Z31 (incorporating Vn in bits[127:0] or the - * corresponding Zn), P0-P15 and FFR are encoded in in + * corresponding Zn), P0-P15 and FFR are encoded in * task->thread.sve_state, formatted appropriately for vector * length task->thread.sve_vl or, if SVCR.SM is set, * task->thread.sme_vl. @@ -1916,10 +1916,15 @@ void __efi_fpsimd_begin(void) if (system_supports_sme()) { svcr = read_sysreg_s(SYS_SVCR); - if (!system_supports_fa64()) - ffr = svcr & SVCR_SM_MASK; + __this_cpu_write(efi_sm_state, + svcr & SVCR_SM_MASK); - __this_cpu_write(efi_sm_state, ffr); + /* + * Unless we have FA64 FFR does not + * exist in streaming mode. + */ + if (!system_supports_fa64()) + ffr = !(svcr & SVCR_SM_MASK); } sve_save_state(sve_state + sve_ffr_offset(sve_max_vl()), @@ -1964,8 +1969,13 @@ void __efi_fpsimd_end(void) sysreg_clear_set_s(SYS_SVCR, 0, SVCR_SM_MASK); + + /* + * Unless we have FA64 FFR does not + * exist in streaming mode. + */ if (!system_supports_fa64()) - ffr = efi_sm_state; + ffr = false; } } diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c index f447c4a36f69..ea5dc7c90f46 100644 --- a/arch/arm64/kernel/ftrace.c +++ b/arch/arm64/kernel/ftrace.c @@ -78,47 +78,76 @@ static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr) } /* - * Turn on the call to ftrace_caller() in instrumented function + * Find the address the callsite must branch to in order to reach '*addr'. + * + * Due to the limited range of 'BL' instructions, modules may be placed too far + * away to branch directly and must use a PLT. 
+ * + * Returns true when '*addr' contains a reachable target address, or has been + * modified to contain a PLT address. Returns false otherwise. */ -int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) +static bool ftrace_find_callable_addr(struct dyn_ftrace *rec, + struct module *mod, + unsigned long *addr) { unsigned long pc = rec->ip; - u32 old, new; - long offset = (long)pc - (long)addr; + long offset = (long)*addr - (long)pc; + struct plt_entry *plt; - if (offset < -SZ_128M || offset >= SZ_128M) { - struct module *mod; - struct plt_entry *plt; + /* + * When the target is within range of the 'BL' instruction, use 'addr' + * as-is and branch to that directly. + */ + if (offset >= -SZ_128M && offset < SZ_128M) + return true; - if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS)) - return -EINVAL; + /* + * When the target is outside of the range of a 'BL' instruction, we + * must use a PLT to reach it. We can only place PLTs for modules, and + * only when module PLT support is built-in. + */ + if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS)) + return false; - /* - * On kernels that support module PLTs, the offset between the - * branch instruction and its target may legally exceed the - * range of an ordinary relative 'bl' opcode. In this case, we - * need to branch via a trampoline in the module. - * - * NOTE: __module_text_address() must be called with preemption - * disabled, but we can rely on ftrace_lock to ensure that 'mod' - * retains its validity throughout the remainder of this code. - */ + /* + * 'mod' is only set at module load time, but if we end up + * dealing with an out-of-range condition, we can assume it + * is due to a module being loaded far away from the kernel. + * + * NOTE: __module_text_address() must be called with preemption + * disabled, but we can rely on ftrace_lock to ensure that 'mod' + * retains its validity throughout the remainder of this code. 
+ */ + if (!mod) { preempt_disable(); mod = __module_text_address(pc); preempt_enable(); + } - if (WARN_ON(!mod)) - return -EINVAL; + if (WARN_ON(!mod)) + return false; - plt = get_ftrace_plt(mod, addr); - if (!plt) { - pr_err("ftrace: no module PLT for %ps\n", (void *)addr); - return -EINVAL; - } - - addr = (unsigned long)plt; + plt = get_ftrace_plt(mod, *addr); + if (!plt) { + pr_err("ftrace: no module PLT for %ps\n", (void *)*addr); + return false; } + *addr = (unsigned long)plt; + return true; +} + +/* + * Turn on the call to ftrace_caller() in instrumented function + */ +int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) +{ + unsigned long pc = rec->ip; + u32 old, new; + + if (!ftrace_find_callable_addr(rec, NULL, &addr)) + return -EINVAL; + old = aarch64_insn_gen_nop(); new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK); @@ -132,6 +161,11 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long pc = rec->ip; u32 old, new; + if (!ftrace_find_callable_addr(rec, NULL, &old_addr)) + return -EINVAL; + if (!ftrace_find_callable_addr(rec, NULL, &addr)) + return -EINVAL; + old = aarch64_insn_gen_branch_imm(pc, old_addr, AARCH64_INSN_BRANCH_LINK); new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK); @@ -181,54 +215,15 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr) { unsigned long pc = rec->ip; - bool validate = true; u32 old = 0, new; - long offset = (long)pc - (long)addr; - if (offset < -SZ_128M || offset >= SZ_128M) { - u32 replaced; - - if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS)) - return -EINVAL; - - /* - * 'mod' is only set at module load time, but if we end up - * dealing with an out-of-range condition, we can assume it - * is due to a module being loaded far away from the kernel. - */ - if (!mod) { - preempt_disable(); - mod = __module_text_address(pc); - preempt_enable(); - - if (WARN_ON(!mod)) - return -EINVAL; - } - - /* - * The instruction we are about to patch may be a branch and - * link instruction that was redirected via a PLT entry. In - * this case, the normal validation will fail, but we can at - * least check that we are dealing with a branch and link - * instruction that points into the right module. 
- */ - if (aarch64_insn_read((void *)pc, &replaced)) - return -EFAULT; - - if (!aarch64_insn_is_bl(replaced) || - !within_module(pc + aarch64_get_branch_offset(replaced), - mod)) - return -EINVAL; - - validate = false; - } else { - old = aarch64_insn_gen_branch_imm(pc, addr, - AARCH64_INSN_BRANCH_LINK); - } + if (!ftrace_find_callable_addr(rec, mod, &addr)) + return -EINVAL; + old = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK); new = aarch64_insn_gen_nop(); - return ftrace_modify_code(pc, old, new, validate); + return ftrace_modify_code(pc, old, new, true); } void arch_ftrace_update_code(int command) diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c index 57b30bcf9f21..f6b00743c399 100644 --- a/arch/arm64/kernel/mte.c +++ b/arch/arm64/kernel/mte.c @@ -244,6 +244,11 @@ static void mte_update_gcr_excl(struct task_struct *task) SYS_GCR_EL1); } +#ifdef CONFIG_KASAN_HW_TAGS +/* Only called from assembly, silence sparse */ +void __init kasan_hw_tags_enable(struct alt_instr *alt, __le32 *origptr, + __le32 *updptr, int nr_inst); + void __init kasan_hw_tags_enable(struct alt_instr *alt, __le32 *origptr, __le32 *updptr, int nr_inst) { @@ -252,6 +257,7 @@ void __init kasan_hw_tags_enable(struct alt_instr *alt, __le32 *origptr, if (kasan_hw_tags_enabled()) *updptr = cpu_to_le32(aarch64_insn_gen_nop()); } +#endif void mte_thread_init_user(void) { diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index cf3a759f10d4..fea3223704b6 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -303,14 +303,13 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p) early_fixmap_init(); early_ioremap_init(); + setup_machine_fdt(__fdt_pointer); + /* * Initialise the static keys early as they may be enabled by the - * cpufeature code, early parameters, and DT setup. + * cpufeature code and early parameters. */ jump_label_init(); - - setup_machine_fdt(__fdt_pointer); - parse_early_param(); /* diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c index 4e39ace073af..3b8d062e30ea 100644 --- a/arch/arm64/kvm/arch_timer.c +++ b/arch/arm64/kvm/arch_timer.c @@ -1230,6 +1230,9 @@ bool kvm_arch_timer_get_input_level(int vintid) struct kvm_vcpu *vcpu = kvm_get_running_vcpu(); struct arch_timer_context *timer; + if (WARN(!vcpu, "No vcpu context!\n")) + return false; + if (vintid == vcpu_vtimer(vcpu)->irq.irq) timer = vcpu_vtimer(vcpu); else if (vintid == vcpu_ptimer(vcpu)->irq.irq) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 400bb0fe2745..83a7f61354d3 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -150,8 +150,10 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) if (ret) goto out_free_stage2_pgd; - if (!zalloc_cpumask_var(&kvm->arch.supported_cpus, GFP_KERNEL)) + if (!zalloc_cpumask_var(&kvm->arch.supported_cpus, GFP_KERNEL)) { + ret = -ENOMEM; goto out_free_stage2_pgd; + } cpumask_copy(kvm->arch.supported_cpus, cpu_possible_mask); kvm_vgic_early_init(kvm); @@ -2110,11 +2112,11 @@ static int finalize_hyp_mode(void) return 0; /* - * Exclude HYP BSS from kmemleak so that it doesn't get peeked - * at, which would end badly once the section is inaccessible. - * None of other sections should ever be introspected. + * Exclude HYP sections from kmemleak so that they don't get peeked + * at, which would end badly once inaccessible. 
*/ kmemleak_free_part(__hyp_bss_start, __hyp_bss_end - __hyp_bss_start); + kmemleak_free_part(__va(hyp_mem_base), hyp_mem_size); return pkvm_drop_host_privileges(); } @@ -2271,7 +2273,11 @@ static int __init early_kvm_mode_cfg(char *arg) return -EINVAL; if (strcmp(arg, "protected") == 0) { - kvm_mode = KVM_MODE_PROTECTED; + if (!is_kernel_in_hyp_mode()) + kvm_mode = KVM_MODE_PROTECTED; + else + pr_warn_once("Protected KVM not available with VHE\n"); + return 0; } diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c index 3d251a4d2cf7..6012b08ecb14 100644 --- a/arch/arm64/kvm/fpsimd.c +++ b/arch/arm64/kvm/fpsimd.c @@ -80,6 +80,7 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) vcpu->arch.flags &= ~KVM_ARM64_FP_ENABLED; vcpu->arch.flags |= KVM_ARM64_FP_HOST; + vcpu->arch.flags &= ~KVM_ARM64_HOST_SVE_ENABLED; if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN) vcpu->arch.flags |= KVM_ARM64_HOST_SVE_ENABLED; @@ -93,6 +94,7 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) * operations. Do this for ZA as well for now for simplicity. */ if (system_supports_sme()) { + vcpu->arch.flags &= ~KVM_ARM64_HOST_SME_ENABLED; if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN) vcpu->arch.flags |= KVM_ARM64_HOST_SME_ENABLED; diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c index 78edf077fa3b..1e78acf9662e 100644 --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c @@ -314,15 +314,11 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range) int host_stage2_idmap_locked(phys_addr_t addr, u64 size, enum kvm_pgtable_prot prot) { - hyp_assert_lock_held(&host_kvm.lock); - return host_stage2_try(__host_stage2_idmap, addr, addr + size, prot); } int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id) { - hyp_assert_lock_held(&host_kvm.lock); - return host_stage2_try(kvm_pgtable_stage2_set_owner, &host_kvm.pgt, addr, size, &host_s2_pool, owner_id); } diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c index b6d86e423319..35a4331ba5f3 100644 --- a/arch/arm64/kvm/hyp/nvhe/sys_regs.c +++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c @@ -243,15 +243,9 @@ u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id) case SYS_ID_AA64MMFR2_EL1: return get_pvm_id_aa64mmfr2(vcpu); default: - /* - * Should never happen because all cases are covered in - * pvm_sys_reg_descs[]. - */ - WARN_ON(1); - break; + /* Unhandled ID register, RAZ */ + return 0; } - - return 0; } static u64 read_id_reg(const struct kvm_vcpu *vcpu, @@ -332,6 +326,16 @@ static bool pvm_gic_read_sre(struct kvm_vcpu *vcpu, /* Mark the specified system register as an AArch64 feature id register. */ #define AARCH64(REG) { SYS_DESC(REG), .access = pvm_access_id_aarch64 } +/* + * sys_reg_desc initialiser for architecturally unallocated cpufeature ID + * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2 + * (1 <= crm < 8, 0 <= Op2 < 8). 
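The comment above pins down the encoding that the ID_UNALLOCATED() macro which follows fills in: AArch64 names a system register by the tuple (Op0, Op1, CRn, CRm, Op2), and the feature-ID block sits at Op0=3, Op1=0, CRn=0, with CRm selecting the row and Op2 the column. A sketch of the packing, assuming the usual arm64 shift values (19/16/12/8/5):

#include <stdint.h>

/* Pack a sysreg tuple; e.g. ID_UNALLOCATED(4,2) names the register
 * sys_reg(3, 0, 0, 4, 2). */
static uint32_t sys_reg(uint32_t op0, uint32_t op1, uint32_t crn,
                        uint32_t crm, uint32_t op2)
{
        return (op0 << 19) | (op1 << 16) | (crn << 12) | (crm << 8) |
               (op2 << 5);
}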
+ */ +#define ID_UNALLOCATED(crm, op2) { \ + Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2), \ + .access = pvm_access_id_aarch64, \ +} + /* Mark the specified system register as Read-As-Zero/Write-Ignored */ #define RAZ_WI(REG) { SYS_DESC(REG), .access = pvm_access_raz_wi } @@ -375,24 +379,46 @@ static const struct sys_reg_desc pvm_sys_reg_descs[] = { AARCH32(SYS_MVFR0_EL1), AARCH32(SYS_MVFR1_EL1), AARCH32(SYS_MVFR2_EL1), + ID_UNALLOCATED(3,3), AARCH32(SYS_ID_PFR2_EL1), AARCH32(SYS_ID_DFR1_EL1), AARCH32(SYS_ID_MMFR5_EL1), + ID_UNALLOCATED(3,7), /* AArch64 ID registers */ /* CRm=4 */ AARCH64(SYS_ID_AA64PFR0_EL1), AARCH64(SYS_ID_AA64PFR1_EL1), + ID_UNALLOCATED(4,2), + ID_UNALLOCATED(4,3), AARCH64(SYS_ID_AA64ZFR0_EL1), + ID_UNALLOCATED(4,5), + ID_UNALLOCATED(4,6), + ID_UNALLOCATED(4,7), AARCH64(SYS_ID_AA64DFR0_EL1), AARCH64(SYS_ID_AA64DFR1_EL1), + ID_UNALLOCATED(5,2), + ID_UNALLOCATED(5,3), AARCH64(SYS_ID_AA64AFR0_EL1), AARCH64(SYS_ID_AA64AFR1_EL1), + ID_UNALLOCATED(5,6), + ID_UNALLOCATED(5,7), AARCH64(SYS_ID_AA64ISAR0_EL1), AARCH64(SYS_ID_AA64ISAR1_EL1), + AARCH64(SYS_ID_AA64ISAR2_EL1), + ID_UNALLOCATED(6,3), + ID_UNALLOCATED(6,4), + ID_UNALLOCATED(6,5), + ID_UNALLOCATED(6,6), + ID_UNALLOCATED(6,7), AARCH64(SYS_ID_AA64MMFR0_EL1), AARCH64(SYS_ID_AA64MMFR1_EL1), AARCH64(SYS_ID_AA64MMFR2_EL1), + ID_UNALLOCATED(7,3), + ID_UNALLOCATED(7,4), + ID_UNALLOCATED(7,5), + ID_UNALLOCATED(7,6), + ID_UNALLOCATED(7,7), /* Scalable Vector Registers are restricted. */ diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v2.c b/arch/arm64/kvm/vgic/vgic-mmio-v2.c index 77a67e9d3d14..e070cda86e12 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio-v2.c +++ b/arch/arm64/kvm/vgic/vgic-mmio-v2.c @@ -429,11 +429,11 @@ static const struct vgic_register_region vgic_v2_dist_registers[] = { VGIC_ACCESS_32bit), REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET, vgic_mmio_read_pending, vgic_mmio_write_spending, - NULL, vgic_uaccess_write_spending, 1, + vgic_uaccess_read_pending, vgic_uaccess_write_spending, 1, VGIC_ACCESS_32bit), REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR, vgic_mmio_read_pending, vgic_mmio_write_cpending, - NULL, vgic_uaccess_write_cpending, 1, + vgic_uaccess_read_pending, vgic_uaccess_write_cpending, 1, VGIC_ACCESS_32bit), REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET, vgic_mmio_read_active, vgic_mmio_write_sactive, diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c index f7aa7bcd6fb8..f15e29cc63ce 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c +++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c @@ -353,42 +353,6 @@ static unsigned long vgic_mmio_read_v3_idregs(struct kvm_vcpu *vcpu, return 0; } -static unsigned long vgic_v3_uaccess_read_pending(struct kvm_vcpu *vcpu, - gpa_t addr, unsigned int len) -{ - u32 intid = VGIC_ADDR_TO_INTID(addr, 1); - u32 value = 0; - int i; - - /* - * pending state of interrupt is latched in pending_latch variable. - * Userspace will save and restore pending state and line_level - * separately. - * Refer to Documentation/virt/kvm/devices/arm-vgic-v3.rst - * for handling of ISPENDR and ICPENDR. 
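These table entries, combined with the pvm_read_id_reg() change earlier, give every unallocated feature-ID register an access handler, so an unhandled encoding now reads as zero rather than tripping a WARN: architecturally, unallocated ID registers are RAZ. In miniature (the handled case and its value are purely illustrative):

#include <stdint.h>

/* Feature-ID read with an architectural RAZ default. */
static uint64_t read_id_reg_sketch(uint32_t encoding)
{
        switch (encoding) {
        case 0xC020:                    /* hypothetical handled ID */
                return 0x1122334455667788ull;
        default:
                return 0;               /* unallocated => RAZ */
        }
}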
- */ - for (i = 0; i < len * 8; i++) { - struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); - bool state = irq->pending_latch; - - if (irq->hw && vgic_irq_is_sgi(irq->intid)) { - int err; - - err = irq_get_irqchip_state(irq->host_irq, - IRQCHIP_STATE_PENDING, - &state); - WARN_ON(err); - } - - if (state) - value |= (1U << i); - - vgic_put_irq(vcpu->kvm, irq); - } - - return value; -} - static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len, unsigned long val) @@ -666,7 +630,7 @@ static const struct vgic_register_region vgic_v3_dist_registers[] = { VGIC_ACCESS_32bit), REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISPENDR, vgic_mmio_read_pending, vgic_mmio_write_spending, - vgic_v3_uaccess_read_pending, vgic_v3_uaccess_write_pending, 1, + vgic_uaccess_read_pending, vgic_v3_uaccess_write_pending, 1, VGIC_ACCESS_32bit), REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICPENDR, vgic_mmio_read_pending, vgic_mmio_write_cpending, @@ -750,7 +714,7 @@ static const struct vgic_register_region vgic_v3_rd_registers[] = { VGIC_ACCESS_32bit), REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISPENDR0, vgic_mmio_read_pending, vgic_mmio_write_spending, - vgic_v3_uaccess_read_pending, vgic_v3_uaccess_write_pending, 4, + vgic_uaccess_read_pending, vgic_v3_uaccess_write_pending, 4, VGIC_ACCESS_32bit), REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ICPENDR0, vgic_mmio_read_pending, vgic_mmio_write_cpending, diff --git a/arch/arm64/kvm/vgic/vgic-mmio.c b/arch/arm64/kvm/vgic/vgic-mmio.c index 49837d3a3ef5..997d0fce2088 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio.c +++ b/arch/arm64/kvm/vgic/vgic-mmio.c @@ -226,8 +226,9 @@ int vgic_uaccess_write_cenable(struct kvm_vcpu *vcpu, return 0; } -unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu, - gpa_t addr, unsigned int len) +static unsigned long __read_pending(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len, + bool is_user) { u32 intid = VGIC_ADDR_TO_INTID(addr, 1); u32 value = 0; @@ -239,6 +240,15 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu, unsigned long flags; bool val; + /* + * When used from userspace with a GICv3 model: + * + * Pending state of interrupt is latched in pending_latch + * variable. Userspace will save and restore pending state + * and line_level separately. + * Refer to Documentation/virt/kvm/devices/arm-vgic-v3.rst + * for handling of ISPENDR and ICPENDR. 
+ */ raw_spin_lock_irqsave(&irq->irq_lock, flags); if (irq->hw && vgic_irq_is_sgi(irq->intid)) { int err; @@ -248,10 +258,20 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu, IRQCHIP_STATE_PENDING, &val); WARN_RATELIMIT(err, "IRQ %d", irq->host_irq); - } else if (vgic_irq_is_mapped_level(irq)) { + } else if (!is_user && vgic_irq_is_mapped_level(irq)) { val = vgic_get_phys_line_level(irq); } else { - val = irq_is_pending(irq); + switch (vcpu->kvm->arch.vgic.vgic_model) { + case KVM_DEV_TYPE_ARM_VGIC_V3: + if (is_user) { + val = irq->pending_latch; + break; + } + fallthrough; + default: + val = irq_is_pending(irq); + break; + } } value |= ((u32)val << i); @@ -263,6 +283,18 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu, return value; } +unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len) +{ + return __read_pending(vcpu, addr, len, false); +} + +unsigned long vgic_uaccess_read_pending(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len) +{ + return __read_pending(vcpu, addr, len, true); +} + static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq) { return (vgic_irq_is_sgi(irq->intid) && diff --git a/arch/arm64/kvm/vgic/vgic-mmio.h b/arch/arm64/kvm/vgic/vgic-mmio.h index 3fa696f198a3..6082d4b66d39 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio.h +++ b/arch/arm64/kvm/vgic/vgic-mmio.h @@ -149,6 +149,9 @@ int vgic_uaccess_write_cenable(struct kvm_vcpu *vcpu, unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len); +unsigned long vgic_uaccess_read_pending(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len); + void vgic_mmio_write_spending(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len, unsigned long val); diff --git a/arch/arm64/kvm/vmid.c b/arch/arm64/kvm/vmid.c index 8d5f0506fd87..d78ae63d7c15 100644 --- a/arch/arm64/kvm/vmid.c +++ b/arch/arm64/kvm/vmid.c @@ -66,7 +66,7 @@ static void flush_context(void) * the next context-switch, we broadcast TLB flush + I-cache * invalidation over the inner shareable domain on rollover. 
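The vgic rework above replaces the GICv3-only userspace reader with a shared __read_pending() taking an is_user flag: userspace accesses report the latched pending state (which userspace saves and restores separately from line_level), while the guest-facing MMIO view may also reflect the live line. Per interrupt, the result is packed one bit per INTID; a self-contained sketch (struct and field names are stand-ins):

#include <stdbool.h>
#include <stdint.h>

struct irq_state {
        bool pending_latch;     /* state userspace saves/restores */
        bool line_level;        /* live input line */
};

static uint32_t read_pending_sketch(const struct irq_state *irq,
                                    unsigned int nr_irqs, bool is_user)
{
        uint32_t value = 0;

        for (unsigned int i = 0; i < nr_irqs; i++) {
                bool val = is_user ? irq[i].pending_latch
                                   : irq[i].pending_latch ||
                                     irq[i].line_level;

                value |= (uint32_t)val << i;
        }
        return value;
}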
*/ - kvm_call_hyp(__kvm_flush_vm_context); + kvm_call_hyp(__kvm_flush_vm_context); } static bool check_update_reserved_vmid(u64 vmid, u64 newvmid) diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S index 0ea6cc25dc66..21c907987080 100644 --- a/arch/arm64/mm/cache.S +++ b/arch/arm64/mm/cache.S @@ -218,8 +218,6 @@ SYM_FUNC_ALIAS(__dma_flush_area, __pi___dma_flush_area) */ SYM_FUNC_START(__pi___dma_map_area) add x1, x0, x1 - cmp w2, #DMA_FROM_DEVICE - b.eq __pi_dcache_inval_poc b __pi_dcache_clean_poc SYM_FUNC_END(__pi___dma_map_area) SYM_FUNC_ALIAS(__dma_map_area, __pi___dma_map_area) diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 6719f9efea09..6099c81b9322 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -9,9 +9,9 @@ #include <linux/dma-map-ops.h> #include <linux/dma-iommu.h> #include <xen/xen.h> -#include <xen/swiotlb-xen.h> #include <asm/cacheflush.h> +#include <asm/xen/xen-ops.h> void arch_sync_dma_for_device(phys_addr_t paddr, size_t size, enum dma_data_direction dir) @@ -52,8 +52,5 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, if (iommu) iommu_setup_dma_ops(dev, dma_base, dma_base + size - 1); -#ifdef CONFIG_XEN - if (xen_swiotlb_detect()) - dev->dma_ops = &xen_swiotlb_dma_ops; -#endif + xen_setup_dma_ops(dev); } diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index 8ab4035dea27..42f2e9a8616c 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -1478,6 +1478,7 @@ skip_init_ctx: bpf_jit_binary_free(header); prog->bpf_func = NULL; prog->jited = 0; + prog->jited_len = 0; goto out_off; } bpf_jit_binary_lock_ro(header); diff --git a/arch/arm64/tools/gen-sysreg.awk b/arch/arm64/tools/gen-sysreg.awk index 89bfb74e28de..5c55509eb43f 100755 --- a/arch/arm64/tools/gen-sysreg.awk +++ b/arch/arm64/tools/gen-sysreg.awk @@ -253,7 +253,7 @@ END { next } -/0b[01]+/ && block = "Enum" { +/0b[01]+/ && block == "Enum" { expect_fields(2) val = $1 name = $2 diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index 80657bf83b05..1920d52653b4 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -343,6 +343,7 @@ config NR_CPUS config NUMA bool "NUMA Support" + select SMP select ACPI_NUMA if ACPI help Say Y to compile the kernel with NUMA (Non-Uniform Memory Access) diff --git a/arch/loongarch/include/asm/branch.h b/arch/loongarch/include/asm/branch.h index 3f33c89f35b4..9a133e4c068e 100644 --- a/arch/loongarch/include/asm/branch.h +++ b/arch/loongarch/include/asm/branch.h @@ -12,10 +12,9 @@ static inline unsigned long exception_era(struct pt_regs *regs) return regs->csr_era; } -static inline int compute_return_era(struct pt_regs *regs) +static inline void compute_return_era(struct pt_regs *regs) { regs->csr_era += 4; - return 0; } #endif /* _ASM_BRANCH_H */ diff --git a/arch/loongarch/include/asm/hardirq.h b/arch/loongarch/include/asm/hardirq.h index befe8184aa08..0ef3b18f8980 100644 --- a/arch/loongarch/include/asm/hardirq.h +++ b/arch/loongarch/include/asm/hardirq.h @@ -19,7 +19,7 @@ typedef struct { unsigned int __softirq_pending; } ____cacheline_aligned irq_cpustat_t; -DECLARE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat); +DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); #define __ARCH_IRQ_STAT diff --git a/arch/loongarch/include/asm/percpu.h b/arch/loongarch/include/asm/percpu.h index 34f15a6fb1e7..e6569f18c6dd 100644 --- a/arch/loongarch/include/asm/percpu.h +++ b/arch/loongarch/include/asm/percpu.h @@ -6,6 +6,7 @@ #define 
__ASM_PERCPU_H #include <asm/cmpxchg.h> +#include <asm/loongarch.h> /* Use r21 for fast access */ register unsigned long __my_cpu_offset __asm__("$r21"); diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h index 5dc84d8f18d6..d9e86cfa53e2 100644 --- a/arch/loongarch/include/asm/pgtable.h +++ b/arch/loongarch/include/asm/pgtable.h @@ -426,6 +426,11 @@ static inline void update_mmu_cache_pmd(struct vm_area_struct *vma, #define kern_addr_valid(addr) (1) +static inline unsigned long pmd_pfn(pmd_t pmd) +{ + return (pmd_val(pmd) & _PFN_MASK) >> _PFN_SHIFT; +} + #ifdef CONFIG_TRANSPARENT_HUGEPAGE /* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine.*/ @@ -497,11 +502,6 @@ static inline pmd_t pmd_mkyoung(pmd_t pmd) return pmd; } -static inline unsigned long pmd_pfn(pmd_t pmd) -{ - return (pmd_val(pmd) & _PFN_MASK) >> _PFN_SHIFT; -} - static inline struct page *pmd_page(pmd_t pmd) { if (pmd_trans_huge(pmd)) diff --git a/arch/loongarch/include/asm/smp.h b/arch/loongarch/include/asm/smp.h index 551e1f37c705..71189b28bfb2 100644 --- a/arch/loongarch/include/asm/smp.h +++ b/arch/loongarch/include/asm/smp.h @@ -9,10 +9,16 @@ #include <linux/atomic.h> #include <linux/bitops.h> #include <linux/linkage.h> -#include <linux/smp.h> #include <linux/threads.h> #include <linux/cpumask.h> +extern int smp_num_siblings; +extern int num_processors; +extern int disabled_cpus; +extern cpumask_t cpu_sibling_map[]; +extern cpumask_t cpu_core_map[]; +extern cpumask_t cpu_foreign_map[]; + void loongson3_smp_setup(void); void loongson3_prepare_cpus(unsigned int max_cpus); void loongson3_boot_secondary(int cpu, struct task_struct *idle); @@ -25,26 +31,11 @@ int loongson3_cpu_disable(void); void loongson3_cpu_die(unsigned int cpu); #endif -#ifdef CONFIG_SMP - static inline void plat_smp_setup(void) { loongson3_smp_setup(); } -#else /* !CONFIG_SMP */ - -static inline void plat_smp_setup(void) { } - -#endif /* !CONFIG_SMP */ - -extern int smp_num_siblings; -extern int num_processors; -extern int disabled_cpus; -extern cpumask_t cpu_sibling_map[]; -extern cpumask_t cpu_core_map[]; -extern cpumask_t cpu_foreign_map[]; - static inline int raw_smp_processor_id(void) { #if defined(__VDSO__) diff --git a/arch/loongarch/include/asm/timex.h b/arch/loongarch/include/asm/timex.h index d3ed99a4fdbd..fb41e9e7a222 100644 --- a/arch/loongarch/include/asm/timex.h +++ b/arch/loongarch/include/asm/timex.h @@ -12,13 +12,6 @@ #include <asm/cpu.h> #include <asm/cpu-features.h> -/* - * Standard way to access the cycle counter. - * Currently only used on SMP for scheduling. - * - * We know that all SMP capable CPUs have cycle counters. 
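The pgtable.h hunk above is pure motion: pmd_pfn() leaves the CONFIG_TRANSPARENT_HUGEPAGE block so it is defined for all configurations. The operation itself is a mask-and-shift on the pmd value; illustratively (the mask and shift here are placeholders, not LoongArch's real _PFN_MASK/_PFN_SHIFT):

#include <stdint.h>

#define PFN_SHIFT 12u                           /* placeholder */
#define PFN_MASK  (~((1ul << PFN_SHIFT) - 1))   /* placeholder */

static unsigned long pmd_pfn_sketch(uint64_t pmd_val)
{
        return (pmd_val & PFN_MASK) >> PFN_SHIFT;
}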
- */ - typedef unsigned long cycles_t; #define get_cycles get_cycles diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c index b16c3dea5eeb..bb729ee8a237 100644 --- a/arch/loongarch/kernel/acpi.c +++ b/arch/loongarch/kernel/acpi.c @@ -138,6 +138,7 @@ void __init acpi_boot_table_init(void) } } +#ifdef CONFIG_SMP static int set_processor_mask(u32 id, u32 flags) { @@ -166,15 +167,18 @@ static int set_processor_mask(u32 id, u32 flags) return cpu; } +#endif static void __init acpi_process_madt(void) { +#ifdef CONFIG_SMP int i; for (i = 0; i < NR_CPUS; i++) { __cpu_number_map[i] = -1; __cpu_logical_map[i] = -1; } +#endif loongson_sysconf.nr_cpus = num_processors; } diff --git a/arch/loongarch/kernel/cacheinfo.c b/arch/loongarch/kernel/cacheinfo.c index 8c9fe29e98f0..b38f5489d094 100644 --- a/arch/loongarch/kernel/cacheinfo.c +++ b/arch/loongarch/kernel/cacheinfo.c @@ -4,6 +4,7 @@ * * Copyright (C) 2020-2022 Loongson Technology Corporation Limited */ +#include <asm/cpu-info.h> #include <linux/cacheinfo.h> /* Populates leaf and increments to next leaf */ diff --git a/arch/loongarch/kernel/cpu-probe.c b/arch/loongarch/kernel/cpu-probe.c index 6c87ea36b257..529ab8f44ec6 100644 --- a/arch/loongarch/kernel/cpu-probe.c +++ b/arch/loongarch/kernel/cpu-probe.c @@ -263,7 +263,7 @@ void cpu_probe(void) c->cputype = CPU_UNKNOWN; c->processor_id = read_cpucfg(LOONGARCH_CPUCFG0); - c->fpu_vers = (read_cpucfg(LOONGARCH_CPUCFG2) >> 3) & 0x3; + c->fpu_vers = (read_cpucfg(LOONGARCH_CPUCFG2) & CPUCFG2_FPVERS) >> 3; c->fpu_csr0 = FPU_CSR_RN; c->fpu_mask = FPU_CSR_RSVD; diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S index e596dfcd924b..d01e62dd414f 100644 --- a/arch/loongarch/kernel/head.S +++ b/arch/loongarch/kernel/head.S @@ -14,8 +14,6 @@ __REF -SYM_ENTRY(_stext, SYM_L_GLOBAL, SYM_A_NONE) - SYM_CODE_START(kernel_entry) # kernel entry point /* Config direct window and set PG */ diff --git a/arch/loongarch/kernel/irq.c b/arch/loongarch/kernel/irq.c index 4b671d305ede..b34b8d792aa4 100644 --- a/arch/loongarch/kernel/irq.c +++ b/arch/loongarch/kernel/irq.c @@ -22,6 +22,8 @@ #include <asm/setup.h> DEFINE_PER_CPU(unsigned long, irq_stack); +DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); +EXPORT_PER_CPU_SYMBOL(irq_stat); struct irq_domain *cpu_domain; struct irq_domain *liointc_domain; @@ -56,8 +58,11 @@ int arch_show_interrupts(struct seq_file *p, int prec) void __init init_IRQ(void) { - int i, r, ipi_irq; + int i; +#ifdef CONFIG_SMP + int r, ipi_irq; static int ipi_dummy_dev; +#endif unsigned int order = get_order(IRQ_STACK_SIZE); struct page *page; diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c index 6d944d65f600..bfa0dfe8b7d7 100644 --- a/arch/loongarch/kernel/process.c +++ b/arch/loongarch/kernel/process.c @@ -120,10 +120,12 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) /* * Copy architecture-specific thread state */ -int copy_thread(unsigned long clone_flags, unsigned long usp, - unsigned long kthread_arg, struct task_struct *p, unsigned long tls) +int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) { unsigned long childksp; + unsigned long tls = args->tls; + unsigned long usp = args->stack; + unsigned long clone_flags = args->flags; struct pt_regs *childregs, *regs = current_pt_regs(); childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32; @@ -136,12 +138,12 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, p->thread.csr_crmd = 
csr_read32(LOONGARCH_CSR_CRMD); p->thread.csr_prmd = csr_read32(LOONGARCH_CSR_PRMD); p->thread.csr_ecfg = csr_read32(LOONGARCH_CSR_ECFG); - if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { + if (unlikely(args->fn)) { /* kernel thread */ - p->thread.reg23 = usp; /* fn */ - p->thread.reg24 = kthread_arg; p->thread.reg03 = childksp; - p->thread.reg01 = (unsigned long) ret_from_kernel_thread; + p->thread.reg23 = (unsigned long)args->fn; + p->thread.reg24 = (unsigned long)args->fn_arg; + p->thread.reg01 = (unsigned long)ret_from_kernel_thread; memset(childregs, 0, sizeof(struct pt_regs)); childregs->csr_euen = p->thread.csr_euen; childregs->csr_crmd = p->thread.csr_crmd; diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c index 185e4035811a..c74860b53375 100644 --- a/arch/loongarch/kernel/setup.c +++ b/arch/loongarch/kernel/setup.c @@ -39,7 +39,6 @@ #include <asm/pgalloc.h> #include <asm/sections.h> #include <asm/setup.h> -#include <asm/smp.h> #include <asm/time.h> #define SMBIOS_BIOSSIZE_OFFSET 0x09 @@ -349,8 +348,6 @@ static void __init prefill_possible_map(void) nr_cpu_ids = possible; } -#else -static inline void prefill_possible_map(void) {} #endif void __init setup_arch(char **cmdline_p) @@ -367,8 +364,10 @@ void __init setup_arch(char **cmdline_p) arch_mem_init(cmdline_p); resource_init(); +#ifdef CONFIG_SMP plat_smp_setup(); prefill_possible_map(); +#endif paging_init(); } diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c index b8c53b755a25..73cec62504fb 100644 --- a/arch/loongarch/kernel/smp.c +++ b/arch/loongarch/kernel/smp.c @@ -66,8 +66,6 @@ static cpumask_t cpu_core_setup_map; struct secondary_data cpuboot_data; static DEFINE_PER_CPU(int, cpu_state); -DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); -EXPORT_PER_CPU_SYMBOL(irq_stat); enum ipi_msg_type { IPI_RESCHEDULE, diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c index e4060f84a221..1bf58c65e2bf 100644 --- a/arch/loongarch/kernel/traps.c +++ b/arch/loongarch/kernel/traps.c @@ -475,8 +475,7 @@ asmlinkage void noinstr do_ri(struct pt_regs *regs) die_if_kernel("Reserved instruction in kernel code", regs); - if (unlikely(compute_return_era(regs) < 0)) - goto out; + compute_return_era(regs); if (unlikely(get_user(opcode, era) < 0)) { status = SIGSEGV; diff --git a/arch/loongarch/kernel/vmlinux.lds.S b/arch/loongarch/kernel/vmlinux.lds.S index 9d508158fe1a..69c76f26c1c5 100644 --- a/arch/loongarch/kernel/vmlinux.lds.S +++ b/arch/loongarch/kernel/vmlinux.lds.S @@ -37,6 +37,7 @@ SECTIONS HEAD_TEXT_SECTION . 
= ALIGN(PECOFF_SEGMENT_ALIGN); + _stext = .; .text : { TEXT_TEXT SCHED_TEXT @@ -101,6 +102,7 @@ SECTIONS STABS_DEBUG DWARF_DEBUG + ELF_DETAILS .gptab.sdata : { *(.gptab.data) diff --git a/arch/loongarch/mm/tlb.c b/arch/loongarch/mm/tlb.c index e272f8ac57d1..9818ce11546b 100644 --- a/arch/loongarch/mm/tlb.c +++ b/arch/loongarch/mm/tlb.c @@ -281,15 +281,16 @@ void setup_tlb_handler(int cpu) if (pcpu_handlers[cpu]) return; - page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, get_order(vec_sz)); + page = alloc_pages_node(cpu_to_node(cpu), GFP_ATOMIC, get_order(vec_sz)); if (!page) return; addr = page_address(page); - pcpu_handlers[cpu] = virt_to_phys(addr); + pcpu_handlers[cpu] = (unsigned long)addr; memcpy((void *)addr, (void *)eentry, vec_sz); local_flush_icache_range((unsigned long)addr, (unsigned long)addr + vec_sz); - csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_TLBRENTRY); + csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_EENTRY); + csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_MERRENTRY); csr_write64(pcpu_handlers[cpu] + 80*VECSIZE, LOONGARCH_CSR_TLBRENTRY); } #endif diff --git a/arch/mips/boot/dts/ingenic/x1000.dtsi b/arch/mips/boot/dts/ingenic/x1000.dtsi index b0a034b468bb..42e69664efd9 100644 --- a/arch/mips/boot/dts/ingenic/x1000.dtsi +++ b/arch/mips/boot/dts/ingenic/x1000.dtsi @@ -111,8 +111,9 @@ clocks = <&cgu X1000_CLK_RTCLK>, <&cgu X1000_CLK_EXCLK>, - <&cgu X1000_CLK_PCLK>; - clock-names = "rtc", "ext", "pclk"; + <&cgu X1000_CLK_PCLK>, + <&cgu X1000_CLK_TCU>; + clock-names = "rtc", "ext", "pclk", "tcu"; interrupt-controller; #interrupt-cells = <1>; diff --git a/arch/mips/boot/dts/ingenic/x1830.dtsi b/arch/mips/boot/dts/ingenic/x1830.dtsi index dbf21afaccb1..65a5da71c199 100644 --- a/arch/mips/boot/dts/ingenic/x1830.dtsi +++ b/arch/mips/boot/dts/ingenic/x1830.dtsi @@ -104,8 +104,9 @@ clocks = <&cgu X1830_CLK_RTCLK>, <&cgu X1830_CLK_EXCLK>, - <&cgu X1830_CLK_PCLK>; - clock-names = "rtc", "ext", "pclk"; + <&cgu X1830_CLK_PCLK>, + <&cgu X1830_CLK_TCU>; + clock-names = "rtc", "ext", "pclk", "tcu"; interrupt-controller; #interrupt-cells = <1>; diff --git a/arch/mips/generic/board-ranchu.c b/arch/mips/generic/board-ranchu.c index a89aaad59cb1..930c45041882 100644 --- a/arch/mips/generic/board-ranchu.c +++ b/arch/mips/generic/board-ranchu.c @@ -44,6 +44,7 @@ static __init unsigned int ranchu_measure_hpt_freq(void) __func__); rtc_base = of_iomap(np, 0); + of_node_put(np); if (!rtc_base) panic("%s(): Failed to ioremap Goldfish RTC base!", __func__); diff --git a/arch/mips/lantiq/falcon/sysctrl.c b/arch/mips/lantiq/falcon/sysctrl.c index 5204fc6d6d50..1187729d8cbb 100644 --- a/arch/mips/lantiq/falcon/sysctrl.c +++ b/arch/mips/lantiq/falcon/sysctrl.c @@ -208,6 +208,12 @@ void __init ltq_soc_init(void) of_address_to_resource(np_sysgpe, 0, &res_sys[2])) panic("Failed to get core resources"); + of_node_put(np_status); + of_node_put(np_ebu); + of_node_put(np_sys1); + of_node_put(np_syseth); + of_node_put(np_sysgpe); + if ((request_mem_region(res_status.start, resource_size(&res_status), res_status.name) < 0) || (request_mem_region(res_ebu.start, resource_size(&res_ebu), diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c index b732495f138a..20622bf0a9b3 100644 --- a/arch/mips/lantiq/irq.c +++ b/arch/mips/lantiq/irq.c @@ -408,6 +408,7 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent) if (!ltq_eiu_membase) panic("Failed to remap eiu memory"); } + of_node_put(eiu_node); return 0; } diff --git a/arch/mips/lantiq/xway/sysctrl.c 
b/arch/mips/lantiq/xway/sysctrl.c index 084f6caba5f2..d444a1b98a72 100644 --- a/arch/mips/lantiq/xway/sysctrl.c +++ b/arch/mips/lantiq/xway/sysctrl.c @@ -441,6 +441,10 @@ void __init ltq_soc_init(void) of_address_to_resource(np_ebu, 0, &res_ebu)) panic("Failed to get core resources"); + of_node_put(np_pmu); + of_node_put(np_cgu); + of_node_put(np_ebu); + if (!request_mem_region(res_pmu.start, resource_size(&res_pmu), res_pmu.name) || !request_mem_region(res_cgu.start, resource_size(&res_cgu), diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c index bbf1e38e1431..2cb708cdf01a 100644 --- a/arch/mips/mti-malta/malta-time.c +++ b/arch/mips/mti-malta/malta-time.c @@ -214,6 +214,8 @@ static void update_gic_frequency_dt(void) if (of_update_property(node, &gic_frequency_prop) < 0) pr_err("error updating gic frequency property\n"); + + of_node_put(node); } #endif diff --git a/arch/mips/pic32/pic32mzda/init.c b/arch/mips/pic32/pic32mzda/init.c index 129915616763..d9c8c4e46aff 100644 --- a/arch/mips/pic32/pic32mzda/init.c +++ b/arch/mips/pic32/pic32mzda/init.c @@ -98,13 +98,18 @@ static int __init pic32_of_prepare_platform_data(struct of_dev_auxdata *lookup) np = of_find_compatible_node(NULL, NULL, lookup->compatible); if (np) { lookup->name = (char *)np->name; - if (lookup->phys_addr) + if (lookup->phys_addr) { + of_node_put(np); continue; + } if (!of_address_to_resource(np, 0, &res)) lookup->phys_addr = res.start; + of_node_put(np); } } + of_node_put(root); + return 0; } diff --git a/arch/mips/pic32/pic32mzda/time.c b/arch/mips/pic32/pic32mzda/time.c index 7174e9abbb1b..777b515c52c8 100644 --- a/arch/mips/pic32/pic32mzda/time.c +++ b/arch/mips/pic32/pic32mzda/time.c @@ -32,6 +32,9 @@ static unsigned int pic32_xlate_core_timer_irq(void) goto default_map; irq = irq_of_parse_and_map(node, 0); + + of_node_put(node); + if (!irq) goto default_map; diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c index 587c7b998769..ea8072acf8d9 100644 --- a/arch/mips/ralink/of.c +++ b/arch/mips/ralink/of.c @@ -40,6 +40,8 @@ __iomem void *plat_of_remap_node(const char *node) if (of_address_to_resource(np, 0, &res)) panic("Failed to get resource for %s", node); + of_node_put(np); + if (!request_mem_region(res.start, resource_size(&res), res.name)) diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c index 7b7f25b4b057..9240bcdbe74e 100644 --- a/arch/mips/vr41xx/common/icu.c +++ b/arch/mips/vr41xx/common/icu.c @@ -640,8 +640,6 @@ static int icu_get_irq(unsigned int irq) printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2); - atomic_inc(&irq_err_count); - return -1; } diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 5f2448dc5a2b..fa400055b2d5 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -10,6 +10,7 @@ config PARISC select ARCH_WANT_FRAME_POINTERS select ARCH_HAS_ELF_RANDOMIZE select ARCH_HAS_STRICT_KERNEL_RWX + select ARCH_HAS_STRICT_MODULE_RWX select ARCH_HAS_UBSAN_SANITIZE_ALL select ARCH_HAS_PTE_SPECIAL select ARCH_NO_SG_CHAIN diff --git a/arch/parisc/include/asm/fb.h b/arch/parisc/include/asm/fb.h index d63a2acb91f2..55d29c4f716e 100644 --- a/arch/parisc/include/asm/fb.h +++ b/arch/parisc/include/asm/fb.h @@ -12,7 +12,7 @@ static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE; } -#if defined(CONFIG_STI_CONSOLE) || defined(CONFIG_FB_STI) +#if defined(CONFIG_FB_STI) int fb_is_primary_device(struct fb_info *info); #else static inline int 
fb_is_primary_device(struct fb_info *info) diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index c8a11fcecf4c..a9bc578e4c52 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -722,7 +722,10 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned lon return; if (parisc_requires_coherency()) { - flush_user_cache_page(vma, vmaddr); + if (vma->vm_flags & VM_SHARED) + flush_data_cache(); + else + flush_user_cache_page(vma, vmaddr); return; } diff --git a/arch/parisc/math-emu/decode_exc.c b/arch/parisc/math-emu/decode_exc.c index 494ca41df05d..d41ddb3430b5 100644 --- a/arch/parisc/math-emu/decode_exc.c +++ b/arch/parisc/math-emu/decode_exc.c @@ -102,7 +102,7 @@ decode_fpu(unsigned int Fpu_register[], unsigned int trap_counts[]) * that happen. Want to keep this overhead low, but still provide * some information to the customer. All exits from this routine * need to restore Fpu_register[0] - */ + */ bflags=(Fpu_register[0] & 0xf8000000); Fpu_register[0] &= 0x07ffffff; diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index be68c1f02b79..c2ce2e60c8f0 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -223,7 +223,6 @@ config PPC select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx) select HAVE_IOREMAP_PROT - select HAVE_IRQ_EXIT_ON_IRQ_STACK select HAVE_IRQ_TIME_ACCOUNTING select HAVE_KERNEL_GZIP select HAVE_KERNEL_LZMA if DEFAULT_UIMAGE @@ -786,7 +785,6 @@ config THREAD_SHIFT range 13 15 default "15" if PPC_256K_PAGES default "14" if PPC64 - default "14" if KASAN default "13" help Used to define the stack size. The default is almost always what you diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index 125328d1b980..af58f1ed3952 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h @@ -14,10 +14,16 @@ #ifdef __KERNEL__ -#if defined(CONFIG_VMAP_STACK) && CONFIG_THREAD_SHIFT < PAGE_SHIFT +#ifdef CONFIG_KASAN +#define MIN_THREAD_SHIFT (CONFIG_THREAD_SHIFT + 1) +#else +#define MIN_THREAD_SHIFT CONFIG_THREAD_SHIFT +#endif + +#if defined(CONFIG_VMAP_STACK) && MIN_THREAD_SHIFT < PAGE_SHIFT #define THREAD_SHIFT PAGE_SHIFT #else -#define THREAD_SHIFT CONFIG_THREAD_SHIFT +#define THREAD_SHIFT MIN_THREAD_SHIFT #endif #define THREAD_SIZE (1 << THREAD_SHIFT) diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 2e2a2a9bcf43..f91f0f29a566 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -37,6 +37,8 @@ KASAN_SANITIZE_paca.o := n KASAN_SANITIZE_setup_64.o := n KASAN_SANITIZE_mce.o := n KASAN_SANITIZE_mce_power.o := n +KASAN_SANITIZE_udbg.o := n +KASAN_SANITIZE_udbg_16550.o := n # we have to be particularly careful in ppc64 to exclude code that # runs with translations off, as we cannot access the shadow with diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index b62046bf3bb8..0fbda89cd1bb 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -1855,7 +1855,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp) tm_reclaim_current(0); #endif - memset(regs->gpr, 0, sizeof(regs->gpr)); + memset(®s->gpr[1], 0, sizeof(regs->gpr) - sizeof(regs->gpr[0])); regs->ctr = 0; regs->link = 0; regs->xer = 0; @@ -2158,12 +2158,12 @@ static unsigned long ___get_wchan(struct task_struct *p) return 0; 
do { - sp = *(unsigned long *)sp; + sp = READ_ONCE_NOCHECK(*(unsigned long *)sp); if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) || task_is_running(p)) return 0; if (count > 0) { - ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE]; + ip = READ_ONCE_NOCHECK(((unsigned long *)sp)[STACK_FRAME_LR_SAVE]); if (!in_sched_functions(ip)) return ip; } diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 04694ec423f6..13d6cb188835 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -2302,7 +2302,7 @@ static void __init prom_init_stdout(void) static int __init prom_find_machine_type(void) { - char compat[256]; + static char compat[256] __prombss; int len, i = 0; #ifdef CONFIG_PPC64 phandle rtas; diff --git a/arch/powerpc/kernel/ptrace/ptrace-fpu.c b/arch/powerpc/kernel/ptrace/ptrace-fpu.c index 5dca19361316..09c49632bfe5 100644 --- a/arch/powerpc/kernel/ptrace/ptrace-fpu.c +++ b/arch/powerpc/kernel/ptrace/ptrace-fpu.c @@ -17,9 +17,13 @@ int ptrace_get_fpr(struct task_struct *child, int index, unsigned long *data) #ifdef CONFIG_PPC_FPU_REGS flush_fp_to_thread(child); - if (fpidx < (PT_FPSCR - PT_FPR0)) - memcpy(data, &child->thread.TS_FPR(fpidx), sizeof(long)); - else + if (fpidx < (PT_FPSCR - PT_FPR0)) { + if (IS_ENABLED(CONFIG_PPC32)) + // On 32-bit the index we are passed refers to 32-bit words + *data = ((u32 *)child->thread.fp_state.fpr)[fpidx]; + else + memcpy(data, &child->thread.TS_FPR(fpidx), sizeof(long)); + } else *data = child->thread.fp_state.fpscr; #else *data = 0; @@ -39,9 +43,13 @@ int ptrace_put_fpr(struct task_struct *child, int index, unsigned long data) #ifdef CONFIG_PPC_FPU_REGS flush_fp_to_thread(child); - if (fpidx < (PT_FPSCR - PT_FPR0)) - memcpy(&child->thread.TS_FPR(fpidx), &data, sizeof(long)); - else + if (fpidx < (PT_FPSCR - PT_FPR0)) { + if (IS_ENABLED(CONFIG_PPC32)) + // On 32-bit the index we are passed refers to 32-bit words + ((u32 *)child->thread.fp_state.fpr)[fpidx] = data; + else + memcpy(&child->thread.TS_FPR(fpidx), &data, sizeof(long)); + } else child->thread.fp_state.fpscr = data; #endif diff --git a/arch/powerpc/kernel/ptrace/ptrace.c b/arch/powerpc/kernel/ptrace/ptrace.c index 4d2dc22d4a2d..5d7a72b41ae7 100644 --- a/arch/powerpc/kernel/ptrace/ptrace.c +++ b/arch/powerpc/kernel/ptrace/ptrace.c @@ -444,4 +444,7 @@ void __init pt_regs_check(void) * real registers. */ BUILD_BUG_ON(PT_DSCR < sizeof(struct user_pt_regs) / sizeof(unsigned long)); + + // ptrace_get/put_fpr() rely on PPC32 and VSX being incompatible + BUILD_BUG_ON(IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_VSX)); } diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 9bb43aa53d43..693133972294 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -993,8 +993,8 @@ int rtas_call_reentrant(int token, int nargs, int nret, int *outputs, ...) * * Return: A pointer to the specified errorlog or NULL if not found. 
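The ptrace-fpu.c change above is more subtle than it looks: a 32-bit ptrace client indexes the floating-point block in 32-bit words, but the kernel stores the FPRs as 64-bit doubles, so copying sizeof(long) from &TS_FPR(fpidx) on PPC32 reads from the wrong slot entirely. The fixed getter reinterprets the array as words; a userspace equivalent (memcpy avoids aliasing trouble):

#include <stdint.h>
#include <string.h>

/* Fetch 32-bit word 'idx' (0..63) from 32 64-bit FPR slots, the way
 * a 32-bit ptrace client addresses them. */
static uint32_t get_fpr_word(const uint64_t fpr[32], unsigned int idx)
{
        uint32_t w;

        memcpy(&w, (const uint8_t *)fpr + idx * sizeof(w), sizeof(w));
        return w;
}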
*/ -struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log, - uint16_t section_id) +noinstr struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log, + uint16_t section_id) { struct rtas_ext_event_log_v6 *ext_log = (struct rtas_ext_event_log_v6 *)log->buffer; @@ -1071,7 +1071,7 @@ static struct rtas_filter rtas_filters[] __ro_after_init = { { "get-time-of-day", -1, -1, -1, -1, -1 }, { "ibm,get-vpd", -1, 0, -1, 1, 2 }, { "ibm,lpar-perftools", -1, 2, 3, -1, -1 }, - { "ibm,platform-dump", -1, 4, 5, -1, -1 }, + { "ibm,platform-dump", -1, 4, 5, -1, -1 }, /* Special cased */ { "ibm,read-slot-reset-state", -1, -1, -1, -1, -1 }, { "ibm,scan-log-dump", -1, 0, 1, -1, -1 }, { "ibm,set-dynamic-indicator", -1, 2, -1, -1, -1 }, @@ -1120,6 +1120,15 @@ static bool block_rtas_call(int token, int nargs, size = 1; end = base + size - 1; + + /* + * Special case for ibm,platform-dump - NULL buffer + * address is used to indicate end of dump processing + */ + if (!strcmp(f->name, "ibm,platform-dump") && + base == 0) + return false; + if (!in_rmo_buf(base, end)) goto err; } diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index eb0077b302e2..1a02629ec70b 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -935,12 +935,6 @@ void __init setup_arch(char **cmdline_p) /* Print various info about the machine that has been gathered so far. */ print_system_info(); - /* Reserve large chunks of memory for use by CMA for KVM. */ - kvm_cma_reserve(); - - /* Reserve large chunks of memory for us by CMA for hugetlb */ - gigantic_hugetlb_cma_reserve(); - klp_init_thread_info(&init_task); setup_initial_init_mm(_stext, _etext, _edata, _end); @@ -955,6 +949,13 @@ void __init setup_arch(char **cmdline_p) initmem_init(); + /* + * Reserve large chunks of memory for use by CMA for KVM and hugetlb. These must + * be called after initmem_init(), so that pageblock_order is initialised. + */ + kvm_cma_reserve(); + gigantic_hugetlb_cma_reserve(); + early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT); if (ppc_md.setup_arch) diff --git a/arch/powerpc/kexec/crash.c b/arch/powerpc/kexec/crash.c index d85fa9fc6f3c..80f54723cf6d 100644 --- a/arch/powerpc/kexec/crash.c +++ b/arch/powerpc/kexec/crash.c @@ -224,7 +224,7 @@ void crash_kexec_secondary(struct pt_regs *regs) /* wait for all the CPUs to hit real mode but timeout if they don't come in */ #if defined(CONFIG_SMP) && defined(CONFIG_PPC64) -static void __maybe_unused crash_kexec_wait_realmode(int cpu) +noinstr static void __maybe_unused crash_kexec_wait_realmode(int cpu) { unsigned int msecs; int i; diff --git a/arch/powerpc/mm/nohash/kaslr_booke.c b/arch/powerpc/mm/nohash/kaslr_booke.c index 1f3f9fedf1bc..0d04f9d5da8d 100644 --- a/arch/powerpc/mm/nohash/kaslr_booke.c +++ b/arch/powerpc/mm/nohash/kaslr_booke.c @@ -19,7 +19,6 @@ #include <asm/cacheflush.h> #include <asm/kdump.h> #include <mm/mmu_decl.h> -#include <generated/compile.h> #include <generated/utsrelease.h> struct regions { @@ -37,10 +36,6 @@ struct regions { int reserved_mem_size_cells; }; -/* Simplified build-specific string for starting entropy. 
*/ -static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@" - LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION; - struct regions __initdata regions; static __init void kaslr_get_cmdline(void *fdt) @@ -71,7 +66,8 @@ static unsigned long __init get_boot_seed(void *fdt) { unsigned long hash = 0; - hash = rotate_xor(hash, build_str, sizeof(build_str)); + /* build-specific string for starting entropy. */ + hash = rotate_xor(hash, linux_banner, strlen(linux_banner)); hash = rotate_xor(hash, fdt, fdt_totalsize(fdt)); return hash; diff --git a/arch/powerpc/platforms/microwatt/microwatt.h b/arch/powerpc/platforms/microwatt/microwatt.h new file mode 100644 index 000000000000..335417e95e66 --- /dev/null +++ b/arch/powerpc/platforms/microwatt/microwatt.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _MICROWATT_H +#define _MICROWATT_H + +void microwatt_rng_init(void); + +#endif /* _MICROWATT_H */ diff --git a/arch/powerpc/platforms/microwatt/rng.c b/arch/powerpc/platforms/microwatt/rng.c index 7bc4d1cbfaf0..8ece87d005c8 100644 --- a/arch/powerpc/platforms/microwatt/rng.c +++ b/arch/powerpc/platforms/microwatt/rng.c @@ -11,6 +11,7 @@ #include <asm/archrandom.h> #include <asm/cputable.h> #include <asm/machdep.h> +#include "microwatt.h" #define DARN_ERR 0xFFFFFFFFFFFFFFFFul @@ -29,7 +30,7 @@ static int microwatt_get_random_darn(unsigned long *v) return 1; } -static __init int rng_init(void) +void __init microwatt_rng_init(void) { unsigned long val; int i; @@ -37,12 +38,7 @@ static __init int rng_init(void) for (i = 0; i < 10; i++) { if (microwatt_get_random_darn(&val)) { ppc_md.get_random_seed = microwatt_get_random_darn; - return 0; + return; } } - - pr_warn("Unable to use DARN for get_random_seed()\n"); - - return -EIO; } -machine_subsys_initcall(, rng_init); diff --git a/arch/powerpc/platforms/microwatt/setup.c b/arch/powerpc/platforms/microwatt/setup.c index 0b02603bdb74..6b32539395a4 100644 --- a/arch/powerpc/platforms/microwatt/setup.c +++ b/arch/powerpc/platforms/microwatt/setup.c @@ -16,6 +16,8 @@ #include <asm/xics.h> #include <asm/udbg.h> +#include "microwatt.h" + static void __init microwatt_init_IRQ(void) { xics_init(); @@ -32,10 +34,16 @@ static int __init microwatt_populate(void) } machine_arch_initcall(microwatt, microwatt_populate); +static void __init microwatt_setup_arch(void) +{ + microwatt_rng_init(); +} + define_machine(microwatt) { .name = "microwatt", .probe = microwatt_probe, .init_IRQ = microwatt_init_IRQ, + .setup_arch = microwatt_setup_arch, .progress = udbg_progress, .calibrate_decr = generic_calibrate_decr, }; diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile index 6488b3842199..19f0fc5c6f1b 100644 --- a/arch/powerpc/platforms/powernv/Makefile +++ b/arch/powerpc/platforms/powernv/Makefile @@ -4,6 +4,7 @@ # in particular, idle code runs a bunch of things in real mode KASAN_SANITIZE_idle.o := n KASAN_SANITIZE_pci-ioda.o := n +KASAN_SANITIZE_pci-ioda-tce.o := n # pnv_machine_check_early KASAN_SANITIZE_setup.o := n diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h index e297bf4abfcb..866efdc103fd 100644 --- a/arch/powerpc/platforms/powernv/powernv.h +++ b/arch/powerpc/platforms/powernv/powernv.h @@ -42,4 +42,6 @@ ssize_t memcons_copy(struct memcons *mc, char *to, loff_t pos, size_t count); u32 __init memcons_get_size(struct memcons *mc); struct memcons *__init memcons_init(struct device_node *node, const char *mc_prop_name); +void pnv_rng_init(void); 
+ #endif /* _POWERNV_H */ diff --git a/arch/powerpc/platforms/powernv/rng.c b/arch/powerpc/platforms/powernv/rng.c index e3d44b36ae98..463c78c52cc5 100644 --- a/arch/powerpc/platforms/powernv/rng.c +++ b/arch/powerpc/platforms/powernv/rng.c @@ -17,6 +17,7 @@ #include <asm/prom.h> #include <asm/machdep.h> #include <asm/smp.h> +#include "powernv.h" #define DARN_ERR 0xFFFFFFFFFFFFFFFFul @@ -28,7 +29,6 @@ struct powernv_rng { static DEFINE_PER_CPU(struct powernv_rng *, powernv_rng); - int powernv_hwrng_present(void) { struct powernv_rng *rng; @@ -98,9 +98,6 @@ static int __init initialise_darn(void) return 0; } } - - pr_warn("Unable to use DARN for get_random_seed()\n"); - return -EIO; } @@ -163,32 +160,55 @@ static __init int rng_create(struct device_node *dn) rng_init_per_cpu(rng, dn); - pr_info_once("Registering arch random hook.\n"); - ppc_md.get_random_seed = powernv_get_random_long; return 0; } -static __init int rng_init(void) +static int __init pnv_get_random_long_early(unsigned long *v) { struct device_node *dn; - int rc; + + if (!slab_is_available()) + return 0; + + if (cmpxchg(&ppc_md.get_random_seed, pnv_get_random_long_early, + NULL) != pnv_get_random_long_early) + return 0; for_each_compatible_node(dn, NULL, "ibm,power-rng") { - rc = rng_create(dn); - if (rc) { - pr_err("Failed creating rng for %pOF (%d).\n", - dn, rc); + if (rng_create(dn)) continue; - } - /* Create devices for hwrng driver */ of_platform_device_create(dn, NULL, NULL); } - initialise_darn(); + if (!ppc_md.get_random_seed) + return 0; + return ppc_md.get_random_seed(v); +} + +void __init pnv_rng_init(void) +{ + struct device_node *dn; + /* Prefer darn over the rest. */ + if (!initialise_darn()) + return; + + dn = of_find_compatible_node(NULL, NULL, "ibm,power-rng"); + if (dn) + ppc_md.get_random_seed = pnv_get_random_long_early; + + of_node_put(dn); +} + +static int __init pnv_rng_late_init(void) +{ + unsigned long v; + /* In case it wasn't called during init for some other reason. 
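pnv_get_random_long_early() above uses a tidy one-shot idiom: the bootstrap hook atomically swaps itself out of ppc_md.get_random_seed with cmpxchg, so the device-tree walk and rng_create() run exactly once even if several early callers race, and the losers simply report no entropy yet. The idiom in portable C11 (names and the stub probe are hypothetical):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

typedef bool (*seed_fn)(unsigned long *v);

static bool early_seed(unsigned long *v);
static _Atomic(seed_fn) get_seed = early_seed;

static bool install_real_hook(void) { return false; }  /* stub probe */

static bool early_seed(unsigned long *v)
{
        seed_fn expected = early_seed;

        /* Only the CAS winner probes; everyone else backs off. */
        if (!atomic_compare_exchange_strong(&get_seed, &expected, NULL))
                return false;

        (void)v;
        return install_real_hook();
}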
*/ + if (ppc_md.get_random_seed == pnv_get_random_long_early) + pnv_get_random_long_early(&v); return 0; } -machine_subsys_initcall(powernv, rng_init); +machine_subsys_initcall(powernv, pnv_rng_late_init); diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index 824c3ad7a0fa..dac545aa0308 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c @@ -203,6 +203,8 @@ static void __init pnv_setup_arch(void) pnv_check_guarded_cores(); /* XXX PMCS */ + + pnv_rng_init(); } static void __init pnv_init(void) diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c index 181b855b3050..82cae08976bc 100644 --- a/arch/powerpc/platforms/pseries/papr_scm.c +++ b/arch/powerpc/platforms/pseries/papr_scm.c @@ -465,6 +465,9 @@ static int papr_scm_pmu_check_events(struct papr_scm_priv *p, struct nvdimm_pmu u32 available_events; int index, rc = 0; + if (!p->stat_buffer_len) + return -ENOENT; + available_events = (p->stat_buffer_len - sizeof(struct papr_scm_perf_stats)) / sizeof(struct papr_scm_perf_stat); if (available_events == 0) diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h index f5c916c839c9..1d75b7742ef0 100644 --- a/arch/powerpc/platforms/pseries/pseries.h +++ b/arch/powerpc/platforms/pseries/pseries.h @@ -122,4 +122,6 @@ void pseries_lpar_read_hblkrm_characteristics(void); static inline void pseries_lpar_read_hblkrm_characteristics(void) { } #endif +void pseries_rng_init(void); + #endif /* _PSERIES_PSERIES_H */ diff --git a/arch/powerpc/platforms/pseries/rng.c b/arch/powerpc/platforms/pseries/rng.c index 6268545947b8..6ddfdeaace9e 100644 --- a/arch/powerpc/platforms/pseries/rng.c +++ b/arch/powerpc/platforms/pseries/rng.c @@ -10,6 +10,7 @@ #include <asm/archrandom.h> #include <asm/machdep.h> #include <asm/plpar_wrappers.h> +#include "pseries.h" static int pseries_get_random_long(unsigned long *v) @@ -24,19 +25,13 @@ static int pseries_get_random_long(unsigned long *v) return 0; } -static __init int rng_init(void) +void __init pseries_rng_init(void) { struct device_node *dn; dn = of_find_compatible_node(NULL, NULL, "ibm,random"); if (!dn) - return -ENODEV; - - pr_info("Registering arch random hook.\n"); - + return; ppc_md.get_random_seed = pseries_get_random_long; - of_node_put(dn); - return 0; } -machine_subsys_initcall(pseries, rng_init); diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index afb074269b42..ee4f1db49515 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -839,6 +839,7 @@ static void __init pSeries_setup_arch(void) } ppc_md.pcibios_root_bridge_prepare = pseries_root_bridge_prepare; + pseries_rng_init(); } static void pseries_panic(char *str) diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index c22f58155948..32ffef9f6e5b 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -364,8 +364,13 @@ config RISCV_ISA_SVPBMT select RISCV_ALTERNATIVE default y help - Adds support to dynamically detect the presence of the SVPBMT extension - (Supervisor-mode: page-based memory types) and enable its usage. + Adds support to dynamically detect the presence of the SVPBMT + ISA-extension (Supervisor-mode: page-based memory types) and + enable its usage. + + The memory type for a page contains a combination of attributes + that indicate the cacheability, idempotency, and ordering + properties for access to that page. 
The SVPBMT extension is only available on 64Bit cpus. diff --git a/arch/riscv/Kconfig.erratas b/arch/riscv/Kconfig.erratas index ebfcd5cc6eaf..457ac72c9b36 100644 --- a/arch/riscv/Kconfig.erratas +++ b/arch/riscv/Kconfig.erratas @@ -35,6 +35,7 @@ config ERRATA_SIFIVE_CIP_1200 config ERRATA_THEAD bool "T-HEAD errata" + depends on !XIP_KERNEL select RISCV_ALTERNATIVE help All T-HEAD errata Kconfig depend on this Kconfig. Disabling diff --git a/arch/riscv/boot/dts/microchip/mpfs.dtsi b/arch/riscv/boot/dts/microchip/mpfs.dtsi index 8c3259134194..3095d08453a1 100644 --- a/arch/riscv/boot/dts/microchip/mpfs.dtsi +++ b/arch/riscv/boot/dts/microchip/mpfs.dtsi @@ -192,6 +192,15 @@ riscv,ndev = <186>; }; + pdma: dma-controller@3000000 { + compatible = "sifive,fu540-c000-pdma", "sifive,pdma0"; + reg = <0x0 0x3000000 0x0 0x8000>; + interrupt-parent = <&plic>; + interrupts = <5 6>, <7 8>, <9 10>, <11 12>; + dma-channels = <4>; + #dma-cells = <1>; + }; + clkcfg: clkcfg@20002000 { compatible = "microchip,mpfs-clkcfg"; reg = <0x0 0x20002000 0x0 0x1000>, <0x0 0x3E001000 0x0 0x1000>; diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h index 9e2888dbb5b1..416ead0f9a65 100644 --- a/arch/riscv/include/asm/errata_list.h +++ b/arch/riscv/include/asm/errata_list.h @@ -75,20 +75,20 @@ asm volatile(ALTERNATIVE( \ "nop\n\t" \ "nop\n\t" \ "nop", \ - "li t3, %2\n\t" \ - "slli t3, t3, %4\n\t" \ + "li t3, %1\n\t" \ + "slli t3, t3, %3\n\t" \ "and t3, %0, t3\n\t" \ "bne t3, zero, 2f\n\t" \ - "li t3, %3\n\t" \ - "slli t3, t3, %4\n\t" \ + "li t3, %2\n\t" \ + "slli t3, t3, %3\n\t" \ "or %0, %0, t3\n\t" \ "2:", THEAD_VENDOR_ID, \ ERRATA_THEAD_PBMT, CONFIG_ERRATA_THEAD_PBMT) \ : "+r"(_val) \ - : "0"(_val), \ - "I"(_PAGE_MTMASK_THEAD >> ALT_THEAD_PBMT_SHIFT), \ + : "I"(_PAGE_MTMASK_THEAD >> ALT_THEAD_PBMT_SHIFT), \ "I"(_PAGE_PMA_THEAD >> ALT_THEAD_PBMT_SHIFT), \ - "I"(ALT_THEAD_PBMT_SHIFT)) + "I"(ALT_THEAD_PBMT_SHIFT) \ + : "t3") #else #define ALT_THEAD_PMA(_val) #endif diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c index a6f62a6d1edd..12b05ce164bb 100644 --- a/arch/riscv/kernel/cpufeature.c +++ b/arch/riscv/kernel/cpufeature.c @@ -293,7 +293,6 @@ void __init_or_module riscv_cpufeature_patch_func(struct alt_entry *begin, unsigned int stage) { u32 cpu_req_feature = cpufeature_probe(stage); - u32 cpu_apply_feature = 0; struct alt_entry *alt; u32 tmp; @@ -307,10 +306,8 @@ void __init_or_module riscv_cpufeature_patch_func(struct alt_entry *begin, } tmp = (1U << alt->errata_id); - if (cpu_req_feature & tmp) { + if (cpu_req_feature & tmp) patch_text_nosync(alt->old_ptr, alt->alt_ptr, alt->alt_len); - cpu_apply_feature |= tmp; - } } } #endif diff --git a/arch/riscv/kvm/vmid.c b/arch/riscv/kvm/vmid.c index 9f764df125db..6cd93995fb65 100644 --- a/arch/riscv/kvm/vmid.c +++ b/arch/riscv/kvm/vmid.c @@ -97,7 +97,7 @@ void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu) * We ran out of VMIDs so we increment vmid_version and * start assigning VMIDs from 1. * - * This also means existing VMIDs assignement to all Guest + * This also means existing VMIDs assignment to all Guest * instances is invalid and we have force VMID re-assignement * for all Guest instances. 
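The errata_list.h fix above corrects two inline-asm problems at once: the alternative body scribbles on t3 without declaring it, and the output operand was paired with a redundant "0"-tied input where a plain "+r" read-write operand was meant. The general shape, reduced to a compilable example (this only builds for a RISC-V target):

static inline unsigned long or_with_t3(unsigned long val)
{
        asm volatile("li      t3, 1\n\t"
                     "or      %0, %0, t3"
                     : "+r"(val)        /* read-write, no "0" tie needed */
                     :
                     : "t3");           /* scratch register is clobbered */
        return val;
}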
The Guest instances that were not running will automatically pick-up new VMIDs because will diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index b1a88f6cc349..91c0b80a8bf0 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -125,6 +125,7 @@ config S390 select CLONE_BACKWARDS2 select DMA_OPS if PCI select DYNAMIC_FTRACE if FUNCTION_TRACER + select GCC12_NO_ARRAY_BOUNDS select GENERIC_ALLOCATOR select GENERIC_CPU_AUTOPROBE select GENERIC_CPU_VULNERABILITIES @@ -768,7 +769,6 @@ menu "Virtualization" config PROTECTED_VIRTUALIZATION_GUEST def_bool n prompt "Protected virtualization guest support" - select ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS help Select this option, if you want to be able to run this kernel as a protected virtualization KVM guest. diff --git a/arch/s390/Makefile b/arch/s390/Makefile index d73611b35164..495c68a4df1e 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile @@ -32,15 +32,7 @@ KBUILD_CFLAGS_DECOMPRESSOR += -fno-stack-protector KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-disable-warning, address-of-packed-member) KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g) KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,)) - -ifdef CONFIG_CC_IS_GCC - ifeq ($(call cc-ifversion, -ge, 1200, y), y) - ifeq ($(call cc-ifversion, -lt, 1300, y), y) - KBUILD_CFLAGS += $(call cc-disable-warning, array-bounds) - KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-disable-warning, array-bounds) - endif - endif -endif +KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_CC_NO_ARRAY_BOUNDS),-Wno-array-bounds) UTS_MACHINE := s390x STACK_SIZE := $(if $(CONFIG_KASAN),65536,16384) diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c index a2c1c55daec0..28124d0fa1d5 100644 --- a/arch/s390/kernel/crash_dump.c +++ b/arch/s390/kernel/crash_dump.c @@ -219,6 +219,11 @@ ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, size_t csize, unsigned long src; int rc; + if (!(iter_is_iovec(iter) || iov_iter_is_kvec(iter))) + return -EINVAL; + /* Multi-segment iterators are not supported */ + if (iter->nr_segs > 1) + return -EINVAL; if (!csize) return 0; src = pfn_to_phys(pfn) + offset; @@ -228,7 +233,10 @@ ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, size_t csize, rc = copy_oldmem_user(iter->iov->iov_base, src, csize); else rc = copy_oldmem_kernel(iter->kvec->iov_base, src, csize); - return rc; + if (rc < 0) + return rc; + iov_iter_advance(iter, csize); + return csize; } /* diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c index 483ab5e10164..f7dd3c849e68 100644 --- a/arch/s390/kernel/perf_cpum_cf.c +++ b/arch/s390/kernel/perf_cpum_cf.c @@ -516,6 +516,26 @@ static int __hw_perf_event_init(struct perf_event *event, unsigned int type) return err; } +/* Events CPU_CYCLES and INSTRUCTIONS can be submitted with two different + * attribute::type values: + * - PERF_TYPE_HARDWARE: + * - pmu->type: + * Handle both types of invocation identically. They address the same hardware. + * The result is different when event modifiers exclude_kernel and/or + * exclude_user are also set.
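Also in the stretch above, the s390 crash_dump.c read path had a contract bug: copy_oldmem_page() returned the helper's 0 and never consumed the iterator, while its callers expect the iov_iter to advance and the copied byte count to come back. A toy model of the fixed contract (the struct is a stand-in for struct iov_iter):

#include <string.h>
#include <sys/types.h>

struct it {                     /* stand-in for struct iov_iter */
        char   *buf;
        size_t  pos;
};

static ssize_t copy_page_sketch(struct it *it, const void *src,
                                size_t csize)
{
        if (!csize)
                return 0;
        memcpy(it->buf + it->pos, src, csize);
        it->pos += csize;       /* advance, as iov_iter_advance() does */
        return (ssize_t)csize;  /* bytes copied, not the helper's rc */
}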
+ */ +static int cpumf_pmu_event_type(struct perf_event *event) +{ + u64 ev = event->attr.config; + + if (cpumf_generic_events_basic[PERF_COUNT_HW_CPU_CYCLES] == ev || + cpumf_generic_events_basic[PERF_COUNT_HW_INSTRUCTIONS] == ev || + cpumf_generic_events_user[PERF_COUNT_HW_CPU_CYCLES] == ev || + cpumf_generic_events_user[PERF_COUNT_HW_INSTRUCTIONS] == ev) + return PERF_TYPE_HARDWARE; + return PERF_TYPE_RAW; +} + static int cpumf_pmu_event_init(struct perf_event *event) { unsigned int type = event->attr.type; @@ -525,7 +545,7 @@ static int cpumf_pmu_event_init(struct perf_event *event) err = __hw_perf_event_init(event, type); else if (event->pmu->type == type) /* Registered as unknown PMU */ - err = __hw_perf_event_init(event, PERF_TYPE_RAW); + err = __hw_perf_event_init(event, cpumf_pmu_event_type(event)); else return -ENOENT; diff --git a/arch/s390/kernel/perf_pai_crypto.c b/arch/s390/kernel/perf_pai_crypto.c index 8c1545946d85..b38b4ae01589 100644 --- a/arch/s390/kernel/perf_pai_crypto.c +++ b/arch/s390/kernel/perf_pai_crypto.c @@ -193,8 +193,9 @@ static int paicrypt_event_init(struct perf_event *event) /* PAI crypto PMU registered as PERF_TYPE_RAW, check event type */ if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type) return -ENOENT; - /* PAI crypto event must be valid */ - if (a->config > PAI_CRYPTO_BASE + paicrypt_cnt) + /* PAI crypto event must be in valid range */ + if (a->config < PAI_CRYPTO_BASE || + a->config > PAI_CRYPTO_BASE + paicrypt_cnt) return -EINVAL; /* Allow only CPU wide operation, no process context for now. */ if (event->hw.target || event->cpu == -1) @@ -208,6 +209,12 @@ static int paicrypt_event_init(struct perf_event *event) if (rc) return rc; + /* Event initialization sets last_tag to 0. When later on the events + * are deleted and re-added, do not reset the event count value to zero. + * Events are added, deleted and re-added when 2 or more events + * are active at the same time. 
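The perf_pai_crypto.c validation above is a two-sided-range lesson: only the upper bound was checked, so a config below PAI_CRYPTO_BASE slipped through and the later config - PAI_CRYPTO_BASE subtraction could underflow into a bogus counter index. The complete check:

#include <stdbool.h>
#include <stdint.h>

/* Valid PAI configs occupy [base, base + cnt]; check both ends. */
static bool pai_config_valid(uint64_t config, uint64_t base, uint64_t cnt)
{
        return config >= base && config <= base + cnt;
}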
+ */ + event->hw.last_tag = 0; cpump->event = event; event->destroy = paicrypt_event_destroy; @@ -242,9 +249,12 @@ static void paicrypt_start(struct perf_event *event, int flags) { u64 sum; - sum = paicrypt_getall(event); /* Get current value */ - local64_set(&event->hw.prev_count, sum); - local64_set(&event->count, 0); + if (!event->hw.last_tag) { + event->hw.last_tag = 1; + sum = paicrypt_getall(event); /* Get current value */ + local64_set(&event->count, 0); + local64_set(&event->hw.prev_count, sum); + } } static int paicrypt_add(struct perf_event *event, int flags) diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 6fb6bf64326f..6a0ac00d5a42 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -31,6 +31,7 @@ #include <linux/cma.h> #include <linux/gfp.h> #include <linux/dma-direct.h> +#include <linux/platform-feature.h> #include <asm/processor.h> #include <linux/uaccess.h> #include <asm/pgalloc.h> @@ -168,22 +169,14 @@ bool force_dma_unencrypted(struct device *dev) return is_prot_virt_guest(); } -#ifdef CONFIG_ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS - -int arch_has_restricted_virtio_memory_access(void) -{ - return is_prot_virt_guest(); -} -EXPORT_SYMBOL(arch_has_restricted_virtio_memory_access); - -#endif - /* protected virtualization */ static void pv_init(void) { if (!is_prot_virt_guest()) return; + platform_set(PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS); + /* make sure bounce buffers are shared */ swiotlb_init(true, SWIOTLB_FORCE | SWIOTLB_VERBOSE); swiotlb_update_mem_attributes(); diff --git a/arch/um/drivers/virt-pci.c b/arch/um/drivers/virt-pci.c index 5c092a9153ea..027847023184 100644 --- a/arch/um/drivers/virt-pci.c +++ b/arch/um/drivers/virt-pci.c @@ -544,6 +544,8 @@ static int um_pci_init_vqs(struct um_pci_device *dev) dev->cmd_vq = vqs[0]; dev->irq_vq = vqs[1]; + virtio_device_ready(dev->vdev); + for (i = 0; i < NUM_IRQ_MSGS; i++) { void *msg = kzalloc(MAX_IRQ_MSG_SIZE, GFP_KERNEL); @@ -587,7 +589,7 @@ static int um_pci_virtio_probe(struct virtio_device *vdev) dev->irq = irq_alloc_desc(numa_node_id()); if (dev->irq < 0) { err = dev->irq; - goto error; + goto err_reset; } um_pci_devices[free].dev = dev; vdev->priv = dev; @@ -604,6 +606,9 @@ static int um_pci_virtio_probe(struct virtio_device *vdev) um_pci_rescan(); return 0; +err_reset: + virtio_reset_device(vdev); + vdev->config->del_vqs(vdev); error: mutex_unlock(&um_pci_mtx); kfree(dev); diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 9783ebc4e021..be0b95e51df6 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1542,7 +1542,6 @@ config X86_CPA_STATISTICS config X86_MEM_ENCRYPT select ARCH_HAS_FORCE_DMA_UNENCRYPTED select DYNAMIC_PHYSICAL_MASK - select ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS def_bool n config AMD_MEM_ENCRYPT diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c index 03deb4d6920d..928dcf7a20d9 100644 --- a/arch/x86/coco/tdx/tdx.c +++ b/arch/x86/coco/tdx/tdx.c @@ -124,6 +124,51 @@ static u64 get_cc_mask(void) return BIT_ULL(gpa_width - 1); } +/* + * The TDX module spec states that #VE may be injected for a limited set of + * reasons: + * + * - Emulation of the architectural #VE injection on EPT violation; + * + * - As a result of guest TD execution of a disallowed instruction, + * a disallowed MSR access, or CPUID virtualization; + * + * - A notification to the guest TD about anomalous behavior; + * + * The last one is opt-in and is not used by the kernel. 
+ * + * The Intel Software Developer's Manual describes cases when the instruction + * length field can be used in section "Information for VM Exits Due to + * Instruction Execution". + * + * For TDX, it ultimately means GET_VEINFO provides reliable instruction length + * information if #VE occurred due to instruction execution, but not for EPT + * violations. + */ +static int ve_instr_len(struct ve_info *ve) +{ + switch (ve->exit_reason) { + case EXIT_REASON_HLT: + case EXIT_REASON_MSR_READ: + case EXIT_REASON_MSR_WRITE: + case EXIT_REASON_CPUID: + case EXIT_REASON_IO_INSTRUCTION: + /* It is safe to use ve->instr_len for #VE due to instructions */ + return ve->instr_len; + case EXIT_REASON_EPT_VIOLATION: + /* + * For EPT violations, ve->instr_len is not defined. For those, + * the kernel must decode instructions manually and should not + * be using this function. + */ + WARN_ONCE(1, "ve->instr_len is not defined for EPT violations"); + return 0; + default: + WARN_ONCE(1, "Unexpected #VE-type: %lld\n", ve->exit_reason); + return ve->instr_len; + } +} + static u64 __cpuidle __halt(const bool irq_disabled, const bool do_sti) { struct tdx_hypercall_args args = { @@ -147,7 +192,7 @@ static u64 __cpuidle __halt(const bool irq_disabled, const bool do_sti) return __tdx_hypercall(&args, do_sti ? TDX_HCALL_ISSUE_STI : 0); } -static bool handle_halt(void) +static int handle_halt(struct ve_info *ve) { /* * Since non-safe halt is mainly used in CPU offlining @@ -158,9 +203,9 @@ static bool handle_halt(void) const bool do_sti = false; if (__halt(irq_disabled, do_sti)) - return false; + return -EIO; - return true; + return ve_instr_len(ve); } void __cpuidle tdx_safe_halt(void) @@ -180,7 +225,7 @@ void __cpuidle tdx_safe_halt(void) WARN_ONCE(1, "HLT instruction emulation failed\n"); } -static bool read_msr(struct pt_regs *regs) +static int read_msr(struct pt_regs *regs, struct ve_info *ve) { struct tdx_hypercall_args args = { .r10 = TDX_HYPERCALL_STANDARD, @@ -194,14 +239,14 @@ static bool read_msr(struct pt_regs *regs) * (GHCI), section titled "TDG.VP.VMCALL<Instruction.RDMSR>". */ if (__tdx_hypercall(&args, TDX_HCALL_HAS_OUTPUT)) - return false; + return -EIO; regs->ax = lower_32_bits(args.r11); regs->dx = upper_32_bits(args.r11); - return true; + return ve_instr_len(ve); } -static bool write_msr(struct pt_regs *regs) +static int write_msr(struct pt_regs *regs, struct ve_info *ve) { struct tdx_hypercall_args args = { .r10 = TDX_HYPERCALL_STANDARD, @@ -215,10 +260,13 @@ static bool write_msr(struct pt_regs *regs) * can be found in TDX Guest-Host-Communication Interface * (GHCI) section titled "TDG.VP.VMCALL<Instruction.WRMSR>". */ - return !__tdx_hypercall(&args, 0); + if (__tdx_hypercall(&args, 0)) + return -EIO; + + return ve_instr_len(ve); } -static bool handle_cpuid(struct pt_regs *regs) +static int handle_cpuid(struct pt_regs *regs, struct ve_info *ve) { struct tdx_hypercall_args args = { .r10 = TDX_HYPERCALL_STANDARD, @@ -236,7 +284,7 @@ static bool handle_cpuid(struct pt_regs *regs) */ if (regs->ax < 0x40000000 || regs->ax > 0x4FFFFFFF) { regs->ax = regs->bx = regs->cx = regs->dx = 0; - return true; + return ve_instr_len(ve); } /* @@ -245,7 +293,7 @@ static bool handle_cpuid(struct pt_regs *regs) * (GHCI), section titled "VP.VMCALL<Instruction.CPUID>".
*/ if (__tdx_hypercall(&args, TDX_HCALL_HAS_OUTPUT)) - return false; + return -EIO; /* * As per TDX GHCI CPUID ABI, r12-r15 registers contain contents of @@ -257,7 +305,7 @@ static bool handle_cpuid(struct pt_regs *regs) regs->cx = args.r14; regs->dx = args.r15; - return true; + return ve_instr_len(ve); } static bool mmio_read(int size, unsigned long addr, unsigned long *val) @@ -283,10 +331,10 @@ static bool mmio_write(int size, unsigned long addr, unsigned long val) EPT_WRITE, addr, val); } -static bool handle_mmio(struct pt_regs *regs, struct ve_info *ve) +static int handle_mmio(struct pt_regs *regs, struct ve_info *ve) { + unsigned long *reg, val, vaddr; char buffer[MAX_INSN_SIZE]; - unsigned long *reg, val; struct insn insn = {}; enum mmio_type mmio; int size, extend_size; @@ -294,34 +342,49 @@ /* Only in-kernel MMIO is supported */ if (WARN_ON_ONCE(user_mode(regs))) - return false; + return -EFAULT; if (copy_from_kernel_nofault(buffer, (void *)regs->ip, MAX_INSN_SIZE)) - return false; + return -EFAULT; if (insn_decode(&insn, buffer, MAX_INSN_SIZE, INSN_MODE_64)) - return false; + return -EINVAL; mmio = insn_decode_mmio(&insn, &size); if (WARN_ON_ONCE(mmio == MMIO_DECODE_FAILED)) - return false; + return -EINVAL; if (mmio != MMIO_WRITE_IMM && mmio != MMIO_MOVS) { reg = insn_get_modrm_reg_ptr(&insn, regs); if (!reg) - return false; + return -EINVAL; } - ve->instr_len = insn.length; + /* + * Reject EPT violation #VEs that split pages. + * + * MMIO accesses are supposed to be naturally aligned and therefore + * never cross page boundaries. Seeing split page accesses indicates + * a bug or a load_unaligned_zeropad() that stepped into an MMIO page. + * + * load_unaligned_zeropad() will recover using exception fixups. + */ + vaddr = (unsigned long)insn_get_addr_ref(&insn, regs); + if (vaddr / PAGE_SIZE != (vaddr + size - 1) / PAGE_SIZE) + return -EFAULT; /* Handle writes first */ switch (mmio) { case MMIO_WRITE: memcpy(&val, reg, size); - return mmio_write(size, ve->gpa, val); + if (!mmio_write(size, ve->gpa, val)) + return -EIO; + return insn.length; case MMIO_WRITE_IMM: val = insn.immediate.value; - return mmio_write(size, ve->gpa, val); + if (!mmio_write(size, ve->gpa, val)) + return -EIO; + return insn.length; case MMIO_READ: case MMIO_READ_ZERO_EXTEND: case MMIO_READ_SIGN_EXTEND: @@ -334,15 +397,15 @@ static bool handle_mmio(struct pt_regs *regs, struct ve_info *ve) * decoded or handled properly. It was likely not using io.h * helpers or accessed MMIO accidentally. */ - return false; + return -EINVAL; default: WARN_ONCE(1, "Unknown insn_decode_mmio() decode value?"); - return false; + return -EINVAL; } /* Handle reads */ if (!mmio_read(size, ve->gpa, &val)) - return false; + return -EIO; switch (mmio) { case MMIO_READ: @@ -364,13 +427,13 @@ static bool handle_mmio(struct pt_regs *regs, struct ve_info *ve) default: /* All other cases have to be covered with the first switch() */ WARN_ON_ONCE(1); - return false; + return -EINVAL; } if (extend_size) memset(reg, extend_val, extend_size); memcpy(reg, &val, size); - return true; + return insn.length; } static bool handle_in(struct pt_regs *regs, int size, int port) @@ -421,13 +484,14 @@ static bool handle_out(struct pt_regs *regs, int size, int port) * * Return the instruction length on success, or -errno on failure.
*/ -static bool handle_io(struct pt_regs *regs, u32 exit_qual) +static int handle_io(struct pt_regs *regs, struct ve_info *ve) { + u32 exit_qual = ve->exit_qual; int size, port; - bool in; + bool in, ret; if (VE_IS_IO_STRING(exit_qual)) - return false; + return -EIO; in = VE_IS_IO_IN(exit_qual); size = VE_GET_IO_SIZE(exit_qual); @@ -435,9 +499,13 @@ static bool handle_io(struct pt_regs *regs, u32 exit_qual) if (in) - return handle_in(regs, size, port); + ret = handle_in(regs, size, port); else - return handle_out(regs, size, port); + ret = handle_out(regs, size, port); + if (!ret) + return -EIO; + + return ve_instr_len(ve); } /* @@ -447,13 +515,19 @@ static bool handle_io(struct pt_regs *regs, u32 exit_qual) __init bool tdx_early_handle_ve(struct pt_regs *regs) { struct ve_info ve; + int insn_len; tdx_get_ve_info(&ve); if (ve.exit_reason != EXIT_REASON_IO_INSTRUCTION) return false; - return handle_io(regs, ve.exit_qual); + insn_len = handle_io(regs, &ve); + if (insn_len < 0) + return false; + + regs->ip += insn_len; + return true; } void tdx_get_ve_info(struct ve_info *ve) @@ -486,54 +560,65 @@ void tdx_get_ve_info(struct ve_info *ve) ve->instr_info = upper_32_bits(out.r10); } -/* Handle the user initiated #VE */ -static bool virt_exception_user(struct pt_regs *regs, struct ve_info *ve) +/* + * Handle the user initiated #VE. + * + * On success, returns the number of bytes RIP should be incremented (>=0) + * or -errno on error. + */ +static int virt_exception_user(struct pt_regs *regs, struct ve_info *ve) { switch (ve->exit_reason) { case EXIT_REASON_CPUID: - return handle_cpuid(regs); + return handle_cpuid(regs, ve); default: pr_warn("Unexpected #VE: %lld\n", ve->exit_reason); - return false; + return -EIO; } } -/* Handle the kernel #VE */ -static bool virt_exception_kernel(struct pt_regs *regs, struct ve_info *ve) +/* + * Handle the kernel #VE. + * + * On success, returns the number of bytes RIP should be incremented (>=0) + * or -errno on error. 
+ */ +static int virt_exception_kernel(struct pt_regs *regs, struct ve_info *ve) { switch (ve->exit_reason) { case EXIT_REASON_HLT: - return handle_halt(); + return handle_halt(ve); case EXIT_REASON_MSR_READ: - return read_msr(regs); + return read_msr(regs, ve); case EXIT_REASON_MSR_WRITE: - return write_msr(regs); + return write_msr(regs, ve); case EXIT_REASON_CPUID: - return handle_cpuid(regs); + return handle_cpuid(regs, ve); case EXIT_REASON_EPT_VIOLATION: return handle_mmio(regs, ve); case EXIT_REASON_IO_INSTRUCTION: - return handle_io(regs, ve->exit_qual); + return handle_io(regs, ve); default: pr_warn("Unexpected #VE: %lld\n", ve->exit_reason); - return false; + return -EIO; } } bool tdx_handle_virt_exception(struct pt_regs *regs, struct ve_info *ve) { - bool ret; + int insn_len; if (user_mode(regs)) - ret = virt_exception_user(regs, ve); + insn_len = virt_exception_user(regs, ve); else - ret = virt_exception_kernel(regs, ve); + insn_len = virt_exception_kernel(regs, ve); + if (insn_len < 0) + return false; /* After successful #VE handling, move the IP */ - if (ret) - regs->ip += ve->instr_len; + regs->ip += insn_len; - return ret; + return true; } static bool tdx_tlb_flush_required(bool private) diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c index 8b392b6b7b93..3de6d8b53367 100644 --- a/arch/x86/hyperv/hv_init.c +++ b/arch/x86/hyperv/hv_init.c @@ -13,6 +13,7 @@ #include <linux/io.h> #include <asm/apic.h> #include <asm/desc.h> +#include <asm/sev.h> #include <asm/hypervisor.h> #include <asm/hyperv-tlfs.h> #include <asm/mshyperv.h> @@ -405,6 +406,11 @@ void __init hyperv_init(void) } if (hv_isolation_type_snp()) { + /* Negotiate GHCB Version. */ + if (!hv_ghcb_negotiate_protocol()) + hv_ghcb_terminate(SEV_TERM_SET_GEN, + GHCB_SEV_ES_PROT_UNSUPPORTED); + hv_ghcb_pg = alloc_percpu(union hv_ghcb *); if (!hv_ghcb_pg) goto free_vp_assist_page; diff --git a/arch/x86/hyperv/ivm.c b/arch/x86/hyperv/ivm.c index 2b994117581e..1dbcbd9da74d 100644 --- a/arch/x86/hyperv/ivm.c +++ b/arch/x86/hyperv/ivm.c @@ -53,6 +53,8 @@ union hv_ghcb { } hypercall; } __packed __aligned(HV_HYP_PAGE_SIZE); +static u16 hv_ghcb_version __ro_after_init; + u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size) { union hv_ghcb *hv_ghcb; @@ -96,12 +98,85 @@ u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size) return status; } +static inline u64 rd_ghcb_msr(void) +{ + return __rdmsr(MSR_AMD64_SEV_ES_GHCB); +} + +static inline void wr_ghcb_msr(u64 val) +{ + native_wrmsrl(MSR_AMD64_SEV_ES_GHCB, val); +} + +static enum es_result hv_ghcb_hv_call(struct ghcb *ghcb, u64 exit_code, + u64 exit_info_1, u64 exit_info_2) +{ + /* Fill in protocol and format specifiers */ + ghcb->protocol_version = hv_ghcb_version; + ghcb->ghcb_usage = GHCB_DEFAULT_USAGE; + + ghcb_set_sw_exit_code(ghcb, exit_code); + ghcb_set_sw_exit_info_1(ghcb, exit_info_1); + ghcb_set_sw_exit_info_2(ghcb, exit_info_2); + + VMGEXIT(); + + if (ghcb->save.sw_exit_info_1 & GENMASK_ULL(31, 0)) + return ES_VMM_ERROR; + else + return ES_OK; +} + +void hv_ghcb_terminate(unsigned int set, unsigned int reason) +{ + u64 val = GHCB_MSR_TERM_REQ; + + /* Tell the hypervisor what went wrong. */ + val |= GHCB_SEV_TERM_REASON(set, reason); + + /* Request Guest Termination from Hypervisor */ + wr_ghcb_msr(val); + VMGEXIT(); + + while (true) + asm volatile("hlt\n" : : : "memory"); +} + +bool hv_ghcb_negotiate_protocol(void) +{ + u64 ghcb_gpa; + u64 val; + + /* Save ghcb page gpa.
*/ + ghcb_gpa = rd_ghcb_msr(); + + /* Do the GHCB protocol version negotiation */ + wr_ghcb_msr(GHCB_MSR_SEV_INFO_REQ); + VMGEXIT(); + val = rd_ghcb_msr(); + + if (GHCB_MSR_INFO(val) != GHCB_MSR_SEV_INFO_RESP) + return false; + + if (GHCB_MSR_PROTO_MAX(val) < GHCB_PROTOCOL_MIN || + GHCB_MSR_PROTO_MIN(val) > GHCB_PROTOCOL_MAX) + return false; + + hv_ghcb_version = min_t(size_t, GHCB_MSR_PROTO_MAX(val), + GHCB_PROTOCOL_MAX); + + /* Write ghcb page back after negotiating protocol. */ + wr_ghcb_msr(ghcb_gpa); + VMGEXIT(); + + return true; +} + void hv_ghcb_msr_write(u64 msr, u64 value) { union hv_ghcb *hv_ghcb; void **ghcb_base; unsigned long flags; - struct es_em_ctxt ctxt; if (!hv_ghcb_pg) return; @@ -120,8 +195,7 @@ void hv_ghcb_msr_write(u64 msr, u64 value) ghcb_set_rax(&hv_ghcb->ghcb, lower_32_bits(value)); ghcb_set_rdx(&hv_ghcb->ghcb, upper_32_bits(value)); - if (sev_es_ghcb_hv_call(&hv_ghcb->ghcb, false, &ctxt, - SVM_EXIT_MSR, 1, 0)) + if (hv_ghcb_hv_call(&hv_ghcb->ghcb, SVM_EXIT_MSR, 1, 0)) pr_warn("Fail to write msr via ghcb %llx.\n", msr); local_irq_restore(flags); @@ -133,7 +207,6 @@ void hv_ghcb_msr_read(u64 msr, u64 *value) union hv_ghcb *hv_ghcb; void **ghcb_base; unsigned long flags; - struct es_em_ctxt ctxt; /* Check size of union hv_ghcb here. */ BUILD_BUG_ON(sizeof(union hv_ghcb) != HV_HYP_PAGE_SIZE); @@ -152,8 +225,7 @@ void hv_ghcb_msr_read(u64 msr, u64 *value) } ghcb_set_rcx(&hv_ghcb->ghcb, msr); - if (sev_es_ghcb_hv_call(&hv_ghcb->ghcb, false, &ctxt, - SVM_EXIT_MSR, 0, 0)) + if (hv_ghcb_hv_call(&hv_ghcb->ghcb, SVM_EXIT_MSR, 0, 0)) pr_warn("Fail to read msr via ghcb %llx.\n", msr); else *value = (u64)lower_32_bits(hv_ghcb->ghcb.save.rax) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 393f2bbb5e3a..03acc823838a 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -446,5 +446,6 @@ #define X86_BUG_TAA X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */ #define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */ #define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */ +#define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */ #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/arch/x86/include/asm/e820/api.h b/arch/x86/include/asm/e820/api.h index 5a39ed59b6db..e8f58ddd06d9 100644 --- a/arch/x86/include/asm/e820/api.h +++ b/arch/x86/include/asm/e820/api.h @@ -4,9 +4,6 @@ #include <asm/e820/types.h> -struct device; -struct resource; - extern struct e820_table *e820_table; extern struct e820_table *e820_table_kexec; extern struct e820_table *e820_table_firmware; @@ -46,8 +43,6 @@ extern void e820__register_nosave_regions(unsigned long limit_pfn); extern int e820__get_entry_type(u64 start, u64 end); -extern void remove_e820_regions(struct device *dev, struct resource *avail); - /* * Returns true iff the specified range [start,end) is completely contained inside * the ISA region. 
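The version negotiation in hv_ghcb_negotiate_protocol() above is plain bit-field arithmetic over the GHCB MSR. A minimal, self-contained C sketch of that arithmetic follows; it is not taken from the patch. The GHCB_* constants are local stand-ins assuming the GHCB spec layout (info code in bits 11:0, minimum supported protocol version in bits 47:32, maximum in bits 63:48), and the guest-side version bounds are illustrative.

#include <stdint.h>

#define GHCB_MSR_INFO(v)       ((v) & 0xfffULL)          /* response/info code */
#define GHCB_MSR_SEV_INFO_RESP 0x001ULL                  /* "SEV information" reply */
#define GHCB_MSR_PROTO_MIN(v)  (((v) >> 32) & 0xffffULL) /* lowest version the VMM speaks */
#define GHCB_MSR_PROTO_MAX(v)  (((v) >> 48) & 0xffffULL) /* highest version the VMM speaks */
#define GHCB_PROTOCOL_MIN      1ULL                      /* assumed guest minimum */
#define GHCB_PROTOCOL_MAX      2ULL                      /* assumed guest maximum */

/* Returns the negotiated version, or 0 if the two ranges do not overlap. */
static uint16_t ghcb_negotiate(uint64_t resp)
{
	if (GHCB_MSR_INFO(resp) != GHCB_MSR_SEV_INFO_RESP)
		return 0;
	if (GHCB_MSR_PROTO_MAX(resp) < GHCB_PROTOCOL_MIN ||
	    GHCB_MSR_PROTO_MIN(resp) > GHCB_PROTOCOL_MAX)
		return 0;
	/* Pick the highest version both sides support, as the min_t() above does. */
	return GHCB_MSR_PROTO_MAX(resp) < GHCB_PROTOCOL_MAX ?
	       GHCB_MSR_PROTO_MAX(resp) : GHCB_PROTOCOL_MAX;
}

For example, a response of (2ULL << 48) | (1ULL << 32) | 0x001 negotiates version 2.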
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 71943dce691e..9636742a80f2 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -323,7 +323,7 @@ static inline u32 efi64_convert_status(efi_status_t status) #define __efi64_argmap_get_memory_space_descriptor(phys, desc) \ (__efi64_split(phys), (desc)) -#define __efi64_argmap_set_memory_space_descriptor(phys, size, flags) \ +#define __efi64_argmap_set_memory_space_attributes(phys, size, flags) \ (__efi64_split(phys), __efi64_split(size), __efi64_split(flags)) /* diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 959d66b9be94..9217bd6cf0d1 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -653,6 +653,7 @@ struct kvm_vcpu_arch { u64 ia32_misc_enable_msr; u64 smbase; u64 smi_count; + bool at_instruction_boundary; bool tpr_access_reporting; bool xsaves_enabled; bool xfd_no_write_intercept; @@ -1046,14 +1047,77 @@ struct kvm_x86_msr_filter { }; enum kvm_apicv_inhibit { + + /********************************************************************/ + /* INHIBITs that are relevant to both Intel's APICv and AMD's AVIC. */ + /********************************************************************/ + + /* + * APIC acceleration is disabled by a module parameter + * and/or not supported in hardware. + */ APICV_INHIBIT_REASON_DISABLE, + + /* + * APIC acceleration is inhibited because AutoEOI feature is + * being used by a HyperV guest. + */ APICV_INHIBIT_REASON_HYPERV, + + /* + * APIC acceleration is inhibited because userspace has not yet + * enabled the kernel/split irqchip. + */ + APICV_INHIBIT_REASON_ABSENT, + + /* APIC acceleration is inhibited because KVM_GUESTDBG_BLOCKIRQ + * (out of band, debug measure of blocking all interrupts on this vCPU) + * was enabled, to avoid AVIC/APICv bypassing it. + */ + APICV_INHIBIT_REASON_BLOCKIRQ, + + /* + * For simplicity, APIC acceleration is inhibited the + * first time either the APIC ID or the APIC base is changed by the + * guest from its reset value. + */ + APICV_INHIBIT_REASON_APIC_ID_MODIFIED, + APICV_INHIBIT_REASON_APIC_BASE_MODIFIED, + + /******************************************************/ + /* INHIBITs that are relevant only to the AMD's AVIC. */ + /******************************************************/ + + /* + * AVIC is inhibited on a vCPU because it runs a nested guest. + * + * This is needed because unlike APICv, the peers of this vCPU + * cannot use the doorbell mechanism to signal interrupts via AVIC when + * a vCPU runs nested. + */ APICV_INHIBIT_REASON_NESTED, + + /* + * On SVM, the wait for the IRQ window is implemented with pending vIRQ, + * which cannot be injected when the AVIC is enabled, thus AVIC + * is inhibited while KVM waits for IRQ window. + */ APICV_INHIBIT_REASON_IRQWIN, + + /* + * PIT (i8254) 're-inject' mode relies on EOI intercept, + * which AVIC doesn't support for edge-triggered interrupts. + */ APICV_INHIBIT_REASON_PIT_REINJ, + + /* + * AVIC is inhibited because the guest has x2apic in its CPUID. + */ APICV_INHIBIT_REASON_X2APIC, - APICV_INHIBIT_REASON_BLOCKIRQ, - APICV_INHIBIT_REASON_ABSENT, + + /* + * AVIC is disabled because SEV doesn't support it.
+ */ APICV_INHIBIT_REASON_SEV, }; @@ -1300,6 +1364,8 @@ struct kvm_vcpu_stat { u64 nested_run; u64 directed_yield_attempted; u64 directed_yield_successful; + u64 preemption_reported; + u64 preemption_other; u64 guest_mode; }; diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index a82f603d4312..61f0c206bff0 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -179,9 +179,13 @@ int hv_set_mem_host_visibility(unsigned long addr, int numpages, bool visible); #ifdef CONFIG_AMD_MEM_ENCRYPT void hv_ghcb_msr_write(u64 msr, u64 value); void hv_ghcb_msr_read(u64 msr, u64 *value); +bool hv_ghcb_negotiate_protocol(void); +void hv_ghcb_terminate(unsigned int set, unsigned int reason); #else static inline void hv_ghcb_msr_write(u64 msr, u64 value) {} static inline void hv_ghcb_msr_read(u64 msr, u64 *value) {} +static inline bool hv_ghcb_negotiate_protocol(void) { return false; } +static inline void hv_ghcb_terminate(unsigned int set, unsigned int reason) {} #endif extern bool hv_isolation_type_snp(void); diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 403e83b4adc8..d27e0581b777 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -116,6 +116,30 @@ * Not susceptible to * TSX Async Abort (TAA) vulnerabilities. */ +#define ARCH_CAP_SBDR_SSDP_NO BIT(13) /* + * Not susceptible to SBDR and SSDP + * variants of Processor MMIO stale data + * vulnerabilities. + */ +#define ARCH_CAP_FBSDP_NO BIT(14) /* + * Not susceptible to FBSDP variant of + * Processor MMIO stale data + * vulnerabilities. + */ +#define ARCH_CAP_PSDP_NO BIT(15) /* + * Not susceptible to PSDP variant of + * Processor MMIO stale data + * vulnerabilities. + */ +#define ARCH_CAP_FB_CLEAR BIT(17) /* + * VERW clears CPU fill buffer + * even on MDS_NO CPUs. + */ +#define ARCH_CAP_FB_CLEAR_CTRL BIT(18) /* + * MSR_IA32_MCU_OPT_CTRL[FB_CLEAR_DIS] + * bit available to control VERW + * behavior. 
+ */ #define MSR_IA32_FLUSH_CMD 0x0000010b #define L1D_FLUSH BIT(0) /* @@ -133,6 +157,7 @@ #define MSR_IA32_MCU_OPT_CTRL 0x00000123 #define RNGDS_MITG_DIS BIT(0) /* SRBDS support */ #define RTM_ALLOW BIT(1) /* TSX development mode */ +#define FB_CLEAR_DIS BIT(3) /* CPU Fill buffer clear disable */ #define MSR_IA32_SYSENTER_CS 0x00000174 #define MSR_IA32_SYSENTER_ESP 0x00000175 diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index acbaeaf83b61..da251a5645b0 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -269,6 +269,8 @@ DECLARE_STATIC_KEY_FALSE(mds_idle_clear); DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush); +DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear); + #include <asm/segment.h> /** diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h index f52a886d35cf..70533fdcbf02 100644 --- a/arch/x86/include/asm/pci_x86.h +++ b/arch/x86/include/asm/pci_x86.h @@ -69,6 +69,8 @@ void pcibios_scan_specific_bus(int busn); /* pci-irq.c */ +struct pci_dev; + struct irq_info { u8 bus, devfn; /* Bus, device and function */ struct { @@ -246,3 +248,9 @@ static inline void mmio_config_writel(void __iomem *pos, u32 val) # define x86_default_pci_init_irq NULL # define x86_default_pci_fixup_irqs NULL #endif + +#if defined(CONFIG_PCI) && defined(CONFIG_ACPI) +extern bool pci_use_e820; +#else +#define pci_use_e820 false +#endif diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index 7590ac2570b9..f8b9ee97a891 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h @@ -108,19 +108,16 @@ extern unsigned long _brk_end; void *extend_brk(size_t size, size_t align); /* - * Reserve space in the brk section. The name must be unique within the file, - * and somewhat descriptive. The size is in bytes. + * Reserve space in the .brk section, which is a block of memory from which the + * caller is allowed to allocate very early (before even memblock is available) + * by calling extend_brk(). All allocated memory will be eventually converted + * to memblock. Any leftover unallocated memory will be freed. * - * The allocation is done using inline asm (rather than using a section - * attribute on a normal variable) in order to allow the use of @nobits, so - * that it doesn't take up any space in the vmlinux file. + * The size is in bytes. */ -#define RESERVE_BRK(name, size) \ - asm(".pushsection .brk_reservation,\"aw\",@nobits\n\t" \ - ".brk." #name ":\n\t" \ - ".skip " __stringify(size) "\n\t" \ - ".size .brk." 
#name ", " __stringify(size) "\n\t" \ - ".popsection\n\t") +#define RESERVE_BRK(name, size) \ + __section(".bss..brk") __aligned(1) __used \ + static char __brk_##name[size] extern void probe_roms(void); #ifdef __i386__ @@ -133,12 +130,19 @@ asmlinkage void __init x86_64_start_reservations(char *real_mode_data); #endif /* __i386__ */ #endif /* _SETUP */ -#else -#define RESERVE_BRK(name,sz) \ - .pushsection .brk_reservation,"aw",@nobits; \ -.brk.name: \ -1: .skip sz; \ - .size .brk.name,.-1b; \ + +#else /* __ASSEMBLY */ + +.macro __RESERVE_BRK name, size + .pushsection .bss..brk, "aw" +SYM_DATA_START(__brk_\name) + .skip \size +SYM_DATA_END(__brk_\name) .popsection +.endm + +#define RESERVE_BRK(name, size) __RESERVE_BRK name, size + #endif /* __ASSEMBLY__ */ + #endif /* _ASM_X86_SETUP_H */ diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 35f222aa66bf..913e593a3b45 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -439,7 +439,7 @@ do { \ [ptr] "+m" (*_ptr), \ [old] "+a" (__old) \ : [new] ltype (__new) \ - : "memory", "cc"); \ + : "memory"); \ if (unlikely(__err)) \ goto label; \ if (unlikely(!success)) \ diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 03364dc40d8d..4c8b6ae802ac 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -36,10 +36,6 @@ KCSAN_SANITIZE := n OBJECT_FILES_NON_STANDARD_test_nx.o := y -ifdef CONFIG_FRAME_POINTER -OBJECT_FILES_NON_STANDARD_ftrace_$(BITS).o := y -endif - # If instrumentation of this dir is enabled, boot hangs during first second. # Probably could be more selective here, but note that files related to irqs, # boot, dumpstack/stacktrace, etc are either non-interesting or can lead to diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index d879a6c93609..74c62cc47a5f 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -41,8 +41,10 @@ static void __init spectre_v2_select_mitigation(void); static void __init ssb_select_mitigation(void); static void __init l1tf_select_mitigation(void); static void __init mds_select_mitigation(void); -static void __init mds_print_mitigation(void); +static void __init md_clear_update_mitigation(void); +static void __init md_clear_select_mitigation(void); static void __init taa_select_mitigation(void); +static void __init mmio_select_mitigation(void); static void __init srbds_select_mitigation(void); static void __init l1d_flush_select_mitigation(void); @@ -85,6 +87,10 @@ EXPORT_SYMBOL_GPL(mds_idle_clear); */ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush); +/* Controls CPU Fill buffer clear before KVM guest MMIO accesses */ +DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear); +EXPORT_SYMBOL_GPL(mmio_stale_data_clear); + void __init check_bugs(void) { identify_boot_cpu(); @@ -117,17 +123,10 @@ void __init check_bugs(void) spectre_v2_select_mitigation(); ssb_select_mitigation(); l1tf_select_mitigation(); - mds_select_mitigation(); - taa_select_mitigation(); + md_clear_select_mitigation(); srbds_select_mitigation(); l1d_flush_select_mitigation(); - /* - * As MDS and TAA mitigations are inter-related, print MDS - * mitigation until after TAA mitigation selection is done. 
- */ - mds_print_mitigation(); - arch_smt_update(); #ifdef CONFIG_X86_32 @@ -267,14 +266,6 @@ static void __init mds_select_mitigation(void) } } -static void __init mds_print_mitigation(void) -{ - if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) - return; - - pr_info("%s\n", mds_strings[mds_mitigation]); -} - static int __init mds_cmdline(char *str) { if (!boot_cpu_has_bug(X86_BUG_MDS)) @@ -329,7 +320,7 @@ static void __init taa_select_mitigation(void) /* TSX previously disabled by tsx=off */ if (!boot_cpu_has(X86_FEATURE_RTM)) { taa_mitigation = TAA_MITIGATION_TSX_DISABLED; - goto out; + return; } if (cpu_mitigations_off()) { @@ -343,7 +334,7 @@ static void __init taa_select_mitigation(void) */ if (taa_mitigation == TAA_MITIGATION_OFF && mds_mitigation == MDS_MITIGATION_OFF) - goto out; + return; if (boot_cpu_has(X86_FEATURE_MD_CLEAR)) taa_mitigation = TAA_MITIGATION_VERW; @@ -375,18 +366,6 @@ static void __init taa_select_mitigation(void) if (taa_nosmt || cpu_mitigations_auto_nosmt()) cpu_smt_disable(false); - - /* - * Update MDS mitigation, if necessary, as the mds_user_clear is - * now enabled for TAA mitigation. - */ - if (mds_mitigation == MDS_MITIGATION_OFF && - boot_cpu_has_bug(X86_BUG_MDS)) { - mds_mitigation = MDS_MITIGATION_FULL; - mds_select_mitigation(); - } -out: - pr_info("%s\n", taa_strings[taa_mitigation]); } static int __init tsx_async_abort_parse_cmdline(char *str) @@ -411,6 +390,151 @@ static int __init tsx_async_abort_parse_cmdline(char *str) early_param("tsx_async_abort", tsx_async_abort_parse_cmdline); #undef pr_fmt +#define pr_fmt(fmt) "MMIO Stale Data: " fmt + +enum mmio_mitigations { + MMIO_MITIGATION_OFF, + MMIO_MITIGATION_UCODE_NEEDED, + MMIO_MITIGATION_VERW, +}; + +/* Default mitigation for Processor MMIO Stale Data vulnerabilities */ +static enum mmio_mitigations mmio_mitigation __ro_after_init = MMIO_MITIGATION_VERW; +static bool mmio_nosmt __ro_after_init = false; + +static const char * const mmio_strings[] = { + [MMIO_MITIGATION_OFF] = "Vulnerable", + [MMIO_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode", + [MMIO_MITIGATION_VERW] = "Mitigation: Clear CPU buffers", +}; + +static void __init mmio_select_mitigation(void) +{ + u64 ia32_cap; + + if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) || + cpu_mitigations_off()) { + mmio_mitigation = MMIO_MITIGATION_OFF; + return; + } + + if (mmio_mitigation == MMIO_MITIGATION_OFF) + return; + + ia32_cap = x86_read_arch_cap_msr(); + + /* + * Enable CPU buffer clear mitigation for host and VMM, if also affected + * by MDS or TAA. Otherwise, enable mitigation for VMM only. + */ + if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) && + boot_cpu_has(X86_FEATURE_RTM))) + static_branch_enable(&mds_user_clear); + else + static_branch_enable(&mmio_stale_data_clear); + + /* + * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can + * be propagated to uncore buffers, clearing the Fill buffers on idle + * is required irrespective of SMT state. + */ + if (!(ia32_cap & ARCH_CAP_FBSDP_NO)) + static_branch_enable(&mds_idle_clear); + + /* + * Check if the system has the right microcode. + * + * CPU Fill buffer clear mitigation is enumerated by either an explicit + * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS + * affected systems. 
+ */ + if ((ia32_cap & ARCH_CAP_FB_CLEAR) || + (boot_cpu_has(X86_FEATURE_MD_CLEAR) && + boot_cpu_has(X86_FEATURE_FLUSH_L1D) && + !(ia32_cap & ARCH_CAP_MDS_NO))) + mmio_mitigation = MMIO_MITIGATION_VERW; + else + mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED; + + if (mmio_nosmt || cpu_mitigations_auto_nosmt()) + cpu_smt_disable(false); +} + +static int __init mmio_stale_data_parse_cmdline(char *str) +{ + if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) + return 0; + + if (!str) + return -EINVAL; + + if (!strcmp(str, "off")) { + mmio_mitigation = MMIO_MITIGATION_OFF; + } else if (!strcmp(str, "full")) { + mmio_mitigation = MMIO_MITIGATION_VERW; + } else if (!strcmp(str, "full,nosmt")) { + mmio_mitigation = MMIO_MITIGATION_VERW; + mmio_nosmt = true; + } + + return 0; +} +early_param("mmio_stale_data", mmio_stale_data_parse_cmdline); + +#undef pr_fmt +#define pr_fmt(fmt) "" fmt + +static void __init md_clear_update_mitigation(void) +{ + if (cpu_mitigations_off()) + return; + + if (!static_key_enabled(&mds_user_clear)) + goto out; + + /* + * mds_user_clear is now enabled. Update MDS, TAA and MMIO Stale Data + * mitigation, if necessary. + */ + if (mds_mitigation == MDS_MITIGATION_OFF && + boot_cpu_has_bug(X86_BUG_MDS)) { + mds_mitigation = MDS_MITIGATION_FULL; + mds_select_mitigation(); + } + if (taa_mitigation == TAA_MITIGATION_OFF && + boot_cpu_has_bug(X86_BUG_TAA)) { + taa_mitigation = TAA_MITIGATION_VERW; + taa_select_mitigation(); + } + if (mmio_mitigation == MMIO_MITIGATION_OFF && + boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) { + mmio_mitigation = MMIO_MITIGATION_VERW; + mmio_select_mitigation(); + } +out: + if (boot_cpu_has_bug(X86_BUG_MDS)) + pr_info("MDS: %s\n", mds_strings[mds_mitigation]); + if (boot_cpu_has_bug(X86_BUG_TAA)) + pr_info("TAA: %s\n", taa_strings[taa_mitigation]); + if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) + pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]); +} + +static void __init md_clear_select_mitigation(void) +{ + mds_select_mitigation(); + taa_select_mitigation(); + mmio_select_mitigation(); + + /* + * As MDS, TAA and MMIO Stale Data mitigations are inter-related, update + * and print their mitigation after MDS, TAA and MMIO Stale Data + * mitigation selection is done. + */ + md_clear_update_mitigation(); +} + +#undef pr_fmt #define pr_fmt(fmt) "SRBDS: " fmt enum srbds_mitigations { @@ -478,11 +602,13 @@ static void __init srbds_select_mitigation(void) return; /* - * Check to see if this is one of the MDS_NO systems supporting - * TSX that are only exposed to SRBDS when TSX is enabled. + * Check to see if this is one of the MDS_NO systems supporting TSX that + * are only exposed to SRBDS when TSX is enabled or when CPU is affected + * by Processor MMIO Stale Data vulnerability. */ ia32_cap = x86_read_arch_cap_msr(); - if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM)) + if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) && + !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) srbds_mitigation = SRBDS_MITIGATION_TSX_OFF; else if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR; @@ -1116,6 +1242,8 @@ static void update_indir_branch_cond(void) /* Update the static key controlling the MDS CPU buffer clear in idle */ static void update_mds_branch_idle(void) { + u64 ia32_cap = x86_read_arch_cap_msr(); + /* * Enable the idle clearing if SMT is active on CPUs which are * affected only by MSBDS and not any other MDS variant. 
@@ -1127,14 +1255,17 @@ static void update_mds_branch_idle(void) if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY)) return; - if (sched_smt_active()) + if (sched_smt_active()) { static_branch_enable(&mds_idle_clear); - else + } else if (mmio_mitigation == MMIO_MITIGATION_OFF || + (ia32_cap & ARCH_CAP_FBSDP_NO)) { static_branch_disable(&mds_idle_clear); + } } #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n" #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n" +#define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n" void cpu_bugs_smt_update(void) { @@ -1179,6 +1310,16 @@ void cpu_bugs_smt_update(void) break; } + switch (mmio_mitigation) { + case MMIO_MITIGATION_VERW: + case MMIO_MITIGATION_UCODE_NEEDED: + if (sched_smt_active()) + pr_warn_once(MMIO_MSG_SMT); + break; + case MMIO_MITIGATION_OFF: + break; + } + mutex_unlock(&spec_ctrl_mutex); } @@ -1781,6 +1922,20 @@ static ssize_t tsx_async_abort_show_state(char *buf) sched_smt_active() ? "vulnerable" : "disabled"); } +static ssize_t mmio_stale_data_show_state(char *buf) +{ + if (mmio_mitigation == MMIO_MITIGATION_OFF) + return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]); + + if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { + return sysfs_emit(buf, "%s; SMT Host state unknown\n", + mmio_strings[mmio_mitigation]); + } + + return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation], + sched_smt_active() ? "vulnerable" : "disabled"); +} + static char *stibp_state(void) { if (spectre_v2_in_eibrs_mode(spectre_v2_enabled)) @@ -1881,6 +2036,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr case X86_BUG_SRBDS: return srbds_show_state(buf); + case X86_BUG_MMIO_STALE_DATA: + return mmio_stale_data_show_state(buf); + default: break; } @@ -1932,4 +2090,9 @@ ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char * { return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS); } + +ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf) +{ + return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA); +} #endif diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index c296cb1c0113..4730b0a58f24 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1211,18 +1211,42 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { X86_FEATURE_ANY, issues) #define SRBDS BIT(0) +/* CPU is affected by X86_BUG_MMIO_STALE_DATA */ +#define MMIO BIT(1) +/* CPU is affected by Shared Buffers Data Sampling (SBDS), a variant of X86_BUG_MMIO_STALE_DATA */ +#define MMIO_SBDS BIT(2) static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS), VULNBL_INTEL_STEPPINGS(HASWELL, X86_STEPPING_ANY, SRBDS), VULNBL_INTEL_STEPPINGS(HASWELL_L, X86_STEPPING_ANY, SRBDS), VULNBL_INTEL_STEPPINGS(HASWELL_G, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(HASWELL_X, BIT(2) | BIT(4), MMIO), + VULNBL_INTEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x3, 0x5), MMIO), VULNBL_INTEL_STEPPINGS(BROADWELL_G, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(BROADWELL_X, X86_STEPPING_ANY, MMIO), 
VULNBL_INTEL_STEPPINGS(BROADWELL, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPINGS(0x3, 0x3), SRBDS | MMIO), VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(SKYLAKE_X, BIT(3) | BIT(4) | BIT(6) | + BIT(7) | BIT(0xB), MMIO), + VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPINGS(0x3, 0x3), SRBDS | MMIO), VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPING_ANY, SRBDS), - VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPINGS(0x0, 0xC), SRBDS), - VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPINGS(0x0, 0xD), SRBDS), + VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPINGS(0x9, 0xC), SRBDS | MMIO), + VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPINGS(0x0, 0x8), SRBDS), + VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPINGS(0x9, 0xD), SRBDS | MMIO), + VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPINGS(0x0, 0x8), SRBDS), + VULNBL_INTEL_STEPPINGS(ICELAKE_L, X86_STEPPINGS(0x5, 0x5), MMIO | MMIO_SBDS), + VULNBL_INTEL_STEPPINGS(ICELAKE_D, X86_STEPPINGS(0x1, 0x1), MMIO), + VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPINGS(0x4, 0x6), MMIO), + VULNBL_INTEL_STEPPINGS(COMETLAKE, BIT(2) | BIT(3) | BIT(5), MMIO | MMIO_SBDS), + VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x1, 0x1), MMIO | MMIO_SBDS), + VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x0, 0x0), MMIO), + VULNBL_INTEL_STEPPINGS(LAKEFIELD, X86_STEPPINGS(0x1, 0x1), MMIO | MMIO_SBDS), + VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPINGS(0x1, 0x1), MMIO), + VULNBL_INTEL_STEPPINGS(ATOM_TREMONT, X86_STEPPINGS(0x1, 0x1), MMIO | MMIO_SBDS), + VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPING_ANY, MMIO), + VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L, X86_STEPPINGS(0x0, 0x0), MMIO | MMIO_SBDS), {} }; @@ -1243,6 +1267,13 @@ u64 x86_read_arch_cap_msr(void) return ia32_cap; } +static bool arch_cap_mmio_immune(u64 ia32_cap) +{ + return (ia32_cap & ARCH_CAP_FBSDP_NO && + ia32_cap & ARCH_CAP_PSDP_NO && + ia32_cap & ARCH_CAP_SBDR_SSDP_NO); +} + static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) { u64 ia32_cap = x86_read_arch_cap_msr(); @@ -1296,12 +1327,27 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) /* * SRBDS affects CPUs which support RDRAND or RDSEED and are listed * in the vulnerability blacklist. + * + * Some of the implications and mitigation of Shared Buffers Data + * Sampling (SBDS) are similar to SRBDS. Give SBDS the same treatment as + * SRBDS. */ if ((cpu_has(c, X86_FEATURE_RDRAND) || cpu_has(c, X86_FEATURE_RDSEED)) && - cpu_matches(cpu_vuln_blacklist, SRBDS)) + cpu_matches(cpu_vuln_blacklist, SRBDS | MMIO_SBDS)) setup_force_cpu_bug(X86_BUG_SRBDS); + /* + * Processor MMIO Stale Data bug enumeration + * + * The affected CPU list is generally enough to enumerate the + * vulnerability, but for the virtualization case also check the + * ARCH_CAP MSR bits; the VMM may not want the guest to enumerate + * the bug.
+ */ + if (cpu_matches(cpu_vuln_blacklist, MMIO) && + !arch_cap_mmio_immune(ia32_cap)) + setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA); + if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN)) return; diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S index 4ec13608d3c6..dfeb227de561 100644 --- a/arch/x86/kernel/ftrace_64.S +++ b/arch/x86/kernel/ftrace_64.S @@ -175,6 +175,7 @@ SYM_INNER_LABEL(ftrace_caller_end, SYM_L_GLOBAL) jmp ftrace_epilogue SYM_FUNC_END(ftrace_caller); +STACK_FRAME_NON_STANDARD_FP(ftrace_caller) SYM_FUNC_START(ftrace_epilogue) /* @@ -282,6 +283,7 @@ SYM_INNER_LABEL(ftrace_regs_caller_end, SYM_L_GLOBAL) jmp ftrace_epilogue SYM_FUNC_END(ftrace_regs_caller) +STACK_FRAME_NON_STANDARD_FP(ftrace_regs_caller) #else /* ! CONFIG_DYNAMIC_FTRACE */ @@ -311,10 +313,14 @@ trace: jmp ftrace_stub SYM_FUNC_END(__fentry__) EXPORT_SYMBOL(__fentry__) +STACK_FRAME_NON_STANDARD_FP(__fentry__) + #endif /* CONFIG_DYNAMIC_FTRACE */ #ifdef CONFIG_FUNCTION_GRAPH_TRACER -SYM_FUNC_START(return_to_handler) +SYM_CODE_START(return_to_handler) + UNWIND_HINT_EMPTY + ANNOTATE_NOENDBR subq $16, %rsp /* Save the return values */ @@ -339,7 +345,6 @@ SYM_FUNC_START(return_to_handler) int3 .Ldo_rop: mov %rdi, (%rsp) - UNWIND_HINT_FUNC RET -SYM_FUNC_END(return_to_handler) +SYM_CODE_END(return_to_handler) #endif diff --git a/arch/x86/kernel/resource.c b/arch/x86/kernel/resource.c index db2b350a37b7..bba1abd05bfe 100644 --- a/arch/x86/kernel/resource.c +++ b/arch/x86/kernel/resource.c @@ -1,7 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -#include <linux/dev_printk.h> #include <linux/ioport.h> +#include <linux/printk.h> #include <asm/e820/api.h> +#include <asm/pci_x86.h> static void resource_clip(struct resource *res, resource_size_t start, resource_size_t end) @@ -24,14 +25,14 @@ static void resource_clip(struct resource *res, resource_size_t start, res->start = end + 1; } -void remove_e820_regions(struct device *dev, struct resource *avail) +static void remove_e820_regions(struct resource *avail) { int i; struct e820_entry *entry; u64 e820_start, e820_end; struct resource orig = *avail; - if (!(avail->flags & IORESOURCE_MEM)) + if (!pci_use_e820) return; for (i = 0; i < e820_table->nr_entries; i++) { @@ -41,7 +42,7 @@ void remove_e820_regions(struct device *dev, struct resource *avail) resource_clip(avail, e820_start, e820_end); if (orig.start != avail->start || orig.end != avail->end) { - dev_info(dev, "clipped %pR to %pR for e820 entry [mem %#010Lx-%#010Lx]\n", + pr_info("clipped %pR to %pR for e820 entry [mem %#010Lx-%#010Lx]\n", &orig, avail, e820_start, e820_end); orig = *avail; } @@ -55,6 +56,9 @@ void arch_remove_reservations(struct resource *avail) * the low 1MB unconditionally, as this area is needed for some ISA * cards requiring a memory range, e.g. the i82365 PCMCIA controller. */ - if (avail->flags & IORESOURCE_MEM) + if (avail->flags & IORESOURCE_MEM) { resource_clip(avail, BIOS_ROM_BASE, BIOS_ROM_END); + + remove_e820_regions(avail); + } } diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 3ebb85327edb..bd6c6fd373ae 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -67,11 +67,6 @@ RESERVE_BRK(dmi_alloc, 65536); #endif -/* - * Range of the BSS area. The size of the BSS area is determined - * at link time, with RESERVE_BRK() facility reserving additional - * chunks. 
- */ unsigned long _brk_start = (unsigned long)__brk_base; unsigned long _brk_end = (unsigned long)__brk_base; diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index f5f6dc2e8007..81aba718ecd5 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -385,10 +385,10 @@ SECTIONS __end_of_kernel_reserve = .; . = ALIGN(PAGE_SIZE); - .brk : AT(ADDR(.brk) - LOAD_OFFSET) { + .brk (NOLOAD) : AT(ADDR(.brk) - LOAD_OFFSET) { __brk_base = .; . += 64 * 1024; /* 64k alignment slop space */ - *(.brk_reservation) /* areas brk users have reserved */ + *(.bss..brk) /* areas brk users have reserved */ __brk_limit = .; } diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index f1bdac3f5aa8..0e68b4c937fc 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -2039,6 +2039,19 @@ static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val) } } +static void kvm_lapic_xapic_id_updated(struct kvm_lapic *apic) +{ + struct kvm *kvm = apic->vcpu->kvm; + + if (KVM_BUG_ON(apic_x2apic_mode(apic), kvm)) + return; + + if (kvm_xapic_id(apic) == apic->vcpu->vcpu_id) + return; + + kvm_set_apicv_inhibit(apic->vcpu->kvm, APICV_INHIBIT_REASON_APIC_ID_MODIFIED); +} + static int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val) { int ret = 0; @@ -2047,10 +2060,12 @@ static int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val) switch (reg) { case APIC_ID: /* Local APIC ID */ - if (!apic_x2apic_mode(apic)) + if (!apic_x2apic_mode(apic)) { kvm_apic_set_xapic_id(apic, val >> 24); - else + kvm_lapic_xapic_id_updated(apic); + } else { ret = 1; + } break; case APIC_TASKPRI: @@ -2336,8 +2351,10 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value) MSR_IA32_APICBASE_BASE; if ((value & MSR_IA32_APICBASE_ENABLE) && - apic->base_address != APIC_DEFAULT_PHYS_BASE) - pr_warn_once("APIC base relocation is unsupported by KVM"); + apic->base_address != APIC_DEFAULT_PHYS_BASE) { + kvm_set_apicv_inhibit(apic->vcpu->kvm, + APICV_INHIBIT_REASON_APIC_BASE_MODIFIED); + } } void kvm_apic_update_apicv(struct kvm_vcpu *vcpu) @@ -2648,6 +2665,8 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu, icr = __kvm_lapic_get_reg64(s->regs, APIC_ICR); __kvm_lapic_set_reg(s->regs, APIC_ICR2, icr >> 32); } + } else { + kvm_lapic_xapic_id_updated(vcpu->arch.apic); } return 0; diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index f4653688fa6d..17252f39bd7c 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -3411,7 +3411,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu) root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT), i << 30, PT32_ROOT_LEVEL, true); mmu->pae_root[i] = root | PT_PRESENT_MASK | - shadow_me_mask; + shadow_me_value; } mmu->root.hpa = __pa(mmu->pae_root); } else { @@ -5179,7 +5179,7 @@ static void __kvm_mmu_free_obsolete_roots(struct kvm *kvm, struct kvm_mmu *mmu) roots_to_free |= KVM_MMU_ROOT_CURRENT; for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { - if (is_obsolete_root(kvm, mmu->root.hpa)) + if (is_obsolete_root(kvm, mmu->prev_roots[i].hpa)) roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i); } diff --git a/arch/x86/kvm/mmu/tdp_iter.c b/arch/x86/kvm/mmu/tdp_iter.c index 6d3b3e5a5533..ee4802d7b36c 100644 --- a/arch/x86/kvm/mmu/tdp_iter.c +++ b/arch/x86/kvm/mmu/tdp_iter.c @@ -146,6 +146,15 @@ static bool try_step_up(struct tdp_iter *iter) } /* + * Step the iterator back up a level in the paging structure. Should only be + * used when the iterator is below the root level. 
+ */ +void tdp_iter_step_up(struct tdp_iter *iter) +{ + WARN_ON(!try_step_up(iter)); +} + +/* * Step to the next SPTE in a pre-order traversal of the paging structure. * To get to the next SPTE, the iterator either steps down towards the goal * GFN, if at a present, non-last-level SPTE, or over to a SPTE mapping a diff --git a/arch/x86/kvm/mmu/tdp_iter.h b/arch/x86/kvm/mmu/tdp_iter.h index f0af385c56e0..adfca0cf94d3 100644 --- a/arch/x86/kvm/mmu/tdp_iter.h +++ b/arch/x86/kvm/mmu/tdp_iter.h @@ -114,5 +114,6 @@ void tdp_iter_start(struct tdp_iter *iter, struct kvm_mmu_page *root, int min_level, gfn_t next_last_level_gfn); void tdp_iter_next(struct tdp_iter *iter); void tdp_iter_restart(struct tdp_iter *iter); +void tdp_iter_step_up(struct tdp_iter *iter); #endif /* __KVM_X86_MMU_TDP_ITER_H */ diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 841feaa48be5..7b9265d67131 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -1742,12 +1742,12 @@ static void zap_collapsible_spte_range(struct kvm *kvm, gfn_t start = slot->base_gfn; gfn_t end = start + slot->npages; struct tdp_iter iter; + int max_mapping_level; kvm_pfn_t pfn; rcu_read_lock(); tdp_root_for_each_pte(iter, root, start, end) { -retry: if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true)) continue; @@ -1755,15 +1755,41 @@ retry: !is_last_spte(iter.old_spte, iter.level)) continue; + /* + * This is a leaf SPTE. Check if the PFN it maps can + * be mapped at a higher level. + */ pfn = spte_to_pfn(iter.old_spte); - if (kvm_is_reserved_pfn(pfn) || - iter.level >= kvm_mmu_max_mapping_level(kvm, slot, iter.gfn, - pfn, PG_LEVEL_NUM)) + + if (kvm_is_reserved_pfn(pfn)) continue; + max_mapping_level = kvm_mmu_max_mapping_level(kvm, slot, + iter.gfn, pfn, PG_LEVEL_NUM); + + WARN_ON(max_mapping_level < iter.level); + + /* + * If this page is already mapped at the highest + * viable level, there's nothing more to do. + */ + if (max_mapping_level == iter.level) + continue; + + /* + * The page can be remapped at a higher level, so step + * up to zap the parent SPTE. + */ + while (max_mapping_level > iter.level) + tdp_iter_step_up(&iter); + /* Note, a successful atomic zap also does a remote TLB flush. */ - if (tdp_mmu_zap_spte_atomic(kvm, &iter)) - goto retry; + tdp_mmu_zap_spte_atomic(kvm, &iter); + + /* + * If the atomic zap fails, the iter will recurse back into + * the same subtree to retry. + */ } rcu_read_unlock(); diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c index 54fe03714f8a..d1bc5820ea46 100644 --- a/arch/x86/kvm/svm/avic.c +++ b/arch/x86/kvm/svm/avic.c @@ -291,58 +291,91 @@ void avic_ring_doorbell(struct kvm_vcpu *vcpu) static int avic_kick_target_vcpus_fast(struct kvm *kvm, struct kvm_lapic *source, u32 icrl, u32 icrh, u32 index) { - u32 dest, apic_id; - struct kvm_vcpu *vcpu; + u32 l1_physical_id, dest; + struct kvm_vcpu *target_vcpu; int dest_mode = icrl & APIC_DEST_MASK; int shorthand = icrl & APIC_SHORT_MASK; struct kvm_svm *kvm_svm = to_kvm_svm(kvm); - u32 *avic_logical_id_table = page_address(kvm_svm->avic_logical_id_table_page); if (shorthand != APIC_DEST_NOSHORT) return -EINVAL; - /* - * The AVIC incomplete IPI #vmexit info provides index into - * the physical APIC ID table, which can be used to derive - * guest physical APIC ID. 
- */ + if (apic_x2apic_mode(source)) + dest = icrh; + else + dest = GET_APIC_DEST_FIELD(icrh); + if (dest_mode == APIC_DEST_PHYSICAL) { - apic_id = index; + /* broadcast destination, use slow path */ + if (apic_x2apic_mode(source) && dest == X2APIC_BROADCAST) + return -EINVAL; + if (!apic_x2apic_mode(source) && dest == APIC_BROADCAST) + return -EINVAL; + + l1_physical_id = dest; + + if (WARN_ON_ONCE(l1_physical_id != index)) + return -EINVAL; + } else { - if (!apic_x2apic_mode(source)) { - /* For xAPIC logical mode, the index is for logical APIC table. */ - apic_id = avic_logical_id_table[index] & 0x1ff; + u32 bitmap, cluster; + int logid_index; + + if (apic_x2apic_mode(source)) { + /* 16 bit dest mask, 16 bit cluster id */ + bitmap = dest & 0xFFFF; + cluster = (dest >> 16) << 4; + } else if (kvm_lapic_get_reg(source, APIC_DFR) == APIC_DFR_FLAT) { + /* 8 bit dest mask */ + bitmap = dest; + cluster = 0; } else { - return -EINVAL; + /* 4 bit dest mask, 4 bit cluster id */ + bitmap = dest & 0xF; + cluster = (dest >> 4) << 2; } - } - /* - * Assuming vcpu ID is the same as physical apic ID, - * and use it to retrieve the target vCPU. - */ - vcpu = kvm_get_vcpu_by_id(kvm, apic_id); - if (!vcpu) - return -EINVAL; + if (unlikely(!bitmap)) + /* guest bug: nobody to send the logical interrupt to */ + return 0; - if (apic_x2apic_mode(vcpu->arch.apic)) - dest = icrh; - else - dest = GET_APIC_DEST_FIELD(icrh); + if (!is_power_of_2(bitmap)) + /* multiple logical destinations, use slow path */ + return -EINVAL; - /* - * Try matching the destination APIC ID with the vCPU. - */ - if (kvm_apic_match_dest(vcpu, source, shorthand, dest, dest_mode)) { - vcpu->arch.apic->irr_pending = true; - svm_complete_interrupt_delivery(vcpu, - icrl & APIC_MODE_MASK, - icrl & APIC_INT_LEVELTRIG, - icrl & APIC_VECTOR_MASK); - return 0; + logid_index = cluster + __ffs(bitmap); + + if (apic_x2apic_mode(source)) { + l1_physical_id = logid_index; + } else { + u32 *avic_logical_id_table = + page_address(kvm_svm->avic_logical_id_table_page); + + u32 logid_entry = avic_logical_id_table[logid_index]; + + if (WARN_ON_ONCE(index != logid_index)) + return -EINVAL; + + /* guest bug: non-existing/reserved logical destination */ + if (unlikely(!(logid_entry & AVIC_LOGICAL_ID_ENTRY_VALID_MASK))) + return 0; + + l1_physical_id = logid_entry & + AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK; + } } - return -EINVAL; + target_vcpu = kvm_get_vcpu_by_id(kvm, l1_physical_id); + if (unlikely(!target_vcpu)) + /* guest bug: non-existing vCPU is a target of this IPI */ + return 0; + + target_vcpu->arch.apic->irr_pending = true; + svm_complete_interrupt_delivery(target_vcpu, + icrl & APIC_MODE_MASK, + icrl & APIC_INT_LEVELTRIG, + icrl & APIC_VECTOR_MASK); + return 0; } static void avic_kick_target_vcpus(struct kvm *kvm, struct kvm_lapic *source, @@ -508,35 +541,6 @@ static int avic_handle_ldr_update(struct kvm_vcpu *vcpu) return ret; } -static int avic_handle_apic_id_update(struct kvm_vcpu *vcpu) -{ - u64 *old, *new; - struct vcpu_svm *svm = to_svm(vcpu); - u32 id = kvm_xapic_id(vcpu->arch.apic); - - if (vcpu->vcpu_id == id) - return 0; - - old = avic_get_physical_id_entry(vcpu, vcpu->vcpu_id); - new = avic_get_physical_id_entry(vcpu, id); - if (!new || !old) - return 1; - - /* We need to move physical_id_entry to new offset */ - *new = *old; - *old = 0ULL; - to_svm(vcpu)->avic_physical_id_cache = new; - - /* - * Also update the guest physical APIC ID in the logical - * APIC ID table entry if already setup the LDR.
- */ - if (svm->ldr_reg) - avic_handle_ldr_update(vcpu); - - return 0; -} - static void avic_handle_dfr_update(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); @@ -555,10 +559,6 @@ static int avic_unaccel_trap_write(struct kvm_vcpu *vcpu) AVIC_UNACCEL_ACCESS_OFFSET_MASK; switch (offset) { - case APIC_ID: - if (avic_handle_apic_id_update(vcpu)) - return 0; - break; case APIC_LDR: if (avic_handle_ldr_update(vcpu)) return 0; @@ -650,8 +650,6 @@ int avic_init_vcpu(struct vcpu_svm *svm) void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu) { - if (avic_handle_apic_id_update(vcpu) != 0) - return; avic_handle_dfr_update(vcpu); avic_handle_ldr_update(vcpu); } @@ -910,7 +908,9 @@ bool avic_check_apicv_inhibit_reasons(enum kvm_apicv_inhibit reason) BIT(APICV_INHIBIT_REASON_PIT_REINJ) | BIT(APICV_INHIBIT_REASON_X2APIC) | BIT(APICV_INHIBIT_REASON_BLOCKIRQ) | - BIT(APICV_INHIBIT_REASON_SEV); + BIT(APICV_INHIBIT_REASON_SEV) | + BIT(APICV_INHIBIT_REASON_APIC_ID_MODIFIED) | + BIT(APICV_INHIBIT_REASON_APIC_BASE_MODIFIED); return supported & BIT(reason); } @@ -946,7 +946,7 @@ out: return ret; } -void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu) +void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { u64 entry; int h_physical_id = kvm_cpu_get_apicid(cpu); @@ -978,7 +978,7 @@ void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu) avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, true); } -void __avic_vcpu_put(struct kvm_vcpu *vcpu) +void avic_vcpu_put(struct kvm_vcpu *vcpu) { u64 entry; struct vcpu_svm *svm = to_svm(vcpu); @@ -997,25 +997,6 @@ void __avic_vcpu_put(struct kvm_vcpu *vcpu) WRITE_ONCE(*(svm->avic_physical_id_cache), entry); } -static void avic_vcpu_load(struct kvm_vcpu *vcpu) -{ - int cpu = get_cpu(); - - WARN_ON(cpu != vcpu->cpu); - - __avic_vcpu_load(vcpu, cpu); - - put_cpu(); -} - -static void avic_vcpu_put(struct kvm_vcpu *vcpu) -{ - preempt_disable(); - - __avic_vcpu_put(vcpu); - - preempt_enable(); -} void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu) { @@ -1042,7 +1023,7 @@ void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu) vmcb_mark_dirty(vmcb, VMCB_AVIC); if (activated) - avic_vcpu_load(vcpu); + avic_vcpu_load(vcpu, vcpu->cpu); else avic_vcpu_put(vcpu); @@ -1075,5 +1056,5 @@ void avic_vcpu_unblocking(struct kvm_vcpu *vcpu) if (!kvm_vcpu_apicv_active(vcpu)) return; - avic_vcpu_load(vcpu); + avic_vcpu_load(vcpu, vcpu->cpu); } diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index bed5e1692cef..ba7cd26f438f 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -616,6 +616,8 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm) struct kvm_vcpu *vcpu = &svm->vcpu; struct vmcb *vmcb01 = svm->vmcb01.ptr; struct vmcb *vmcb02 = svm->nested.vmcb02.ptr; + u32 pause_count12; + u32 pause_thresh12; /* * Filled at exit: exit_code, exit_code_hi, exit_info_1, exit_info_2, @@ -671,27 +673,25 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm) if (!nested_vmcb_needs_vls_intercept(svm)) vmcb02->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK; + pause_count12 = svm->pause_filter_enabled ? svm->nested.ctl.pause_filter_count : 0; + pause_thresh12 = svm->pause_threshold_enabled ? svm->nested.ctl.pause_filter_thresh : 0; if (kvm_pause_in_guest(svm->vcpu.kvm)) { - /* use guest values since host doesn't use them */ - vmcb02->control.pause_filter_count = - svm->pause_filter_enabled ? 
- svm->nested.ctl.pause_filter_count : 0; + /* use guest values since host doesn't intercept PAUSE */ + vmcb02->control.pause_filter_count = pause_count12; + vmcb02->control.pause_filter_thresh = pause_thresh12; - vmcb02->control.pause_filter_thresh = - svm->pause_threshold_enabled ? - svm->nested.ctl.pause_filter_thresh : 0; - - } else if (!vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_PAUSE)) { - /* use host values when guest doesn't use them */ + } else { + /* start from host values otherwise */ vmcb02->control.pause_filter_count = vmcb01->control.pause_filter_count; vmcb02->control.pause_filter_thresh = vmcb01->control.pause_filter_thresh; - } else { - /* - * Intercept every PAUSE otherwise and - * ignore both host and guest values - */ - vmcb02->control.pause_filter_count = 0; - vmcb02->control.pause_filter_thresh = 0; + + /* ... but ensure filtering is disabled if so requested. */ + if (vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_PAUSE)) { + if (!pause_count12) + vmcb02->control.pause_filter_count = 0; + if (!pause_thresh12) + vmcb02->control.pause_filter_thresh = 0; + } } nested_svm_transition_tlb_flush(vcpu); @@ -951,8 +951,11 @@ int nested_svm_vmexit(struct vcpu_svm *svm) vmcb12->control.event_inj = svm->nested.ctl.event_inj; vmcb12->control.event_inj_err = svm->nested.ctl.event_inj_err; - if (!kvm_pause_in_guest(vcpu->kvm) && vmcb02->control.pause_filter_count) + if (!kvm_pause_in_guest(vcpu->kvm)) { vmcb01->control.pause_filter_count = vmcb02->control.pause_filter_count; + vmcb_mark_dirty(vmcb01, VMCB_INTERCEPTS); + + } nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr); @@ -982,7 +985,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm) if (svm->tsc_ratio_msr != kvm_default_tsc_scaling_ratio) { WARN_ON(!svm->tsc_scaling_enabled); vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio; - svm_write_tsc_multiplier(vcpu, vcpu->arch.tsc_scaling_ratio); + __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio); } svm->nested.ctl.nested_cr3 = 0; @@ -1387,7 +1390,7 @@ void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu) vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier(vcpu->arch.l1_tsc_scaling_ratio, svm->tsc_ratio_msr); - svm_write_tsc_multiplier(vcpu, vcpu->arch.tsc_scaling_ratio); + __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio); } /* Inverse operation of nested_copy_vmcb_control_to_cache(). asid is copied too. 
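The pause-filter rework above reduces to a two-step merge: take L1's values verbatim when the host does not intercept PAUSE, otherwise start from the host's values and zero only the fields that L1, while intercepting PAUSE itself, left at zero. A self-contained model of the count field (the threshold follows the same rule; names are stand-ins):

	#include <stdint.h>

	/*
	 * Illustrative model of the vmcb02 pause_filter_count merge. count12
	 * is assumed already forced to 0 when L1 lacks the pause-filter
	 * capability, mirroring pause_count12 in the hunk above.
	 */
	static uint32_t merged_pause_filter_count(int host_pause_in_guest,
						  int l1_intercepts_pause,
						  uint32_t count12, uint32_t count01)
	{
		uint32_t count02;

		if (host_pause_in_guest)
			return count12;		/* host doesn't intercept PAUSE, take L1's */

		count02 = count01;		/* start from host values... */

		/* ...but ensure filtering is disabled if L1 requested that */
		if (l1_intercepts_pause && !count12)
			count02 = 0;

		return count02;
	}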
*/ diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 51fd985cf21d..0c240ed04f96 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -844,7 +844,7 @@ static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr, /* If source buffer is not aligned then use an intermediate buffer */ if (!IS_ALIGNED((unsigned long)vaddr, 16)) { - src_tpage = alloc_page(GFP_KERNEL); + src_tpage = alloc_page(GFP_KERNEL_ACCOUNT); if (!src_tpage) return -ENOMEM; @@ -865,7 +865,7 @@ static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr, if (!IS_ALIGNED((unsigned long)dst_vaddr, 16) || !IS_ALIGNED(size, 16)) { int dst_offset; - dst_tpage = alloc_page(GFP_KERNEL); + dst_tpage = alloc_page(GFP_KERNEL_ACCOUNT); if (!dst_tpage) { ret = -ENOMEM; goto e_free; @@ -1665,19 +1665,24 @@ static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm) { struct kvm_sev_info *dst = &to_kvm_svm(dst_kvm)->sev_info; struct kvm_sev_info *src = &to_kvm_svm(src_kvm)->sev_info; + struct kvm_vcpu *dst_vcpu, *src_vcpu; + struct vcpu_svm *dst_svm, *src_svm; struct kvm_sev_info *mirror; + unsigned long i; dst->active = true; dst->asid = src->asid; dst->handle = src->handle; dst->pages_locked = src->pages_locked; dst->enc_context_owner = src->enc_context_owner; + dst->es_active = src->es_active; src->asid = 0; src->active = false; src->handle = 0; src->pages_locked = 0; src->enc_context_owner = NULL; + src->es_active = false; list_cut_before(&dst->regions_list, &src->regions_list, &src->regions_list); @@ -1704,26 +1709,21 @@ static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm) list_del(&src->mirror_entry); list_add_tail(&dst->mirror_entry, &owner_sev_info->mirror_vms); } -} -static int sev_es_migrate_from(struct kvm *dst, struct kvm *src) -{ - unsigned long i; - struct kvm_vcpu *dst_vcpu, *src_vcpu; - struct vcpu_svm *dst_svm, *src_svm; + kvm_for_each_vcpu(i, dst_vcpu, dst_kvm) { + dst_svm = to_svm(dst_vcpu); - if (atomic_read(&src->online_vcpus) != atomic_read(&dst->online_vcpus)) - return -EINVAL; + sev_init_vmcb(dst_svm); - kvm_for_each_vcpu(i, src_vcpu, src) { - if (!src_vcpu->arch.guest_state_protected) - return -EINVAL; - } + if (!dst->es_active) + continue; - kvm_for_each_vcpu(i, src_vcpu, src) { + /* + * Note, the source is not required to have the same number of + * vCPUs as the destination when migrating a vanilla SEV VM. + */ + src_vcpu = kvm_get_vcpu(dst_kvm, i); src_svm = to_svm(src_vcpu); - dst_vcpu = kvm_get_vcpu(dst, i); - dst_svm = to_svm(dst_vcpu); /* * Transfer VMSA and GHCB state to the destination. 
Nullify and @@ -1740,8 +1740,23 @@ static int sev_es_migrate_from(struct kvm *dst, struct kvm *src) src_svm->vmcb->control.vmsa_pa = INVALID_PAGE; src_vcpu->arch.guest_state_protected = false; } - to_kvm_svm(src)->sev_info.es_active = false; - to_kvm_svm(dst)->sev_info.es_active = true; +} + +static int sev_check_source_vcpus(struct kvm *dst, struct kvm *src) +{ + struct kvm_vcpu *src_vcpu; + unsigned long i; + + if (!sev_es_guest(src)) + return 0; + + if (atomic_read(&src->online_vcpus) != atomic_read(&dst->online_vcpus)) + return -EINVAL; + + kvm_for_each_vcpu(i, src_vcpu, src) { + if (!src_vcpu->arch.guest_state_protected) + return -EINVAL; + } return 0; } @@ -1789,11 +1804,9 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd) if (ret) goto out_dst_vcpu; - if (sev_es_guest(source_kvm)) { - ret = sev_es_migrate_from(kvm, source_kvm); - if (ret) - goto out_source_vcpu; - } + ret = sev_check_source_vcpus(kvm, source_kvm); + if (ret) + goto out_source_vcpu; sev_migrate_from(kvm, source_kvm); kvm_vm_dead(source_kvm); @@ -2914,7 +2927,7 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in) count, in); } -void sev_es_init_vmcb(struct vcpu_svm *svm) +static void sev_es_init_vmcb(struct vcpu_svm *svm) { struct kvm_vcpu *vcpu = &svm->vcpu; @@ -2967,6 +2980,15 @@ void sev_es_init_vmcb(struct vcpu_svm *svm) } } +void sev_init_vmcb(struct vcpu_svm *svm) +{ + svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE; + clr_exception_intercept(svm, UD_VECTOR); + + if (sev_es_guest(svm->vcpu.kvm)) + sev_es_init_vmcb(svm); +} + void sev_es_vcpu_reset(struct vcpu_svm *svm) { /* diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 200045f71df0..44bbf25dfeb9 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -465,11 +465,24 @@ static int has_svm(void) return 1; } +void __svm_write_tsc_multiplier(u64 multiplier) +{ + preempt_disable(); + + if (multiplier == __this_cpu_read(current_tsc_ratio)) + goto out; + + wrmsrl(MSR_AMD64_TSC_RATIO, multiplier); + __this_cpu_write(current_tsc_ratio, multiplier); +out: + preempt_enable(); +} + static void svm_hardware_disable(void) { /* Make sure we clean up behind us */ if (tsc_scaling) - wrmsrl(MSR_AMD64_TSC_RATIO, SVM_TSC_RATIO_DEFAULT); + __svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT); cpu_svm_disable(); @@ -515,8 +528,7 @@ static int svm_hardware_enable(void) * Set the default value, even if we don't use TSC scaling * to avoid having stale value in the msr */ - wrmsrl(MSR_AMD64_TSC_RATIO, SVM_TSC_RATIO_DEFAULT); - __this_cpu_write(current_tsc_ratio, SVM_TSC_RATIO_DEFAULT); + __svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT); } @@ -909,7 +921,7 @@ static void grow_ple_window(struct kvm_vcpu *vcpu) struct vmcb_control_area *control = &svm->vmcb->control; int old = control->pause_filter_count; - if (kvm_pause_in_guest(vcpu->kvm) || !old) + if (kvm_pause_in_guest(vcpu->kvm)) return; control->pause_filter_count = __grow_ple_window(old, @@ -930,7 +942,7 @@ static void shrink_ple_window(struct kvm_vcpu *vcpu) struct vmcb_control_area *control = &svm->vmcb->control; int old = control->pause_filter_count; - if (kvm_pause_in_guest(vcpu->kvm) || !old) + if (kvm_pause_in_guest(vcpu->kvm)) return; control->pause_filter_count = @@ -999,11 +1011,12 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); } -void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier) +static void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, 
u64 multiplier) { - wrmsrl(MSR_AMD64_TSC_RATIO, multiplier); + __svm_write_tsc_multiplier(multiplier); } + /* Evaluate instruction intercepts that depend on guest CPUID features. */ static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu, struct vcpu_svm *svm) @@ -1199,15 +1212,8 @@ static void init_vmcb(struct kvm_vcpu *vcpu) svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK; } - if (sev_guest(vcpu->kvm)) { - svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE; - clr_exception_intercept(svm, UD_VECTOR); - - if (sev_es_guest(vcpu->kvm)) { - /* Perform SEV-ES specific VMCB updates */ - sev_es_init_vmcb(svm); - } - } + if (sev_guest(vcpu->kvm)) + sev_init_vmcb(svm); svm_hv_init_vmcb(vmcb); init_vmcb_after_set_cpuid(vcpu); @@ -1363,13 +1369,8 @@ static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu) sev_es_prepare_switch_to_guest(hostsa); } - if (tsc_scaling) { - u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio; - if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) { - __this_cpu_write(current_tsc_ratio, tsc_ratio); - wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio); - } - } + if (tsc_scaling) + __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio); if (likely(tsc_aux_uret_slot >= 0)) kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull); @@ -1392,13 +1393,13 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) indirect_branch_prediction_barrier(); } if (kvm_vcpu_apicv_active(vcpu)) - __avic_vcpu_load(vcpu, cpu); + avic_vcpu_load(vcpu, cpu); } static void svm_vcpu_put(struct kvm_vcpu *vcpu) { if (kvm_vcpu_apicv_active(vcpu)) - __avic_vcpu_put(vcpu); + avic_vcpu_put(vcpu); svm_prepare_host_switch(vcpu); @@ -4255,6 +4256,8 @@ out: static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu) { + if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_INTR) + vcpu->arch.at_instruction_boundary = true; } static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu) diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 21c5460e947a..9223ac100ef5 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -590,7 +590,7 @@ int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, bool has_error_code, u32 error_code); int nested_svm_exit_special(struct vcpu_svm *svm); void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu); -void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier); +void __svm_write_tsc_multiplier(u64 multiplier); void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm, struct vmcb_control_area *control); void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm, @@ -610,8 +610,8 @@ void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb); int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu); int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu); int avic_init_vcpu(struct vcpu_svm *svm); -void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu); -void __avic_vcpu_put(struct kvm_vcpu *vcpu); +void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu); +void avic_vcpu_put(struct kvm_vcpu *vcpu); void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu); void avic_set_virtual_apic_mode(struct kvm_vcpu *vcpu); void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu); @@ -649,10 +649,10 @@ void __init sev_set_cpu_caps(void); void __init sev_hardware_setup(void); void sev_hardware_unsetup(void); int sev_cpu_init(struct svm_cpu_data *sd); +void sev_init_vmcb(struct vcpu_svm *svm); void sev_free_vcpu(struct kvm_vcpu *vcpu); int sev_handle_vmgexit(struct kvm_vcpu *vcpu); int sev_es_string_io(struct vcpu_svm 
*svm, int size, unsigned int port, int in); -void sev_es_init_vmcb(struct vcpu_svm *svm); void sev_es_vcpu_reset(struct vcpu_svm *svm); void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector); void sev_es_prepare_switch_to_guest(struct sev_es_save_area *hostsa); diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index a07e8cd753ec..3a919e49129b 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -229,6 +229,9 @@ static const struct { #define L1D_CACHE_ORDER 4 static void *vmx_l1d_flush_pages; +/* Control for disabling CPU Fill buffer clear */ +static bool __read_mostly vmx_fb_clear_ctrl_available; + static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf) { struct page *page; @@ -360,6 +363,60 @@ static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp) return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option); } +static void vmx_setup_fb_clear_ctrl(void) +{ + u64 msr; + + if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES) && + !boot_cpu_has_bug(X86_BUG_MDS) && + !boot_cpu_has_bug(X86_BUG_TAA)) { + rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr); + if (msr & ARCH_CAP_FB_CLEAR_CTRL) + vmx_fb_clear_ctrl_available = true; + } +} + +static __always_inline void vmx_disable_fb_clear(struct vcpu_vmx *vmx) +{ + u64 msr; + + if (!vmx->disable_fb_clear) + return; + + rdmsrl(MSR_IA32_MCU_OPT_CTRL, msr); + msr |= FB_CLEAR_DIS; + wrmsrl(MSR_IA32_MCU_OPT_CTRL, msr); + /* Cache the MSR value to avoid reading it later */ + vmx->msr_ia32_mcu_opt_ctrl = msr; +} + +static __always_inline void vmx_enable_fb_clear(struct vcpu_vmx *vmx) +{ + if (!vmx->disable_fb_clear) + return; + + vmx->msr_ia32_mcu_opt_ctrl &= ~FB_CLEAR_DIS; + wrmsrl(MSR_IA32_MCU_OPT_CTRL, vmx->msr_ia32_mcu_opt_ctrl); +} + +static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx) +{ + vmx->disable_fb_clear = vmx_fb_clear_ctrl_available; + + /* + * If guest will not execute VERW, there is no need to set FB_CLEAR_DIS + * at VMEntry. Skip the MSR read/write when a guest has no use case to + * execute VERW. 
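The two helpers above bracket VM-entry: FB_CLEAR_DIS is set in MSR_IA32_MCU_OPT_CTRL before entering an eligible guest and cleared again after exit, with the MSR image cached so the exit path avoids a second RDMSR. A reduced model of that bracket (the names, the MSR cell, and the bit position are stand-ins, not the kernel's definitions):

	#include <stdint.h>

	#define FB_CLEAR_DIS_BIT	(1ULL << 3)	/* assumed position, illustrative */

	static uint64_t mcu_opt_ctrl_hw;	/* stand-in for the real MSR */

	struct vmx_model {
		uint64_t msr_cache;		/* cached MSR image */
		int	 disable_fb_clear;	/* decided once per vCPU */
	};

	static void pre_vmentry(struct vmx_model *v)
	{
		if (!v->disable_fb_clear)
			return;
		v->msr_cache = mcu_opt_ctrl_hw | FB_CLEAR_DIS_BIT;
		mcu_opt_ctrl_hw = v->msr_cache;		/* wrmsr */
	}

	static void post_vmexit(struct vmx_model *v)
	{
		if (!v->disable_fb_clear)
			return;
		v->msr_cache &= ~FB_CLEAR_DIS_BIT;	/* no rdmsr needed */
		mcu_opt_ctrl_hw = v->msr_cache;		/* wrmsr */
	}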
+ */ + if ((vcpu->arch.arch_capabilities & ARCH_CAP_FB_CLEAR) || + ((vcpu->arch.arch_capabilities & ARCH_CAP_MDS_NO) && + (vcpu->arch.arch_capabilities & ARCH_CAP_TAA_NO) && + (vcpu->arch.arch_capabilities & ARCH_CAP_PSDP_NO) && + (vcpu->arch.arch_capabilities & ARCH_CAP_FBSDP_NO) && + (vcpu->arch.arch_capabilities & ARCH_CAP_SBDR_SSDP_NO))) + vmx->disable_fb_clear = false; +} + static const struct kernel_param_ops vmentry_l1d_flush_ops = { .set = vmentry_l1d_flush_set, .get = vmentry_l1d_flush_get, @@ -2252,6 +2309,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) ret = kvm_set_msr_common(vcpu, msr_info); } + /* FB_CLEAR may have changed, also update the FB_CLEAR_DIS behavior */ + if (msr_index == MSR_IA32_ARCH_CAPABILITIES) + vmx_update_fb_clear_dis(vcpu, vmx); + return ret; } @@ -4553,6 +4614,8 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); vpid_sync_context(vmx->vpid); + + vmx_update_fb_clear_dis(vcpu, vmx); } static void vmx_enable_irq_window(struct kvm_vcpu *vcpu) @@ -6547,6 +6610,7 @@ static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu) return; handle_interrupt_nmi_irqoff(vcpu, gate_offset(desc)); + vcpu->arch.at_instruction_boundary = true; } static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu) @@ -6771,6 +6835,11 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu, vmx_l1d_flush(vcpu); else if (static_branch_unlikely(&mds_user_clear)) mds_clear_cpu_buffers(); + else if (static_branch_unlikely(&mmio_stale_data_clear) && + kvm_arch_has_assigned_device(vcpu->kvm)) + mds_clear_cpu_buffers(); + + vmx_disable_fb_clear(vmx); if (vcpu->arch.cr2 != native_read_cr2()) native_write_cr2(vcpu->arch.cr2); @@ -6780,6 +6849,8 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu, vcpu->arch.cr2 = native_read_cr2(); + vmx_enable_fb_clear(vmx); + guest_state_exit_irqoff(); } @@ -7708,7 +7779,9 @@ static bool vmx_check_apicv_inhibit_reasons(enum kvm_apicv_inhibit reason) ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) | BIT(APICV_INHIBIT_REASON_ABSENT) | BIT(APICV_INHIBIT_REASON_HYPERV) | - BIT(APICV_INHIBIT_REASON_BLOCKIRQ); + BIT(APICV_INHIBIT_REASON_BLOCKIRQ) | + BIT(APICV_INHIBIT_REASON_APIC_ID_MODIFIED) | + BIT(APICV_INHIBIT_REASON_APIC_BASE_MODIFIED); return supported & BIT(reason); } @@ -8211,6 +8284,8 @@ static int __init vmx_init(void) return r; } + vmx_setup_fb_clear_ctrl(); + for_each_possible_cpu(cpu) { INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu)); diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index b98c7e96697a..8d2342ede0c5 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -348,6 +348,8 @@ struct vcpu_vmx { u64 msr_ia32_feature_control_valid_bits; /* SGX Launch Control public key hash */ u64 msr_ia32_sgxlepubkeyhash[4]; + u64 msr_ia32_mcu_opt_ctrl; + bool disable_fb_clear; struct pt_desc pt_desc; struct lbr_desc lbr_desc; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index e9473c7c7390..1910e1e78b15 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -296,6 +296,8 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = { STATS_DESC_COUNTER(VCPU, nested_run), STATS_DESC_COUNTER(VCPU, directed_yield_attempted), STATS_DESC_COUNTER(VCPU, directed_yield_successful), + STATS_DESC_COUNTER(VCPU, preemption_reported), + STATS_DESC_COUNTER(VCPU, preemption_other), STATS_DESC_ICOUNTER(VCPU, guest_mode) }; @@ -1615,6 +1617,9 @@ static u64 kvm_get_arch_capabilities(void) */ } + /* Guests don't need 
to know "Fill buffer clear control" exists */ + data &= ~ARCH_CAP_FB_CLEAR_CTRL; + return data; } @@ -4625,6 +4630,19 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu) struct kvm_memslots *slots; static const u8 preempted = KVM_VCPU_PREEMPTED; + /* + * The vCPU can be marked preempted if and only if the VM-Exit was on + * an instruction boundary and will not trigger guest emulation of any + * kind (see vcpu_run). Vendor specific code controls (conservatively) + * when this is true, for example allowing the vCPU to be marked + * preempted if and only if the VM-Exit was due to a host interrupt. + */ + if (!vcpu->arch.at_instruction_boundary) { + vcpu->stat.preemption_other++; + return; + } + + vcpu->stat.preemption_reported++; if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) return; @@ -4654,19 +4672,21 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { int idx; - if (vcpu->preempted && !vcpu->arch.guest_state_protected) - vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu); + if (vcpu->preempted) { + if (!vcpu->arch.guest_state_protected) + vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu); - /* - * Take the srcu lock as memslots will be accessed to check the gfn - * cache generation against the memslots generation. - */ - idx = srcu_read_lock(&vcpu->kvm->srcu); - if (kvm_xen_msr_enabled(vcpu->kvm)) - kvm_xen_runstate_set_preempted(vcpu); - else - kvm_steal_time_set_preempted(vcpu); - srcu_read_unlock(&vcpu->kvm->srcu, idx); + /* + * Take the srcu lock as memslots will be accessed to check the gfn + * cache generation against the memslots generation. + */ + idx = srcu_read_lock(&vcpu->kvm->srcu); + if (kvm_xen_msr_enabled(vcpu->kvm)) + kvm_xen_runstate_set_preempted(vcpu); + else + kvm_steal_time_set_preempted(vcpu); + srcu_read_unlock(&vcpu->kvm->srcu, idx); + } static_call(kvm_x86_vcpu_put)(vcpu); vcpu->arch.last_host_tsc = rdtsc(); @@ -9833,6 +9853,7 @@ void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu) return; down_read(&vcpu->kvm->arch.apicv_update_lock); + preempt_disable(); activate = kvm_vcpu_apicv_activated(vcpu); @@ -9853,6 +9874,7 @@ void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu) kvm_make_request(KVM_REQ_EVENT, vcpu); out: + preempt_enable(); up_read(&vcpu->kvm->arch.apicv_update_lock); } EXPORT_SYMBOL_GPL(kvm_vcpu_update_apicv); @@ -10422,6 +10444,13 @@ static int vcpu_run(struct kvm_vcpu *vcpu) vcpu->arch.l1tf_flush_l1d = true; for (;;) { + /* + * If another guest vCPU requests a PV TLB flush in the middle + * of instruction emulation, the rest of the emulation could + * use a stale page translation. Assume that any code after + * this point can start executing an instruction. + */ + vcpu->arch.at_instruction_boundary = false; if (kvm_vcpu_running(vcpu)) { r = vcpu_enter_guest(vcpu); } else { diff --git a/arch/x86/kvm/xen.h b/arch/x86/kvm/xen.h index ee5c4ae0755c..532a535a9e99 100644 --- a/arch/x86/kvm/xen.h +++ b/arch/x86/kvm/xen.h @@ -159,8 +159,10 @@ static inline void kvm_xen_runstate_set_preempted(struct kvm_vcpu *vcpu) * behalf of the vCPU. Only if the VMM does actually block * does it need to enter RUNSTATE_blocked. 
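The steal-time change above gates preemption reporting on vendor code having declared the last VM-Exit to be on an instruction boundary; every other exit is merely counted. Condensed into a stand-alone model (names and the publish helper are stand-ins):

	#include <stdint.h>

	struct vcpu_preempt_model {
		int	 at_instruction_boundary;	/* set by the exit-irqoff handlers */
		int	 steal_time_msr_enabled;
		uint64_t preemption_reported;
		uint64_t preemption_other;
	};

	/* Hypothetical stand-in for writing KVM_VCPU_PREEMPTED to steal time. */
	static void publish_preempted_flag(struct vcpu_preempt_model *v) { (void)v; }

	static void record_preemption(struct vcpu_preempt_model *v)
	{
		if (!v->at_instruction_boundary) {
			/* exit may lead to guest emulation: don't tell the guest */
			v->preemption_other++;
			return;
		}

		v->preemption_reported++;
		if (v->steal_time_msr_enabled)
			publish_preempted_flag(v);
	}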
*/ - if (vcpu->preempted) - kvm_xen_update_runstate_guest(vcpu, RUNSTATE_runnable); + if (WARN_ON_ONCE(!vcpu->preempted)) + return; + + kvm_xen_update_runstate_guest(vcpu, RUNSTATE_runnable); } /* 32-bit compatibility definitions, also used natively in 32-bit build */ diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index 11350e2fd736..9f27e14e185f 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c @@ -12,7 +12,6 @@ #include <linux/swiotlb.h> #include <linux/cc_platform.h> #include <linux/mem_encrypt.h> -#include <linux/virtio_config.h> /* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */ bool force_dma_unencrypted(struct device *dev) @@ -87,9 +86,3 @@ void __init mem_encrypt_init(void) print_mem_encrypt_feature_info(); } - -int arch_has_restricted_virtio_memory_access(void) -{ - return cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT); -} -EXPORT_SYMBOL_GPL(arch_has_restricted_virtio_memory_access); diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c index e8f7953fda83..f6d038e2cd8e 100644 --- a/arch/x86/mm/mem_encrypt_amd.c +++ b/arch/x86/mm/mem_encrypt_amd.c @@ -21,6 +21,7 @@ #include <linux/dma-mapping.h> #include <linux/virtio_config.h> #include <linux/cc_platform.h> +#include <linux/platform-feature.h> #include <asm/tlbflush.h> #include <asm/fixmap.h> @@ -242,6 +243,9 @@ void __init sev_setup_arch(void) size = total_mem * 6 / 100; size = clamp_val(size, IO_TLB_DEFAULT_SIZE, SZ_1G); swiotlb_adjust_size(size); + + /* Set restricted memory access for virtio. */ + platform_set(PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS); } static unsigned long pg_level_to_pfn(int level, pte_t *kpte, pgprot_t *ret_prot) diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index f298b18a9a3d..c98b8c0ed3b8 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -1420,8 +1420,9 @@ st: if (is_imm8(insn->off)) case BPF_JMP | BPF_CALL: func = (u8 *) __bpf_call_base + imm32; if (tail_call_reachable) { + /* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */ EMIT3_off32(0x48, 0x8B, 0x85, - -(bpf_prog->aux->stack_depth + 8)); + -round_up(bpf_prog->aux->stack_depth, 8) - 8); if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7)) return -EINVAL; } else { diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index a4f43054bc79..2f82480fd430 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c @@ -8,7 +8,6 @@ #include <linux/pci-acpi.h> #include <asm/numa.h> #include <asm/pci_x86.h> -#include <asm/e820/api.h> struct pci_root_info { struct acpi_pci_root_info common; @@ -20,7 +19,7 @@ struct pci_root_info { #endif }; -static bool pci_use_e820 = true; +bool pci_use_e820 = true; static bool pci_use_crs = true; static bool pci_ignore_seg; @@ -387,11 +386,6 @@ static int pci_acpi_root_prepare_resources(struct acpi_pci_root_info *ci) status = acpi_pci_probe_root_resources(ci); - if (pci_use_e820) { - resource_list_for_each_entry(entry, &ci->resources) - remove_e820_regions(&device->dev, entry->res); - } - if (pci_use_crs) { resource_list_for_each_entry_safe(entry, tmp, &ci->resources) if (resource_is_pcicfg_ioport(entry->res)) diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c index 517a9d8d8f94..8b71b1dd7639 100644 --- a/arch/x86/xen/enlighten_hvm.c +++ b/arch/x86/xen/enlighten_hvm.c @@ -195,6 +195,8 @@ static void __init xen_hvm_guest_init(void) if (xen_pv_domain()) return; + xen_set_restricted_virtio_memory_access(); + init_hvm_pv_info(); reserve_shared_info(); diff 
--git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index f33a4421e7cd..e3297b15701c 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -109,6 +109,8 @@ static DEFINE_PER_CPU(struct tls_descs, shadow_tls_desc); static void __init xen_pv_init_platform(void) { + xen_set_restricted_virtio_memory_access(); + populate_extra_pte(fix_to_virt(FIX_PARAVIRT_BOOTMAP)); set_fixmap(FIX_PARAVIRT_BOOTMAP, xen_start_info->shared_info); diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S index e3eae648ba2e..ab30bcb46290 100644 --- a/arch/xtensa/kernel/entry.S +++ b/arch/xtensa/kernel/entry.S @@ -2173,7 +2173,7 @@ ENDPROC(ret_from_kernel_thread) #ifdef CONFIG_HIBERNATION - .bss + .section .bss, "aw" .align 4 .Lsaved_regs: #if defined(__XTENSA_WINDOWED_ABI__) diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c index e8ceb1528608..16b8a6273772 100644 --- a/arch/xtensa/kernel/time.c +++ b/arch/xtensa/kernel/time.c @@ -154,6 +154,7 @@ static void __init calibrate_ccount(void) cpu = of_find_compatible_node(NULL, NULL, "cdns,xtensa-cpu"); if (cpu) { clk = of_clk_get(cpu, 0); + of_node_put(cpu); if (!IS_ERR(clk)) { ccount_freq = clk_get_rate(clk); return; diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c index 538e6748e85a..c79c1d09ea86 100644 --- a/arch/xtensa/platforms/xtfpga/setup.c +++ b/arch/xtensa/platforms/xtfpga/setup.c @@ -133,6 +133,7 @@ static int __init machine_setup(void) if ((eth = of_find_compatible_node(eth, NULL, "opencores,ethoc"))) update_local_mac(eth); + of_node_put(eth); return 0; } arch_initcall(machine_setup); diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 0d46cb728bbf..e6d7e6b01a05 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -7046,6 +7046,7 @@ static void bfq_exit_queue(struct elevator_queue *e) spin_unlock_irq(&bfqd->lock); #endif + blk_stat_disable_accounting(bfqd->queue); wbt_enable_default(bfqd->queue); kfree(bfqd); @@ -7188,7 +7189,12 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) bfq_init_root_group(bfqd->root_group, bfqd); bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group); + /* We dispatch from request queue wide instead of hw queue */ + blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q); + wbt_disable_default(q); + blk_stat_enable_accounting(q); + return 0; out_free: diff --git a/block/bio.c b/block/bio.c index f92d0223247b..51c99f2c5c90 100644 --- a/block/bio.c +++ b/block/bio.c @@ -1747,26 +1747,6 @@ bad: } EXPORT_SYMBOL(bioset_init); -/* - * Initialize and setup a new bio_set, based on the settings from - * another bio_set. - */ -int bioset_init_from_src(struct bio_set *bs, struct bio_set *src) -{ - int flags; - - flags = 0; - if (src->bvec_pool.min_nr) - flags |= BIOSET_NEED_BVECS; - if (src->rescue_workqueue) - flags |= BIOSET_NEED_RESCUER; - if (src->cache) - flags |= BIOSET_PERCPU_CACHE; - - return bioset_init(bs, src->bio_pool.min_nr, src->front_pad, flags); -} -EXPORT_SYMBOL(bioset_init_from_src); - static int __init init_bio(void) { int i; diff --git a/block/blk-core.c b/block/blk-core.c index 06ff5bbfe8f6..27fb1357ad4b 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -322,19 +322,6 @@ void blk_cleanup_queue(struct request_queue *q) blk_mq_exit_queue(q); } - /* - * In theory, request pool of sched_tags belongs to request queue. - * However, the current implementation requires tag_set for freeing - * requests, so free the pool now. 
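Both xtensa fixes above plug the same kind of leak: of_find_compatible_node() returns its result with an elevated reference count that the caller owns. The pattern they establish, shown in isolation (error handling trimmed; the compatible string is taken from the quoted hunk):

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/of.h>

	static struct clk *get_cpu_clk(void)
	{
		struct device_node *np;
		struct clk *clk = ERR_PTR(-ENODEV);

		np = of_find_compatible_node(NULL, NULL, "cdns,xtensa-cpu");
		if (np) {
			clk = of_clk_get(np, 0);
			of_node_put(np);	/* drop the lookup reference once done */
		}
		return clk;
	}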
- * - * Queue has become frozen, there can't be any in-queue requests, so - * it is safe to free requests now. - */ - mutex_lock(&q->sysfs_lock); - if (q->elevator) - blk_mq_sched_free_rqs(q); - mutex_unlock(&q->sysfs_lock); - /* @q is and will stay empty, shutdown and put */ blk_put_queue(q); } diff --git a/block/blk-ia-ranges.c b/block/blk-ia-ranges.c index 56ed48d2954e..47c89e65b57f 100644 --- a/block/blk-ia-ranges.c +++ b/block/blk-ia-ranges.c @@ -144,7 +144,6 @@ int disk_register_independent_access_ranges(struct gendisk *disk, } for (i = 0; i < iars->nr_ia_ranges; i++) { - iars->ia_range[i].queue = q; ret = kobject_init_and_add(&iars->ia_range[i].kobj, &blk_ia_range_ktype, &iars->kobj, "%d", i); diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 7e4136a60e1c..4d1ce9ef4318 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -711,11 +711,6 @@ void blk_mq_debugfs_register(struct request_queue *q) } } -void blk_mq_debugfs_unregister(struct request_queue *q) -{ - q->sched_debugfs_dir = NULL; -} - static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx) { @@ -746,6 +741,8 @@ void blk_mq_debugfs_register_hctx(struct request_queue *q, void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx) { + if (!hctx->queue->debugfs_dir) + return; debugfs_remove_recursive(hctx->debugfs_dir); hctx->sched_debugfs_dir = NULL; hctx->debugfs_dir = NULL; @@ -773,6 +770,8 @@ void blk_mq_debugfs_register_sched(struct request_queue *q) { struct elevator_type *e = q->elevator->type; + lockdep_assert_held(&q->debugfs_mutex); + /* * If the parent directory has not been created yet, return, we will be * called again later on and the directory/files will be created then. @@ -790,6 +789,8 @@ void blk_mq_debugfs_register_sched(struct request_queue *q) void blk_mq_debugfs_unregister_sched(struct request_queue *q) { + lockdep_assert_held(&q->debugfs_mutex); + debugfs_remove_recursive(q->sched_debugfs_dir); q->sched_debugfs_dir = NULL; } @@ -811,6 +812,10 @@ static const char *rq_qos_id_to_name(enum rq_qos_id id) void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos) { + lockdep_assert_held(&rqos->q->debugfs_mutex); + + if (!rqos->q->debugfs_dir) + return; debugfs_remove_recursive(rqos->debugfs_dir); rqos->debugfs_dir = NULL; } @@ -820,6 +825,8 @@ void blk_mq_debugfs_register_rqos(struct rq_qos *rqos) struct request_queue *q = rqos->q; const char *dir_name = rq_qos_id_to_name(rqos->id); + lockdep_assert_held(&q->debugfs_mutex); + if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs) return; @@ -833,17 +840,13 @@ void blk_mq_debugfs_register_rqos(struct rq_qos *rqos) debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs); } -void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q) -{ - debugfs_remove_recursive(q->rqos_debugfs_dir); - q->rqos_debugfs_dir = NULL; -} - void blk_mq_debugfs_register_sched_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx) { struct elevator_type *e = q->elevator->type; + lockdep_assert_held(&q->debugfs_mutex); + /* * If the parent debugfs directory has not been created yet, return; * We will be called again later on with appropriate parent debugfs @@ -863,6 +866,10 @@ void blk_mq_debugfs_register_sched_hctx(struct request_queue *q, void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx) { + lockdep_assert_held(&hctx->queue->debugfs_mutex); + + if (!hctx->queue->debugfs_dir) + return; debugfs_remove_recursive(hctx->sched_debugfs_dir); hctx->sched_debugfs_dir = NULL; } diff --git 
a/block/blk-mq-debugfs.h b/block/blk-mq-debugfs.h index 69918f4170d6..9c7d4b6117d4 100644 --- a/block/blk-mq-debugfs.h +++ b/block/blk-mq-debugfs.h @@ -21,7 +21,6 @@ int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq); int blk_mq_debugfs_rq_show(struct seq_file *m, void *v); void blk_mq_debugfs_register(struct request_queue *q); -void blk_mq_debugfs_unregister(struct request_queue *q); void blk_mq_debugfs_register_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx); void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx); @@ -36,16 +35,11 @@ void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx); void blk_mq_debugfs_register_rqos(struct rq_qos *rqos); void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos); -void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q); #else static inline void blk_mq_debugfs_register(struct request_queue *q) { } -static inline void blk_mq_debugfs_unregister(struct request_queue *q) -{ -} - static inline void blk_mq_debugfs_register_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx) { @@ -87,10 +81,6 @@ static inline void blk_mq_debugfs_register_rqos(struct rq_qos *rqos) static inline void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos) { } - -static inline void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q) -{ -} #endif #ifdef CONFIG_BLK_DEBUG_FS_ZONED diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 9e56a69422b6..a4f7c101b53b 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -564,6 +564,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e) int ret; if (!e) { + blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q); q->elevator = NULL; q->nr_requests = q->tag_set->queue_depth; return 0; @@ -593,7 +594,9 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e) if (ret) goto err_free_map_and_rqs; + mutex_lock(&q->debugfs_mutex); blk_mq_debugfs_register_sched(q); + mutex_unlock(&q->debugfs_mutex); queue_for_each_hw_ctx(q, hctx, i) { if (e->ops.init_hctx) { @@ -606,7 +609,9 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e) return ret; } } + mutex_lock(&q->debugfs_mutex); blk_mq_debugfs_register_sched_hctx(q, hctx); + mutex_unlock(&q->debugfs_mutex); } return 0; @@ -647,14 +652,21 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e) unsigned int flags = 0; queue_for_each_hw_ctx(q, hctx, i) { + mutex_lock(&q->debugfs_mutex); blk_mq_debugfs_unregister_sched_hctx(hctx); + mutex_unlock(&q->debugfs_mutex); + if (e->type->ops.exit_hctx && hctx->sched_data) { e->type->ops.exit_hctx(hctx, i); hctx->sched_data = NULL; } flags = hctx->flags; } + + mutex_lock(&q->debugfs_mutex); blk_mq_debugfs_unregister_sched(q); + mutex_unlock(&q->debugfs_mutex); + if (e->type->ops.exit_sched) e->type->ops.exit_sched(e); blk_mq_sched_tags_teardown(q, flags); diff --git a/block/blk-mq.c b/block/blk-mq.c index e9bf950983c7..93d9d60980fb 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -579,6 +579,8 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, if (!blk_mq_hw_queue_mapped(data.hctx)) goto out_queue_exit; cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask); + if (cpu >= nr_cpu_ids) + goto out_queue_exit; data.ctx = __blk_mq_get_ctx(q, cpu); if (!q->elevator) @@ -2141,20 +2143,6 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) EXPORT_SYMBOL(blk_mq_run_hw_queue); /* - * Is the request queue handled by an IO scheduler that does not respect - * hardware 
queues when dispatching? - */ -static bool blk_mq_has_sqsched(struct request_queue *q) -{ - struct elevator_queue *e = q->elevator; - - if (e && e->type->ops.dispatch_request && - !(e->type->elevator_features & ELEVATOR_F_MQ_AWARE)) - return true; - return false; -} - -/* * Return prefered queue to dispatch from (if any) for non-mq aware IO * scheduler. */ @@ -2186,7 +2174,7 @@ void blk_mq_run_hw_queues(struct request_queue *q, bool async) unsigned long i; sq_hctx = NULL; - if (blk_mq_has_sqsched(q)) + if (blk_queue_sq_sched(q)) sq_hctx = blk_mq_get_sq_hctx(q); queue_for_each_hw_ctx(q, hctx, i) { if (blk_mq_hctx_stopped(hctx)) @@ -2214,7 +2202,7 @@ void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs) unsigned long i; sq_hctx = NULL; - if (blk_mq_has_sqsched(q)) + if (blk_queue_sq_sched(q)) sq_hctx = blk_mq_get_sq_hctx(q); queue_for_each_hw_ctx(q, hctx, i) { if (blk_mq_hctx_stopped(hctx)) @@ -2777,15 +2765,20 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q, return NULL; } - rq_qos_throttle(q, *bio); - if (blk_mq_get_hctx_type((*bio)->bi_opf) != rq->mq_hctx->type) return NULL; if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf)) return NULL; - rq->cmd_flags = (*bio)->bi_opf; + /* + * If any qos ->throttle() end up blocking, we will have flushed the + * plug and hence killed the cached_rq list as well. Pop this entry + * before we throttle. + */ plug->cached_rq = rq_list_next(rq); + rq_qos_throttle(q, *bio); + + rq->cmd_flags = (*bio)->bi_opf; INIT_LIST_HEAD(&rq->queuelist); return rq; } @@ -3443,8 +3436,9 @@ static void blk_mq_exit_hctx(struct request_queue *q, if (blk_mq_hw_queue_mapped(hctx)) blk_mq_tag_idle(hctx); - blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx], - set->queue_depth, flush_rq); + if (blk_queue_init_done(q)) + blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx], + set->queue_depth, flush_rq); if (set->ops->exit_request) set->ops->exit_request(set, flush_rq, hctx_idx); @@ -4438,12 +4432,14 @@ static bool blk_mq_elv_switch_none(struct list_head *head, if (!qe) return false; + /* q->elevator needs protection from ->sysfs_lock */ + mutex_lock(&q->sysfs_lock); + INIT_LIST_HEAD(&qe->node); qe->q = q; qe->type = q->elevator->type; list_add(&qe->node, head); - mutex_lock(&q->sysfs_lock); /* * After elevator_switch_mq, the previous elevator_queue will be * released by elevator_release. 
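The cached-request fix above encodes a general ordering rule: detach the element from the plug's list before calling a hook that may sleep, because sleeping can flush the plug and free everything still on that list. A minimal stand-alone model (names are stand-ins):

	#include <stddef.h>

	struct req_model { struct req_model *next; };

	/* throttle() may sleep and invalidate whatever *cached still points at. */
	static struct req_model *pop_then_throttle(struct req_model **cached,
						   void (*throttle)(void))
	{
		struct req_model *rq = *cached;

		if (!rq)
			return NULL;

		*cached = rq->next;	/* pop first... */
		throttle();		/* ...then run the possibly-blocking hook */
		return rq;
	}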
The reference of the io scheduler diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c index e83af7bc7591..d3a75693adbf 100644 --- a/block/blk-rq-qos.c +++ b/block/blk-rq-qos.c @@ -294,8 +294,6 @@ void rq_qos_wait(struct rq_wait *rqw, void *private_data, void rq_qos_exit(struct request_queue *q) { - blk_mq_debugfs_unregister_queue_rqos(q); - while (q->rq_qos) { struct rq_qos *rqos = q->rq_qos; q->rq_qos = rqos->next; diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h index 68267007da1c..0e46052b018a 100644 --- a/block/blk-rq-qos.h +++ b/block/blk-rq-qos.h @@ -104,8 +104,11 @@ static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos) blk_mq_unfreeze_queue(q); - if (rqos->ops->debugfs_attrs) + if (rqos->ops->debugfs_attrs) { + mutex_lock(&q->debugfs_mutex); blk_mq_debugfs_register_rqos(rqos); + mutex_unlock(&q->debugfs_mutex); + } } static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos) @@ -129,7 +132,9 @@ static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos) blk_mq_unfreeze_queue(q); + mutex_lock(&q->debugfs_mutex); blk_mq_debugfs_unregister_rqos(rqos); + mutex_unlock(&q->debugfs_mutex); } typedef bool (acquire_inflight_cb_t)(struct rq_wait *rqw, void *private_data); diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 88bd41d4cb59..9b905e9443e4 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -779,14 +779,6 @@ static void blk_release_queue(struct kobject *kobj) if (queue_is_mq(q)) blk_mq_release(q); - blk_trace_shutdown(q); - mutex_lock(&q->debugfs_mutex); - debugfs_remove_recursive(q->debugfs_dir); - mutex_unlock(&q->debugfs_mutex); - - if (queue_is_mq(q)) - blk_mq_debugfs_unregister(q); - bioset_exit(&q->bio_split); if (blk_queue_has_srcu(q)) @@ -836,17 +828,16 @@ int blk_register_queue(struct gendisk *disk) goto unlock; } + if (queue_is_mq(q)) + __blk_mq_register_dev(dev, q); + mutex_lock(&q->sysfs_lock); + mutex_lock(&q->debugfs_mutex); q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent), blk_debugfs_root); - mutex_unlock(&q->debugfs_mutex); - - if (queue_is_mq(q)) { - __blk_mq_register_dev(dev, q); + if (queue_is_mq(q)) blk_mq_debugfs_register(q); - } - - mutex_lock(&q->sysfs_lock); + mutex_unlock(&q->debugfs_mutex); ret = disk_register_independent_access_ranges(disk, NULL); if (ret) @@ -948,8 +939,15 @@ void blk_unregister_queue(struct gendisk *disk) /* Now that we've deleted all child objects, we can delete the queue. */ kobject_uevent(&q->kobj, KOBJ_REMOVE); kobject_del(&q->kobj); - mutex_unlock(&q->sysfs_dir_lock); + mutex_lock(&q->debugfs_mutex); + blk_trace_shutdown(q); + debugfs_remove_recursive(q->debugfs_dir); + q->debugfs_dir = NULL; + q->sched_debugfs_dir = NULL; + q->rqos_debugfs_dir = NULL; + mutex_unlock(&q->debugfs_mutex); + kobject_put(&disk_to_dev(disk)->kobj); } diff --git a/block/genhd.c b/block/genhd.c index 27205ae47d59..278227ba1d53 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -623,6 +623,7 @@ void del_gendisk(struct gendisk *disk) * Prevent new I/O from crossing bio_queue_enter(). 
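The debugfs reshuffle above makes q->debugfs_dir a reliable ownership marker: it is created and destroyed under q->debugfs_mutex, so every child unregister can first check whether the parent still exists and become a no-op once blk_unregister_queue() has removed the whole tree. Schematically (stand-in types and helper, not the block layer's API):

	struct queue_model {
		void *debugfs_dir;	/* parent; NULL once the queue is unregistered */
		void *sched_debugfs_dir;
	};

	static void remove_dir_model(void *dir) { (void)dir; }	/* stand-in */

	/* Caller must hold the queue's debugfs mutex. */
	static void unregister_sched_dir(struct queue_model *q)
	{
		if (!q->debugfs_dir)
			return;		/* parent already removed recursively */

		remove_dir_model(q->sched_debugfs_dir);
		q->sched_debugfs_dir = NULL;
	}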
*/ blk_queue_start_drain(q); + blk_mq_freeze_queue_wait(q); if (!(disk->flags & GENHD_FL_HIDDEN)) { sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi"); @@ -646,12 +647,21 @@ void del_gendisk(struct gendisk *disk) pm_runtime_set_memalloc_noio(disk_to_dev(disk), false); device_del(disk_to_dev(disk)); - blk_mq_freeze_queue_wait(q); - blk_throtl_cancel_bios(disk->queue); blk_sync_queue(q); blk_flush_integrity(); + blk_mq_cancel_work_sync(q); + + blk_mq_quiesce_queue(q); + if (q->elevator) { + mutex_lock(&q->sysfs_lock); + elevator_exit(q); + mutex_unlock(&q->sysfs_lock); + } + rq_qos_exit(q); + blk_mq_unquiesce_queue(q); + /* * Allow using passthrough request again after the queue is torn down. */ @@ -1120,31 +1130,6 @@ static const struct attribute_group *disk_attr_groups[] = { NULL }; -static void disk_release_mq(struct request_queue *q) -{ - blk_mq_cancel_work_sync(q); - - /* - * There can't be any non non-passthrough bios in flight here, but - * requests stay around longer, including passthrough ones so we - * still need to freeze the queue here. - */ - blk_mq_freeze_queue(q); - - /* - * Since the I/O scheduler exit code may access cgroup information, - * perform I/O scheduler exit before disassociating from the block - * cgroup controller. - */ - if (q->elevator) { - mutex_lock(&q->sysfs_lock); - elevator_exit(q); - mutex_unlock(&q->sysfs_lock); - } - rq_qos_exit(q); - __blk_mq_unfreeze_queue(q, true); -} - /** * disk_release - releases all allocated resources of the gendisk * @dev: the device representing this disk @@ -1166,9 +1151,6 @@ static void disk_release(struct device *dev) might_sleep(); WARN_ON_ONCE(disk_live(disk)); - if (queue_is_mq(disk->queue)) - disk_release_mq(disk->queue); - blkcg_exit_queue(disk->queue); disk_release_events(disk); diff --git a/block/holder.c b/block/holder.c index 8d750281a1cd..5283bc804cc1 100644 --- a/block/holder.c +++ b/block/holder.c @@ -79,10 +79,6 @@ int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk) WARN_ON_ONCE(!bdev->bd_holder); - /* FIXME: remove the following once add_disk() handles errors */ - if (WARN_ON(!bdev->bd_holder_dir)) - goto out_unlock; - holder = bd_find_holder_disk(bdev, disk); if (holder) { holder->refcnt++; diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c index 70ff2a599ef6..8f7c745b4a57 100644 --- a/block/kyber-iosched.c +++ b/block/kyber-iosched.c @@ -421,6 +421,8 @@ static int kyber_init_sched(struct request_queue *q, struct elevator_type *e) blk_stat_enable_accounting(q); + blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q); + eq->elevator_data = kqd; q->elevator = eq; @@ -1033,7 +1035,6 @@ static struct elevator_type kyber_sched = { #endif .elevator_attrs = kyber_sched_attrs, .elevator_name = "kyber", - .elevator_features = ELEVATOR_F_MQ_AWARE, .elevator_owner = THIS_MODULE, }; diff --git a/block/mq-deadline.c b/block/mq-deadline.c index 6ed602b2f80a..1a9e835e816c 100644 --- a/block/mq-deadline.c +++ b/block/mq-deadline.c @@ -642,6 +642,9 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e) spin_lock_init(&dd->lock); spin_lock_init(&dd->zone_lock); + /* We dispatch from request queue wide instead of hw queue */ + blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q); + q->elevator = eq; return 0; diff --git a/certs/.gitignore b/certs/.gitignore index 56637aceaf81..cec5465f31c1 100644 --- a/certs/.gitignore +++ b/certs/.gitignore @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only -/blacklist_hashes_checked +/blacklist_hash_list /extract-cert /x509_certificate_list 
/x509_revocation_list diff --git a/certs/Makefile b/certs/Makefile index bb904f90f139..88a73b28d254 100644 --- a/certs/Makefile +++ b/certs/Makefile @@ -3,26 +3,26 @@ # Makefile for the linux kernel signature checking certificates. # -obj-$(CONFIG_SYSTEM_TRUSTED_KEYRING) += system_keyring.o system_certificates.o common.o -obj-$(CONFIG_SYSTEM_BLACKLIST_KEYRING) += blacklist.o common.o +obj-$(CONFIG_SYSTEM_TRUSTED_KEYRING) += system_keyring.o system_certificates.o +obj-$(CONFIG_SYSTEM_BLACKLIST_KEYRING) += blacklist.o obj-$(CONFIG_SYSTEM_REVOCATION_LIST) += revocation_certificates.o ifneq ($(CONFIG_SYSTEM_BLACKLIST_HASH_LIST),) -quiet_cmd_check_blacklist_hashes = CHECK $(patsubst "%",%,$(2)) - cmd_check_blacklist_hashes = $(AWK) -f $(srctree)/scripts/check-blacklist-hashes.awk $(2); touch $@ -$(eval $(call config_filename,SYSTEM_BLACKLIST_HASH_LIST)) +$(obj)/blacklist_hashes.o: $(obj)/blacklist_hash_list +CFLAGS_blacklist_hashes.o := -I $(obj) -$(obj)/blacklist_hashes.o: $(obj)/blacklist_hashes_checked +quiet_cmd_check_and_copy_blacklist_hash_list = GEN $@ + cmd_check_and_copy_blacklist_hash_list = \ + $(AWK) -f $(srctree)/scripts/check-blacklist-hashes.awk $(CONFIG_SYSTEM_BLACKLIST_HASH_LIST) >&2; \ + cat $(CONFIG_SYSTEM_BLACKLIST_HASH_LIST) > $@ -CFLAGS_blacklist_hashes.o += -I$(srctree) - -targets += blacklist_hashes_checked -$(obj)/blacklist_hashes_checked: $(SYSTEM_BLACKLIST_HASH_LIST_SRCPREFIX)$(SYSTEM_BLACKLIST_HASH_LIST_FILENAME) scripts/check-blacklist-hashes.awk FORCE - $(call if_changed,check_blacklist_hashes,$(SYSTEM_BLACKLIST_HASH_LIST_SRCPREFIX)$(CONFIG_SYSTEM_BLACKLIST_HASH_LIST)) +$(obj)/blacklist_hash_list: $(CONFIG_SYSTEM_BLACKLIST_HASH_LIST) FORCE + $(call if_changed,check_and_copy_blacklist_hash_list) obj-$(CONFIG_SYSTEM_BLACKLIST_KEYRING) += blacklist_hashes.o else obj-$(CONFIG_SYSTEM_BLACKLIST_KEYRING) += blacklist_nohashes.o endif +targets += blacklist_hash_list quiet_cmd_extract_certs = CERT $@ cmd_extract_certs = $(obj)/extract-cert $(extract-cert-in) $@ @@ -33,7 +33,7 @@ $(obj)/system_certificates.o: $(obj)/x509_certificate_list $(obj)/x509_certificate_list: $(CONFIG_SYSTEM_TRUSTED_KEYS) $(obj)/extract-cert FORCE $(call if_changed,extract_certs) -targets += x509_certificate_list blacklist_hashes_checked +targets += x509_certificate_list # If module signing is requested, say by allyesconfig, but a key has not been # supplied, then one will need to be generated to make sure the build does not diff --git a/certs/blacklist.c b/certs/blacklist.c index 25094ea73600..41f10601cc72 100644 --- a/certs/blacklist.c +++ b/certs/blacklist.c @@ -15,10 +15,9 @@ #include <linux/err.h> #include <linux/seq_file.h> #include <linux/uidgid.h> -#include <linux/verification.h> +#include <keys/asymmetric-type.h> #include <keys/system_keyring.h> #include "blacklist.h" -#include "common.h" /* * According to crypto/asymmetric_keys/x509_cert_parser.c:x509_note_pkey_algo(), @@ -365,8 +364,9 @@ static __init int load_revocation_certificate_list(void) if (revocation_certificate_list_size) pr_notice("Loading compiled-in revocation X.509 certificates\n"); - return load_certificate_list(revocation_certificate_list, revocation_certificate_list_size, - blacklist_keyring); + return x509_load_certificate_list(revocation_certificate_list, + revocation_certificate_list_size, + blacklist_keyring); } late_initcall(load_revocation_certificate_list); #endif diff --git a/certs/blacklist_hashes.c b/certs/blacklist_hashes.c index 344892337be0..86d66fe11348 100644 --- a/certs/blacklist_hashes.c +++ 
b/certs/blacklist_hashes.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include "blacklist.h" -const char __initdata *const blacklist_hashes[] = { -#include CONFIG_SYSTEM_BLACKLIST_HASH_LIST +const char __initconst *const blacklist_hashes[] = { +#include "blacklist_hash_list" , NULL }; diff --git a/certs/common.h b/certs/common.h deleted file mode 100644 index abdb5795936b..000000000000 --- a/certs/common.h +++ /dev/null @@ -1,9 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ - -#ifndef _CERT_COMMON_H -#define _CERT_COMMON_H - -int load_certificate_list(const u8 cert_list[], const unsigned long list_size, - const struct key *keyring); - -#endif diff --git a/certs/extract-cert.c b/certs/extract-cert.c index f7ef7862f207..8c1fb9a70d66 100644 --- a/certs/extract-cert.c +++ b/certs/extract-cert.c @@ -23,6 +23,13 @@ #include <openssl/err.h> #include <openssl/engine.h> +/* + * OpenSSL 3.0 deprecates the OpenSSL's ENGINE API. + * + * Remove this if/when that API is no longer used + */ +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" + #define PKEY_ID_PKCS7 2 static __attribute__((noreturn)) diff --git a/certs/system_keyring.c b/certs/system_keyring.c index 05b66ce9d1c9..5042cc54fa5e 100644 --- a/certs/system_keyring.c +++ b/certs/system_keyring.c @@ -16,7 +16,6 @@ #include <keys/asymmetric-type.h> #include <keys/system_keyring.h> #include <crypto/pkcs7.h> -#include "common.h" static struct key *builtin_trusted_keys; #ifdef CONFIG_SECONDARY_TRUSTED_KEYRING @@ -183,7 +182,8 @@ __init int load_module_cert(struct key *keyring) pr_notice("Loading compiled-in module X.509 certificates\n"); - return load_certificate_list(system_certificate_list, module_cert_size, keyring); + return x509_load_certificate_list(system_certificate_list, + module_cert_size, keyring); } /* @@ -204,7 +204,7 @@ static __init int load_system_certificate_list(void) size = system_certificate_list_size - module_cert_size; #endif - return load_certificate_list(p, size, builtin_trusted_keys); + return x509_load_certificate_list(p, size, builtin_trusted_keys); } late_initcall(load_system_certificate_list); diff --git a/crypto/Kconfig b/crypto/Kconfig index 19197469cfab..1d44893a997b 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -15,6 +15,7 @@ source "crypto/async_tx/Kconfig" # menuconfig CRYPTO tristate "Cryptographic API" + select LIB_MEMNEQ help This option provides the core Cryptographic API. diff --git a/crypto/Makefile b/crypto/Makefile index 43bc33e247d1..ceaaa9f34145 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -4,7 +4,7 @@ # obj-$(CONFIG_CRYPTO) += crypto.o -crypto-y := api.o cipher.o compress.o memneq.o +crypto-y := api.o cipher.o compress.o obj-$(CONFIG_CRYPTO_ENGINE) += crypto_engine.o obj-$(CONFIG_CRYPTO_FIPS) += fips.o diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig index 460bc5d0a828..3df3fe4ed95f 100644 --- a/crypto/asymmetric_keys/Kconfig +++ b/crypto/asymmetric_keys/Kconfig @@ -75,4 +75,14 @@ config SIGNED_PE_FILE_VERIFICATION This option provides support for verifying the signature(s) on a signed PE binary. +config FIPS_SIGNATURE_SELFTEST + bool "Run FIPS selftests on the X.509+PKCS7 signature verification" + help + This option causes some selftests to be run on the signature + verification code, using some built in data. This is required + for FIPS. 
+ depends on KEYS + depends on ASYMMETRIC_KEY_TYPE + depends on PKCS7_MESSAGE_PARSER + endif # ASYMMETRIC_KEY_TYPE diff --git a/crypto/asymmetric_keys/Makefile b/crypto/asymmetric_keys/Makefile index c38424f55b08..0d1fa1b692c6 100644 --- a/crypto/asymmetric_keys/Makefile +++ b/crypto/asymmetric_keys/Makefile @@ -20,7 +20,9 @@ x509_key_parser-y := \ x509.asn1.o \ x509_akid.asn1.o \ x509_cert_parser.o \ + x509_loader.o \ x509_public_key.o +x509_key_parser-$(CONFIG_FIPS_SIGNATURE_SELFTEST) += selftest.o $(obj)/x509_cert_parser.o: \ $(obj)/x509.asn1.h \ diff --git a/crypto/asymmetric_keys/selftest.c b/crypto/asymmetric_keys/selftest.c new file mode 100644 index 000000000000..fa0bf7f24284 --- /dev/null +++ b/crypto/asymmetric_keys/selftest.c @@ -0,0 +1,224 @@ +/* Self-testing for signature checking. + * + * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#include <linux/kernel.h> +#include <linux/cred.h> +#include <linux/key.h> +#include <crypto/pkcs7.h> +#include "x509_parser.h" + +struct certs_test { + const u8 *data; + size_t data_len; + const u8 *pkcs7; + size_t pkcs7_len; +}; + +/* + * Set of X.509 certificates to provide public keys for the tests. These will + * be loaded into a temporary keyring for the duration of the testing. + */ +static const __initconst u8 certs_selftest_keys[] = { + "\x30\x82\x05\x55\x30\x82\x03\x3d\xa0\x03\x02\x01\x02\x02\x14\x73" + "\x98\xea\x98\x2d\xd0\x2e\xa8\xb1\xcf\x57\xc7\xf2\x97\xb3\xe6\x1a" + "\xfc\x8c\x0a\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01\x0b" + "\x05\x00\x30\x34\x31\x32\x30\x30\x06\x03\x55\x04\x03\x0c\x29\x43" + "\x65\x72\x74\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69\x66" + "\x69\x63\x61\x74\x69\x6f\x6e\x20\x73\x65\x6c\x66\x2d\x74\x65\x73" + "\x74\x69\x6e\x67\x20\x6b\x65\x79\x30\x20\x17\x0d\x32\x32\x30\x35" + "\x31\x38\x32\x32\x33\x32\x34\x31\x5a\x18\x0f\x32\x31\x32\x32\x30" + "\x34\x32\x34\x32\x32\x33\x32\x34\x31\x5a\x30\x34\x31\x32\x30\x30" + "\x06\x03\x55\x04\x03\x0c\x29\x43\x65\x72\x74\x69\x66\x69\x63\x61" + "\x74\x65\x20\x76\x65\x72\x69\x66\x69\x63\x61\x74\x69\x6f\x6e\x20" + "\x73\x65\x6c\x66\x2d\x74\x65\x73\x74\x69\x6e\x67\x20\x6b\x65\x79" + "\x30\x82\x02\x22\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01" + "\x01\x05\x00\x03\x82\x02\x0f\x00\x30\x82\x02\x0a\x02\x82\x02\x01" + "\x00\xcc\xac\x49\xdd\x3b\xca\xb0\x15\x7e\x84\x6a\xb2\x0a\x69\x5f" + "\x1c\x0a\x61\x82\x3b\x4f\x2c\xa3\x95\x2c\x08\x58\x4b\xb1\x5d\x99" + "\xe0\xc3\xc1\x79\xc2\xb3\xeb\xc0\x1e\x6d\x3e\x54\x1d\xbd\xb7\x92" + "\x7b\x4d\xb5\x95\x58\xb2\x52\x2e\xc6\x24\x4b\x71\x63\x80\x32\x77" + "\xa7\x38\x5e\xdb\x72\xae\x6e\x0d\xec\xfb\xb6\x6d\x01\x7f\xe9\x55" + "\x66\xdf\xbf\x1d\x76\x78\x02\x31\xe8\xe5\x07\xf8\xb7\x82\x5c\x0d" + "\xd4\xbb\xfb\xa2\x59\x0d\x2e\x3a\x78\x95\x3a\x8b\x46\x06\x47\x44" + "\x46\xd7\xcd\x06\x6a\x41\x13\xe3\x19\xf6\xbb\x6e\x38\xf4\x83\x01" + "\xa3\xbf\x4a\x39\x4f\xd7\x0a\xe9\x38\xb3\xf5\x94\x14\x4e\xdd\xf7" + "\x43\xfd\x24\xb2\x49\x3c\xa5\xf7\x7a\x7c\xd4\x45\x3d\x97\x75\x68" + "\xf1\xed\x4c\x42\x0b\x70\xca\x85\xf3\xde\xe5\x88\x2c\xc5\xbe\xb6" + "\x97\x34\xba\x24\x02\xcd\x8b\x86\x9f\xa9\x73\xca\x73\xcf\x92\x81" + "\xee\x75\x55\xbb\x18\x67\x5c\xff\x3f\xb5\xdd\x33\x1b\x0c\xe9\x78" + "\xdb\x5c\xcf\xaa\x5c\x43\x42\xdf\x5e\xa9\x6d\xec\xd7\xd7\xff\xe6" + "\xa1\x3a\x92\x1a\xda\xae\xf6\x8c\x6f\x7b\xd5\xb4\x6e\x06\xe9\x8f" + "\xe8\xde\x09\x31\x89\xed\x0e\x11\xa1\xfa\x8a\xe9\xe9\x64\x59\x62" + "\x53\xda\xd1\x70\xbe\x11\xd4\x99\x97\x11\xcf\x99\xde\x0b\x9d\x94" + 
"\x7e\xaa\xb8\x52\xea\x37\xdb\x90\x7e\x35\xbd\xd9\xfe\x6d\x0a\x48" + "\x70\x28\xdd\xd5\x0d\x7f\x03\x80\x93\x14\x23\x8f\xb9\x22\xcd\x7c" + "\x29\xfe\xf1\x72\xb5\x5c\x0b\x12\xcf\x9c\x15\xf6\x11\x4c\x7a\x45" + "\x25\x8c\x45\x0a\x34\xac\x2d\x9a\x81\xca\x0b\x13\x22\xcd\xeb\x1a" + "\x38\x88\x18\x97\x96\x08\x81\xaa\xcc\x8f\x0f\x8a\x32\x7b\x76\x68" + "\x03\x68\x43\xbf\x11\xba\x55\x60\xfd\x80\x1c\x0d\x9b\x69\xb6\x09" + "\x72\xbc\x0f\x41\x2f\x07\x82\xc6\xe3\xb2\x13\x91\xc4\x6d\x14\x95" + "\x31\xbe\x19\xbd\xbc\xed\xe1\x4c\x74\xa2\xe0\x78\x0b\xbb\x94\xec" + "\x4c\x53\x3a\xa2\xb5\x84\x1d\x4b\x65\x7e\xdc\xf7\xdb\x36\x7d\xbe" + "\x9e\x3b\x36\x66\x42\x66\x76\x35\xbf\xbe\xf0\xc1\x3c\x7c\xe9\x42" + "\x5c\x24\x53\x03\x05\xa8\x67\x24\x50\x02\x75\xff\x24\x46\x3b\x35" + "\x89\x76\xe6\x70\xda\xc5\x51\x8c\x9a\xe5\x05\xb0\x0b\xd0\x2d\xd4" + "\x7d\x57\x75\x94\x6b\xf9\x0a\xad\x0e\x41\x00\x15\xd0\x4f\xc0\x7f" + "\x90\x2d\x18\x48\x8f\x28\xfe\x5d\xa7\xcd\x99\x9e\xbd\x02\x6c\x8a" + "\x31\xf3\x1c\xc7\x4b\xe6\x93\xcd\x42\xa2\xe4\x68\x10\x47\x9d\xfc" + "\x21\x02\x03\x01\x00\x01\xa3\x5d\x30\x5b\x30\x0c\x06\x03\x55\x1d" + "\x13\x01\x01\xff\x04\x02\x30\x00\x30\x0b\x06\x03\x55\x1d\x0f\x04" + "\x04\x03\x02\x07\x80\x30\x1d\x06\x03\x55\x1d\x0e\x04\x16\x04\x14" + "\xf5\x87\x03\xbb\x33\xce\x1b\x73\xee\x02\xec\xcd\xee\x5b\x88\x17" + "\x51\x8f\xe3\xdb\x30\x1f\x06\x03\x55\x1d\x23\x04\x18\x30\x16\x80" + "\x14\xf5\x87\x03\xbb\x33\xce\x1b\x73\xee\x02\xec\xcd\xee\x5b\x88" + "\x17\x51\x8f\xe3\xdb\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01" + "\x01\x0b\x05\x00\x03\x82\x02\x01\x00\xc0\x2e\x12\x41\x7b\x73\x85" + "\x16\xc8\xdb\x86\x79\xe8\xf5\xcd\x44\xf4\xc6\xe2\x81\x23\x5e\x47" + "\xcb\xab\x25\xf1\x1e\x58\x3e\x31\x7f\x78\xad\x85\xeb\xfe\x14\x88" + "\x60\xf7\x7f\xd2\x26\xa2\xf4\x98\x2a\xfd\xba\x05\x0c\x20\x33\x12" + "\xcc\x4d\x14\x61\x64\x81\x93\xd3\x33\xed\xc8\xff\xf1\x78\xcc\x5f" + "\x51\x9f\x09\xd7\xbe\x0d\x5c\x74\xfd\x9b\xdf\x52\x4a\xc9\xa8\x71" + "\x25\x33\x04\x10\x67\x36\xd0\xb3\x0b\xc9\xa1\x40\x72\xae\x41\x7b" + "\x68\xe6\xe4\x7b\xd0\x28\xf7\x6d\xe7\x3f\x50\xfc\x91\x7c\x91\x56" + "\xd4\xdf\xa6\xbb\xe8\x4d\x1b\x58\xaa\x28\xfa\xc1\x19\xeb\x11\x2f" + "\x24\x8b\x7c\xc5\xa9\x86\x26\xaa\x6e\xb7\x9b\xd5\xf8\x06\xfb\x02" + "\x52\x7b\x9c\x9e\xa1\xe0\x07\x8b\x5e\xe4\xb8\x55\x29\xf6\x48\x52" + "\x1c\x1b\x54\x2d\x46\xd8\xe5\x71\xb9\x60\xd1\x45\xb5\x92\x89\x8a" + "\x63\x58\x2a\xb3\xc6\xb2\x76\xe2\x3c\x82\x59\x04\xae\x5a\xc4\x99" + "\x7b\x2e\x4b\x46\x57\xb8\x29\x24\xb2\xfd\xee\x2c\x0d\xa4\x83\xfa" + "\x65\x2a\x07\x35\x8b\x97\xcf\xbd\x96\x2e\xd1\x7e\x6c\xc2\x1e\x87" + "\xb6\x6c\x76\x65\xb5\xb2\x62\xda\x8b\xe9\x73\xe3\xdb\x33\xdd\x13" + "\x3a\x17\x63\x6a\x76\xde\x8d\x8f\xe0\x47\x61\x28\x3a\x83\xff\x8f" + "\xe7\xc7\xe0\x4a\xa3\xe5\x07\xcf\xe9\x8c\x35\x35\x2e\xe7\x80\x66" + "\x31\xbf\x91\x58\x0a\xe1\x25\x3d\x38\xd3\xa4\xf0\x59\x34\x47\x07" + "\x62\x0f\xbe\x30\xdd\x81\x88\x58\xf0\x28\xb0\x96\xe5\x82\xf8\x05" + "\xb7\x13\x01\xbc\xfa\xc6\x1f\x86\x72\xcc\xf9\xee\x8e\xd9\xd6\x04" + "\x8c\x24\x6c\xbf\x0f\x5d\x37\x39\xcf\x45\xc1\x93\x3a\xd2\xed\x5c" + "\x58\x79\x74\x86\x62\x30\x7e\x8e\xbb\xdd\x7a\xa9\xed\xca\x40\xcb" + "\x62\x47\xf4\xb4\x9f\x52\x7f\x72\x63\xa8\xf0\x2b\xaf\x45\x2a\x48" + "\x19\x6d\xe3\xfb\xf9\x19\x66\x69\xc8\xcc\x62\x87\x6c\x53\x2b\x2d" + "\x6e\x90\x6c\x54\x3a\x82\x25\x41\xcb\x18\x6a\xa4\x22\xa8\xa1\xc4" + "\x47\xd7\x81\x00\x1c\x15\x51\x0f\x1a\xaf\xef\x9f\xa6\x61\x8c\xbd" + "\x6b\x8b\xed\xe6\xac\x0e\xb6\x3a\x4c\x92\xe6\x0f\x91\x0a\x0f\x71" + "\xc7\xa0\xb9\x0d\x3a\x17\x5a\x6f\x35\xc8\xe7\x50\x4f\x46\xe8\x70" + 
"\x60\x48\x06\x82\x8b\x66\x58\xe6\x73\x91\x9c\x12\x3d\x35\x8e\x46" + "\xad\x5a\xf5\xb3\xdb\x69\x21\x04\xfd\xd3\x1c\xdf\x94\x9d\x56\xb0" + "\x0a\xd1\x95\x76\x8d\xec\x9e\xdd\x0b\x15\x97\x64\xad\xe5\xf2\x62" + "\x02\xfc\x9e\x5f\x56\x42\x39\x05\xb3" +}; + +/* + * Signed data and detached signature blobs that form the verification tests. + */ +static const __initconst u8 certs_selftest_1_data[] = { + "\x54\x68\x69\x73\x20\x69\x73\x20\x73\x6f\x6d\x65\x20\x74\x65\x73" + "\x74\x20\x64\x61\x74\x61\x20\x75\x73\x65\x64\x20\x66\x6f\x72\x20" + "\x73\x65\x6c\x66\x2d\x74\x65\x73\x74\x69\x6e\x67\x20\x63\x65\x72" + "\x74\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69\x66\x69\x63" + "\x61\x74\x69\x6f\x6e\x2e\x0a" +}; + +static const __initconst u8 certs_selftest_1_pkcs7[] = { + "\x30\x82\x02\xab\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x07\x02\xa0" + "\x82\x02\x9c\x30\x82\x02\x98\x02\x01\x01\x31\x0d\x30\x0b\x06\x09" + "\x60\x86\x48\x01\x65\x03\x04\x02\x01\x30\x0b\x06\x09\x2a\x86\x48" + "\x86\xf7\x0d\x01\x07\x01\x31\x82\x02\x75\x30\x82\x02\x71\x02\x01" + "\x01\x30\x4c\x30\x34\x31\x32\x30\x30\x06\x03\x55\x04\x03\x0c\x29" + "\x43\x65\x72\x74\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69" + "\x66\x69\x63\x61\x74\x69\x6f\x6e\x20\x73\x65\x6c\x66\x2d\x74\x65" + "\x73\x74\x69\x6e\x67\x20\x6b\x65\x79\x02\x14\x73\x98\xea\x98\x2d" + "\xd0\x2e\xa8\xb1\xcf\x57\xc7\xf2\x97\xb3\xe6\x1a\xfc\x8c\x0a\x30" + "\x0b\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x30\x0d\x06\x09" + "\x2a\x86\x48\x86\xf7\x0d\x01\x01\x01\x05\x00\x04\x82\x02\x00\xac" + "\xb0\xf2\x07\xd6\x99\x6d\xc0\xc0\xd9\x8d\x31\x0d\x7e\x04\xeb\xc3" + "\x88\x90\xc4\x58\x46\xd4\xe2\xa0\xa3\x25\xe3\x04\x50\x37\x85\x8c" + "\x91\xc6\xfc\xc5\xd4\x92\xfd\x05\xd8\xb8\xa3\xb8\xba\x89\x13\x00" + "\x88\x79\x99\x51\x6b\x5b\x28\x31\xc0\xb3\x1b\x7a\x68\x2c\x00\xdb" + "\x4b\x46\x11\xf3\xfa\x50\x8e\x19\x89\xa2\x4c\xda\x4c\x89\x01\x11" + "\x89\xee\xd3\xc8\xc1\xe7\xa7\xf6\xb2\xa2\xf8\x65\xb8\x35\x20\x33" + "\xba\x12\x62\xd5\xbd\xaa\x71\xe5\x5b\xc0\x6a\x32\xff\x6a\x2e\x23" + "\xef\x2b\xb6\x58\xb1\xfb\x5f\x82\x34\x40\x6d\x9f\xbc\x27\xac\x37" + "\x23\x99\xcf\x7d\x20\xb2\x39\x01\xc0\x12\xce\xd7\x5d\x2f\xb6\xab" + "\xb5\x56\x4f\xef\xf4\x72\x07\x58\x65\xa9\xeb\x1f\x75\x1c\x5f\x0c" + "\x88\xe0\xa4\xe2\xcd\x73\x2b\x9e\xb2\x05\x7e\x12\xf8\xd0\x66\x41" + "\xcc\x12\x63\xd4\xd6\xac\x9b\x1d\x14\x77\x8d\x1c\x57\xd5\x27\xc6" + "\x49\xa2\x41\x43\xf3\x59\x29\xe5\xcb\xd1\x75\xbc\x3a\x97\x2a\x72" + "\x22\x66\xc5\x3b\xc1\xba\xfc\x53\x18\x98\xe2\x21\x64\xc6\x52\x87" + "\x13\xd5\x7c\x42\xe8\xfb\x9c\x9a\x45\x32\xd5\xa5\x22\x62\x9d\xd4" + "\xcb\xa4\xfa\x77\xbb\x50\x24\x0b\x8b\x88\x99\x15\x56\xa9\x1e\x92" + "\xbf\x5d\x94\x77\xb6\xf1\x67\x01\x60\x06\x58\x5c\xdf\x18\x52\x79" + "\x37\x30\x93\x7d\x87\x04\xf1\xe0\x55\x59\x52\xf3\xc2\xb1\x1c\x5b" + "\x12\x7c\x49\x87\xfb\xf7\xed\xdd\x95\x71\xec\x4b\x1a\x85\x08\xb0" + "\xa0\x36\xc4\x7b\xab\x40\xe0\xf1\x98\xcc\xaf\x19\x40\x8f\x47\x6f" + "\xf0\x6c\x84\x29\x7f\x7f\x04\x46\xcb\x08\x0f\xe0\xc1\xc9\x70\x6e" + "\x95\x3b\xa4\xbc\x29\x2b\x53\x67\x45\x1b\x0d\xbc\x13\xa5\x76\x31" + "\xaf\xb9\xd0\xe0\x60\x12\xd2\xf4\xb7\x7c\x58\x7e\xf6\x2d\xbb\x24" + "\x14\x5a\x20\x24\xa8\x12\xdf\x25\xbd\x42\xce\x96\x7c\x2e\xba\x14" + "\x1b\x81\x9f\x18\x45\xa4\xc6\x70\x3e\x0e\xf0\xd3\x7b\x9c\x10\xbe" + "\xb8\x7a\x89\xc5\x9e\xd9\x97\xdf\xd7\xe7\xc6\x1d\xc0\x20\x6c\xb8" + "\x1e\x3a\x63\xb8\x39\x8e\x8e\x62\xd5\xd2\xb4\xcd\xff\x46\xfc\x8e" + "\xec\x07\x35\x0c\xff\xb0\x05\xe6\xf4\xe5\xfe\xa2\xe3\x0a\xe6\x36" + "\xa7\x4a\x7e\x62\x1d\xc4\x50\x39\x35\x4e\x28\xcb\x4a\xfb\x9d\xdb" + 
"\xdd\x23\xd6\x53\xb1\x74\x77\x12\xf7\x9c\xf0\x9a\x6b\xf7\xa9\x64" + "\x2d\x86\x21\x2a\xcf\xc6\x54\xf5\xc9\xad\xfa\xb5\x12\xb4\xf3\x51" + "\x77\x55\x3c\x6f\x0c\x32\xd3\x8c\x44\x39\x71\x25\xfe\x96\xd2" +}; + +/* + * List of tests to be run. + */ +#define TEST(data, pkcs7) { data, sizeof(data) - 1, pkcs7, sizeof(pkcs7) - 1 } +static const struct certs_test certs_tests[] __initconst = { + TEST(certs_selftest_1_data, certs_selftest_1_pkcs7), +}; + +int __init fips_signature_selftest(void) +{ + struct key *keyring; + int ret, i; + + pr_notice("Running certificate verification selftests\n"); + + keyring = keyring_alloc(".certs_selftest", + GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(), + (KEY_POS_ALL & ~KEY_POS_SETATTR) | + KEY_USR_VIEW | KEY_USR_READ | + KEY_USR_SEARCH, + KEY_ALLOC_NOT_IN_QUOTA, + NULL, NULL); + if (IS_ERR(keyring)) + panic("Can't allocate certs selftest keyring: %ld\n", + PTR_ERR(keyring)); + + ret = x509_load_certificate_list(certs_selftest_keys, + sizeof(certs_selftest_keys) - 1, keyring); + if (ret < 0) + panic("Can't allocate certs selftest keyring: %d\n", ret); + + for (i = 0; i < ARRAY_SIZE(certs_tests); i++) { + const struct certs_test *test = &certs_tests[i]; + struct pkcs7_message *pkcs7; + + pkcs7 = pkcs7_parse_message(test->pkcs7, test->pkcs7_len); + if (IS_ERR(pkcs7)) + panic("Certs selftest %d: pkcs7_parse_message() = %d\n", i, ret); + + pkcs7_supply_detached_data(pkcs7, test->data, test->data_len); + + ret = pkcs7_verify(pkcs7, VERIFYING_MODULE_SIGNATURE); + if (ret < 0) + panic("Certs selftest %d: pkcs7_verify() = %d\n", i, ret); + + ret = pkcs7_validate_trust(pkcs7, keyring); + if (ret < 0) + panic("Certs selftest %d: pkcs7_validate_trust() = %d\n", i, ret); + + pkcs7_free_message(pkcs7); + } + + key_put(keyring); + return 0; +} diff --git a/certs/common.c b/crypto/asymmetric_keys/x509_loader.c index 16a220887a53..1bc169dee22e 100644 --- a/certs/common.c +++ b/crypto/asymmetric_keys/x509_loader.c @@ -2,11 +2,11 @@ #include <linux/kernel.h> #include <linux/key.h> -#include "common.h" +#include <keys/asymmetric-type.h> -int load_certificate_list(const u8 cert_list[], - const unsigned long list_size, - const struct key *keyring) +int x509_load_certificate_list(const u8 cert_list[], + const unsigned long list_size, + const struct key *keyring) { key_ref_t key; const u8 *p, *end; diff --git a/crypto/asymmetric_keys/x509_parser.h b/crypto/asymmetric_keys/x509_parser.h index 97a886cbe01c..a299c9c56f40 100644 --- a/crypto/asymmetric_keys/x509_parser.h +++ b/crypto/asymmetric_keys/x509_parser.h @@ -41,6 +41,15 @@ struct x509_certificate { }; /* + * selftest.c + */ +#ifdef CONFIG_FIPS_SIGNATURE_SELFTEST +extern int __init fips_signature_selftest(void); +#else +static inline int fips_signature_selftest(void) { return 0; } +#endif + +/* * x509_cert_parser.c */ extern void x509_free_certificate(struct x509_certificate *cert); diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c index 77ed4e93ad56..0b4943a4592b 100644 --- a/crypto/asymmetric_keys/x509_public_key.c +++ b/crypto/asymmetric_keys/x509_public_key.c @@ -244,9 +244,15 @@ static struct asymmetric_key_parser x509_key_parser = { /* * Module stuff */ +extern int __init certs_selftest(void); static int __init x509_key_init(void) { - return register_asymmetric_key_parser(&x509_key_parser); + int ret; + + ret = register_asymmetric_key_parser(&x509_key_parser); + if (ret < 0) + return ret; + return fips_signature_selftest(); } static void __exit x509_key_exit(void) diff --git 
a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 40e816419f48..9601fa92950a 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -2010,16 +2010,16 @@ retry: return err_mask; } -static bool ata_log_supported(struct ata_device *dev, u8 log) +static int ata_log_supported(struct ata_device *dev, u8 log) { struct ata_port *ap = dev->link->ap; if (dev->horkage & ATA_HORKAGE_NO_LOG_DIR) - return false; + return 0; if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1)) - return false; - return get_unaligned_le16(&ap->sector_buf[log * 2]) ? true : false; + return 0; + return get_unaligned_le16(&ap->sector_buf[log * 2]); } static bool ata_identify_page_supported(struct ata_device *dev, u8 page) @@ -2455,15 +2455,20 @@ static void ata_dev_config_cpr(struct ata_device *dev) struct ata_cpr_log *cpr_log = NULL; u8 *desc, *buf = NULL; - if (ata_id_major_version(dev->id) < 11 || - !ata_log_supported(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES)) + if (ata_id_major_version(dev->id) < 11) + goto out; + + buf_len = ata_log_supported(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES); + if (buf_len == 0) goto out; /* * Read the concurrent positioning ranges log (0x47). We can have at - * most 255 32B range descriptors plus a 64B header. + * most 255 32B range descriptors plus a 64B header. This log varies in + * size, so use the size reported in the GPL directory. Reading beyond + * the supported length will result in an error. */ - buf_len = (64 + 255 * 32 + 511) & ~511; + buf_len <<= 9; buf = kzalloc(buf_len, GFP_KERNEL); if (!buf) goto out; @@ -5462,7 +5467,7 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev, const struct ata_port_info * const * ppi, int n_ports) { - const struct ata_port_info *pi; + const struct ata_port_info *pi = &ata_dummy_port_info; struct ata_host *host; int i, j; @@ -5470,7 +5475,7 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev, if (!host) return NULL; - for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) { + for (i = 0, j = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; if (ppi[j]) diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 42cecf95a4e5..86dbb1cdfabd 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -2125,7 +2125,7 @@ static unsigned int ata_scsiop_inq_b9(struct ata_scsi_args *args, u8 *rbuf) /* SCSI Concurrent Positioning Ranges VPD page: SBC-5 rev 1 or later */ rbuf[1] = 0xb9; - put_unaligned_be16(64 + (int)cpr_log->nr_cpr * 32 - 4, &rbuf[3]); + put_unaligned_be16(64 + (int)cpr_log->nr_cpr * 32 - 4, &rbuf[2]); for (i = 0; i < cpr_log->nr_cpr; i++, desc += 32) { desc[0] = cpr_log->cpr[i].num; diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c index ca129854a88c..c38027887499 100644 --- a/drivers/ata/libata-transport.c +++ b/drivers/ata/libata-transport.c @@ -196,7 +196,7 @@ static struct { { XFER_PIO_0, "XFER_PIO_0" }, { XFER_PIO_SLOW, "XFER_PIO_SLOW" } }; -ata_bitfield_name_match(xfer,ata_xfer_names) +ata_bitfield_name_search(xfer, ata_xfer_names) /* * ATA Port attributes diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c index 6b5ed3046b44..35608a0cf552 100644 --- a/drivers/ata/pata_octeon_cf.c +++ b/drivers/ata/pata_octeon_cf.c @@ -856,12 +856,14 @@ static int octeon_cf_probe(struct platform_device *pdev) int i; res_dma = platform_get_resource(dma_dev, IORESOURCE_MEM, 0); if (!res_dma) { + put_device(&dma_dev->dev); of_node_put(dma_node); return -EINVAL; } cf_port->dma_base = 
(u64)devm_ioremap(&pdev->dev, res_dma->start, resource_size(res_dma)); if (!cf_port->dma_base) { + put_device(&dma_dev->dev); of_node_put(dma_node); return -EINVAL; } @@ -871,6 +873,7 @@ static int octeon_cf_probe(struct platform_device *pdev) irq = i; irq_handler = octeon_cf_interrupt; } + put_device(&dma_dev->dev); } of_node_put(dma_node); } diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 2ef23fce0860..a97776ea9d99 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -564,6 +564,12 @@ ssize_t __weak cpu_show_srbds(struct device *dev, return sysfs_emit(buf, "Not affected\n"); } +ssize_t __weak cpu_show_mmio_stale_data(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "Not affected\n"); +} + static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); @@ -573,6 +579,7 @@ static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL); static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL); static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL); static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL); +static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL); static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_meltdown.attr, @@ -584,6 +591,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_tsx_async_abort.attr, &dev_attr_itlb_multihit.attr, &dev_attr_srbds.attr, + &dev_attr_mmio_stale_data.attr, NULL }; diff --git a/drivers/base/init.c b/drivers/base/init.c index d8d0fe687111..397eb9880cec 100644 --- a/drivers/base/init.c +++ b/drivers/base/init.c @@ -8,6 +8,7 @@ #include <linux/init.h> #include <linux/memory.h> #include <linux/of.h> +#include <linux/backing-dev.h> #include "base.h" @@ -20,6 +21,7 @@ void __init driver_init(void) { /* These are the core pieces */ + bdi_init(&noop_backing_dev_info); devtmpfs_init(); devices_init(); buses_init(); diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 084d67fd55cc..bc60c9cd3230 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -558,7 +558,7 @@ static ssize_t hard_offline_page_store(struct device *dev, if (kstrtoull(buf, 0, &pfn) < 0) return -EINVAL; pfn >>= PAGE_SHIFT; - ret = memory_failure(pfn, 0); + ret = memory_failure(pfn, MF_SW_SIMULATED); if (ret == -EOPNOTSUPP) ret = 0; return ret ? ret : count; diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index 400c7412a7dc..a6db605707b0 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c @@ -252,6 +252,7 @@ static void regmap_irq_enable(struct irq_data *data) struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data); struct regmap *map = d->map; const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq); + unsigned int reg = irq_data->reg_offset / map->reg_stride; unsigned int mask, type; type = irq_data->type.type_falling_val | irq_data->type.type_rising_val; @@ -268,14 +269,14 @@ static void regmap_irq_enable(struct irq_data *data) * at the corresponding offset in regmap_irq_set_type(). 
*/ if (d->chip->type_in_mask && type) - mask = d->type_buf[irq_data->reg_offset / map->reg_stride]; + mask = d->type_buf[reg] & irq_data->mask; else mask = irq_data->mask; if (d->chip->clear_on_unmask) d->clear_status = true; - d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~mask; + d->mask_buf[reg] &= ~mask; } static void regmap_irq_disable(struct irq_data *data) @@ -386,6 +387,7 @@ static inline int read_sub_irq_data(struct regmap_irq_chip_data *data, subreg = &chip->sub_reg_offsets[b]; for (i = 0; i < subreg->num_regs; i++) { unsigned int offset = subreg->offset[i]; + unsigned int index = offset / map->reg_stride; if (chip->not_fixed_stride) ret = regmap_read(map, @@ -394,7 +396,7 @@ static inline int read_sub_irq_data(struct regmap_irq_chip_data *data, else ret = regmap_read(map, chip->status_base + offset, - &data->status_buf[offset]); + &data->status_buf[index]); if (ret) break; diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 2221d9863831..c3517ccc3159 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -1880,8 +1880,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg, */ bool regmap_can_raw_write(struct regmap *map) { - return map->bus && map->bus->write && map->format.format_val && - map->format.format_reg; + return map->write && map->format.format_val && map->format.format_reg; } EXPORT_SYMBOL_GPL(regmap_can_raw_write); @@ -2155,10 +2154,9 @@ int regmap_noinc_write(struct regmap *map, unsigned int reg, size_t write_len; int ret; - if (!map->bus) - return -EINVAL; - if (!map->bus->write) + if (!map->write) return -ENOTSUPP; + if (val_len % map->format.val_bytes) return -EINVAL; if (!IS_ALIGNED(reg, map->reg_stride)) @@ -2278,7 +2276,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, * Some devices don't support bulk write, for them we have a series of * single write operations. */ - if (!map->bus || !map->format.parse_inplace) { + if (!map->write || !map->format.parse_inplace) { map->lock(map->lock_arg); for (i = 0; i < val_count; i++) { unsigned int ival; @@ -2904,6 +2902,9 @@ int regmap_noinc_read(struct regmap *map, unsigned int reg, size_t read_len; int ret; + if (!map->read) + return -ENOTSUPP; + if (val_len % map->format.val_bytes) return -EINVAL; if (!IS_ALIGNED(reg, map->reg_stride)) @@ -3017,7 +3018,7 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, if (val_count == 0) return -EINVAL; - if (map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) { + if (map->read && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) { ret = regmap_raw_read(map, reg, val, val_bytes * val_count); if (ret != 0) return ret; diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index a88ce4426400..33f04ef78984 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -2114,9 +2114,11 @@ static void blkfront_closing(struct blkfront_info *info) return; /* No more blkif_request(). */ - blk_mq_stop_hw_queues(info->rq); - blk_mark_disk_dead(info->gd); - set_capacity(info->gd, 0); + if (info->rq && info->gd) { + blk_mq_stop_hw_queues(info->rq); + blk_mark_disk_dead(info->gd); + set_capacity(info->gd, 0); + } for_each_rinfo(info, rinfo, i) { /* No more gnttab callback work. 
*/ @@ -2457,16 +2459,19 @@ static int blkfront_remove(struct xenbus_device *xbdev) dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename); - del_gendisk(info->gd); + if (info->gd) + del_gendisk(info->gd); mutex_lock(&blkfront_mutex); list_del(&info->info_list); mutex_unlock(&blkfront_mutex); blkif_free(info, 0); - xlbd_release_minors(info->gd->first_minor, info->gd->minors); - blk_cleanup_disk(info->gd); - blk_mq_free_tag_set(&info->tag_set); + if (info->gd) { + xlbd_release_minors(info->gd->first_minor, info->gd->minors); + blk_cleanup_disk(info->gd); + blk_mq_free_tag_set(&info->tag_set); + } kfree(info); return 0; diff --git a/drivers/bus/bt1-apb.c b/drivers/bus/bt1-apb.c index b25ff941e7c7..63b1b4a76671 100644 --- a/drivers/bus/bt1-apb.c +++ b/drivers/bus/bt1-apb.c @@ -175,10 +175,9 @@ static int bt1_apb_request_rst(struct bt1_apb *apb) int ret; apb->prst = devm_reset_control_get_optional_exclusive(apb->dev, "prst"); - if (IS_ERR(apb->prst)) { - dev_warn(apb->dev, "Couldn't get reset control line\n"); - return PTR_ERR(apb->prst); - } + if (IS_ERR(apb->prst)) + return dev_err_probe(apb->dev, PTR_ERR(apb->prst), + "Couldn't get reset control line\n"); ret = reset_control_deassert(apb->prst); if (ret) @@ -199,10 +198,9 @@ static int bt1_apb_request_clk(struct bt1_apb *apb) int ret; apb->pclk = devm_clk_get(apb->dev, "pclk"); - if (IS_ERR(apb->pclk)) { - dev_err(apb->dev, "Couldn't get APB clock descriptor\n"); - return PTR_ERR(apb->pclk); - } + if (IS_ERR(apb->pclk)) + return dev_err_probe(apb->dev, PTR_ERR(apb->pclk), + "Couldn't get APB clock descriptor\n"); ret = clk_prepare_enable(apb->pclk); if (ret) { diff --git a/drivers/bus/bt1-axi.c b/drivers/bus/bt1-axi.c index e7a6744acc7b..70e49a6e5374 100644 --- a/drivers/bus/bt1-axi.c +++ b/drivers/bus/bt1-axi.c @@ -135,10 +135,9 @@ static int bt1_axi_request_rst(struct bt1_axi *axi) int ret; axi->arst = devm_reset_control_get_optional_exclusive(axi->dev, "arst"); - if (IS_ERR(axi->arst)) { - dev_warn(axi->dev, "Couldn't get reset control line\n"); - return PTR_ERR(axi->arst); - } + if (IS_ERR(axi->arst)) + return dev_err_probe(axi->dev, PTR_ERR(axi->arst), + "Couldn't get reset control line\n"); ret = reset_control_deassert(axi->arst); if (ret) @@ -159,10 +158,9 @@ static int bt1_axi_request_clk(struct bt1_axi *axi) int ret; axi->aclk = devm_clk_get(axi->dev, "aclk"); - if (IS_ERR(axi->aclk)) { - dev_err(axi->dev, "Couldn't get AXI Interconnect clock\n"); - return PTR_ERR(axi->aclk); - } + if (IS_ERR(axi->aclk)) + return dev_err_probe(axi->dev, PTR_ERR(axi->aclk), + "Couldn't get AXI Interconnect clock\n"); ret = clk_prepare_enable(axi->aclk); if (ret) { diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c index e81a9700cfd0..6143dbf31f31 100644 --- a/drivers/bus/fsl-mc/fsl-mc-bus.c +++ b/drivers/bus/fsl-mc/fsl-mc-bus.c @@ -1239,14 +1239,14 @@ error_cleanup_mc_io: static int fsl_mc_bus_remove(struct platform_device *pdev) { struct fsl_mc *mc = platform_get_drvdata(pdev); + struct fsl_mc_io *mc_io; if (!fsl_mc_is_root_dprc(&mc->root_mc_bus_dev->dev)) return -EINVAL; + mc_io = mc->root_mc_bus_dev->mc_io; fsl_mc_device_remove(mc->root_mc_bus_dev); - - fsl_destroy_mc_io(mc->root_mc_bus_dev->mc_io); - mc->root_mc_bus_dev->mc_io = NULL; + fsl_destroy_mc_io(mc_io); bus_unregister_notifier(&fsl_mc_bus_type, &fsl_mc_nb); diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 69fd31ffb847..0b6c03643ddc 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -429,28 +429,40 @@ config ADI driver include crash and 
 	  makedumpfile.

 config RANDOM_TRUST_CPU
-	bool "Trust the CPU manufacturer to initialize Linux's CRNG"
+	bool "Initialize RNG using CPU RNG instructions"
+	default y
 	depends on ARCH_RANDOM
-	default n
 	help
-	  Assume that CPU manufacturer (e.g., Intel or AMD for RDSEED or
-	  RDRAND, IBM for the S390 and Power PC architectures) is trustworthy
-	  for the purposes of initializing Linux's CRNG. Since this is not
-	  something that can be independently audited, this amounts to trusting
-	  that CPU manufacturer (perhaps with the insistence or mandate
-	  of a Nation State's intelligence or law enforcement agencies)
-	  has not installed a hidden back door to compromise the CPU's
-	  random number generation facilities. This can also be configured
-	  at boot with "random.trust_cpu=on/off".
+	  Initialize the RNG using random numbers supplied by the CPU's
+	  RNG instructions (e.g. RDRAND), if supported and available. These
+	  random numbers are never used directly, but are rather hashed into
+	  the main input pool, and this happens regardless of whether or not
+	  this option is enabled. Instead, this option controls whether
+	  they are credited and hence can initialize the RNG. Additionally,
+	  other sources of randomness are always used, regardless of this
+	  setting. Enabling this implies trusting that the CPU can supply high
+	  quality and non-backdoored random numbers.
+
+	  Say Y here unless you have reason to mistrust your CPU or believe
+	  its RNG facilities may be faulty. This may also be configured at
+	  boot time with "random.trust_cpu=on/off".

 config RANDOM_TRUST_BOOTLOADER
-	bool "Trust the bootloader to initialize Linux's CRNG"
-	help
-	  Some bootloaders can provide entropy to increase the kernel's initial
-	  device randomness. Say Y here to assume the entropy provided by the
-	  booloader is trustworthy so it will be added to the kernel's entropy
-	  pool. Otherwise, say N here so it will be regarded as device input that
-	  only mixes the entropy pool. This can also be configured at boot with
-	  "random.trust_bootloader=on/off".
+	bool "Initialize RNG using bootloader-supplied seed"
+	default y
+	help
+	  Initialize the RNG using a seed supplied by the bootloader or boot
+	  environment (e.g. EFI or a bootloader-generated device tree). This
+	  seed is not used directly, but is rather hashed into the main input
+	  pool, and this happens regardless of whether or not this option is
+	  enabled. Instead, this option controls whether the seed is credited
+	  and hence can initialize the RNG. Additionally, other sources of
+	  randomness are always used, regardless of this setting. Enabling
+	  this implies trusting that the bootloader can supply high quality and
+	  non-backdoored seeds.
+
+	  Say Y here unless you have reason to mistrust your bootloader or
+	  believe its RNG facilities may be faulty. This may also be configured
+	  at boot time with "random.trust_bootloader=on/off".
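A short usage note on the two options above. Both help texts make the same point: the CPU and bootloader seeds are always hashed into the input pool; the options only control whether those seeds are credited toward initializing the RNG. As a minimal sketch (the parameter names come from the help texts above; how you append them to the kernel command line depends on your bootloader), booting with

    random.trust_cpu=off random.trust_bootloader=off

mixes both seeds without crediting them, so neither can by itself bring the RNG to the initialized state, while "=on" re-enables crediting. These parameters correspond to the trust_cpu and trust_bootloader flags parsed in drivers/char/random.c in the hunks below.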
endmenu diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c index e856df7e285c..a6f3a8a2aca6 100644 --- a/drivers/char/hw_random/virtio-rng.c +++ b/drivers/char/hw_random/virtio-rng.c @@ -159,6 +159,8 @@ static int probe_common(struct virtio_device *vdev) goto err_find; } + virtio_device_ready(vdev); + /* we always have a pending entropy request */ request_entropy(vi); diff --git a/drivers/char/lp.c b/drivers/char/lp.c index 0e22e3b0a04e..38aad99ebb61 100644 --- a/drivers/char/lp.c +++ b/drivers/char/lp.c @@ -1019,7 +1019,7 @@ static struct parport_driver lp_driver = { static int __init lp_init(void) { - int i, err = 0; + int i, err; if (parport_nr[0] == LP_PARPORT_OFF) return 0; diff --git a/drivers/char/random.c b/drivers/char/random.c index b691b9d59503..e3dd1dd3dd22 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -87,7 +87,7 @@ static struct fasync_struct *fasync; /* Control how we warn userspace. */ static struct ratelimit_state urandom_warning = - RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3); + RATELIMIT_STATE_INIT_FLAGS("urandom_warning", HZ, 3, RATELIMIT_MSG_ON_RELEASE); static int ratelimit_disable __read_mostly = IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM); module_param_named(ratelimit_disable, ratelimit_disable, int, 0644); @@ -408,7 +408,7 @@ static ssize_t get_random_bytes_user(struct iov_iter *iter) /* * Immediately overwrite the ChaCha key at index 4 with random - * bytes, in case userspace causes copy_to_user() below to sleep + * bytes, in case userspace causes copy_to_iter() below to sleep * forever, so that we still retain forward secrecy in that case. */ crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE); @@ -650,7 +650,8 @@ static void __cold _credit_init_bits(size_t bits) if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) { crng_reseed(); /* Sets crng_init to CRNG_READY under base_crng.lock. 
*/ - execute_in_process_context(crng_set_ready, &set_ready); + if (static_key_initialized) + execute_in_process_context(crng_set_ready, &set_ready); wake_up_interruptible(&crng_init_wait); kill_fasync(&fasync, SIGIO, POLL_IN); pr_notice("crng init done\n"); @@ -724,9 +725,8 @@ static void __cold _credit_init_bits(size_t bits) * **********************************************************************/ -static bool used_arch_random; -static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU); -static bool trust_bootloader __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER); +static bool trust_cpu __initdata = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU); +static bool trust_bootloader __initdata = IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER); static int __init parse_trust_cpu(char *arg) { return kstrtobool(arg, &trust_cpu); @@ -776,7 +776,7 @@ static struct notifier_block pm_notifier = { .notifier_call = random_pm_notifica int __init random_init(const char *command_line) { ktime_t now = ktime_get_real(); - unsigned int i, arch_bytes; + unsigned int i, arch_bits; unsigned long entropy; #if defined(LATENT_ENTROPY_PLUGIN) @@ -784,12 +784,12 @@ int __init random_init(const char *command_line) _mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed)); #endif - for (i = 0, arch_bytes = BLAKE2S_BLOCK_SIZE; + for (i = 0, arch_bits = BLAKE2S_BLOCK_SIZE * 8; i < BLAKE2S_BLOCK_SIZE; i += sizeof(entropy)) { if (!arch_get_random_seed_long_early(&entropy) && !arch_get_random_long_early(&entropy)) { entropy = random_get_entropy(); - arch_bytes -= sizeof(entropy); + arch_bits -= sizeof(entropy) * 8; } _mix_pool_bytes(&entropy, sizeof(entropy)); } @@ -798,11 +798,18 @@ int __init random_init(const char *command_line) _mix_pool_bytes(command_line, strlen(command_line)); add_latent_entropy(); + /* + * If we were initialized by the bootloader before jump labels are + * initialized, then we should enable the static branch here, where + * it's guaranteed that jump labels have been initialized. + */ + if (!static_branch_likely(&crng_is_ready) && crng_init >= CRNG_READY) + crng_set_ready(NULL); + if (crng_ready()) crng_reseed(); else if (trust_cpu) - credit_init_bits(arch_bytes * 8); - used_arch_random = arch_bytes * 8 >= POOL_READY_BITS; + _credit_init_bits(arch_bits); WARN_ON(register_pm_notifier(&pm_notifier)); @@ -812,17 +819,6 @@ int __init random_init(const char *command_line) } /* - * Returns whether arch randomness has been mixed into the initial - * state of the RNG, regardless of whether or not that randomness - * was credited. Knowing this is only good for a very limited set - * of uses, such as early init printk pointer obfuscation. - */ -bool rng_has_arch_random(void) -{ - return used_arch_random; -} - -/* * Add device- or boot-specific data to the input pool to help * initialize it. * @@ -865,13 +861,12 @@ EXPORT_SYMBOL_GPL(add_hwgenerator_randomness); * Handle random seed passed by bootloader, and credit it if * CONFIG_RANDOM_TRUST_BOOTLOADER is set. 
*/ -void __cold add_bootloader_randomness(const void *buf, size_t len) +void __init add_bootloader_randomness(const void *buf, size_t len) { mix_pool_bytes(buf, len); if (trust_bootloader) credit_init_bits(len * 8); } -EXPORT_SYMBOL_GPL(add_bootloader_randomness); #if IS_ENABLED(CONFIG_VMGENID) static BLOCKING_NOTIFIER_HEAD(vmfork_chain); @@ -1014,7 +1009,7 @@ void add_interrupt_randomness(int irq) if (new_count & MIX_INFLIGHT) return; - if (new_count < 64 && !time_is_before_jiffies(fast_pool->last + HZ)) + if (new_count < 1024 && !time_is_before_jiffies(fast_pool->last + HZ)) return; if (unlikely(!fast_pool->mix.func)) diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c index ff188ab68496..bb47610bbd1c 100644 --- a/drivers/clocksource/hyperv_timer.c +++ b/drivers/clocksource/hyperv_timer.c @@ -565,4 +565,3 @@ void __init hv_init_clocksource(void) hv_sched_clock_offset = hv_read_reference_counter(); hv_setup_sched_clock(read_hv_sched_clock_msr); } -EXPORT_SYMBOL_GPL(hv_init_clocksource); diff --git a/drivers/comedi/drivers/vmk80xx.c b/drivers/comedi/drivers/vmk80xx.c index 46023adc5395..4536ed43f65b 100644 --- a/drivers/comedi/drivers/vmk80xx.c +++ b/drivers/comedi/drivers/vmk80xx.c @@ -684,7 +684,7 @@ static int vmk80xx_alloc_usb_buffers(struct comedi_device *dev) if (!devpriv->usb_rx_buf) return -ENOMEM; - size = max(usb_endpoint_maxp(devpriv->ep_rx), MIN_BUF_SIZE); + size = max(usb_endpoint_maxp(devpriv->ep_tx), MIN_BUF_SIZE); devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL); if (!devpriv->usb_tx_buf) return -ENOMEM; diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c index e7330684d3b8..9631f2fd2faf 100644 --- a/drivers/dma-buf/udmabuf.c +++ b/drivers/dma-buf/udmabuf.c @@ -32,8 +32,11 @@ static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct udmabuf *ubuf = vma->vm_private_data; + pgoff_t pgoff = vmf->pgoff; - vmf->page = ubuf->pages[vmf->pgoff]; + if (pgoff >= ubuf->pagecount) + return VM_FAULT_SIGBUS; + vmf->page = ubuf->pages[pgoff]; get_page(vmf->page); return 0; } diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index c9fe5903725a..9c89f7d53e99 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c @@ -1211,7 +1211,7 @@ static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg) struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2; struct fw_card *card = client->device->card; struct timespec64 ts = {0, 0}; - u32 cycle_time; + u32 cycle_time = 0; int ret = 0; local_irq_disable(); diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c index 90ed8fdaba75..adddd8c45d0c 100644 --- a/drivers/firewire/core-device.c +++ b/drivers/firewire/core-device.c @@ -372,8 +372,7 @@ static ssize_t rom_index_show(struct device *dev, struct fw_device *device = fw_device(dev->parent); struct fw_unit *unit = fw_unit(dev); - return snprintf(buf, PAGE_SIZE, "%d\n", - (int)(unit->directory - device->config_rom)); + return sysfs_emit(buf, "%td\n", unit->directory - device->config_rom); } static struct device_attribute fw_unit_attributes[] = { @@ -403,8 +402,7 @@ static ssize_t guid_show(struct device *dev, int ret; down_read(&fw_device_rwsem); - ret = snprintf(buf, PAGE_SIZE, "0x%08x%08x\n", - device->config_rom[3], device->config_rom[4]); + ret = sysfs_emit(buf, "0x%08x%08x\n", device->config_rom[3], device->config_rom[4]); up_read(&fw_device_rwsem); return ret; diff --git a/drivers/firmware/arm_scmi/base.c 
b/drivers/firmware/arm_scmi/base.c index 20fba7370f4e..a52f084a6a87 100644 --- a/drivers/firmware/arm_scmi/base.c +++ b/drivers/firmware/arm_scmi/base.c @@ -36,7 +36,7 @@ struct scmi_msg_resp_base_attributes { struct scmi_msg_resp_base_discover_agent { __le32 agent_id; - u8 name[SCMI_MAX_STR_SIZE]; + u8 name[SCMI_SHORT_NAME_MAX_SIZE]; }; @@ -119,7 +119,7 @@ scmi_base_vendor_id_get(const struct scmi_protocol_handle *ph, bool sub_vendor) ret = ph->xops->do_xfer(ph, t); if (!ret) - memcpy(vendor_id, t->rx.buf, size); + strscpy(vendor_id, t->rx.buf, size); ph->xops->xfer_put(ph, t); @@ -221,11 +221,17 @@ scmi_base_implementation_list_get(const struct scmi_protocol_handle *ph, calc_list_sz = (1 + (loop_num_ret - 1) / sizeof(u32)) * sizeof(u32); if (calc_list_sz != real_list_sz) { - dev_err(dev, - "Malformed reply - real_sz:%zd calc_sz:%u\n", - real_list_sz, calc_list_sz); - ret = -EPROTO; - break; + dev_warn(dev, + "Malformed reply - real_sz:%zd calc_sz:%u (loop_num_ret:%d)\n", + real_list_sz, calc_list_sz, loop_num_ret); + /* + * Bail out if the expected list size is bigger than the + * total payload size of the received reply. + */ + if (calc_list_sz > real_list_sz) { + ret = -EPROTO; + break; + } } for (loop = 0; loop < loop_num_ret; loop++) @@ -270,7 +276,7 @@ static int scmi_base_discover_agent_get(const struct scmi_protocol_handle *ph, ret = ph->xops->do_xfer(ph, t); if (!ret) { agent_info = t->rx.buf; - strlcpy(name, agent_info->name, SCMI_MAX_STR_SIZE); + strscpy(name, agent_info->name, SCMI_SHORT_NAME_MAX_SIZE); } ph->xops->xfer_put(ph, t); @@ -369,7 +375,7 @@ static int scmi_base_protocol_init(const struct scmi_protocol_handle *ph) int id, ret; u8 *prot_imp; u32 version; - char name[SCMI_MAX_STR_SIZE]; + char name[SCMI_SHORT_NAME_MAX_SIZE]; struct device *dev = ph->dev; struct scmi_revision_info *rev = scmi_revision_area_get(ph); diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c index 4d36a9a133d1..c7a83f6e38e5 100644 --- a/drivers/firmware/arm_scmi/clock.c +++ b/drivers/firmware/arm_scmi/clock.c @@ -153,7 +153,7 @@ static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph, if (!ret) { u32 latency = 0; attributes = le32_to_cpu(attr->attributes); - strlcpy(clk->name, attr->name, SCMI_MAX_STR_SIZE); + strscpy(clk->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE); /* clock_enable_latency field is present only since SCMI v3.1 */ if (PROTOCOL_REV_MAJOR(version) >= 0x2) latency = le32_to_cpu(attr->clock_enable_latency); @@ -266,9 +266,7 @@ scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id, struct scmi_clock_info *clk) { int ret; - void *iter; - struct scmi_msg_clock_describe_rates *msg; struct scmi_iterator_ops ops = { .prepare_message = iter_clk_describe_prepare_message, .update_state = iter_clk_describe_update_state, @@ -281,7 +279,8 @@ scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id, iter = ph->hops->iter_response_init(ph, &ops, SCMI_MAX_NUM_RATES, CLOCK_DESCRIBE_RATES, - sizeof(*msg), &cpriv); + sizeof(struct scmi_msg_clock_describe_rates), + &cpriv); if (IS_ERR(iter)) return PTR_ERR(iter); diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c index 8f4051aca220..bbb0331801ff 100644 --- a/drivers/firmware/arm_scmi/perf.c +++ b/drivers/firmware/arm_scmi/perf.c @@ -252,7 +252,7 @@ scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph, dom_info->mult_factor = (dom_info->sustained_freq_khz * 1000) / dom_info->sustained_perf_level; - 
strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE); + strscpy(dom_info->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE); } ph->xops->xfer_put(ph, t); @@ -332,7 +332,6 @@ scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph, u32 domain, { int ret; void *iter; - struct scmi_msg_perf_describe_levels *msg; struct scmi_iterator_ops ops = { .prepare_message = iter_perf_levels_prepare_message, .update_state = iter_perf_levels_update_state, @@ -345,7 +344,8 @@ scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph, u32 domain, iter = ph->hops->iter_response_init(ph, &ops, MAX_OPPS, PERF_DESCRIBE_LEVELS, - sizeof(*msg), &ppriv); + sizeof(struct scmi_msg_perf_describe_levels), + &ppriv); if (IS_ERR(iter)) return PTR_ERR(iter); diff --git a/drivers/firmware/arm_scmi/power.c b/drivers/firmware/arm_scmi/power.c index 964882cc8747..356e83631664 100644 --- a/drivers/firmware/arm_scmi/power.c +++ b/drivers/firmware/arm_scmi/power.c @@ -122,7 +122,7 @@ scmi_power_domain_attributes_get(const struct scmi_protocol_handle *ph, dom_info->state_set_notify = SUPPORTS_STATE_SET_NOTIFY(flags); dom_info->state_set_async = SUPPORTS_STATE_SET_ASYNC(flags); dom_info->state_set_sync = SUPPORTS_STATE_SET_SYNC(flags); - strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE); + strscpy(dom_info->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE); } ph->xops->xfer_put(ph, t); diff --git a/drivers/firmware/arm_scmi/protocols.h b/drivers/firmware/arm_scmi/protocols.h index 73304af5ec4a..c679f3fb8718 100644 --- a/drivers/firmware/arm_scmi/protocols.h +++ b/drivers/firmware/arm_scmi/protocols.h @@ -24,8 +24,6 @@ #include <asm/unaligned.h> -#define SCMI_SHORT_NAME_MAX_SIZE 16 - #define PROTOCOL_REV_MINOR_MASK GENMASK(15, 0) #define PROTOCOL_REV_MAJOR_MASK GENMASK(31, 16) #define PROTOCOL_REV_MAJOR(x) ((u16)(FIELD_GET(PROTOCOL_REV_MAJOR_MASK, (x)))) diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c index a420a9102094..673f3eb498f4 100644 --- a/drivers/firmware/arm_scmi/reset.c +++ b/drivers/firmware/arm_scmi/reset.c @@ -116,7 +116,7 @@ scmi_reset_domain_attributes_get(const struct scmi_protocol_handle *ph, dom_info->latency_us = le32_to_cpu(attr->latency); if (dom_info->latency_us == U32_MAX) dom_info->latency_us = 0; - strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE); + strscpy(dom_info->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE); } ph->xops->xfer_put(ph, t); diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c index 21e0ce89b153..7288c6117838 100644 --- a/drivers/firmware/arm_scmi/sensors.c +++ b/drivers/firmware/arm_scmi/sensors.c @@ -338,7 +338,6 @@ static int scmi_sensor_update_intervals(const struct scmi_protocol_handle *ph, struct scmi_sensor_info *s) { void *iter; - struct scmi_msg_sensor_list_update_intervals *msg; struct scmi_iterator_ops ops = { .prepare_message = iter_intervals_prepare_message, .update_state = iter_intervals_update_state, @@ -351,22 +350,28 @@ static int scmi_sensor_update_intervals(const struct scmi_protocol_handle *ph, iter = ph->hops->iter_response_init(ph, &ops, s->intervals.count, SENSOR_LIST_UPDATE_INTERVALS, - sizeof(*msg), &upriv); + sizeof(struct scmi_msg_sensor_list_update_intervals), + &upriv); if (IS_ERR(iter)) return PTR_ERR(iter); return ph->hops->iter_response_run(iter); } +struct scmi_apriv { + bool any_axes_support_extended_names; + struct scmi_sensor_info *s; +}; + static void iter_axes_desc_prepare_message(void *message, const unsigned int desc_index, const void *priv) { struct 
scmi_msg_sensor_axis_description_get *msg = message; - const struct scmi_sensor_info *s = priv; + const struct scmi_apriv *apriv = priv; /* Set the number of sensors to be skipped/already read */ - msg->id = cpu_to_le32(s->id); + msg->id = cpu_to_le32(apriv->s->id); msg->axis_desc_index = cpu_to_le32(desc_index); } @@ -393,19 +398,21 @@ iter_axes_desc_process_response(const struct scmi_protocol_handle *ph, u32 attrh, attrl; struct scmi_sensor_axis_info *a; size_t dsize = SCMI_MSG_RESP_AXIS_DESCR_BASE_SZ; - struct scmi_sensor_info *s = priv; + struct scmi_apriv *apriv = priv; const struct scmi_axis_descriptor *adesc = st->priv; attrl = le32_to_cpu(adesc->attributes_low); + if (SUPPORTS_EXTENDED_AXIS_NAMES(attrl)) + apriv->any_axes_support_extended_names = true; - a = &s->axis[st->desc_index + st->loop_idx]; + a = &apriv->s->axis[st->desc_index + st->loop_idx]; a->id = le32_to_cpu(adesc->id); a->extended_attrs = SUPPORTS_EXTEND_ATTRS(attrl); attrh = le32_to_cpu(adesc->attributes_high); a->scale = S32_EXT(SENSOR_SCALE(attrh)); a->type = SENSOR_TYPE(attrh); - strscpy(a->name, adesc->name, SCMI_MAX_STR_SIZE); + strscpy(a->name, adesc->name, SCMI_SHORT_NAME_MAX_SIZE); if (a->extended_attrs) { unsigned int ares = le32_to_cpu(adesc->resolution); @@ -444,10 +451,19 @@ iter_axes_extended_name_process_response(const struct scmi_protocol_handle *ph, void *priv) { struct scmi_sensor_axis_info *a; - const struct scmi_sensor_info *s = priv; + const struct scmi_apriv *apriv = priv; struct scmi_sensor_axis_name_descriptor *adesc = st->priv; + u32 axis_id = le32_to_cpu(adesc->axis_id); - a = &s->axis[st->desc_index + st->loop_idx]; + if (axis_id >= st->max_resources) + return -EPROTO; + + /* + * Pick the corresponding descriptor based on the axis_id embedded + * in the reply since the list of axes supporting extended names + * can be a subset of all the axes. + */ + a = &apriv->s->axis[axis_id]; strscpy(a->name, adesc->name, SCMI_MAX_STR_SIZE); st->priv = ++adesc; @@ -458,21 +474,36 @@ static int scmi_sensor_axis_extended_names_get(const struct scmi_protocol_handle *ph, struct scmi_sensor_info *s) { + int ret; void *iter; - struct scmi_msg_sensor_axis_description_get *msg; struct scmi_iterator_ops ops = { .prepare_message = iter_axes_desc_prepare_message, .update_state = iter_axes_extended_name_update_state, .process_response = iter_axes_extended_name_process_response, }; + struct scmi_apriv apriv = { + .any_axes_support_extended_names = false, + .s = s, + }; iter = ph->hops->iter_response_init(ph, &ops, s->num_axis, SENSOR_AXIS_NAME_GET, - sizeof(*msg), s); + sizeof(struct scmi_msg_sensor_axis_description_get), + &apriv); if (IS_ERR(iter)) return PTR_ERR(iter); - return ph->hops->iter_response_run(iter); + /* + * Do not cause whole protocol initialization failure when failing to + * get extended names for axes. 
+ */ + ret = ph->hops->iter_response_run(iter); + if (ret) + dev_warn(ph->dev, + "Failed to get axes extended names for %s (ret:%d).\n", + s->name, ret); + + return 0; } static int scmi_sensor_axis_description(const struct scmi_protocol_handle *ph, @@ -481,12 +512,15 @@ static int scmi_sensor_axis_description(const struct scmi_protocol_handle *ph, { int ret; void *iter; - struct scmi_msg_sensor_axis_description_get *msg; struct scmi_iterator_ops ops = { .prepare_message = iter_axes_desc_prepare_message, .update_state = iter_axes_desc_update_state, .process_response = iter_axes_desc_process_response, }; + struct scmi_apriv apriv = { + .any_axes_support_extended_names = false, + .s = s, + }; s->axis = devm_kcalloc(ph->dev, s->num_axis, sizeof(*s->axis), GFP_KERNEL); @@ -495,7 +529,8 @@ static int scmi_sensor_axis_description(const struct scmi_protocol_handle *ph, iter = ph->hops->iter_response_init(ph, &ops, s->num_axis, SENSOR_AXIS_DESCRIPTION_GET, - sizeof(*msg), s); + sizeof(struct scmi_msg_sensor_axis_description_get), + &apriv); if (IS_ERR(iter)) return PTR_ERR(iter); @@ -503,7 +538,8 @@ static int scmi_sensor_axis_description(const struct scmi_protocol_handle *ph, if (ret) return ret; - if (PROTOCOL_REV_MAJOR(version) >= 0x3) + if (PROTOCOL_REV_MAJOR(version) >= 0x3 && + apriv.any_axes_support_extended_names) ret = scmi_sensor_axis_extended_names_get(ph, s); return ret; @@ -598,7 +634,7 @@ iter_sens_descr_process_response(const struct scmi_protocol_handle *ph, SUPPORTS_AXIS(attrh) ? SENSOR_AXIS_NUMBER(attrh) : 0, SCMI_MAX_NUM_SENSOR_AXIS); - strscpy(s->name, sdesc->name, SCMI_MAX_STR_SIZE); + strscpy(s->name, sdesc->name, SCMI_SHORT_NAME_MAX_SIZE); /* * If supported overwrite short name with the extended diff --git a/drivers/firmware/arm_scmi/voltage.c b/drivers/firmware/arm_scmi/voltage.c index 9d195d8719ab..eaa8d944926a 100644 --- a/drivers/firmware/arm_scmi/voltage.c +++ b/drivers/firmware/arm_scmi/voltage.c @@ -180,7 +180,6 @@ static int scmi_voltage_levels_get(const struct scmi_protocol_handle *ph, { int ret; void *iter; - struct scmi_msg_cmd_describe_levels *msg; struct scmi_iterator_ops ops = { .prepare_message = iter_volt_levels_prepare_message, .update_state = iter_volt_levels_update_state, @@ -193,7 +192,8 @@ static int scmi_voltage_levels_get(const struct scmi_protocol_handle *ph, iter = ph->hops->iter_response_init(ph, &ops, v->num_levels, VOLTAGE_DESCRIBE_LEVELS, - sizeof(*msg), &vpriv); + sizeof(struct scmi_msg_cmd_describe_levels), + &vpriv); if (IS_ERR(iter)) return PTR_ERR(iter); @@ -225,15 +225,14 @@ static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph, /* Retrieve domain attributes at first ... 
*/ put_unaligned_le32(dom, td->tx.buf); - ret = ph->xops->do_xfer(ph, td); /* Skip domain on comms error */ - if (ret) + if (ph->xops->do_xfer(ph, td)) continue; v = vinfo->domains + dom; v->id = dom; attributes = le32_to_cpu(resp_dom->attr); - strlcpy(v->name, resp_dom->name, SCMI_MAX_STR_SIZE); + strscpy(v->name, resp_dom->name, SCMI_SHORT_NAME_MAX_SIZE); /* * If supported overwrite short name with the extended one; @@ -249,12 +248,8 @@ static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph, v->async_level_set = true; } - ret = scmi_voltage_levels_get(ph, v); /* Skip invalid voltage descriptors */ - if (ret) - continue; - - ph->xops->reset_rx_to_maxsz(ph, td); + scmi_voltage_levels_get(ph, v); } ph->xops->xfer_put(ph, td); diff --git a/drivers/firmware/efi/sysfb_efi.c b/drivers/firmware/efi/sysfb_efi.c index 4c7c9dd7733f..7882d4b3f2be 100644 --- a/drivers/firmware/efi/sysfb_efi.c +++ b/drivers/firmware/efi/sysfb_efi.c @@ -26,8 +26,6 @@ #include <linux/sysfb.h> #include <video/vga.h> -#include <asm/efi.h> - enum { OVERRIDE_NONE = 0x0, OVERRIDE_BASE = 0x1, diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c index b55c74a5e064..1ee62cd58582 100644 --- a/drivers/gpio/gpio-crystalcove.c +++ b/drivers/gpio/gpio-crystalcove.c @@ -15,6 +15,7 @@ #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/seq_file.h> +#include <linux/types.h> #define CRYSTALCOVE_GPIO_NUM 16 #define CRYSTALCOVE_VGPIO_NUM 95 @@ -110,8 +111,7 @@ static inline int to_reg(int gpio, enum ctrl_register reg_type) return reg + gpio % 8; } -static void crystalcove_update_irq_mask(struct crystalcove_gpio *cg, - int gpio) +static void crystalcove_update_irq_mask(struct crystalcove_gpio *cg, int gpio) { u8 mirqs0 = gpio < 8 ? 
MGPIO0IRQS0 : MGPIO1IRQS0; int mask = BIT(gpio % 8); @@ -140,8 +140,7 @@ static int crystalcove_gpio_dir_in(struct gpio_chip *chip, unsigned int gpio) return regmap_write(cg->regmap, reg, CTLO_INPUT_SET); } -static int crystalcove_gpio_dir_out(struct gpio_chip *chip, unsigned int gpio, - int value) +static int crystalcove_gpio_dir_out(struct gpio_chip *chip, unsigned int gpio, int value) { struct crystalcove_gpio *cg = gpiochip_get_data(chip); int reg = to_reg(gpio, CTRL_OUT); @@ -168,8 +167,7 @@ static int crystalcove_gpio_get(struct gpio_chip *chip, unsigned int gpio) return val & 0x1; } -static void crystalcove_gpio_set(struct gpio_chip *chip, - unsigned int gpio, int value) +static void crystalcove_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value) { struct crystalcove_gpio *cg = gpiochip_get_data(chip); int reg = to_reg(gpio, CTRL_OUT); @@ -185,10 +183,10 @@ static void crystalcove_gpio_set(struct gpio_chip *chip, static int crystalcove_irq_type(struct irq_data *data, unsigned int type) { - struct crystalcove_gpio *cg = - gpiochip_get_data(irq_data_get_irq_chip_data(data)); + struct crystalcove_gpio *cg = gpiochip_get_data(irq_data_get_irq_chip_data(data)); + irq_hw_number_t hwirq = irqd_to_hwirq(data); - if (data->hwirq >= CRYSTALCOVE_GPIO_NUM) + if (hwirq >= CRYSTALCOVE_GPIO_NUM) return 0; switch (type) { @@ -215,22 +213,20 @@ static int crystalcove_irq_type(struct irq_data *data, unsigned int type) static void crystalcove_bus_lock(struct irq_data *data) { - struct crystalcove_gpio *cg = - gpiochip_get_data(irq_data_get_irq_chip_data(data)); + struct crystalcove_gpio *cg = gpiochip_get_data(irq_data_get_irq_chip_data(data)); mutex_lock(&cg->buslock); } static void crystalcove_bus_sync_unlock(struct irq_data *data) { - struct crystalcove_gpio *cg = - gpiochip_get_data(irq_data_get_irq_chip_data(data)); - int gpio = data->hwirq; + struct crystalcove_gpio *cg = gpiochip_get_data(irq_data_get_irq_chip_data(data)); + irq_hw_number_t hwirq = irqd_to_hwirq(data); if (cg->update & UPDATE_IRQ_TYPE) - crystalcove_update_irq_ctrl(cg, gpio); + crystalcove_update_irq_ctrl(cg, hwirq); if (cg->update & UPDATE_IRQ_MASK) - crystalcove_update_irq_mask(cg, gpio); + crystalcove_update_irq_mask(cg, hwirq); cg->update = 0; mutex_unlock(&cg->buslock); @@ -238,34 +234,43 @@ static void crystalcove_bus_sync_unlock(struct irq_data *data) static void crystalcove_irq_unmask(struct irq_data *data) { - struct crystalcove_gpio *cg = - gpiochip_get_data(irq_data_get_irq_chip_data(data)); + struct gpio_chip *gc = irq_data_get_irq_chip_data(data); + struct crystalcove_gpio *cg = gpiochip_get_data(gc); + irq_hw_number_t hwirq = irqd_to_hwirq(data); - if (data->hwirq < CRYSTALCOVE_GPIO_NUM) { - cg->set_irq_mask = false; - cg->update |= UPDATE_IRQ_MASK; - } + if (hwirq >= CRYSTALCOVE_GPIO_NUM) + return; + + gpiochip_enable_irq(gc, hwirq); + + cg->set_irq_mask = false; + cg->update |= UPDATE_IRQ_MASK; } static void crystalcove_irq_mask(struct irq_data *data) { - struct crystalcove_gpio *cg = - gpiochip_get_data(irq_data_get_irq_chip_data(data)); + struct gpio_chip *gc = irq_data_get_irq_chip_data(data); + struct crystalcove_gpio *cg = gpiochip_get_data(gc); + irq_hw_number_t hwirq = irqd_to_hwirq(data); - if (data->hwirq < CRYSTALCOVE_GPIO_NUM) { - cg->set_irq_mask = true; - cg->update |= UPDATE_IRQ_MASK; - } + if (hwirq >= CRYSTALCOVE_GPIO_NUM) + return; + + cg->set_irq_mask = true; + cg->update |= UPDATE_IRQ_MASK; + + gpiochip_disable_irq(gc, hwirq); } -static struct irq_chip crystalcove_irqchip = { 
+static const struct irq_chip crystalcove_irqchip = { .name = "Crystal Cove", .irq_mask = crystalcove_irq_mask, .irq_unmask = crystalcove_irq_unmask, .irq_set_type = crystalcove_irq_type, .irq_bus_lock = crystalcove_bus_lock, .irq_bus_sync_unlock = crystalcove_bus_sync_unlock, - .flags = IRQCHIP_SKIP_SET_WAKE, + .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_IMMUTABLE, + GPIOCHIP_IRQ_RESOURCE_HELPERS, }; static irqreturn_t crystalcove_gpio_irq_handler(int irq, void *data) @@ -293,8 +298,7 @@ static irqreturn_t crystalcove_gpio_irq_handler(int irq, void *data) return IRQ_HANDLED; } -static void crystalcove_gpio_dbg_show(struct seq_file *s, - struct gpio_chip *chip) +static void crystalcove_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip) { struct crystalcove_gpio *cg = gpiochip_get_data(chip); int gpio, offset; @@ -353,7 +357,7 @@ static int crystalcove_gpio_probe(struct platform_device *pdev) cg->regmap = pmic->regmap; girq = &cg->chip.irq; - girq->chip = &crystalcove_irqchip; + gpio_irq_chip_set_chip(girq, &crystalcove_irqchip); /* This will let us handle the parent IRQ in the driver */ girq->parent_handler = NULL; girq->num_parents = 0; diff --git a/drivers/gpio/gpio-dln2.c b/drivers/gpio/gpio-dln2.c index 08b9e2cf4f2d..71fa437b491f 100644 --- a/drivers/gpio/gpio-dln2.c +++ b/drivers/gpio/gpio-dln2.c @@ -46,7 +46,6 @@ struct dln2_gpio { struct platform_device *pdev; struct gpio_chip gpio; - struct irq_chip irqchip; /* * Cache pin direction to save us one transfer, since the hardware has @@ -306,6 +305,7 @@ static void dln2_irq_unmask(struct irq_data *irqd) struct dln2_gpio *dln2 = gpiochip_get_data(gc); int pin = irqd_to_hwirq(irqd); + gpiochip_enable_irq(gc, pin); set_bit(pin, dln2->unmasked_irqs); } @@ -316,6 +316,7 @@ static void dln2_irq_mask(struct irq_data *irqd) int pin = irqd_to_hwirq(irqd); clear_bit(pin, dln2->unmasked_irqs); + gpiochip_disable_irq(gc, pin); } static int dln2_irq_set_type(struct irq_data *irqd, unsigned type) @@ -384,6 +385,17 @@ static void dln2_irq_bus_unlock(struct irq_data *irqd) mutex_unlock(&dln2->irq_lock); } +static const struct irq_chip dln2_irqchip = { + .name = "dln2-irq", + .irq_mask = dln2_irq_mask, + .irq_unmask = dln2_irq_unmask, + .irq_set_type = dln2_irq_set_type, + .irq_bus_lock = dln2_irq_bus_lock, + .irq_bus_sync_unlock = dln2_irq_bus_unlock, + .flags = IRQCHIP_IMMUTABLE, + GPIOCHIP_IRQ_RESOURCE_HELPERS, +}; + static void dln2_gpio_event(struct platform_device *pdev, u16 echo, const void *data, int len) { @@ -465,15 +477,8 @@ static int dln2_gpio_probe(struct platform_device *pdev) dln2->gpio.direction_output = dln2_gpio_direction_output; dln2->gpio.set_config = dln2_gpio_set_config; - dln2->irqchip.name = "dln2-irq", - dln2->irqchip.irq_mask = dln2_irq_mask, - dln2->irqchip.irq_unmask = dln2_irq_unmask, - dln2->irqchip.irq_set_type = dln2_irq_set_type, - dln2->irqchip.irq_bus_lock = dln2_irq_bus_lock, - dln2->irqchip.irq_bus_sync_unlock = dln2_irq_bus_unlock, - girq = &dln2->gpio.irq; - girq->chip = &dln2->irqchip; + gpio_irq_chip_set_chip(girq, &dln2_irqchip); /* The event comes from the outside so no parent handler */ girq->parent_handler = NULL; girq->num_parents = 0; diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c index 04afe728e187..c22fcaa44a61 100644 --- a/drivers/gpio/gpio-dwapb.c +++ b/drivers/gpio/gpio-dwapb.c @@ -662,10 +662,9 @@ static int dwapb_get_clks(struct dwapb_gpio *gpio) gpio->clks[1].id = "db"; err = devm_clk_bulk_get_optional(gpio->dev, DWAPB_NR_CLOCKS, gpio->clks); - if (err) { - 
dev_err(gpio->dev, "Cannot get APB/Debounce clocks\n"); - return err; - } + if (err) + return dev_err_probe(gpio->dev, err, + "Cannot get APB/Debounce clocks\n"); err = clk_bulk_prepare_enable(DWAPB_NR_CLOCKS, gpio->clks); if (err) { diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c index df563616f943..bea0e32c195d 100644 --- a/drivers/gpio/gpio-grgpio.c +++ b/drivers/gpio/gpio-grgpio.c @@ -434,25 +434,13 @@ static int grgpio_probe(struct platform_device *ofdev) static int grgpio_remove(struct platform_device *ofdev) { struct grgpio_priv *priv = platform_get_drvdata(ofdev); - int i; - int ret = 0; - - if (priv->domain) { - for (i = 0; i < GRGPIO_MAX_NGPIO; i++) { - if (priv->uirqs[i].refcnt != 0) { - ret = -EBUSY; - goto out; - } - } - } gpiochip_remove(&priv->gc); if (priv->domain) irq_domain_remove(priv->domain); -out: - return ret; + return 0; } static const struct of_device_id grgpio_match[] = { diff --git a/drivers/gpio/gpio-merrifield.c b/drivers/gpio/gpio-merrifield.c index f3d1baeacbe9..72ac09a59702 100644 --- a/drivers/gpio/gpio-merrifield.c +++ b/drivers/gpio/gpio-merrifield.c @@ -220,10 +220,8 @@ static void mrfld_irq_ack(struct irq_data *d) raw_spin_unlock_irqrestore(&priv->lock, flags); } -static void mrfld_irq_unmask_mask(struct irq_data *d, bool unmask) +static void mrfld_irq_unmask_mask(struct mrfld_gpio *priv, u32 gpio, bool unmask) { - struct mrfld_gpio *priv = irq_data_get_irq_chip_data(d); - u32 gpio = irqd_to_hwirq(d); void __iomem *gimr = gpio_reg(&priv->chip, gpio, GIMR); unsigned long flags; u32 value; @@ -241,12 +239,20 @@ static void mrfld_irq_unmask_mask(struct irq_data *d, bool unmask) static void mrfld_irq_mask(struct irq_data *d) { - mrfld_irq_unmask_mask(d, false); + struct mrfld_gpio *priv = irq_data_get_irq_chip_data(d); + u32 gpio = irqd_to_hwirq(d); + + mrfld_irq_unmask_mask(priv, gpio, false); + gpiochip_disable_irq(&priv->chip, gpio); } static void mrfld_irq_unmask(struct irq_data *d) { - mrfld_irq_unmask_mask(d, true); + struct mrfld_gpio *priv = irq_data_get_irq_chip_data(d); + u32 gpio = irqd_to_hwirq(d); + + gpiochip_enable_irq(&priv->chip, gpio); + mrfld_irq_unmask_mask(priv, gpio, true); } static int mrfld_irq_set_type(struct irq_data *d, unsigned int type) @@ -329,13 +335,15 @@ static int mrfld_irq_set_wake(struct irq_data *d, unsigned int on) return 0; } -static struct irq_chip mrfld_irqchip = { +static const struct irq_chip mrfld_irqchip = { .name = "gpio-merrifield", .irq_ack = mrfld_irq_ack, .irq_mask = mrfld_irq_mask, .irq_unmask = mrfld_irq_unmask, .irq_set_type = mrfld_irq_set_type, .irq_set_wake = mrfld_irq_set_wake, + .flags = IRQCHIP_IMMUTABLE, + GPIOCHIP_IRQ_RESOURCE_HELPERS, }; static void mrfld_irq_handler(struct irq_desc *desc) @@ -482,7 +490,7 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id return retval; girq = &priv->chip.irq; - girq->chip = &mrfld_irqchip; + gpio_irq_chip_set_chip(girq, &mrfld_irqchip); girq->init_hw = mrfld_irq_init_hw; girq->parent_handler = mrfld_irq_handler; girq->num_parents = 1; diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c index c5166cd47c9c..7f59e5d936c2 100644 --- a/drivers/gpio/gpio-mxs.c +++ b/drivers/gpio/gpio-mxs.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0+ // -// MXC GPIO support. (c) 2008 Daniel Mack <daniel@caiaq.de> +// MXS GPIO support. 
(c) 2008 Daniel Mack <daniel@caiaq.de> // Copyright 2008 Juergen Beisert, kernel@pengutronix.de // // Based on code from Freescale, diff --git a/drivers/gpio/gpio-realtek-otto.c b/drivers/gpio/gpio-realtek-otto.c index c52b2cb1acae..63dcf42f7c20 100644 --- a/drivers/gpio/gpio-realtek-otto.c +++ b/drivers/gpio/gpio-realtek-otto.c @@ -172,6 +172,8 @@ static void realtek_gpio_irq_unmask(struct irq_data *data) unsigned long flags; u16 m; + gpiochip_enable_irq(&ctrl->gc, line); + raw_spin_lock_irqsave(&ctrl->lock, flags); m = ctrl->intr_mask[port]; m |= realtek_gpio_imr_bits(port_pin, REALTEK_GPIO_IMR_LINE_MASK); @@ -195,6 +197,8 @@ static void realtek_gpio_irq_mask(struct irq_data *data) ctrl->intr_mask[port] = m; realtek_gpio_write_imr(ctrl, port, ctrl->intr_type[port], m); raw_spin_unlock_irqrestore(&ctrl->lock, flags); + + gpiochip_disable_irq(&ctrl->gc, line); } static int realtek_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type) @@ -315,13 +319,15 @@ static int realtek_gpio_irq_init(struct gpio_chip *gc) return 0; } -static struct irq_chip realtek_gpio_irq_chip = { +static const struct irq_chip realtek_gpio_irq_chip = { .name = "realtek-otto-gpio", .irq_ack = realtek_gpio_irq_ack, .irq_mask = realtek_gpio_irq_mask, .irq_unmask = realtek_gpio_irq_unmask, .irq_set_type = realtek_gpio_irq_set_type, .irq_set_affinity = realtek_gpio_irq_set_affinity, + .flags = IRQCHIP_IMMUTABLE, + GPIOCHIP_IRQ_RESOURCE_HELPERS, }; static const struct of_device_id realtek_gpio_of_match[] = { @@ -404,7 +410,7 @@ static int realtek_gpio_probe(struct platform_device *pdev) irq = platform_get_irq_optional(pdev, 0); if (!(dev_flags & GPIO_INTERRUPTS_DISABLED) && irq > 0) { girq = &ctrl->gc.irq; - girq->chip = &realtek_gpio_irq_chip; + gpio_irq_chip_set_chip(girq, &realtek_gpio_irq_chip); girq->default_type = IRQ_TYPE_NONE; girq->handler = handle_bad_irq; girq->parent_handler = realtek_gpio_irq_handler; diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c index acda4c5052d3..8a83f7bf4382 100644 --- a/drivers/gpio/gpio-sch.c +++ b/drivers/gpio/gpio-sch.c @@ -38,7 +38,6 @@ struct sch_gpio { struct gpio_chip chip; - struct irq_chip irqchip; spinlock_t lock; unsigned short iobase; unsigned short resume_base; @@ -218,11 +217,9 @@ static void sch_irq_ack(struct irq_data *d) spin_unlock_irqrestore(&sch->lock, flags); } -static void sch_irq_mask_unmask(struct irq_data *d, int val) +static void sch_irq_mask_unmask(struct gpio_chip *gc, irq_hw_number_t gpio_num, int val) { - struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct sch_gpio *sch = gpiochip_get_data(gc); - irq_hw_number_t gpio_num = irqd_to_hwirq(d); unsigned long flags; spin_lock_irqsave(&sch->lock, flags); @@ -232,14 +229,32 @@ static void sch_irq_mask_unmask(struct irq_data *d, int val) static void sch_irq_mask(struct irq_data *d) { - sch_irq_mask_unmask(d, 0); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + irq_hw_number_t gpio_num = irqd_to_hwirq(d); + + sch_irq_mask_unmask(gc, gpio_num, 0); + gpiochip_disable_irq(gc, gpio_num); } static void sch_irq_unmask(struct irq_data *d) { - sch_irq_mask_unmask(d, 1); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + irq_hw_number_t gpio_num = irqd_to_hwirq(d); + + gpiochip_enable_irq(gc, gpio_num); + sch_irq_mask_unmask(gc, gpio_num, 1); } +static const struct irq_chip sch_irqchip = { + .name = "sch_gpio", + .irq_ack = sch_irq_ack, + .irq_mask = sch_irq_mask, + .irq_unmask = sch_irq_unmask, + .irq_set_type = sch_irq_type, + .flags = IRQCHIP_IMMUTABLE, + 
GPIOCHIP_IRQ_RESOURCE_HELPERS, +}; + static u32 sch_gpio_gpe_handler(acpi_handle gpe_device, u32 gpe, void *context) { struct sch_gpio *sch = context; @@ -367,14 +382,8 @@ static int sch_gpio_probe(struct platform_device *pdev) platform_set_drvdata(pdev, sch); - sch->irqchip.name = "sch_gpio"; - sch->irqchip.irq_ack = sch_irq_ack; - sch->irqchip.irq_mask = sch_irq_mask; - sch->irqchip.irq_unmask = sch_irq_unmask; - sch->irqchip.irq_set_type = sch_irq_type; - girq = &sch->chip.irq; - girq->chip = &sch->irqchip; + gpio_irq_chip_set_chip(girq, &sch_irqchip); girq->num_parents = 0; girq->parents = NULL; girq->parent_handler = NULL; diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c index 98cd715ccc33..8d09b619c166 100644 --- a/drivers/gpio/gpio-vr41xx.c +++ b/drivers/gpio/gpio-vr41xx.c @@ -217,8 +217,6 @@ static int giu_get_irq(unsigned int irq) printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n", maskl, pendl, maskh, pendh); - atomic_inc(&irq_err_count); - return -EINVAL; } diff --git a/drivers/gpio/gpio-wcove.c b/drivers/gpio/gpio-wcove.c index 16a0fae1e32e..c18b6b47384f 100644 --- a/drivers/gpio/gpio-wcove.c +++ b/drivers/gpio/gpio-wcove.c @@ -299,6 +299,8 @@ static void wcove_irq_unmask(struct irq_data *data) if (gpio >= WCOVE_GPIO_NUM) return; + gpiochip_enable_irq(chip, gpio); + wg->set_irq_mask = false; wg->update |= UPDATE_IRQ_MASK; } @@ -314,15 +316,19 @@ static void wcove_irq_mask(struct irq_data *data) wg->set_irq_mask = true; wg->update |= UPDATE_IRQ_MASK; + + gpiochip_disable_irq(chip, gpio); } -static struct irq_chip wcove_irqchip = { +static const struct irq_chip wcove_irqchip = { .name = "Whiskey Cove", .irq_mask = wcove_irq_mask, .irq_unmask = wcove_irq_unmask, .irq_set_type = wcove_irq_type, .irq_bus_lock = wcove_bus_lock, .irq_bus_sync_unlock = wcove_bus_sync_unlock, + .flags = IRQCHIP_IMMUTABLE, + GPIOCHIP_IRQ_RESOURCE_HELPERS, }; static irqreturn_t wcove_gpio_irq_handler(int irq, void *data) @@ -452,7 +458,7 @@ static int wcove_gpio_probe(struct platform_device *pdev) } girq = &wg->chip.irq; - girq->chip = &wcove_irqchip; + gpio_irq_chip_set_chip(girq, &wcove_irqchip); /* This will let us handle the parent IRQ in the driver */ girq->parent_handler = NULL; girq->num_parents = 0; diff --git a/drivers/gpio/gpio-winbond.c b/drivers/gpio/gpio-winbond.c index 7f8f5b02e31d..4b61d975cc0e 100644 --- a/drivers/gpio/gpio-winbond.c +++ b/drivers/gpio/gpio-winbond.c @@ -385,12 +385,13 @@ static int winbond_gpio_get(struct gpio_chip *gc, unsigned int offset) unsigned long *base = gpiochip_get_data(gc); const struct winbond_gpio_info *info; bool val; + int ret; winbond_gpio_get_info(&offset, &info); - val = winbond_sio_enter(*base); - if (val) - return val; + ret = winbond_sio_enter(*base); + if (ret) + return ret; winbond_sio_select_logical(*base, info->dev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 67abf8dcd30a..6b6d46e29e6e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1918,9 +1918,6 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct amdgpu_device *adev, return -EINVAL; } - /* delete kgd_mem from kfd_bo_list to avoid re-validating - * this BO in BO's restoring after eviction. 
- */ mutex_lock(&mem->process_info->lock); ret = amdgpu_bo_reserve(bo, true); @@ -1943,7 +1940,6 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct amdgpu_device *adev, amdgpu_amdkfd_remove_eviction_fence( bo, mem->process_info->eviction_fence); - list_del_init(&mem->validate_list.head); if (size) *size = amdgpu_bo_size(bo); @@ -2512,12 +2508,15 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) process_info->eviction_fence = new_fence; *ef = dma_fence_get(&new_fence->base); - /* Attach new eviction fence to all BOs */ + /* Attach new eviction fence to all BOs except pinned ones */ list_for_each_entry(mem, &process_info->kfd_bo_list, - validate_list.head) + validate_list.head) { + if (mem->bo->tbo.pin_count) + continue; + amdgpu_bo_fence(mem->bo, &process_info->eviction_fence->base, true); - + } /* Attach eviction fence to PD / PT BOs */ list_for_each_entry(peer_vm, &process_info->vm_list_head, vm_list_node) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index ede2fa56f6c9..16699158e00d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -594,17 +594,20 @@ int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value) int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) { int r; - r = amdgpu_ras_block_late_init(adev, ras_block); - if (r) - return r; if (amdgpu_ras_is_supported(adev, ras_block->block)) { if (!amdgpu_persistent_edc_harvesting_supported(adev)) amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX); + r = amdgpu_ras_block_late_init(adev, ras_block); + if (r) + return r; + r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0); if (r) goto late_fini; + } else { + amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0); } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 798c56214a23..aebc384531ac 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -518,6 +518,8 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev) case IP_VERSION(9, 1, 0): /* RENOIR looks like RAVEN */ case IP_VERSION(9, 3, 0): + /* GC 10.3.7 */ + case IP_VERSION(10, 3, 7): if (amdgpu_tmz == 0) { adev->gmc.tmz_enabled = false; dev_info(adev->dev, @@ -540,8 +542,6 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev) case IP_VERSION(10, 3, 1): /* YELLOW_CARP*/ case IP_VERSION(10, 3, 3): - /* GC 10.3.7 */ - case IP_VERSION(10, 3, 7): /* Don't enable it by default yet. */ if (amdgpu_tmz < 1) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 801f6fa692e9..6de63ea6687e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -642,7 +642,6 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) atomic64_read(&adev->visible_pin_size), vram_gtt.vram_size); vram_gtt.gtt_size = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT)->size; - vram_gtt.gtt_size *= PAGE_SIZE; vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size); return copy_to_user(out, &vram_gtt, min((size_t)size, sizeof(vram_gtt))) ? 
-EFAULT : 0; @@ -675,7 +674,6 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) mem.cpu_accessible_vram.usable_heap_size * 3 / 4; mem.gtt.total_heap_size = gtt_man->size; - mem.gtt.total_heap_size *= PAGE_SIZE; mem.gtt.usable_heap_size = mem.gtt.total_heap_size - atomic64_read(&adev->gart_pin_size); mem.gtt.heap_usage = ttm_resource_manager_usage(gtt_man); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 2de9309a4193..dac202ae864d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -197,6 +197,13 @@ static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf, if (amdgpu_ras_query_error_status(obj->adev, &info)) return -EINVAL; + /* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */ + if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) && + obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) { + if (amdgpu_ras_reset_error_status(obj->adev, info.head.block)) + dev_warn(obj->adev->dev, "Failed to reset error counter and error status"); + } + s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n", "ue", info.ue_count, "ce", info.ce_count); @@ -550,9 +557,10 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev, if (amdgpu_ras_query_error_status(obj->adev, &info)) return -EINVAL; - if (obj->adev->asic_type == CHIP_ALDEBARAN) { + if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) && + obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) { if (amdgpu_ras_reset_error_status(obj->adev, info.head.block)) - DRM_WARN("Failed to reset error counter and error status"); + dev_warn(obj->adev->dev, "Failed to reset error counter and error status"); } return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count, @@ -1027,9 +1035,6 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, } } - if (!amdgpu_persistent_edc_harvesting_supported(adev)) - amdgpu_ras_reset_error_status(adev, info->head.block); - return 0; } @@ -1149,6 +1154,12 @@ int amdgpu_ras_query_error_count(struct amdgpu_device *adev, if (res) return res; + if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) && + adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) { + if (amdgpu_ras_reset_error_status(adev, info.head.block)) + dev_warn(adev->dev, "Failed to reset error counter and error status"); + } + ce += info.ce_count; ue += info.ue_count; } @@ -1792,6 +1803,12 @@ static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev) continue; amdgpu_ras_query_error_status(adev, &info); + + if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) && + adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) { + if (amdgpu_ras_reset_error_status(adev, info.head.block)) + dev_warn(adev->dev, "Failed to reset error counter and error status"); + } } } @@ -2278,8 +2295,9 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev) !amdgpu_ras_asic_supported(adev)) return; - if (!(amdgpu_sriov_vf(adev) && - (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)))) + /* If the driver runs on the SRIOV guest side, only enable RAS for Aldebaran */ + if (amdgpu_sriov_vf(adev) && + adev->ip_versions[MP1_HWIP][0] != IP_VERSION(13, 0, 2)) return; if (!adev->gmc.xgmi.connected_to_cpu) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index be6f76a30ac6..3b4c19412625 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1798,18 +1798,26 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) DRM_INFO("amdgpu: %uM of VRAM memory ready\n", (unsigned) (adev->gmc.real_vram_size / (1024 * 1024))); - /* Compute GTT size, either bsaed on 3/4th the size of RAM size + /* Compute GTT size, either based on 1/2 the size of RAM * or whatever the user passed on module init */ if (amdgpu_gtt_size == -1) { struct sysinfo si; si_meminfo(&si); - gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20), - adev->gmc.mc_vram_size), - ((uint64_t)si.totalram * si.mem_unit * 3/4)); - } - else + /* Certain GL unit tests for large textures can cause problems + * with the OOM killer since there is no way to link this memory + * to a process. This was originally mitigated (but not necessarily + * eliminated) by limiting the GTT size. The problem is this limit + * is often too low for many modern games so just make the limit 1/2 + * of system memory, which aligns with TTM. The OOM accounting needs + * to be addressed, but we shouldn't prevent common 3D applications + * from being usable just to potentially mitigate that corner case. + */ + gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20), + (u64)si.totalram * si.mem_unit / 2); + } else { gtt_size = (uint64_t)amdgpu_gtt_size << 20; + } /* Initialize GTT memory pool */ r = amdgpu_gtt_mgr_init(adev, gtt_size);
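The new default sizing above is max(driver default, half of system RAM). Pulled out of amdgpu_ttm_init(), the computation reads roughly as follows (a hypothetical helper for illustration; AMDGPU_DEFAULT_GTT_SIZE_MB is 3072 in current sources, but treat the exact value as an assumption):

static u64 amdgpu_default_gtt_size_sketch(void)
{
	struct sysinfo si;

	si_meminfo(&si);
	/* Never below the driver default; otherwise half of system
	 * memory, matching TTM's own limit. */
	return max((u64)AMDGPU_DEFAULT_GTT_SIZE_MB << 20,
		   (u64)si.totalram * si.mem_unit / 2);
}

On a 32 GiB machine this yields a 16 GiB GTT, where the removed formula would have clamped against the VRAM size and 3/4 of RAM instead.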
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 2ceeaa4c793a..dc76d2b3ce52 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -679,6 +679,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev, { struct amdgpu_vm_update_params params; struct amdgpu_vm_bo_base *entry; + bool flush_tlb_needed = false; int r, idx; if (list_empty(&vm->relocated)) @@ -697,6 +698,9 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev, goto error; list_for_each_entry(entry, &vm->relocated, vm_status) { + /* vm_flush_needed after updating moved PDEs */ + flush_tlb_needed |= entry->moved; + r = amdgpu_vm_pde_update(&params, entry); if (r) goto error; @@ -706,8 +710,8 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev, goto error; - /* vm_flush_needed after updating PDEs */ - atomic64_inc(&vm->tlb_seq); + if (flush_tlb_needed) + atomic64_inc(&vm->tlb_seq); while (!list_empty(&vm->relocated)) { entry = list_first_entry(&vm->relocated, @@ -789,6 +793,11 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, flush_tlb |= adev->gmc.xgmi.num_physical_nodes && adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0); + /* + * On GFX8 and older any 8 PTE block with a valid bit set enters the TLB + */ + flush_tlb |= adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 0, 0); + memset(&params, 0, sizeof(params)); params.adev = adev; params.vm = vm; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 8c0a3fc7aaa6..a4a6751b1e44 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -1096,6 +1096,7 @@ static void gfx_v11_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS2); dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_DBG1); dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_M0); + dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_MODE); } static void gfx_v11_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd, @@ -1316,7 +1317,7 @@ static void gfx_v11_0_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *ade memset(ptr + toc_offset + fw_size, 0, toc_fw_size - fw_size); if ((id != SOC21_FIRMWARE_ID_RS64_PFP) && (id != SOC21_FIRMWARE_ID_RS64_ME)) - *(uint64_t *)fw_autoload_mask |= 1 << id; + *(uint64_t *)fw_autoload_mask |= 1ULL << id; } static void gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev, @@ -1983,7 +1984,7 @@ static int gfx_v11_0_init_csb(struct amdgpu_device *adev) return 0; } -void gfx_v11_0_rlc_stop(struct amdgpu_device *adev) +static void gfx_v11_0_rlc_stop(struct amdgpu_device *adev) { u32 tmp = RREG32_SOC15(GC, 0, regRLC_CNTL); @@ -6028,6 +6029,7 @@ static void gfx_v11_0_handle_priv_fault(struct amdgpu_device *adev, break; default: BUG(); + break; } } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index a0c0b7d9f444..7f4b480ae66e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -638,6 +638,12 @@ static int gmc_v11_0_mc_init(struct amdgpu_device *adev) adev->gmc.aper_base = pci_resource_start(adev->pdev, 0); adev->gmc.aper_size = pci_resource_len(adev->pdev, 0); +#ifdef CONFIG_X86_64 + if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) { + adev->gmc.aper_base = adev->mmhub.funcs->get_mc_fb_offset(adev); + adev->gmc.aper_size = adev->gmc.real_vram_size; + } +#endif /* In case the PCI BAR is larger than the actual amount of vram */ adev->gmc.visible_vram_size = adev->gmc.aper_size; if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size) diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c index 5d2dfeff8fe5..d63d3f2b8a16 100644 --- a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c @@ -299,7 +299,7 @@ static const struct imu_rlc_ram_golden imu_rlc_ram_golden_11_0_2[] = IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCPG_PSP_DEBUG, CPG_PSP_DEBUG__GPA_OVERRIDE_MASK, 0) }; -void program_imu_rlc_ram(struct amdgpu_device *adev, +static void program_imu_rlc_ram(struct amdgpu_device *adev, const struct imu_rlc_ram_golden *regs, const u32 array_size) { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c index d2722adabd1b..f3c1af5130ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c @@ -535,6 +535,10 @@ void jpeg_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring, { unsigned vmid = AMDGPU_JOB_GET_VMID(job); + amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_IH_CTRL_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, (vmid << JPEG_IH_CTRL__IH_VMID__SHIFT)); + amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET, 0, 0, PACKETJ_TYPE0)); amdgpu_ring_write(ring, (vmid | (vmid << 4))); @@ -768,7 +772,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = { 8 + /* jpeg_v2_0_dec_ring_emit_vm_flush */ 18 + 18 + /* jpeg_v2_0_dec_ring_emit_fence x2 vm fence */ 8 + 16, - .emit_ib_size = 22, /* jpeg_v2_0_dec_ring_emit_ib */ + .emit_ib_size = 24, /* jpeg_v2_0_dec_ring_emit_ib */ .emit_ib = jpeg_v2_0_dec_ring_emit_ib, .emit_fence = jpeg_v2_0_dec_ring_emit_fence, .emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush, diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h index 1a03baa59755..654e43e83e2c 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h @@ -41,6 +41,7 @@ #define mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET 0x4084 #define
mmUVD_JRBC_STATUS_INTERNAL_OFFSET 0x4089 #define mmUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f +#define mmUVD_JPEG_IH_CTRL_INTERNAL_OFFSET 0x4149 #define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000 diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index fcf51947bb18..7eee004cf3ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -541,7 +541,7 @@ static void mes_v11_0_enable(struct amdgpu_device *adev, bool enable) /* This function is for backdoor MES firmware */ static int mes_v11_0_load_microcode(struct amdgpu_device *adev, - enum admgpu_mes_pipe pipe) + enum admgpu_mes_pipe pipe, bool prime_icache) { int r; uint32_t data; @@ -593,16 +593,18 @@ static int mes_v11_0_load_microcode(struct amdgpu_device *adev, /* Set 0x3FFFF (256K-1) to CP_MES_MDBOUND_LO */ WREG32_SOC15(GC, 0, regCP_MES_MDBOUND_LO, 0x3FFFF); - /* invalidate ICACHE */ - data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL); - data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 0); - data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, INVALIDATE_CACHE, 1); - WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data); - - /* prime the ICACHE. */ - data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL); - data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 1); - WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data); + if (prime_icache) { + /* invalidate ICACHE */ + data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL); + data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 0); + data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, INVALIDATE_CACHE, 1); + WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data); + + /* prime the ICACHE. */ + data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL); + data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 1); + WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data); + } soc21_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); @@ -1044,17 +1046,19 @@ static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev) int r = 0; if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { - r = mes_v11_0_load_microcode(adev, AMDGPU_MES_KIQ_PIPE); + + r = mes_v11_0_load_microcode(adev, AMDGPU_MES_SCHED_PIPE, false); if (r) { - DRM_ERROR("failed to load MES kiq fw, r=%d\n", r); + DRM_ERROR("failed to load MES fw, r=%d\n", r); return r; } - r = mes_v11_0_load_microcode(adev, AMDGPU_MES_SCHED_PIPE); + r = mes_v11_0_load_microcode(adev, AMDGPU_MES_KIQ_PIPE, true); if (r) { - DRM_ERROR("failed to load MES fw, r=%d\n", r); + DRM_ERROR("failed to load MES kiq fw, r=%d\n", r); return r; } + } mes_v11_0_enable(adev, true); @@ -1086,7 +1090,7 @@ static int mes_v11_0_hw_init(void *handle) if (!adev->enable_mes_kiq) { if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { r = mes_v11_0_load_microcode(adev, - AMDGPU_MES_SCHED_PIPE); + AMDGPU_MES_SCHED_PIPE, true); if (r) { DRM_ERROR("failed to MES fw, r=%d\n", r); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index d016e3c3e221..b3fba8dea63c 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -170,6 +170,7 @@ static const struct amdgpu_video_codec_info yc_video_codecs_decode_array[] = { {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)}, }; static const struct amdgpu_video_codecs 
yc_video_codecs_decode = { diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c index 06b2635b142a..83c6ccaaa9e4 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c @@ -469,6 +469,7 @@ static void sdma_v5_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se } } + /** * sdma_v5_2_gfx_stop - stop the gfx async dma engines * * @adev: amdgpu_device pointer * @@ -514,21 +515,17 @@ static void sdma_v5_2_rlc_stop(struct amdgpu_device *adev) } /** - * sdma_v5_2_ctx_switch_enable_for_instance - start the async dma engines - * context switch for an instance + * sdma_v5_2_ctx_switch_enable - enable/disable the async dma engines context switch * * @adev: amdgpu_device pointer - * @instance_idx: the index of the SDMA instance + * @enable: enable/disable the DMA MEs context switch. * - * Unhalt the async dma engines context switch. + * Halt or unhalt the async dma engines context switch. */ -static void sdma_v5_2_ctx_switch_enable_for_instance(struct amdgpu_device *adev, int instance_idx) +static void sdma_v5_2_ctx_switch_enable(struct amdgpu_device *adev, bool enable) { u32 f32_cntl, phase_quantum = 0; - - if (WARN_ON(instance_idx >= adev->sdma.num_instances)) { - return; - } + int i; if (amdgpu_sdma_phase_quantum) { unsigned value = amdgpu_sdma_phase_quantum; @@ -552,68 +549,50 @@ static void sdma_v5_2_ctx_switch_enable_for_instance(struct amdgpu_device *adev, phase_quantum = value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT | unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT; - - WREG32_SOC15_IP(GC, - sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_PHASE0_QUANTUM), - phase_quantum); - WREG32_SOC15_IP(GC, - sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_PHASE1_QUANTUM), - phase_quantum); - WREG32_SOC15_IP(GC, - sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_PHASE2_QUANTUM), - phase_quantum); } - if (!amdgpu_sriov_vf(adev)) { - f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_CNTL)); - f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, - AUTO_CTXSW_ENABLE, 1); - WREG32(sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_CNTL), f32_cntl); + for (i = 0; i < adev->sdma.num_instances; i++) { + if (enable && amdgpu_sdma_phase_quantum) { + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM), + phase_quantum); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM), + phase_quantum); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM), + phase_quantum); + } + + if (!amdgpu_sriov_vf(adev)) { + f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL)); + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, + AUTO_CTXSW_ENABLE, enable ? 1 : 0); + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl); + } } + } /** - * sdma_v5_2_ctx_switch_disable_all - stop the async dma engines context switch + * sdma_v5_2_enable - enable/disable the async dma engines * * @adev: amdgpu_device pointer + * @enable: enable/disable the DMA MEs. * - * Halt the async dma engines context switch. + * Halt or unhalt the async dma engines. */
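Both rewritten helpers, sdma_v5_2_ctx_switch_enable() above and sdma_v5_2_enable() whose body follows, fold the old one-direction helpers into a single boolean-parameter function. A minimal sketch of the resulting idiom, reusing the register helpers already present in this file (the function name below is illustrative only):

static void sdma_set_halt_sketch(struct amdgpu_device *adev, bool halt)
{
	u32 f32_cntl;
	int i;

	/* Read-modify-write the HALT field on every SDMA instance. */
	for (i = 0; i < adev->sdma.num_instances; i++) {
		f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
		f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, halt ? 1 : 0);
		WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
	}
}

One function serving both directions keeps the SR-IOV and bare-metal start/stop sequences symmetric, as the updated callers further down show.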
-static void sdma_v5_2_ctx_switch_disable_all(struct amdgpu_device *adev) +static void sdma_v5_2_enable(struct amdgpu_device *adev, bool enable) { u32 f32_cntl; int i; - if (amdgpu_sriov_vf(adev)) - return; - - for (i = 0; i < adev->sdma.num_instances; i++) { - f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL)); - f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, - AUTO_CTXSW_ENABLE, 0); - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl); + if (!enable) { + sdma_v5_2_gfx_stop(adev); + sdma_v5_2_rlc_stop(adev); } -} - -/** - * sdma_v5_2_halt - stop the async dma engines - * - * @adev: amdgpu_device pointer - * - * Halt the async dma engines. - */ -static void sdma_v5_2_halt(struct amdgpu_device *adev) -{ - int i; - u32 f32_cntl; - - sdma_v5_2_gfx_stop(adev); - sdma_v5_2_rlc_stop(adev); if (!amdgpu_sriov_vf(adev)) { for (i = 0; i < adev->sdma.num_instances; i++) { f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL)); - f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1); + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1); WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl); } } @@ -625,9 +604,6 @@ static void sdma_v5_2_halt(struct amdgpu_device *adev) * @adev: amdgpu_device pointer * * Set up the gfx DMA ring buffers and enable them. - * It assumes that the dma engine is stopped for each instance. - * The function enables the engine and preemptions sequentially for each instance. - * * Returns 0 for success, error for failure. */ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev) @@ -769,7 +745,10 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev) ring->sched.ready = true; - sdma_v5_2_ctx_switch_enable_for_instance(adev, i); + if (amdgpu_sriov_vf(adev)) { /* the bare-metal sequence doesn't need the two lines below */ + sdma_v5_2_ctx_switch_enable(adev, true); + sdma_v5_2_enable(adev, true); + } r = amdgpu_ring_test_ring(ring); if (r) { @@ -813,7 +792,7 @@ static int sdma_v5_2_load_microcode(struct amdgpu_device *adev) int i, j; /* halt the MEs */ - sdma_v5_2_halt(adev); + sdma_v5_2_enable(adev, false); for (i = 0; i < adev->sdma.num_instances; i++) { if (!adev->sdma.instance[i].fw) @@ -885,8 +864,8 @@ static int sdma_v5_2_start(struct amdgpu_device *adev) int r = 0; if (amdgpu_sriov_vf(adev)) { - sdma_v5_2_ctx_switch_disable_all(adev); - sdma_v5_2_halt(adev); + sdma_v5_2_ctx_switch_enable(adev, false); + sdma_v5_2_enable(adev, false); /* set RB registers */ r = sdma_v5_2_gfx_resume(adev); @@ -910,10 +889,12 @@ static int sdma_v5_2_start(struct amdgpu_device *adev) amdgpu_gfx_off_ctrl(adev, false); sdma_v5_2_soft_reset(adev); + /* unhalt the MEs */ + sdma_v5_2_enable(adev, true); + /* enable sdma ring preemption */ + sdma_v5_2_ctx_switch_enable(adev, true); - /* Soft reset supposes to disable the dma engine and preemption. * Now start the gfx rings and rlc compute queues.
- */ + /* start the gfx rings and rlc compute queues */ r = sdma_v5_2_gfx_resume(adev); if (adev->in_s0ix) amdgpu_gfx_off_ctrl(adev, true); @@ -1447,8 +1428,8 @@ static int sdma_v5_2_hw_fini(void *handle) if (amdgpu_sriov_vf(adev)) return 0; - sdma_v5_2_ctx_switch_disable_all(adev); - sdma_v5_2_halt(adev); + sdma_v5_2_ctx_switch_enable(adev, false); + sdma_v5_2_enable(adev, false); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c index 3cabceee5f57..39405f0db824 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c @@ -1761,23 +1761,21 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_sw_ring_vm_funcs = { .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, }; -static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p, - struct amdgpu_job *job) +static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p) { struct drm_gpu_scheduler **scheds; /* The create msg must be in the first IB submitted */ - if (atomic_read(&job->base.entity->fence_seq)) + if (atomic_read(&p->entity->fence_seq)) return -EINVAL; scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC] [AMDGPU_RING_PRIO_DEFAULT].sched; - drm_sched_entity_modify_sched(job->base.entity, scheds, 1); + drm_sched_entity_modify_sched(p->entity, scheds, 1); return 0; } -static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job, - uint64_t addr) +static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, uint64_t addr) { struct ttm_operation_ctx ctx = { false, false }; struct amdgpu_bo_va_mapping *map; @@ -1848,7 +1846,7 @@ static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job, if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11) continue; - r = vcn_v3_0_limit_sched(p, job); + r = vcn_v3_0_limit_sched(p); if (r) goto out; } @@ -1862,7 +1860,7 @@ static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p, struct amdgpu_job *job, struct amdgpu_ib *ib) { - struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched); + struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched); uint32_t msg_lo = 0, msg_hi = 0; unsigned i; int r; @@ -1881,8 +1879,7 @@ static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p, msg_hi = val; } else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0) && val == 0) { - r = vcn_v3_0_dec_msg(p, job, - ((u64)msg_hi) << 32 | msg_lo); + r = vcn_v3_0_dec_msg(p, ((u64)msg_hi) << 32 | msg_lo); if (r) return r; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 5e9adbc71bbd..cbfb32b3d235 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -1516,6 +1516,8 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev, num_of_cache_types = ARRAY_SIZE(beige_goby_cache_info); break; case IP_VERSION(10, 3, 3): + case IP_VERSION(10, 3, 6): /* TODO: Double check these on production silicon */ + case IP_VERSION(10, 3, 7): /* TODO: Double check these on production silicon */ pcache_info = yellow_carp_cache_info; num_of_cache_types = ARRAY_SIZE(yellow_carp_cache_info); break; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 8667e3df2d0b..bf4200457772 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -73,6 +73,8 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd) case IP_VERSION(4, 1, 2):/* RENOIR */ case IP_VERSION(5, 2, 1):/* VANGOGH */ case 
IP_VERSION(5, 2, 3):/* YELLOW_CARP */ + case IP_VERSION(5, 2, 6):/* GC 10.3.6 */ + case IP_VERSION(5, 2, 7):/* GC 10.3.7 */ case IP_VERSION(6, 0, 1): kfd->device_info.num_sdma_queues_per_engine = 2; break; @@ -127,6 +129,8 @@ static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd) case IP_VERSION(9, 4, 2): /* ALDEBARAN */ case IP_VERSION(10, 3, 1): /* VANGOGH */ case IP_VERSION(10, 3, 3): /* YELLOW_CARP */ + case IP_VERSION(10, 3, 6): /* GC 10.3.6 */ + case IP_VERSION(10, 3, 7): /* GC 10.3.7 */ case IP_VERSION(10, 1, 3): /* CYAN_SKILLFISH */ case IP_VERSION(10, 1, 4): case IP_VERSION(10, 1, 10): /* NAVI10 */ @@ -178,7 +182,9 @@ static void kfd_device_info_init(struct kfd_dev *kfd, if (gc_version < IP_VERSION(11, 0, 0)) { /* Navi2x+, Navi1x+ */ - if (gc_version >= IP_VERSION(10, 3, 0)) + if (gc_version == IP_VERSION(10, 3, 6)) + kfd->device_info.no_atomic_fw_version = 14; + else if (gc_version >= IP_VERSION(10, 3, 0)) kfd->device_info.no_atomic_fw_version = 92; else if (gc_version >= IP_VERSION(10, 1, 1)) kfd->device_info.no_atomic_fw_version = 145; @@ -368,6 +374,16 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf) if (!vf) f2g = &gfx_v10_3_kfd2kgd; break; + case IP_VERSION(10, 3, 6): + gfx_target_version = 100306; + if (!vf) + f2g = &gfx_v10_3_kfd2kgd; + break; + case IP_VERSION(10, 3, 7): + gfx_target_version = 100307; + if (!vf) + f2g = &gfx_v10_3_kfd2kgd; + break; case IP_VERSION(11, 0, 0): gfx_target_version = 110000; f2g = &gfx_v11_kfd2kgd; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 997650d597ec..e44376c2ecdc 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -296,7 +296,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange, struct migrate_vma *migrate, struct dma_fence **mfence, dma_addr_t *scratch) { - uint64_t npages = migrate->cpages; + uint64_t npages = migrate->npages; struct device *dev = adev->dev; struct amdgpu_res_cursor cursor; dma_addr_t *src; @@ -344,7 +344,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange, mfence); if (r) goto out_free_vram_pages; - amdgpu_res_next(&cursor, j << PAGE_SHIFT); + amdgpu_res_next(&cursor, (j + 1) << PAGE_SHIFT); j = 0; } else { amdgpu_res_next(&cursor, PAGE_SIZE); @@ -590,7 +590,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange, continue; } src[i] = svm_migrate_addr(adev, spage); - if (i > 0 && src[i] != src[i - 1] + PAGE_SIZE) { + if (j > 0 && src[i] != src[i - 1] + PAGE_SIZE) { r = svm_migrate_copy_memory_gart(adev, dst + i - j, src + i - j, j, FROM_VRAM_TO_RAM, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 2ebf0132c25b..7b332246eda3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1295,7 +1295,7 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange, r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, NULL, last_start, prange->start + i, pte_flags, - last_start - prange->start, + (last_start - prange->start) << PAGE_SHIFT, bo_adev ? 
bo_adev->vm_manager.vram_base_offset : 0, NULL, dma_addr, &vm->last_update); @@ -2307,6 +2307,8 @@ svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni, if (range->event == MMU_NOTIFY_RELEASE) return true; + if (!mmget_not_zero(mni->mm)) + return true; start = mni->interval_tree.start; last = mni->interval_tree.last; @@ -2333,6 +2335,7 @@ svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni, } svm_range_unlock(prange); + mmput(mni->mm); return true; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 70be67a56673..39b425d83bb1 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2812,7 +2812,7 @@ static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = { static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) { - u32 max_cll, min_cll, max, min, q, r; + u32 max_avg, min_cll, max, min, q, r; struct amdgpu_dm_backlight_caps *caps; struct amdgpu_display_manager *dm; struct drm_connector *conn_base; @@ -2842,7 +2842,7 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) caps = &dm->backlight_caps[i]; caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps; caps->aux_support = false; - max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll; + max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall; min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll; if (caps->ext_caps->bits.oled == 1 /*|| @@ -2870,8 +2870,8 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) * The results of the above expressions can be verified at * pre_computed_values. */ - q = max_cll >> 5; - r = max_cll % 32; + q = max_avg >> 5; + r = max_avg % 32; max = (1 << q) * pre_computed_values[r]; // min luminance: maxLum * (CV/255)^2 / 100 diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c index ceb34376decb..bca5f01da763 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c @@ -287,8 +287,11 @@ static void dcn31_enable_pme_wa(struct clk_mgr *clk_mgr_base) void dcn31_init_clocks(struct clk_mgr *clk_mgr) { + uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz; + memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); // Assumption is that boot state always supports pstate + clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk clk_mgr->clks.p_state_change_support = true; clk_mgr->clks.prev_p_state_change_support = true; clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN; @@ -638,8 +641,14 @@ static void dcn31_set_low_power_state(struct clk_mgr *clk_mgr_base) } } +int dcn31_get_dtb_ref_freq_khz(struct clk_mgr *clk_mgr_base) +{ + return clk_mgr_base->clks.ref_dtbclk_khz; +} + static struct clk_mgr_funcs dcn31_funcs = { .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, + .get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz, .update_clocks = dcn31_update_clocks, .init_clocks = dcn31_init_clocks, .enable_pme_wa = dcn31_enable_pme_wa, @@ -719,7 +728,7 @@ void dcn31_clk_mgr_construct( } clk_mgr->base.base.dprefclk_khz = 600000; - clk_mgr->base.dccg->ref_dtbclk_khz = 600000; + clk_mgr->base.base.clks.ref_dtbclk_khz = 600000; dce_clock_read_ss_info(&clk_mgr->base); /*if bios enabled SS, driver needs to adjust dtb clock, only enable with correct bios*/ 
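Earlier in this file's hunks, dcn31_init_clocks() now saves the DTB reference clock before its memset and restores it afterwards. In isolation, the save/restore pattern is (a sketch, assuming only ref_dtbclk_khz must survive re-initialization):

static void init_clocks_sketch(struct clk_mgr *clk_mgr)
{
	/* Save the boot-time DTB reference clock before wiping the state. */
	uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;

	memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));

	/* Restore it; every other field deliberately starts from zero. */
	clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk;
}

This keeps get_dtb_ref_clk_frequency() stable across re-initialization while all derived clock state is reset.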
//clk_mgr->base.dccg->ref_dtbclk_khz = dce_adjust_dp_ref_freq_for_ss(clk_mgr_internal, clk_mgr->base.base.dprefclk_khz); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h index 961b10a49486..be06fdbd0c22 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h @@ -51,6 +51,8 @@ void dcn31_clk_mgr_construct(struct dc_context *ctx, struct pp_smu_funcs *pp_smu, struct dccg *dccg); +int dcn31_get_dtb_ref_freq_khz(struct clk_mgr *clk_mgr_base); + void dcn31_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int); #endif //__DCN31_CLK_MGR_H__ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c index a2ade6e93f5e..f4381725b210 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c @@ -41,9 +41,7 @@ #include "dc_dmub_srv.h" -#if defined (CONFIG_DRM_AMD_DC_DP2_0) #include "dc_link_dp.h" -#endif #define TO_CLK_MGR_DCN315(clk_mgr)\ container_of(clk_mgr, struct clk_mgr_dcn315, base) @@ -552,7 +550,7 @@ static void dcn315_clk_mgr_helper_populate_bw_params( if (!bw_params->clk_table.entries[i].dtbclk_mhz) bw_params->clk_table.entries[i].dtbclk_mhz = def_max.dtbclk_mhz; } - ASSERT(bw_params->clk_table.entries[i].dcfclk_mhz); + ASSERT(bw_params->clk_table.entries[i-1].dcfclk_mhz); bw_params->vram_type = bios_info->memory_type; bw_params->num_channels = bios_info->ma_channel_number; if (!bw_params->num_channels) @@ -580,6 +578,7 @@ static void dcn315_enable_pme_wa(struct clk_mgr *clk_mgr_base) static struct clk_mgr_funcs dcn315_funcs = { .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, + .get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz, .update_clocks = dcn315_update_clocks, .init_clocks = dcn31_init_clocks, .enable_pme_wa = dcn315_enable_pme_wa, @@ -656,9 +655,9 @@ void dcn315_clk_mgr_construct( clk_mgr->base.base.dprefclk_khz = 600000; clk_mgr->base.base.dprefclk_khz = dcn315_smu_get_dpref_clk(&clk_mgr->base); - clk_mgr->base.dccg->ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz; + clk_mgr->base.base.clks.ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz; dce_clock_read_ss_info(&clk_mgr->base); - clk_mgr->base.dccg->ref_dtbclk_khz = dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz); + clk_mgr->base.base.clks.ref_dtbclk_khz = dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz); clk_mgr->base.base.bw_params = &dcn315_bw_params; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c index fc3af81ed6c6..e4bb9c6193b5 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c @@ -571,6 +571,7 @@ static void dcn316_clk_mgr_helper_populate_bw_params( static struct clk_mgr_funcs dcn316_funcs = { .enable_pme_wa = dcn316_enable_pme_wa, .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, + .get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz, .update_clocks = dcn316_update_clocks, .init_clocks = dcn31_init_clocks, .are_clock_states_equal = dcn31_are_clock_states_equal, @@ -685,7 +686,7 @@ void dcn316_clk_mgr_construct( clk_mgr->base.base.dprefclk_khz = 600000; clk_mgr->base.base.dprefclk_khz = 
dcn316_smu_get_dpref_clk(&clk_mgr->base); - clk_mgr->base.dccg->ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz; + clk_mgr->base.base.clks.ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz; dce_clock_read_ss_info(&clk_mgr->base); /*clk_mgr->base.dccg->ref_dtbclk_khz = dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz);*/ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index dc30ac366a50..d8eee89e63ce 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -114,8 +114,8 @@ static const struct dc_link_settings fail_safe_link_settings = { static bool decide_fallback_link_setting( struct dc_link *link, - struct dc_link_settings initial_link_settings, - struct dc_link_settings *current_link_setting, + struct dc_link_settings *max, + struct dc_link_settings *cur, enum link_training_result training_result); static void maximize_lane_settings(const struct link_training_settings *lt_settings, struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]); @@ -944,7 +944,7 @@ static void override_lane_settings(const struct link_training_settings *lt_setti return; - for (lane = 1; lane < LANE_COUNT_DP_MAX; lane++) { + for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { if (lt_settings->voltage_swing) lane_settings[lane].VOLTAGE_SWING = *lt_settings->voltage_swing; if (lt_settings->pre_emphasis) @@ -2784,6 +2784,7 @@ bool perform_link_training_with_retries( enum dp_panel_mode panel_mode = dp_get_panel_mode(link); enum link_training_result status = LINK_TRAINING_CR_FAIL_LANE0; struct dc_link_settings cur_link_settings = *link_setting; + struct dc_link_settings max_link_settings = *link_setting; const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); int fail_count = 0; bool is_link_bw_low = false; /* link bandwidth < stream bandwidth */ @@ -2793,7 +2794,6 @@ bool perform_link_training_with_retries( dp_trace_commit_lt_init(link); - if (dp_get_link_encoding_format(&cur_link_settings) == DP_8b_10b_ENCODING) /* We need to do this before the link training to ensure the idle * pattern in SST mode will be sent right after the link training @@ -2909,19 +2909,15 @@ bool perform_link_training_with_retries( uint32_t req_bw; uint32_t link_bw; - decide_fallback_link_setting(link, *link_setting, &cur_link_settings, status); - /* Flag if reduced link bandwidth no longer meets stream requirements or fallen back to - * minimum link bandwidth. + decide_fallback_link_setting(link, &max_link_settings, + &cur_link_settings, status); + /* Fail link training if reduced link bandwidth no longer meets + * stream requirements. 
*/ req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing); link_bw = dc_link_bandwidth_kbps(link, &cur_link_settings); - is_link_bw_low = (req_bw > link_bw); - is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) && - (cur_link_settings.lane_count <= LANE_COUNT_ONE)); - - if (is_link_bw_low) - DC_LOG_WARNING("%s: Link bandwidth too low after fallback req_bw(%d) > link_bw(%d)\n", - __func__, req_bw, link_bw); + if (req_bw > link_bw) + break; } msleep(delay_between_attempts); @@ -3309,7 +3305,7 @@ static bool dp_verify_link_cap( int *fail_count) { struct dc_link_settings cur_link_settings = {0}; - struct dc_link_settings initial_link_settings = *known_limit_link_setting; + struct dc_link_settings max_link_settings = *known_limit_link_setting; bool success = false; bool skip_video_pattern; enum clock_source_id dp_cs_id = get_clock_source_id(link); @@ -3318,7 +3314,7 @@ static bool dp_verify_link_cap( struct link_resource link_res; memset(&irq_data, 0, sizeof(irq_data)); - cur_link_settings = initial_link_settings; + cur_link_settings = max_link_settings; /* Grant extended timeout request */ if ((link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && (link->dpcd_caps.lttpr_caps.max_ext_timeout > 0)) { @@ -3361,7 +3357,7 @@ static bool dp_verify_link_cap( dp_trace_lt_result_update(link, status, true); dp_disable_link_phy(link, &link_res, link->connector_signal); } while (!success && decide_fallback_link_setting(link, - initial_link_settings, &cur_link_settings, status)); + &max_link_settings, &cur_link_settings, status)); link->verified_link_cap = success ? cur_link_settings : fail_safe_link_settings; @@ -3596,16 +3592,19 @@ static bool decide_fallback_link_setting_max_bw_policy( */ static bool decide_fallback_link_setting( struct dc_link *link, - struct dc_link_settings initial_link_settings, - struct dc_link_settings *current_link_setting, + struct dc_link_settings *max, + struct dc_link_settings *cur, enum link_training_result training_result) { - if (!current_link_setting) + if (!cur) + return false; + if (!max) return false; - if (dp_get_link_encoding_format(&initial_link_settings) == DP_128b_132b_ENCODING || + + if (dp_get_link_encoding_format(max) == DP_128b_132b_ENCODING || link->dc->debug.force_dp2_lt_fallback_method) - return decide_fallback_link_setting_max_bw_policy(link, &initial_link_settings, - current_link_setting, training_result); + return decide_fallback_link_setting_max_bw_policy(link, max, cur, + training_result); switch (training_result) { case LINK_TRAINING_CR_FAIL_LANE0: @@ -3613,28 +3612,18 @@ static bool decide_fallback_link_setting( case LINK_TRAINING_CR_FAIL_LANE23: case LINK_TRAINING_LQA_FAIL: { - if (!reached_minimum_link_rate - (current_link_setting->link_rate)) { - current_link_setting->link_rate = - reduce_link_rate( - current_link_setting->link_rate); - } else if (!reached_minimum_lane_count - (current_link_setting->lane_count)) { - current_link_setting->link_rate = - initial_link_settings.link_rate; + if (!reached_minimum_link_rate(cur->link_rate)) { + cur->link_rate = reduce_link_rate(cur->link_rate); + } else if (!reached_minimum_lane_count(cur->lane_count)) { + cur->link_rate = max->link_rate; if (training_result == LINK_TRAINING_CR_FAIL_LANE0) return false; else if (training_result == LINK_TRAINING_CR_FAIL_LANE1) - current_link_setting->lane_count = - LANE_COUNT_ONE; - else if (training_result == - LINK_TRAINING_CR_FAIL_LANE23) - current_link_setting->lane_count = - LANE_COUNT_TWO; + cur->lane_count = LANE_COUNT_ONE; + else if (training_result 
== LINK_TRAINING_CR_FAIL_LANE23) + cur->lane_count = LANE_COUNT_TWO; else - current_link_setting->lane_count = - reduce_lane_count( - current_link_setting->lane_count); + cur->lane_count = reduce_lane_count(cur->lane_count); } else { return false; } @@ -3642,17 +3631,17 @@ static bool decide_fallback_link_setting( } case LINK_TRAINING_EQ_FAIL_EQ: { - if (!reached_minimum_lane_count - (current_link_setting->lane_count)) { - current_link_setting->lane_count = - reduce_lane_count( - current_link_setting->lane_count); - } else if (!reached_minimum_link_rate - (current_link_setting->link_rate)) { - current_link_setting->link_rate = - reduce_link_rate( - current_link_setting->link_rate); - current_link_setting->lane_count = initial_link_settings.lane_count; + if (!reached_minimum_lane_count(cur->lane_count)) { + cur->lane_count = reduce_lane_count(cur->lane_count); + } else if (!reached_minimum_link_rate(cur->link_rate)) { + cur->link_rate = reduce_link_rate(cur->link_rate); + /* Reduce max link rate to avoid potential infinite loop. + * Needed so that any subsequent CR_FAIL fallback can't + * re-set the link rate higher than the link rate from + * the latest EQ_FAIL fallback. + */ + max->link_rate = cur->link_rate; + cur->lane_count = max->lane_count; } else { return false; } @@ -3660,12 +3649,15 @@ static bool decide_fallback_link_setting( } case LINK_TRAINING_EQ_FAIL_CR: { - if (!reached_minimum_link_rate - (current_link_setting->link_rate)) { - current_link_setting->link_rate = - reduce_link_rate( - current_link_setting->link_rate); - current_link_setting->lane_count = initial_link_settings.lane_count; + if (!reached_minimum_link_rate(cur->link_rate)) { + cur->link_rate = reduce_link_rate(cur->link_rate); + /* Reduce max link rate to avoid potential infinite loop. + * Needed so that any subsequent CR_FAIL fallback can't + * re-set the link rate higher than the link rate from + * the latest EQ_FAIL fallback. + */ + max->link_rate = cur->link_rate; + cur->lane_count = max->lane_count; } else { return false; } diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 3960c74482be..817028d3c4a0 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -47,7 +47,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.186" +#define DC_VER "3.2.187" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -416,6 +416,7 @@ struct dc_clocks { bool p_state_change_support; enum dcn_zstate_support_state zstate_support; bool dtbclk_en; + int ref_dtbclk_khz; enum dcn_pwr_state pwr_state; /* * Elements below are not compared for the purposes of @@ -719,6 +720,8 @@ struct dc_debug_options { bool apply_vendor_specific_lttpr_wa; bool extended_blank_optimization; union aux_wake_wa_options aux_wake_wa; + /* uses value at boot and disables switch */ + bool disable_dtb_ref_clk_switch; uint8_t psr_power_use_phy_fsm; enum dml_hostvm_override_opts dml_hostvm_override; }; diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 7eff7811769d..5f2afa5b4814 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -1766,29 +1766,9 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) break; } } - - /* - * TO-DO: So far the code logic below only addresses single eDP case. 
- * For dual eDP case, there are a few things that need to be - * implemented first: - * - * 1. Change the fastboot logic above, so eDP link[0 or 1]'s - * stream[0 or 1] will all be checked. - * - * 2. Change keep_edp_vdd_on to an array, and maintain keep_edp_vdd_on - * for each eDP. - * - * Once above 2 things are completed, we can then change the logic below - * correspondingly, so dual eDP case will be fully covered. - */ - - // We are trying to enable eDP, don't power down VDD if eDP stream is existing - if ((edp_stream_num == 1 && edp_streams[0] != NULL) || can_apply_edp_fast_boot) { + // We are trying to enable eDP, don't power down VDD + if (can_apply_edp_fast_boot) keep_edp_vdd_on = true; - DC_LOG_EVENT_LINK_TRAINING("Keep eDP Vdd on\n"); - } else { - DC_LOG_EVENT_LINK_TRAINING("No eDP stream enabled, turn eDP Vdd off\n"); - } } // Check seamless boot support diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c index 970b65efeac1..eaa7032f0f1a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c @@ -212,6 +212,9 @@ static void dpp2_cnv_setup ( break; } + /* Set default color space based on format if none is given. */ + color_space = input_color_space ? input_color_space : color_space; + if (is_2bit == 1 && alpha_2bit_lut != NULL) { REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, alpha_2bit_lut->lut0); REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, alpha_2bit_lut->lut1); diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c index 8b6505b7dca8..f50ab961bc17 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c @@ -153,6 +153,9 @@ static void dpp201_cnv_setup( break; } + /* Set default color space based on format if none is given. */ + color_space = input_color_space ? input_color_space : color_space; + if (is_2bit == 1 && alpha_2bit_lut != NULL) { REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, alpha_2bit_lut->lut0); REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, alpha_2bit_lut->lut1); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c index ab3918c0a15b..0dcc07531643 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c @@ -294,6 +294,9 @@ static void dpp3_cnv_setup ( break; } + /* Set default color space based on format if none is given. */ + color_space = input_color_space ? 
input_color_space : color_space; + if (is_2bit == 1 && alpha_2bit_lut != NULL) { REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, alpha_2bit_lut->lut0); REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, alpha_2bit_lut->lut1); diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c index 287a1066b547..bbc58d167c63 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c @@ -513,12 +513,10 @@ void dccg31_set_physymclk( /* Controls the generation of pixel valid for OTG in (OTG -> HPO case) */ static void dccg31_set_dtbclk_dto( struct dccg *dccg, - int dtbclk_inst, - int req_dtbclk_khz, - int num_odm_segments, - const struct dc_crtc_timing *timing) + struct dtbclk_dto_params *params) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + int req_dtbclk_khz = params->pixclk_khz; uint32_t dtbdto_div; /* Mode DTBDTO Rate DTBCLK_DTO<x>_DIV Register @@ -529,57 +527,53 @@ static void dccg31_set_dtbclk_dto( * DSC native 4:2:2 pixel rate/2 4 * Other modes pixel rate 8 */ - if (num_odm_segments == 4) { + if (params->num_odm_segments == 4) { dtbdto_div = 2; - req_dtbclk_khz = req_dtbclk_khz / 4; - } else if ((num_odm_segments == 2) || - (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) || - (timing->flags.DSC && timing->pixel_encoding == PIXEL_ENCODING_YCBCR422 - && !timing->dsc_cfg.ycbcr422_simple)) { + req_dtbclk_khz = params->pixclk_khz / 4; + } else if ((params->num_odm_segments == 2) || + (params->timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) || + (params->timing->flags.DSC && params->timing->pixel_encoding == PIXEL_ENCODING_YCBCR422 + && !params->timing->dsc_cfg.ycbcr422_simple)) { dtbdto_div = 4; - req_dtbclk_khz = req_dtbclk_khz / 2; + req_dtbclk_khz = params->pixclk_khz / 2; } else dtbdto_div = 8; - if (dccg->ref_dtbclk_khz && req_dtbclk_khz) { + if (params->ref_dtbclk_khz && req_dtbclk_khz) { uint32_t modulo, phase; // phase / modulo = dtbclk / dtbclk ref - modulo = dccg->ref_dtbclk_khz * 1000; - phase = div_u64((((unsigned long long)modulo * req_dtbclk_khz) + dccg->ref_dtbclk_khz - 1), - dccg->ref_dtbclk_khz); + modulo = params->ref_dtbclk_khz * 1000; + phase = div_u64((((unsigned long long)modulo * req_dtbclk_khz) + params->ref_dtbclk_khz - 1), + params->ref_dtbclk_khz); - REG_UPDATE(OTG_PIXEL_RATE_CNTL[dtbclk_inst], - DTBCLK_DTO_DIV[dtbclk_inst], dtbdto_div); + REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst], + DTBCLK_DTO_DIV[params->otg_inst], dtbdto_div); - REG_WRITE(DTBCLK_DTO_MODULO[dtbclk_inst], modulo); - REG_WRITE(DTBCLK_DTO_PHASE[dtbclk_inst], phase); + REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], modulo); + REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], phase); - REG_UPDATE(OTG_PIXEL_RATE_CNTL[dtbclk_inst], - DTBCLK_DTO_ENABLE[dtbclk_inst], 1); + REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst], + DTBCLK_DTO_ENABLE[params->otg_inst], 1); - REG_WAIT(OTG_PIXEL_RATE_CNTL[dtbclk_inst], - DTBCLKDTO_ENABLE_STATUS[dtbclk_inst], 1, + REG_WAIT(OTG_PIXEL_RATE_CNTL[params->otg_inst], + DTBCLKDTO_ENABLE_STATUS[params->otg_inst], 1, 1, 100); /* The recommended programming sequence to enable DTBCLK DTO to generate * valid pixel HPO DPSTREAM ENCODER, specifies that DTO source select should * be set only after DTO is enabled */ - REG_UPDATE(OTG_PIXEL_RATE_CNTL[dtbclk_inst], - PIPE_DTO_SRC_SEL[dtbclk_inst], 1); - - dccg->dtbclk_khz[dtbclk_inst] = req_dtbclk_khz; + REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst], + PIPE_DTO_SRC_SEL[params->otg_inst], 1); } else { - 
REG_UPDATE_3(OTG_PIXEL_RATE_CNTL[dtbclk_inst], - DTBCLK_DTO_ENABLE[dtbclk_inst], 0, - PIPE_DTO_SRC_SEL[dtbclk_inst], 0, - DTBCLK_DTO_DIV[dtbclk_inst], dtbdto_div); + REG_UPDATE_3(OTG_PIXEL_RATE_CNTL[params->otg_inst], + DTBCLK_DTO_ENABLE[params->otg_inst], 0, + PIPE_DTO_SRC_SEL[params->otg_inst], 0, + DTBCLK_DTO_DIV[params->otg_inst], dtbdto_div); - REG_WRITE(DTBCLK_DTO_MODULO[dtbclk_inst], 0); - REG_WRITE(DTBCLK_DTO_PHASE[dtbclk_inst], 0); - - dccg->dtbclk_khz[dtbclk_inst] = 0; + REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], 0); + REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], 0); } } @@ -606,16 +600,12 @@ void dccg31_set_audio_dtbclk_dto( REG_UPDATE(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO_SEL, 4); // 04 - DCCG_AUDIO_DTO_SEL_AUDIO_DTO_DTBCLK - - dccg->audio_dtbclk_khz = req_audio_dtbclk_khz; } else { REG_WRITE(DCCG_AUDIO_DTBCLK_DTO_PHASE, 0); REG_WRITE(DCCG_AUDIO_DTBCLK_DTO_MODULO, 0); REG_UPDATE(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO_SEL, 3); // 03 - DCCG_AUDIO_DTO_SEL_NO_AUDIO_DTO - - dccg->audio_dtbclk_khz = 0; } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c index d94fd1010deb..8b12b4111c88 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c @@ -230,9 +230,7 @@ static void enc31_hw_init(struct link_encoder *enc) AUX_RX_PHASE_DETECT_LEN, [21,20] = 0x3 default is 3 AUX_RX_DETECTION_THRESHOLD [30:28] = 1 */ - AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110); - - AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c7a); + // dmub will read AUX_DPHY_RX_CONTROL0/AUX_DPHY_TX_CONTROL from vbios table in dp_aux_init //AUX_DPHY_TX_REF_CONTROL'AUX_TX_REF_DIV HW default is 0x32; // Set AUX_TX_REF_DIV Divider to generate 2 MHz reference from refclk diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c index 789f7562cdc7..d2273674e872 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c @@ -1284,10 +1284,8 @@ static bool is_dtbclk_required(struct dc *dc, struct dc_state *context) for (i = 0; i < dc->res_pool->pipe_count; i++) { if (!context->res_ctx.pipe_ctx[i].stream) continue; -#if defined (CONFIG_DRM_AMD_DC_DP2_0) if (is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i])) return true; -#endif } return false; } diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h index 46ce5a0ee4ec..b5570aa8e39d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h @@ -237,6 +237,7 @@ struct clk_mgr_funcs { bool safe_to_lower); int (*get_dp_ref_clk_frequency)(struct clk_mgr *clk_mgr); + int (*get_dtb_ref_clk_frequency)(struct clk_mgr *clk_mgr); void (*set_low_power_state)(struct clk_mgr *clk_mgr); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h index b2fa4de47734..c7021915bac8 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h @@ -60,8 +60,17 @@ struct dccg { const struct dccg_funcs *funcs; int pipe_dppclk_khz[MAX_PIPES]; int ref_dppclk; - int dtbclk_khz[MAX_PIPES]; - int audio_dtbclk_khz; + //int dtbclk_khz[MAX_PIPES];/* TODO needs to be removed */ + //int audio_dtbclk_khz;/* TODO needs to be removed */ + int ref_dtbclk_khz;/* TODO needs to be removed */ +}; + +struct dtbclk_dto_params { + const 
struct dc_crtc_timing *timing; + int otg_inst; + int pixclk_khz; + int req_audio_dtbclk_khz; + int num_odm_segments; int ref_dtbclk_khz; }; @@ -111,10 +120,7 @@ struct dccg_funcs { void (*set_dtbclk_dto)( struct dccg *dccg, - int dtbclk_inst, - int req_dtbclk_khz, - int num_odm_segments, - const struct dc_crtc_timing *timing); + struct dtbclk_dto_params *dto_params); void (*set_audio_dtbclk_dto)( struct dccg *dccg, diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c index 87972dc8443d..ea6cf8bfce30 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c @@ -27,6 +27,7 @@ #include "core_types.h" #include "dccg.h" #include "dc_link_dp.h" +#include "clk_mgr.h" static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link) { @@ -106,14 +107,18 @@ static void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx) struct hpo_dp_link_encoder *link_enc = pipe_ctx->link_res.hpo_dp_link_enc; struct dccg *dccg = dc->res_pool->dccg; struct timing_generator *tg = pipe_ctx->stream_res.tg; - int odm_segment_count = get_odm_segment_count(pipe_ctx); + struct dtbclk_dto_params dto_params = {0}; enum phyd32clk_clock_source phyd32clk = get_phyd32clk_src(pipe_ctx->stream->link); + dto_params.otg_inst = tg->inst; + dto_params.pixclk_khz = pipe_ctx->stream->phy_pix_clk; + dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx); + dto_params.timing = &pipe_ctx->stream->timing; + dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr); + dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst); dccg->funcs->enable_symclk32_se(dccg, stream_enc->inst, phyd32clk); - dccg->funcs->set_dtbclk_dto(dccg, tg->inst, pipe_ctx->stream->phy_pix_clk, - odm_segment_count, - &pipe_ctx->stream->timing); + dccg->funcs->set_dtbclk_dto(dccg, &dto_params); stream_enc->funcs->enable_stream(stream_enc); stream_enc->funcs->map_stream_to_link(stream_enc, stream_enc->inst, link_enc->inst); } @@ -124,9 +129,13 @@ static void reset_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx) struct hpo_dp_stream_encoder *stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc; struct dccg *dccg = dc->res_pool->dccg; struct timing_generator *tg = pipe_ctx->stream_res.tg; + struct dtbclk_dto_params dto_params = {0}; + + dto_params.otg_inst = tg->inst; + dto_params.timing = &pipe_ctx->stream->timing; stream_enc->funcs->disable(stream_enc); - dccg->funcs->set_dtbclk_dto(dccg, tg->inst, 0, 0, &pipe_ctx->stream->timing); + dccg->funcs->set_dtbclk_dto(dccg, &dto_params); dccg->funcs->disable_symclk32_se(dccg, stream_enc->inst); dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst); } diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c index 7c9330a61ac1..c7bd7e216710 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c @@ -84,7 +84,7 @@ void dmub_dcn31_reset(struct dmub_srv *dmub) { union dmub_gpint_data_register cmd; const uint32_t timeout = 100; - uint32_t in_reset, scratch, i; + uint32_t in_reset, scratch, i, pwait_mode; REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset); @@ -115,6 +115,13 @@ void dmub_dcn31_reset(struct dmub_srv *dmub) udelay(1); } + for (i = 0; i < timeout; ++i) { + REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &pwait_mode); + if (pwait_mode & (1 << 0)) + break; + + udelay(1); + } /* Force reset in case we timed out, DMCUB is 
likely hung. */ } @@ -125,6 +132,8 @@ void dmub_dcn31_reset(struct dmub_srv *dmub) REG_WRITE(DMCUB_INBOX1_WPTR, 0); REG_WRITE(DMCUB_OUTBOX1_RPTR, 0); REG_WRITE(DMCUB_OUTBOX1_WPTR, 0); + REG_WRITE(DMCUB_OUTBOX0_RPTR, 0); + REG_WRITE(DMCUB_OUTBOX0_WPTR, 0); REG_WRITE(DMCUB_SCRATCH0, 0); /* Clear the GPINT command manually so we don't send anything during boot. */ diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h index 59ddc81b5a0e..f6db6f89d45d 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h @@ -151,7 +151,8 @@ struct dmub_srv; DMUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET) \ DMUB_SF(DMCUB_INBOX0_WPTR, DMCUB_INBOX0_WPTR) \ DMUB_SF(DMCUB_INTERRUPT_ENABLE, DMCUB_GPINT_IH_INT_EN) \ - DMUB_SF(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK) + DMUB_SF(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK) \ + DMUB_SF(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS) struct dmub_srv_dcn31_reg_offset { #define DMUB_SR(reg) uint32_t reg; diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h index 73b9e0a87e54..20a3d4e23f66 100644 --- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h +++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h @@ -127,6 +127,8 @@ struct av_sync_data { static const uint8_t DP_SINK_DEVICE_STR_ID_1[] = {7, 1, 8, 7, 3, 0}; static const uint8_t DP_SINK_DEVICE_STR_ID_2[] = {7, 1, 8, 7, 5, 0}; +static const u8 DP_SINK_BRANCH_DEV_NAME_7580[] = "7580\x80u"; + /*MST Dock*/ static const uint8_t SYNAPTICS_DEVICE_ID[] = "SYNA"; diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_7_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_7_pptable.h index 247c6e9632ba..1cb399dbc7cc 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_7_pptable.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_7_pptable.h @@ -22,6 +22,7 @@ #ifndef SMU_11_0_7_PPTABLE_H #define SMU_11_0_7_PPTABLE_H +#pragma pack(push, 1) #define SMU_11_0_7_TABLE_FORMAT_REVISION 15 @@ -139,7 +140,7 @@ struct smu_11_0_7_overdrive_table uint32_t max[SMU_11_0_7_MAX_ODSETTING]; //default maximum settings uint32_t min[SMU_11_0_7_MAX_ODSETTING]; //default minimum settings int16_t pm_setting[SMU_11_0_7_MAX_PMSETTING]; //Optimized power mode feature settings -} __attribute__((packed)); +}; enum SMU_11_0_7_PPCLOCK_ID { SMU_11_0_7_PPCLOCK_GFXCLK = 0, @@ -166,7 +167,7 @@ struct smu_11_0_7_power_saving_clock_table uint32_t count; //power_saving_clock_count = SMU_11_0_7_PPCLOCK_COUNT uint32_t max[SMU_11_0_7_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Maximum array In MHz uint32_t min[SMU_11_0_7_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Minimum array In MHz -} __attribute__((packed)); +}; struct smu_11_0_7_powerplay_table { @@ -191,6 +192,8 @@ struct smu_11_0_7_powerplay_table struct smu_11_0_7_overdrive_table overdrive_table; PPTable_t smc_pptable; //PPTable_t in smu11_driver_if.h -} __attribute__((packed)); +}; + +#pragma pack(pop) #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h index 7a63cf8e85ed..0116e3d04fad 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h @@ -22,6 +22,7 @@ #ifndef SMU_11_0_PPTABLE_H #define SMU_11_0_PPTABLE_H +#pragma pack(push, 1) #define SMU_11_0_TABLE_FORMAT_REVISION 12 @@ -109,7 +110,7 @@ struct smu_11_0_overdrive_table uint8_t cap[SMU_11_0_MAX_ODFEATURE]; //OD 
feature support flags uint32_t max[SMU_11_0_MAX_ODSETTING]; //default maximum settings uint32_t min[SMU_11_0_MAX_ODSETTING]; //default minimum settings -} __attribute__((packed)); +}; enum SMU_11_0_PPCLOCK_ID { SMU_11_0_PPCLOCK_GFXCLK = 0, @@ -133,7 +134,7 @@ struct smu_11_0_power_saving_clock_table uint32_t count; //power_saving_clock_count = SMU_11_0_PPCLOCK_COUNT uint32_t max[SMU_11_0_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Maximum array In MHz uint32_t min[SMU_11_0_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Minimum array In MHz -} __attribute__((packed)); +}; struct smu_11_0_powerplay_table { @@ -162,6 +163,8 @@ struct smu_11_0_powerplay_table #ifndef SMU_11_0_PARTIAL_PPTABLE PPTable_t smc_pptable; //PPTable_t in smu11_driver_if.h #endif -} __attribute__((packed)); +}; + +#pragma pack(pop) #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_7_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_7_pptable.h index 3f29f4327378..478862ded0bd 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_7_pptable.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_7_pptable.h @@ -22,6 +22,8 @@ #ifndef SMU_13_0_7_PPTABLE_H #define SMU_13_0_7_PPTABLE_H +#pragma pack(push, 1) + #define SMU_13_0_7_TABLE_FORMAT_REVISION 15 //// POWERPLAYTABLE::ulPlatformCaps @@ -194,7 +196,8 @@ struct smu_13_0_7_powerplay_table struct smu_13_0_7_overdrive_table overdrive_table; uint8_t padding1; PPTable_t smc_pptable; //PPTable_t in driver_if.h -} __attribute__((packed)); +}; +#pragma pack(pop) #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_pptable.h index 1f311396b706..043307485528 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_pptable.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_pptable.h @@ -22,6 +22,8 @@ #ifndef SMU_13_0_PPTABLE_H #define SMU_13_0_PPTABLE_H +#pragma pack(push, 1) + #define SMU_13_0_TABLE_FORMAT_REVISION 1 //// POWERPLAYTABLE::ulPlatformCaps @@ -109,7 +111,7 @@ struct smu_13_0_overdrive_table { uint8_t cap[SMU_13_0_MAX_ODFEATURE]; //OD feature support flags uint32_t max[SMU_13_0_MAX_ODSETTING]; //default maximum settings uint32_t min[SMU_13_0_MAX_ODSETTING]; //default minimum settings -} __attribute__((packed)); +}; enum SMU_13_0_PPCLOCK_ID { SMU_13_0_PPCLOCK_GFXCLK = 0, @@ -132,7 +134,7 @@ struct smu_13_0_power_saving_clock_table { uint32_t count; //power_saving_clock_count = SMU_11_0_PPCLOCK_COUNT uint32_t max[SMU_13_0_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Maximum array In MHz uint32_t min[SMU_13_0_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Minimum array In MHz -} __attribute__((packed)); +}; struct smu_13_0_powerplay_table { struct atom_common_table_header header; @@ -160,6 +162,8 @@ struct smu_13_0_powerplay_table { #ifndef SMU_13_0_PARTIAL_PPTABLE PPTable_t smc_pptable; //PPTable_t in driver_if.h #endif -} __attribute__((packed)); +}; + +#pragma pack(pop) #endif diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c index 4551bc8a3ecf..f573d582407e 100644 --- a/drivers/gpu/drm/ast/ast_dp.c +++ b/drivers/gpu/drm/ast/ast_dp.c @@ -160,13 +160,12 @@ void ast_dp_launch(struct drm_device *dev, u8 bPower) } if (bDPExecute) - ast->tx_chip_type = AST_TX_ASTDP; + ast->tx_chip_types |= BIT(AST_TX_ASTDP); ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5, (u8) ~ASTDP_HOST_EDID_READ_DONE_MASK, ASTDP_HOST_EDID_READ_DONE); - } else - ast->tx_chip_type = AST_TX_NONE; + } } diff --git a/drivers/gpu/drm/ast/ast_dp501.c b/drivers/gpu/drm/ast/ast_dp501.c index 
204c926a18ea..4f75a9efb610 100644 --- a/drivers/gpu/drm/ast/ast_dp501.c +++ b/drivers/gpu/drm/ast/ast_dp501.c @@ -450,7 +450,7 @@ void ast_init_3rdtx(struct drm_device *dev) ast_init_dvo(dev); break; default: - if (ast->tx_chip_type == AST_TX_SIL164) + if (ast->tx_chip_types & BIT(AST_TX_SIL164)) ast_init_dvo(dev); else ast_init_analog(dev); diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index afebe35f205e..a34db4380f68 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -73,6 +73,11 @@ enum ast_tx_chip { AST_TX_ASTDP, }; +#define AST_TX_NONE_BIT BIT(AST_TX_NONE) +#define AST_TX_SIL164_BIT BIT(AST_TX_SIL164) +#define AST_TX_DP501_BIT BIT(AST_TX_DP501) +#define AST_TX_ASTDP_BIT BIT(AST_TX_ASTDP) + #define AST_DRAM_512Mx16 0 #define AST_DRAM_1Gx16 1 #define AST_DRAM_512Mx32 2 @@ -173,7 +178,7 @@ struct ast_private { struct drm_plane primary_plane; struct ast_cursor_plane cursor_plane; struct drm_crtc crtc; - union { + struct { struct { struct drm_encoder encoder; struct ast_vga_connector vga_connector; @@ -199,7 +204,7 @@ struct ast_private { ast_use_defaults } config_mode; - enum ast_tx_chip tx_chip_type; + unsigned long tx_chip_types; /* bitfield of enum ast_chip_type */ u8 *dp501_fw_addr; const struct firmware *dp501_fw; /* dp501 fw */ }; diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index d770d5a23c1a..067453266897 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -216,7 +216,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) } /* Check 3rd Tx option (digital output afaik) */ - ast->tx_chip_type = AST_TX_NONE; + ast->tx_chip_types |= AST_TX_NONE_BIT; /* * VGACRA3 Enhanced Color Mode Register, check if DVO is already @@ -229,7 +229,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) if (!*need_post) { jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xff); if (jreg & 0x80) - ast->tx_chip_type = AST_TX_SIL164; + ast->tx_chip_types = AST_TX_SIL164_BIT; } if ((ast->chip == AST2300) || (ast->chip == AST2400) || (ast->chip == AST2500)) { @@ -241,7 +241,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff); switch (jreg) { case 0x04: - ast->tx_chip_type = AST_TX_SIL164; + ast->tx_chip_types = AST_TX_SIL164_BIT; break; case 0x08: ast->dp501_fw_addr = drmm_kzalloc(dev, 32*1024, GFP_KERNEL); @@ -254,22 +254,19 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) } fallthrough; case 0x0c: - ast->tx_chip_type = AST_TX_DP501; + ast->tx_chip_types = AST_TX_DP501_BIT; } } else if (ast->chip == AST2600) ast_dp_launch(&ast->base, 0); /* Print stuff for diagnostic purposes */ - switch(ast->tx_chip_type) { - case AST_TX_SIL164: + if (ast->tx_chip_types & AST_TX_NONE_BIT) + drm_info(dev, "Using analog VGA\n"); + if (ast->tx_chip_types & AST_TX_SIL164_BIT) drm_info(dev, "Using Sil164 TMDS transmitter\n"); - break; - case AST_TX_DP501: + if (ast->tx_chip_types & AST_TX_DP501_BIT) drm_info(dev, "Using DP501 DisplayPort transmitter\n"); - break; - default: - drm_info(dev, "Analog VGA only\n"); - } + return 0; } diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 323af2746aa9..db2010a55674 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -997,10 +997,10 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode) case DRM_MODE_DPMS_ON: ast_set_index_reg_mask(ast, 
AST_IO_SEQ_PORT, 0x01, 0xdf, 0); ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xfc, 0); - if (ast->tx_chip_type == AST_TX_DP501) + if (ast->tx_chip_types & AST_TX_DP501_BIT) ast_set_dp501_video_output(crtc->dev, 1); - if (ast->tx_chip_type == AST_TX_ASTDP) { + if (ast->tx_chip_types & AST_TX_ASTDP_BIT) { ast_dp_power_on_off(crtc->dev, AST_DP_POWER_ON); ast_wait_for_vretrace(ast); ast_dp_set_on_off(crtc->dev, 1); @@ -1012,17 +1012,17 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode) case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: ch = mode; - if (ast->tx_chip_type == AST_TX_DP501) + if (ast->tx_chip_types & AST_TX_DP501_BIT) ast_set_dp501_video_output(crtc->dev, 0); - break; - if (ast->tx_chip_type == AST_TX_ASTDP) { + if (ast->tx_chip_types & AST_TX_ASTDP_BIT) { ast_dp_set_on_off(crtc->dev, 0); ast_dp_power_on_off(crtc->dev, AST_DP_POWER_OFF); } ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x01, 0xdf, 0x20); ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xfc, ch); + break; } } @@ -1155,7 +1155,7 @@ ast_crtc_helper_atomic_flush(struct drm_crtc *crtc, ast_crtc_load_lut(ast, crtc); //Set Aspeed Display-Port - if (ast->tx_chip_type == AST_TX_ASTDP) + if (ast->tx_chip_types & AST_TX_ASTDP_BIT) ast_dp_set_mode(crtc, vbios_mode_info); mutex_unlock(&ast->ioregs_lock); @@ -1739,22 +1739,26 @@ int ast_mode_config_init(struct ast_private *ast) ast_crtc_init(dev); - switch (ast->tx_chip_type) { - case AST_TX_NONE: + if (ast->tx_chip_types & AST_TX_NONE_BIT) { ret = ast_vga_output_init(ast); - break; - case AST_TX_SIL164: + if (ret) + return ret; + } + if (ast->tx_chip_types & AST_TX_SIL164_BIT) { ret = ast_sil164_output_init(ast); - break; - case AST_TX_DP501: + if (ret) + return ret; + } + if (ast->tx_chip_types & AST_TX_DP501_BIT) { ret = ast_dp501_output_init(ast); - break; - case AST_TX_ASTDP: + if (ret) + return ret; + } + if (ast->tx_chip_types & AST_TX_ASTDP_BIT) { ret = ast_astdp_output_init(ast); - break; + if (ret) + return ret; } - if (ret) - return ret; drm_mode_config_reset(dev); diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c index 0aa9cf0fb5c3..82fd3c8adee1 100644 --- a/drivers/gpu/drm/ast/ast_post.c +++ b/drivers/gpu/drm/ast/ast_post.c @@ -391,7 +391,7 @@ void ast_post_gpu(struct drm_device *dev) ast_init_3rdtx(dev); } else { - if (ast->tx_chip_type != AST_TX_NONE) + if (ast->tx_chip_types & AST_TX_SIL164_BIT) ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */ } } diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c index b97f6e8f0f6b..01c8b80e34ec 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c @@ -1267,6 +1267,25 @@ static int analogix_dp_bridge_attach(struct drm_bridge *bridge, } static +struct drm_crtc *analogix_dp_get_old_crtc(struct analogix_dp_device *dp, + struct drm_atomic_state *state) +{ + struct drm_encoder *encoder = dp->encoder; + struct drm_connector *connector; + struct drm_connector_state *conn_state; + + connector = drm_atomic_get_old_connector_for_encoder(state, encoder); + if (!connector) + return NULL; + + conn_state = drm_atomic_get_old_connector_state(state, connector); + if (!conn_state) + return NULL; + + return conn_state->crtc; +} + +static struct drm_crtc *analogix_dp_get_new_crtc(struct analogix_dp_device *dp, struct drm_atomic_state *state) { @@ -1446,14 +1465,16 @@ analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge, { struct 
drm_atomic_state *old_state = old_bridge_state->base.state; struct analogix_dp_device *dp = bridge->driver_private; - struct drm_crtc *crtc; + struct drm_crtc *old_crtc, *new_crtc; + struct drm_crtc_state *old_crtc_state = NULL; struct drm_crtc_state *new_crtc_state = NULL; + int ret; - crtc = analogix_dp_get_new_crtc(dp, old_state); - if (!crtc) + new_crtc = analogix_dp_get_new_crtc(dp, old_state); + if (!new_crtc) goto out; - new_crtc_state = drm_atomic_get_new_crtc_state(old_state, crtc); + new_crtc_state = drm_atomic_get_new_crtc_state(old_state, new_crtc); if (!new_crtc_state) goto out; @@ -1462,6 +1483,19 @@ analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge, return; out: + old_crtc = analogix_dp_get_old_crtc(dp, old_state); + if (old_crtc) { + old_crtc_state = drm_atomic_get_old_crtc_state(old_state, + old_crtc); + + /* When moving from PSR to fully disabled, exit PSR first. */ + if (old_crtc_state && old_crtc_state->self_refresh_active) { + ret = analogix_dp_disable_psr(dp); + if (ret) + DRM_ERROR("Failed to disable psr (%d)\n", ret); + } + } + analogix_dp_bridge_disable(bridge); } diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c index 2831f0813c3a..ac66f408b40c 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c @@ -577,7 +577,7 @@ static int sn65dsi83_parse_dt(struct sn65dsi83 *ctx, enum sn65dsi83_model model) ctx->host_node = of_graph_get_remote_port_parent(endpoint); of_node_put(endpoint); - if (ctx->dsi_lanes < 0 || ctx->dsi_lanes > 4) { + if (ctx->dsi_lanes <= 0 || ctx->dsi_lanes > 4) { ret = -EINVAL; goto err_put_node; } diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 9603193d2fa1..987e4b212e9f 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -1011,9 +1011,19 @@ crtc_needs_disable(struct drm_crtc_state *old_state, return drm_atomic_crtc_effectively_active(old_state); /* - * We need to run through the crtc_funcs->disable() function if the CRTC - * is currently on, if it's transitioning to self refresh mode, or if - * it's in self refresh mode and needs to be fully disabled. + * We need to disable bridge(s) and CRTC if we're transitioning out of + * self-refresh and changing CRTCs at the same time, because the + * bridge tracks self-refresh status via CRTC state. + */ + if (old_state->self_refresh_active && + old_state->crtc != new_state->crtc) + return true; + + /* + * We also need to run through the crtc_funcs->disable() function if + * the CRTC is currently on, if it's transitioning to self refresh + * mode, or if it's in self refresh mode and needs to be fully + * disabled. 
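As an aside, the rewritten crtc_needs_disable() comment above enumerates every case that must take the full disable path. Below is a minimal standalone model of that predicate. The struct is a toy stand-in for drm_crtc_state, and the final operand of the return (truncated at the hunk boundary above) is reconstructed from the comment's wording, so treat this as a sketch rather than the kernel's exact expression.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for the few drm_crtc_state fields the predicate reads. */
struct crtc_state_model {
	bool active;
	bool self_refresh_active;
	int crtc_id;                    /* stand-in for the crtc pointer */
};

static bool crtc_needs_disable_model(const struct crtc_state_model *old,
				     const struct crtc_state_model *new)
{
	/* New case in this patch: leaving self-refresh while switching
	 * CRTCs, since the bridge tracks self-refresh via CRTC state. */
	if (old->self_refresh_active && old->crtc_id != new->crtc_id)
		return true;

	/* Pre-existing cases per the comment: CRTC currently on,
	 * entering self-refresh, or leaving self-refresh for full off. */
	return old->active ||
	       new->self_refresh_active ||
	       (old->self_refresh_active && !new->active);
}

int main(void)
{
	struct crtc_state_model old = { .active = false,
					.self_refresh_active = true,
					.crtc_id = 0 };
	struct crtc_state_model new = { .active = true,
					.self_refresh_active = false,
					.crtc_id = 1 };

	/* PSR exit combined with a CRTC switch: must fully disable. */
	printf("needs disable: %d\n", crtc_needs_disable_model(&old, &new));
	return 0;
}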
*/ return old_state->active || (old_state->self_refresh_active && !new_state->active) || diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index 4e853acfd1e8..df87ba99a87c 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -152,6 +152,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "AYA NEO 2021"), }, .driver_data = (void *)&lcd800x1280_rightside_up, + }, { /* AYA NEO NEXT */ + .matches = { + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AYANEO"), + DMI_MATCH(DMI_BOARD_NAME, "NEXT"), + }, + .driver_data = (void *)&lcd800x1280_rightside_up, }, { /* Chuwi HiBook (CWI514) */ .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"), diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 424ea23eec32..16c539657f73 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -177,15 +177,15 @@ static struct exynos_drm_driver_info exynos_drm_drivers[] = { DRV_PTR(mixer_driver, CONFIG_DRM_EXYNOS_MIXER), DRM_COMPONENT_DRIVER }, { - DRV_PTR(mic_driver, CONFIG_DRM_EXYNOS_MIC), - DRM_COMPONENT_DRIVER - }, { DRV_PTR(dp_driver, CONFIG_DRM_EXYNOS_DP), DRM_COMPONENT_DRIVER }, { DRV_PTR(dsi_driver, CONFIG_DRM_EXYNOS_DSI), DRM_COMPONENT_DRIVER }, { + DRV_PTR(mic_driver, CONFIG_DRM_EXYNOS_MIC), + DRM_COMPONENT_DRIVER + }, { DRV_PTR(hdmi_driver, CONFIG_DRM_EXYNOS_HDMI), DRM_COMPONENT_DRIVER }, { diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c index 9e06f8e2a863..09ce28ee08d9 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_mic.c +++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c @@ -26,6 +26,7 @@ #include <drm/drm_print.h> #include "exynos_drm_drv.h" +#include "exynos_drm_crtc.h" /* Sysreg registers for MIC */ #define DSD_CFG_MUX 0x1004 @@ -100,9 +101,7 @@ struct exynos_mic { bool i80_mode; struct videomode vm; - struct drm_encoder *encoder; struct drm_bridge bridge; - struct drm_bridge *next_bridge; bool enabled; }; @@ -229,8 +228,6 @@ static void mic_set_reg_on(struct exynos_mic *mic, bool enable) writel(reg, mic->reg + MIC_OP); } -static void mic_disable(struct drm_bridge *bridge) { } - static void mic_post_disable(struct drm_bridge *bridge) { struct exynos_mic *mic = bridge->driver_private; @@ -297,34 +294,30 @@ unlock: mutex_unlock(&mic_mutex); } -static void mic_enable(struct drm_bridge *bridge) { } - -static int mic_attach(struct drm_bridge *bridge, - enum drm_bridge_attach_flags flags) -{ - struct exynos_mic *mic = bridge->driver_private; - - return drm_bridge_attach(bridge->encoder, mic->next_bridge, - &mic->bridge, flags); -} - static const struct drm_bridge_funcs mic_bridge_funcs = { - .disable = mic_disable, .post_disable = mic_post_disable, .mode_set = mic_mode_set, .pre_enable = mic_pre_enable, - .enable = mic_enable, - .attach = mic_attach, }; static int exynos_mic_bind(struct device *dev, struct device *master, void *data) { struct exynos_mic *mic = dev_get_drvdata(dev); + struct drm_device *drm_dev = data; + struct exynos_drm_crtc *crtc = exynos_drm_crtc_get_by_type(drm_dev, + EXYNOS_DISPLAY_TYPE_LCD); + struct drm_encoder *e, *encoder = NULL; + + drm_for_each_encoder(e, drm_dev) + if (e->possible_crtcs == drm_crtc_mask(&crtc->base)) + encoder = e; + if (!encoder) + return -ENODEV; mic->bridge.driver_private = mic; - return 0; + return drm_bridge_attach(encoder, &mic->bridge, NULL, 0); } static void 
exynos_mic_unbind(struct device *dev, struct device *master, @@ -388,7 +381,6 @@ static int exynos_mic_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct exynos_mic *mic; - struct device_node *remote; struct resource res; int ret, i; @@ -432,16 +424,6 @@ static int exynos_mic_probe(struct platform_device *pdev) } } - remote = of_graph_get_remote_node(dev->of_node, 1, 0); - mic->next_bridge = of_drm_find_bridge(remote); - if (IS_ERR(mic->next_bridge)) { - DRM_DEV_ERROR(dev, "mic: Failed to find next bridge\n"); - ret = PTR_ERR(mic->next_bridge); - goto err; - } - - of_node_put(remote); - platform_set_drvdata(pdev, mic); mic->bridge.funcs = &mic_bridge_funcs; diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index e4a79c11fd25..ff67899522cf 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -388,13 +388,23 @@ static int dg2_max_source_rate(struct intel_dp *intel_dp) return intel_dp_is_edp(intel_dp) ? 810000 : 1350000; } +static bool is_low_voltage_sku(struct drm_i915_private *i915, enum phy phy) +{ + u32 voltage; + + voltage = intel_de_read(i915, ICL_PORT_COMP_DW3(phy)) & VOLTAGE_INFO_MASK; + + return voltage == VOLTAGE_INFO_0_85V; +} + static int icl_max_source_rate(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port); - if (intel_phy_is_combo(dev_priv, phy) && !intel_dp_is_edp(intel_dp)) + if (intel_phy_is_combo(dev_priv, phy) && + (is_low_voltage_sku(dev_priv, phy) || !intel_dp_is_edp(intel_dp))) return 540000; return 810000; @@ -402,7 +412,23 @@ static int icl_max_source_rate(struct intel_dp *intel_dp) static int ehl_max_source_rate(struct intel_dp *intel_dp) { - if (intel_dp_is_edp(intel_dp)) + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port); + + if (intel_dp_is_edp(intel_dp) || is_low_voltage_sku(dev_priv, phy)) + return 540000; + + return 810000; +} + +static int dg1_max_source_rate(struct intel_dp *intel_dp) +{ + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + enum phy phy = intel_port_to_phy(i915, dig_port->base.port); + + if (intel_phy_is_combo(i915, phy) && is_low_voltage_sku(i915, phy)) return 540000; return 810000; @@ -445,7 +471,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp) max_rate = dg2_max_source_rate(intel_dp); else if (IS_ALDERLAKE_P(dev_priv) || IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) - max_rate = 810000; + max_rate = dg1_max_source_rate(intel_dp); else if (IS_JSL_EHL(dev_priv)) max_rate = ehl_max_source_rate(intel_dp); else diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index 22f55574a35c..88c2f38aa870 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -2396,7 +2396,7 @@ static void icl_wrpll_params_populate(struct skl_wrpll_params *params, } /* - * Display WA #22010492432: ehl, tgl, adl-p + * Display WA #22010492432: ehl, tgl, adl-s, adl-p * Program half of the nominal DCO divider fraction value. 
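The display workaround noted just below ("program half of the nominal DCO divider fraction value", now also applied on adl-s) reduces to a small calculation gated on a 38.4 MHz non-SSC reference clock, as ehl_combo_pll_div_frac_wa_needed() shows. Here is a hedged sketch of that calculation only: the platform and stepping checks are collapsed into one boolean, and the fraction's field width is assumed, so this is illustrative rather than the i915 implementation.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Models the workaround: when it applies, program half of the
 * nominal DCO divider fraction; otherwise program it unchanged. */
static uint32_t dco_fraction_to_program(uint32_t nominal_frac,
					bool wa_platform,
					unsigned int ref_clk_khz)
{
	if (wa_platform && ref_clk_khz == 38400)
		return nominal_frac / 2;
	return nominal_frac;
}

int main(void)
{
	/* WA applies: 0x8000 -> 0x4000 (16384). */
	printf("%u\n", dco_fraction_to_program(0x8000, true, 38400));
	/* WA does not apply: fraction is unchanged. */
	printf("%u\n", dco_fraction_to_program(0x8000, false, 38400));
	return 0;
}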
*/ static bool @@ -2404,7 +2404,7 @@ ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915) { return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) && IS_JSL_EHL_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) || - IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) && + IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) && i915->dpll.ref_clks.nssc == 38400; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index c326bd2b444f..30fe847c6664 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -999,7 +999,8 @@ static int eb_validate_vmas(struct i915_execbuffer *eb) } } - err = dma_resv_reserve_fences(vma->obj->base.resv, 1); + /* Reserve enough slots to accommodate composite fences */ + err = dma_resv_reserve_fences(vma->obj->base.resv, eb->num_batches); if (err) return err; diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index 53307ca0eed0..51a0fe60c050 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -785,6 +785,7 @@ void intel_gt_driver_unregister(struct intel_gt *gt) { intel_wakeref_t wakeref; + intel_gt_sysfs_unregister(gt); intel_rps_driver_unregister(>->rps); intel_gsc_fini(>->gsc); diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c index 8ec8bc660c8c..9e4ebf53379b 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c @@ -24,7 +24,7 @@ bool is_object_gt(struct kobject *kobj) static struct intel_gt *kobj_to_gt(struct kobject *kobj) { - return container_of(kobj, struct kobj_gt, base)->gt; + return container_of(kobj, struct intel_gt, sysfs_gt); } struct intel_gt *intel_gt_sysfs_get_drvdata(struct device *dev, @@ -72,9 +72,9 @@ static struct attribute *id_attrs[] = { }; ATTRIBUTE_GROUPS(id); +/* A kobject needs a release() method even if it does nothing */ static void kobj_gt_release(struct kobject *kobj) { - kfree(kobj); } static struct kobj_type kobj_gt_type = { @@ -85,8 +85,6 @@ static struct kobj_type kobj_gt_type = { void intel_gt_sysfs_register(struct intel_gt *gt) { - struct kobj_gt *kg; - /* * We need to make things right with the * ABI compatibility. 
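The surrounding sysfs rework replaces the separately allocated kobj_gt wrapper with a kobject embedded directly in struct intel_gt. That is why kobj_to_gt() becomes a plain container_of() and why the release() callback is now empty: kobject core still requires one, but there is no longer a wrapper to kfree. A self-contained illustration of the embedding pattern follows, with toy types standing in for the kernel ones and container_of() defined locally.

#include <stddef.h>
#include <stdio.h>

/* Local definition of the classic container_of pattern. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kobject_model { const char *name; };

/* The kobject lives inside the larger object, as sysfs_gt now lives
 * inside struct intel_gt, so no separate allocation is needed and the
 * kobject's lifetime follows its container. */
struct gt_model {
	int id;
	struct kobject_model sysfs_gt;
};

static struct gt_model *kobj_to_gt(struct kobject_model *kobj)
{
	return container_of(kobj, struct gt_model, sysfs_gt);
}

int main(void)
{
	struct gt_model gt = { .id = 3, .sysfs_gt = { .name = "gt3" } };
	struct kobject_model *kobj = &gt.sysfs_gt;

	/* Recover the parent object from the embedded member. */
	printf("gt%d\n", kobj_to_gt(kobj)->id);
	return 0;
}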
The files were originally @@ -98,25 +96,22 @@ void intel_gt_sysfs_register(struct intel_gt *gt) if (gt_is_root(gt)) intel_gt_sysfs_pm_init(gt, gt_get_parent_obj(gt)); - kg = kzalloc(sizeof(*kg), GFP_KERNEL); - if (!kg) + /* init and xfer ownership to sysfs tree */ + if (kobject_init_and_add(>->sysfs_gt, &kobj_gt_type, + gt->i915->sysfs_gt, "gt%d", gt->info.id)) goto exit_fail; - kobject_init(&kg->base, &kobj_gt_type); - kg->gt = gt; - - /* xfer ownership to sysfs tree */ - if (kobject_add(&kg->base, gt->i915->sysfs_gt, "gt%d", gt->info.id)) - goto exit_kobj_put; - - intel_gt_sysfs_pm_init(gt, &kg->base); + intel_gt_sysfs_pm_init(gt, >->sysfs_gt); return; -exit_kobj_put: - kobject_put(&kg->base); - exit_fail: + kobject_put(>->sysfs_gt); drm_warn(>->i915->drm, "failed to initialize gt%d sysfs root\n", gt->info.id); } + +void intel_gt_sysfs_unregister(struct intel_gt *gt) +{ + kobject_put(>->sysfs_gt); +} diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h index 9471b26752cf..a99aa7e8b01a 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h @@ -13,11 +13,6 @@ struct intel_gt; -struct kobj_gt { - struct kobject base; - struct intel_gt *gt; -}; - bool is_object_gt(struct kobject *kobj); struct drm_i915_private *kobj_to_i915(struct kobject *kobj); @@ -28,6 +23,7 @@ intel_gt_create_kobj(struct intel_gt *gt, const char *name); void intel_gt_sysfs_register(struct intel_gt *gt); +void intel_gt_sysfs_unregister(struct intel_gt *gt); struct intel_gt *intel_gt_sysfs_get_drvdata(struct device *dev, const char *name); diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h index b06611c1d4ad..edd7a3cf5f5f 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h @@ -224,6 +224,9 @@ struct intel_gt { } mocs; struct intel_pxp pxp; + + /* gt/gtN sysfs */ + struct kobject sysfs_gt; }; enum intel_gt_scratch_field { diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index d078f884b5e3..f0d7b57b741e 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -156,7 +156,7 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw) [INTEL_UC_FW_TYPE_GUC] = { blobs_guc, ARRAY_SIZE(blobs_guc) }, [INTEL_UC_FW_TYPE_HUC] = { blobs_huc, ARRAY_SIZE(blobs_huc) }, }; - static const struct uc_fw_platform_requirement *fw_blobs; + const struct uc_fw_platform_requirement *fw_blobs; enum intel_platform p = INTEL_INFO(i915)->platform; u32 fw_count; u8 rev = INTEL_REVID(i915); diff --git a/drivers/gpu/drm/i915/i915_drm_client.c b/drivers/gpu/drm/i915/i915_drm_client.c index 18d38cb59923..b09d1d386574 100644 --- a/drivers/gpu/drm/i915/i915_drm_client.c +++ b/drivers/gpu/drm/i915/i915_drm_client.c @@ -116,8 +116,9 @@ show_client_class(struct seq_file *m, total += busy_add(ctx, class); rcu_read_unlock(); - seq_printf(m, "drm-engine-%s:\t%llu ns\n", - uabi_class_names[class], total); + if (capacity) + seq_printf(m, "drm-engine-%s:\t%llu ns\n", + uabi_class_names[class], total); if (capacity > 1) seq_printf(m, "drm-engine-capacity-%s:\t%u\n", diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 8521daba212a..1e2750210831 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -166,7 +166,14 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj, struct device 
*kdev = kobj_to_dev(kobj); struct drm_i915_private *i915 = kdev_minor_to_i915(kdev); struct i915_gpu_coredump *gpu; - ssize_t ret; + ssize_t ret = 0; + + /* + * FIXME: Concurrent clients triggering resets and reading + clearing + * dumps can cause inconsistent sysfs reads when a user calls in with a + * non-zero offset to complete a prior partial read but the + * gpu_coredump has been cleared or replaced. + */ gpu = i915_first_error_state(i915); if (IS_ERR(gpu)) { @@ -178,8 +185,10 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj, const char *str = "No error state collected\n"; size_t len = strlen(str); - ret = min_t(size_t, count, len - off); - memcpy(buf, str + off, ret); + if (off < len) { + ret = min_t(size_t, count, len - off); + memcpy(buf, str + off, ret); + } } return ret; @@ -259,4 +268,6 @@ void i915_teardown_sysfs(struct drm_i915_private *dev_priv) device_remove_bin_file(kdev, &dpf_attrs_1); device_remove_bin_file(kdev, &dpf_attrs); + + kobject_put(dev_priv->sysfs_gt); } diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 4f6db539571a..0bffb70b3c5f 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -23,6 +23,7 @@ */ #include <linux/sched/mm.h> +#include <linux/dma-fence-array.h> #include <drm/drm_gem.h> #include "display/intel_frontbuffer.h" @@ -1823,6 +1824,21 @@ int _i915_vma_move_to_active(struct i915_vma *vma, if (unlikely(err)) return err; + /* + * Reserve fences slot early to prevent an allocation after preparing + * the workload and associating fences with dma_resv. + */ + if (fence && !(flags & __EXEC_OBJECT_NO_RESERVE)) { + struct dma_fence *curr; + int idx; + + dma_fence_array_for_each(curr, idx, fence) + ; + err = dma_resv_reserve_fences(vma->obj->base.resv, idx); + if (unlikely(err)) + return err; + } + if (flags & EXEC_OBJECT_WRITE) { struct intel_frontbuffer *front; @@ -1832,31 +1848,23 @@ int _i915_vma_move_to_active(struct i915_vma *vma, i915_active_add_request(&front->write, rq); intel_frontbuffer_put(front); } + } - if (!(flags & __EXEC_OBJECT_NO_RESERVE)) { - err = dma_resv_reserve_fences(vma->obj->base.resv, 1); - if (unlikely(err)) - return err; - } + if (fence) { + struct dma_fence *curr; + enum dma_resv_usage usage; + int idx; - if (fence) { - dma_resv_add_fence(vma->obj->base.resv, fence, - DMA_RESV_USAGE_WRITE); + obj->read_domains = 0; + if (flags & EXEC_OBJECT_WRITE) { + usage = DMA_RESV_USAGE_WRITE; obj->write_domain = I915_GEM_DOMAIN_RENDER; - obj->read_domains = 0; - } - } else { - if (!(flags & __EXEC_OBJECT_NO_RESERVE)) { - err = dma_resv_reserve_fences(vma->obj->base.resv, 1); - if (unlikely(err)) - return err; + } else { + usage = DMA_RESV_USAGE_READ; } - if (fence) { - dma_resv_add_fence(vma->obj->base.resv, fence, - DMA_RESV_USAGE_READ); - obj->write_domain = 0; - } + dma_fence_array_for_each(curr, idx, fence) + dma_resv_add_fence(vma->obj->base.resv, curr, usage); } if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence) diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index 9c8829f945b2..f7863d6dea80 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c @@ -69,7 +69,7 @@ static void ipu_crtc_disable_planes(struct ipu_crtc *ipu_crtc, drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) { if (plane == &ipu_crtc->plane[0]->base) disable_full = true; - if (&ipu_crtc->plane[1] && plane == &ipu_crtc->plane[1]->base) + if (ipu_crtc->plane[1] && plane == &ipu_crtc->plane[1]->base) 
disable_partial = true; } diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 4e665c806a14..efe9840e28fa 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -498,10 +498,15 @@ int adreno_hw_init(struct msm_gpu *gpu) ring->cur = ring->start; ring->next = ring->start; - - /* reset completed fence seqno: */ - ring->memptrs->fence = ring->fctx->completed_fence; ring->memptrs->rptr = 0; + + /* Detect and clean up an impossible fence, ie. if GPU managed + * to scribble something invalid, we don't want that to confuse + * us into mistakingly believing that submits have completed. + */ + if (fence_before(ring->fctx->last_fence, ring->memptrs->fence)) { + ring->memptrs->fence = ring->fctx->last_fence; + } } return 0; @@ -1057,7 +1062,8 @@ void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu) for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++) release_firmware(adreno_gpu->fw[i]); - pm_runtime_disable(&priv->gpu_pdev->dev); + if (pm_runtime_enabled(&priv->gpu_pdev->dev)) + pm_runtime_disable(&priv->gpu_pdev->dev); msm_gpu_cleanup(&adreno_gpu->base); } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c index 399115e4e217..2fd787079f9b 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c @@ -11,7 +11,14 @@ static int dpu_wb_conn_get_modes(struct drm_connector *connector) struct msm_drm_private *priv = dev->dev_private; struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms); - return drm_add_modes_noedid(connector, dpu_kms->catalog->caps->max_linewidth, + /* + * We should ideally be limiting the modes only to the maxlinewidth but + * on some chipsets this will allow even 4k modes to be added which will + * fail the per SSPP bandwidth checks. So, till we have dual-SSPP support + * and source split support added lets limit the modes based on max_mixer_width + * as 4K modes can then be supported. 
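Earlier in this run of hunks, adreno_hw_init() now clamps ring->memptrs->fence back to fctx->last_fence when the GPU has scribbled an "impossible" value, and msm_fence_completed() (in the msm_fence.c hunk further below) compares seqnos via a signed 32-bit difference so ordering survives wraparound. A standalone model of that comparison and clamp; fence_before() here is a local reimplementation assumed to match msm's helper.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wrap-safe sequence-number comparison: the signed difference keeps
 * ordering correct across 32-bit wraparound. */
static bool fence_before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

int main(void)
{
	uint32_t last_fence = 100;     /* last seqno actually submitted */
	uint32_t memptrs_fence = 250;  /* "impossible": GPU scribble */

	/* Mirror of the adreno_hw_init() clamp: never believe a
	 * completed-fence value beyond what was ever handed out. */
	if (fence_before(last_fence, memptrs_fence))
		memptrs_fence = last_fence;

	printf("%u\n", memptrs_fence); /* 100 */

	/* Wraparound still compares correctly: 0xfffffff0 is "before"
	 * 0x10 even though it is numerically larger. */
	printf("%d\n", fence_before(0xfffffff0u, 0x10u)); /* 1 */
	return 0;
}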
+ */ + return drm_add_modes_noedid(connector, dpu_kms->catalog->caps->max_mixer_width, dev->mode_config.max_height); } diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c index fb48c8c19ec3..17cb1fc78379 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c @@ -216,6 +216,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms, encoder = mdp4_lcdc_encoder_init(dev, panel_node); if (IS_ERR(encoder)) { DRM_DEV_ERROR(dev->dev, "failed to construct LCDC encoder\n"); + of_node_put(panel_node); return PTR_ERR(encoder); } @@ -225,6 +226,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms, connector = mdp4_lvds_connector_init(dev, panel_node, encoder); if (IS_ERR(connector)) { DRM_DEV_ERROR(dev->dev, "failed to initialize LVDS connector\n"); + of_node_put(panel_node); return PTR_ERR(connector); } diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c index b7f5b8d3bbd6..703249384e7c 100644 --- a/drivers/gpu/drm/msm/dp/dp_ctrl.c +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c @@ -1534,6 +1534,8 @@ end: return ret; } +static int dp_ctrl_on_stream_phy_test_report(struct dp_ctrl *dp_ctrl); + static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl) { int ret = 0; @@ -1557,7 +1559,7 @@ static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl) ret = dp_ctrl_on_link(&ctrl->dp_ctrl); if (!ret) - ret = dp_ctrl_on_stream(&ctrl->dp_ctrl); + ret = dp_ctrl_on_stream_phy_test_report(&ctrl->dp_ctrl); else DRM_ERROR("failed to enable DP link controller\n"); @@ -1813,7 +1815,27 @@ static int dp_ctrl_link_retrain(struct dp_ctrl_private *ctrl) return dp_ctrl_setup_main_link(ctrl, &training_step); } -int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl) +static int dp_ctrl_on_stream_phy_test_report(struct dp_ctrl *dp_ctrl) +{ + int ret; + struct dp_ctrl_private *ctrl; + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock; + + ret = dp_ctrl_enable_stream_clocks(ctrl); + if (ret) { + DRM_ERROR("Failed to start pixel clocks. 
ret=%d\n", ret); + return ret; + } + + dp_ctrl_send_phy_test_pattern(ctrl); + + return 0; +} + +int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train) { int ret = 0; bool mainlink_ready = false; @@ -1849,12 +1871,7 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl) goto end; } - if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) { - dp_ctrl_send_phy_test_pattern(ctrl); - return 0; - } - - if (!dp_ctrl_channel_eq_ok(ctrl)) + if (force_link_train || !dp_ctrl_channel_eq_ok(ctrl)) dp_ctrl_link_retrain(ctrl); /* stop txing train pattern to end link training */ diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h index 0745fde01b45..b563e2e3bfe5 100644 --- a/drivers/gpu/drm/msm/dp/dp_ctrl.h +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h @@ -21,7 +21,7 @@ struct dp_ctrl { }; int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl); -int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl); +int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train); int dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl); int dp_ctrl_off_link(struct dp_ctrl *dp_ctrl); int dp_ctrl_off(struct dp_ctrl *dp_ctrl); diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index bce77935394f..ec26855079c1 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -309,7 +309,8 @@ static void dp_display_unbind(struct device *dev, struct device *master, struct msm_drm_private *priv = dev_get_drvdata(master); /* disable all HPD interrupts */ - dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false); + if (dp->core_initialized) + dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false); kthread_stop(dp->ev_tsk); @@ -872,7 +873,7 @@ static int dp_display_enable(struct dp_display_private *dp, u32 data) return 0; } - rc = dp_ctrl_on_stream(dp->ctrl); + rc = dp_ctrl_on_stream(dp->ctrl, data); if (!rc) dp_display->power_on = true; @@ -1659,6 +1660,7 @@ void dp_bridge_enable(struct drm_bridge *drm_bridge) int rc = 0; struct dp_display_private *dp_display; u32 state; + bool force_link_train = false; dp_display = container_of(dp, struct dp_display_private, dp_display); if (!dp_display->dp_mode.drm_mode.clock) { @@ -1693,10 +1695,12 @@ void dp_bridge_enable(struct drm_bridge *drm_bridge) state = dp_display->hpd_state; - if (state == ST_DISPLAY_OFF) + if (state == ST_DISPLAY_OFF) { dp_display_host_phy_init(dp_display); + force_link_train = true; + } - dp_display_enable(dp_display, 0); + dp_display_enable(dp_display, force_link_train); rc = dp_display_post_enable(dp); if (rc) { @@ -1705,10 +1709,6 @@ void dp_bridge_enable(struct drm_bridge *drm_bridge) dp_display_unprepare(dp); } - /* manual kick off plug event to train link */ - if (state == ST_DISPLAY_OFF) - dp_add_event(dp_display, EV_IRQ_HPD_INT, 0, 0); - /* completed connection */ dp_display->hpd_state = ST_CONNECTED; diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 44485363f37a..14ab9a627d8b 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -964,7 +964,7 @@ static const struct drm_driver msm_driver = { .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import_sg_table = msm_gem_prime_import_sg_table, - .gem_prime_mmap = drm_gem_prime_mmap, + .gem_prime_mmap = msm_gem_prime_mmap, #ifdef CONFIG_DEBUG_FS .debugfs_init = msm_debugfs_init, #endif diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 
08388d742d65..099a67d10c3a 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -246,6 +246,7 @@ unsigned long msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_t void msm_gem_shrinker_init(struct drm_device *dev); void msm_gem_shrinker_cleanup(struct drm_device *dev); +int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj); int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map); void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map); diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c index 3df255402a33..38e3323bc232 100644 --- a/drivers/gpu/drm/msm/msm_fence.c +++ b/drivers/gpu/drm/msm/msm_fence.c @@ -46,12 +46,14 @@ bool msm_fence_completed(struct msm_fence_context *fctx, uint32_t fence) (int32_t)(*fctx->fenceptr - fence) >= 0; } -/* called from workqueue */ +/* called from irq handler and workqueue (in recover path) */ void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence) { - spin_lock(&fctx->spinlock); + unsigned long flags; + + spin_lock_irqsave(&fctx->spinlock, flags); fctx->completed_fence = max(fence, fctx->completed_fence); - spin_unlock(&fctx->spinlock); + spin_unlock_irqrestore(&fctx->spinlock, flags); } struct msm_fence { diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 97d5b4d8b9b0..7f92231785a0 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -439,14 +439,12 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma) return ret; } -void msm_gem_unpin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma) +void msm_gem_unpin_locked(struct drm_gem_object *obj) { struct msm_gem_object *msm_obj = to_msm_bo(obj); GEM_WARN_ON(!msm_gem_is_locked(obj)); - msm_gem_unpin_vma(vma); - msm_obj->pin_count--; GEM_WARN_ON(msm_obj->pin_count < 0); @@ -586,7 +584,8 @@ void msm_gem_unpin_iova(struct drm_gem_object *obj, msm_gem_lock(obj); vma = lookup_vma(obj, aspace); if (!GEM_WARN_ON(!vma)) { - msm_gem_unpin_vma_locked(obj, vma); + msm_gem_unpin_vma(vma); + msm_gem_unpin_locked(obj); } msm_gem_unlock(obj); } diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index c75d3b879a53..6b7d5bb3b575 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -145,7 +145,7 @@ struct msm_gem_object { uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj); int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma); -void msm_gem_unpin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma); +void msm_gem_unpin_locked(struct drm_gem_object *obj); struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj, struct msm_gem_address_space *aspace); int msm_gem_get_iova(struct drm_gem_object *obj, @@ -377,10 +377,11 @@ struct msm_gem_submit { } *cmd; /* array of size nr_cmds */ struct { /* make sure these don't conflict w/ MSM_SUBMIT_BO_x */ -#define BO_VALID 0x8000 /* is current addr in cmdstream correct/valid? */ -#define BO_LOCKED 0x4000 /* obj lock is held */ -#define BO_ACTIVE 0x2000 /* active refcnt is held */ -#define BO_PINNED 0x1000 /* obj is pinned and on active list */ +#define BO_VALID 0x8000 /* is current addr in cmdstream correct/valid? 
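The BO_PINNED flag being split here into BO_OBJ_PINNED and BO_VMA_PINNED lets the retire path drop only the page pin once the scheduler has already fence-unpinned the VMA, as the submit_cleanup_bo() hunk below shows. A simplified model of that bookkeeping, with printf() standing in for the real msm_gem_unpin_vma()/msm_gem_unpin_locked() helpers:

#include <stdint.h>
#include <stdio.h>

#define BO_OBJ_PINNED 0x1000u  /* pages pinned and on active list */
#define BO_VMA_PINNED 0x0800u  /* virtual address pinned */

/* Drop whichever requested pins are still held, clearing the flags
 * so the cleanup is idempotent, as in submit_cleanup_bo(). */
static void cleanup_bo(uint32_t *flags, uint32_t cleanup_flags)
{
	uint32_t held = *flags & cleanup_flags;

	*flags &= ~cleanup_flags;

	if (held & BO_VMA_PINNED)
		printf("unpin vma\n");   /* msm_gem_unpin_vma() */
	if (held & BO_OBJ_PINNED)
		printf("unpin pages\n"); /* msm_gem_unpin_locked() */
}

int main(void)
{
	uint32_t flags = BO_OBJ_PINNED | BO_VMA_PINNED;

	/* Retire path: the VMA pin was already dropped by the
	 * scheduler, so only "unpin pages" is printed here. */
	flags &= ~BO_VMA_PINNED;
	cleanup_bo(&flags, BO_OBJ_PINNED | BO_VMA_PINNED);
	return 0;
}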
*/ +#define BO_LOCKED 0x4000 /* obj lock is held */ +#define BO_ACTIVE 0x2000 /* active refcnt is held */ +#define BO_OBJ_PINNED 0x1000 /* obj (pages) is pinned and on active list */ +#define BO_VMA_PINNED 0x0800 /* vma (virtual address) is pinned */ uint32_t flags; union { struct msm_gem_object *obj; diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c index 94ab705e9b8a..dcc8a573bc76 100644 --- a/drivers/gpu/drm/msm/msm_gem_prime.c +++ b/drivers/gpu/drm/msm/msm_gem_prime.c @@ -11,6 +11,21 @@ #include "msm_drv.h" #include "msm_gem.h" +int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) +{ + int ret; + + /* Ensure the mmap offset is initialized. We lazily initialize it, + * so if it has not been first mmap'd directly as a GEM object, the + * mmap offset will not be already initialized. + */ + ret = drm_gem_create_mmap_offset(obj); + if (ret) + return ret; + + return drm_gem_prime_mmap(obj, vma); +} + struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj) { struct msm_gem_object *msm_obj = to_msm_bo(obj); diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 80975229b4de..3c3a0cfade36 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -232,8 +232,11 @@ static void submit_cleanup_bo(struct msm_gem_submit *submit, int i, */ submit->bos[i].flags &= ~cleanup_flags; - if (flags & BO_PINNED) - msm_gem_unpin_vma_locked(obj, submit->bos[i].vma); + if (flags & BO_VMA_PINNED) + msm_gem_unpin_vma(submit->bos[i].vma); + + if (flags & BO_OBJ_PINNED) + msm_gem_unpin_locked(obj); if (flags & BO_ACTIVE) msm_gem_active_put(obj); @@ -244,7 +247,9 @@ static void submit_cleanup_bo(struct msm_gem_submit *submit, int i, static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i) { - submit_cleanup_bo(submit, i, BO_PINNED | BO_ACTIVE | BO_LOCKED); + unsigned cleanup_flags = BO_VMA_PINNED | BO_OBJ_PINNED | + BO_ACTIVE | BO_LOCKED; + submit_cleanup_bo(submit, i, cleanup_flags); if (!(submit->bos[i].flags & BO_VALID)) submit->bos[i].iova = 0; @@ -375,7 +380,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit) if (ret) break; - submit->bos[i].flags |= BO_PINNED; + submit->bos[i].flags |= BO_OBJ_PINNED | BO_VMA_PINNED; submit->bos[i].vma = vma; if (vma->iova == submit->bos[i].iova) { @@ -511,7 +516,7 @@ static void submit_cleanup(struct msm_gem_submit *submit, bool error) unsigned i; if (error) - cleanup_flags |= BO_PINNED | BO_ACTIVE; + cleanup_flags |= BO_VMA_PINNED | BO_OBJ_PINNED | BO_ACTIVE; for (i = 0; i < submit->nr_bos; i++) { struct msm_gem_object *msm_obj = submit->bos[i].obj; @@ -529,7 +534,8 @@ void msm_submit_retire(struct msm_gem_submit *submit) struct drm_gem_object *obj = &submit->bos[i].obj->base; msm_gem_lock(obj); - submit_cleanup_bo(submit, i, BO_PINNED | BO_ACTIVE); + /* Note, VMA already fence-unpinned before submit: */ + submit_cleanup_bo(submit, i, BO_OBJ_PINNED | BO_ACTIVE); msm_gem_unlock(obj); drm_gem_object_put(obj); } diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c index 3c1dc9241831..c471aebcdbab 100644 --- a/drivers/gpu/drm/msm/msm_gem_vma.c +++ b/drivers/gpu/drm/msm/msm_gem_vma.c @@ -62,8 +62,7 @@ void msm_gem_purge_vma(struct msm_gem_address_space *aspace, unsigned size = vma->node.size; /* Print a message if we try to purge a vma in use */ - if (GEM_WARN_ON(msm_gem_vma_inuse(vma))) - return; + GEM_WARN_ON(msm_gem_vma_inuse(vma)); /* Don't do anything if the memory 
isn't mapped */ if (!vma->mapped) @@ -128,8 +127,7 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace, void msm_gem_close_vma(struct msm_gem_address_space *aspace, struct msm_gem_vma *vma) { - if (GEM_WARN_ON(msm_gem_vma_inuse(vma) || vma->mapped)) - return; + GEM_WARN_ON(msm_gem_vma_inuse(vma) || vma->mapped); spin_lock(&aspace->lock); if (vma->iova) diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index eb8a6663f309..c8cd9bfa3eeb 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -164,24 +164,6 @@ int msm_gpu_hw_init(struct msm_gpu *gpu) return ret; } -static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring, - uint32_t fence) -{ - struct msm_gem_submit *submit; - unsigned long flags; - - spin_lock_irqsave(&ring->submit_lock, flags); - list_for_each_entry(submit, &ring->submits, node) { - if (fence_after(submit->seqno, fence)) - break; - - msm_update_fence(submit->ring->fctx, - submit->hw_fence->seqno); - dma_fence_signal(submit->hw_fence); - } - spin_unlock_irqrestore(&ring->submit_lock, flags); -} - #ifdef CONFIG_DEV_COREDUMP static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset, size_t count, void *data, size_t datalen) @@ -436,9 +418,9 @@ static void recover_worker(struct kthread_work *work) * one more to clear the faulting submit */ if (ring == cur_ring) - fence++; + ring->memptrs->fence = ++fence; - update_fences(gpu, ring, fence); + msm_update_fence(ring->fctx, fence); } if (msm_gpu_active(gpu)) { @@ -672,7 +654,6 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring, msm_submit_retire(submit); pm_runtime_mark_last_busy(&gpu->pdev->dev); - pm_runtime_put_autosuspend(&gpu->pdev->dev); spin_lock_irqsave(&ring->submit_lock, flags); list_del(&submit->node); @@ -686,6 +667,8 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring, msm_devfreq_idle(gpu); mutex_unlock(&gpu->active_lock); + pm_runtime_put_autosuspend(&gpu->pdev->dev); + msm_gem_submit_put(submit); } @@ -735,7 +718,7 @@ void msm_gpu_retire(struct msm_gpu *gpu) int i; for (i = 0; i < gpu->nr_rings; i++) - update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence); + msm_update_fence(gpu->rb[i]->fctx, gpu->rb[i]->memptrs->fence); kthread_queue_work(gpu->worker, &gpu->retire_work); update_sw_cntrs(gpu); diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c index bcaddbba564d..a54ed354578b 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c @@ -58,7 +58,7 @@ static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova, u64 addr = iova; unsigned int i; - for_each_sg(sgt->sgl, sg, sgt->nents, i) { + for_each_sgtable_sg(sgt, sg, i) { size_t size = sg->length; phys_addr_t phys = sg_phys(sg); diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c index 43066320ff8c..56eecb4a72dc 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.c +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c @@ -25,7 +25,7 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job) msm_gem_lock(obj); msm_gem_unpin_vma_fenced(submit->bos[i].vma, fctx); - submit->bos[i].flags &= ~BO_PINNED; + submit->bos[i].flags &= ~BO_VMA_PINNED; msm_gem_unlock(obj); } diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index 7fcbc2a5b6cd..087e69b98d06 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -233,6 +233,7 @@ static int panfrost_ioctl_submit(struct 
drm_device *dev, void *data, struct drm_file *file) { struct panfrost_device *pfdev = dev->dev_private; + struct panfrost_file_priv *file_priv = file->driver_priv; struct drm_panfrost_submit *args = data; struct drm_syncobj *sync_out = NULL; struct panfrost_job *job; @@ -262,12 +263,12 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data, job->jc = args->jc; job->requirements = args->requirements; job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev); - job->file_priv = file->driver_priv; + job->mmu = file_priv->mmu; slot = panfrost_job_get_slot(job); ret = drm_sched_job_init(&job->base, - &job->file_priv->sched_entity[slot], + &file_priv->sched_entity[slot], NULL); if (ret) goto out_put_job; diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c index fda5871aebe3..7c4208476fbd 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.c +++ b/drivers/gpu/drm/panfrost/panfrost_job.c @@ -201,7 +201,7 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js) return; } - cfg = panfrost_mmu_as_get(pfdev, job->file_priv->mmu); + cfg = panfrost_mmu_as_get(pfdev, job->mmu); job_write(pfdev, JS_HEAD_NEXT_LO(js), lower_32_bits(jc_head)); job_write(pfdev, JS_HEAD_NEXT_HI(js), upper_32_bits(jc_head)); @@ -435,7 +435,7 @@ static void panfrost_job_handle_err(struct panfrost_device *pfdev, job->jc = 0; } - panfrost_mmu_as_put(pfdev, job->file_priv->mmu); + panfrost_mmu_as_put(pfdev, job->mmu); panfrost_devfreq_record_idle(&pfdev->pfdevfreq); if (signal_fence) @@ -456,7 +456,7 @@ static void panfrost_job_handle_done(struct panfrost_device *pfdev, * happen when we receive the DONE interrupt while doing a GPU reset). */ job->jc = 0; - panfrost_mmu_as_put(pfdev, job->file_priv->mmu); + panfrost_mmu_as_put(pfdev, job->mmu); panfrost_devfreq_record_idle(&pfdev->pfdevfreq); dma_fence_signal_locked(job->done_fence); diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h index 77e6d0e6f612..8becc1ba0eb9 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.h +++ b/drivers/gpu/drm/panfrost/panfrost_job.h @@ -17,7 +17,7 @@ struct panfrost_job { struct kref refcount; struct panfrost_device *pfdev; - struct panfrost_file_priv *file_priv; + struct panfrost_mmu *mmu; /* Fence to be signaled by IRQ handler when the job is complete. */ struct dma_fence *done_fence; diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index 275f7e4a03ae..6eb1aabdb161 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c @@ -7,6 +7,7 @@ */ #include <linux/component.h> +#include <linux/dma-mapping.h> #include <linux/kfifo.h> #include <linux/module.h> #include <linux/of_graph.h> @@ -73,7 +74,6 @@ static int sun4i_drv_bind(struct device *dev) goto free_drm; } - dev_set_drvdata(dev, drm); drm->dev_private = drv; INIT_LIST_HEAD(&drv->frontend_list); INIT_LIST_HEAD(&drv->engine_list); @@ -114,6 +114,8 @@ static int sun4i_drv_bind(struct device *dev) drm_fbdev_generic_setup(drm, 32); + dev_set_drvdata(dev, drm); + return 0; finish_poll: @@ -130,6 +132,7 @@ static void sun4i_drv_unbind(struct device *dev) { struct drm_device *drm = dev_get_drvdata(dev); + dev_set_drvdata(dev, NULL); drm_dev_unregister(drm); drm_kms_helper_poll_fini(drm); drm_atomic_helper_shutdown(drm); @@ -367,6 +370,13 @@ static int sun4i_drv_probe(struct platform_device *pdev) INIT_KFIFO(list.fifo); + /* + * DE2 and DE3 cores actually supports 40-bit addresses, but + * driver does not. 
+ */ + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + dma_set_max_seg_size(&pdev->dev, UINT_MAX); + for (i = 0;; i++) { struct device_node *pipeline = of_parse_phandle(np, "allwinner,pipelines", diff --git a/drivers/gpu/drm/sun4i/sun4i_layer.c b/drivers/gpu/drm/sun4i/sun4i_layer.c index 6d43080791a0..85fb9e800ddf 100644 --- a/drivers/gpu/drm/sun4i/sun4i_layer.c +++ b/drivers/gpu/drm/sun4i/sun4i_layer.c @@ -117,7 +117,7 @@ static bool sun4i_layer_format_mod_supported(struct drm_plane *plane, struct sun4i_layer *layer = plane_to_sun4i_layer(plane); if (IS_ERR_OR_NULL(layer->backend->frontend)) - sun4i_backend_format_is_supported(format, modifier); + return sun4i_backend_format_is_supported(format, modifier); return sun4i_backend_format_is_supported(format, modifier) || sun4i_frontend_format_is_supported(format, modifier); diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c index a8d75fd7e9f4..477cb6985b4d 100644 --- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c +++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c @@ -93,34 +93,10 @@ crtcs_exit: return crtcs; } -static int sun8i_dw_hdmi_find_connector_pdev(struct device *dev, - struct platform_device **pdev_out) -{ - struct platform_device *pdev; - struct device_node *remote; - - remote = of_graph_get_remote_node(dev->of_node, 1, -1); - if (!remote) - return -ENODEV; - - if (!of_device_is_compatible(remote, "hdmi-connector")) { - of_node_put(remote); - return -ENODEV; - } - - pdev = of_find_device_by_node(remote); - of_node_put(remote); - if (!pdev) - return -ENODEV; - - *pdev_out = pdev; - return 0; -} - static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master, void *data) { - struct platform_device *pdev = to_platform_device(dev), *connector_pdev; + struct platform_device *pdev = to_platform_device(dev); struct dw_hdmi_plat_data *plat_data; struct drm_device *drm = data; struct device_node *phy_node; @@ -167,30 +143,16 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master, return dev_err_probe(dev, PTR_ERR(hdmi->regulator), "Couldn't get regulator\n"); - ret = sun8i_dw_hdmi_find_connector_pdev(dev, &connector_pdev); - if (!ret) { - hdmi->ddc_en = gpiod_get_optional(&connector_pdev->dev, - "ddc-en", GPIOD_OUT_HIGH); - platform_device_put(connector_pdev); - - if (IS_ERR(hdmi->ddc_en)) { - dev_err(dev, "Couldn't get ddc-en gpio\n"); - return PTR_ERR(hdmi->ddc_en); - } - } - ret = regulator_enable(hdmi->regulator); if (ret) { dev_err(dev, "Failed to enable regulator\n"); - goto err_unref_ddc_en; + return ret; } - gpiod_set_value(hdmi->ddc_en, 1); - ret = reset_control_deassert(hdmi->rst_ctrl); if (ret) { dev_err(dev, "Could not deassert ctrl reset control\n"); - goto err_disable_ddc_en; + goto err_disable_regulator; } ret = clk_prepare_enable(hdmi->clk_tmds); @@ -245,12 +207,8 @@ err_disable_clk_tmds: clk_disable_unprepare(hdmi->clk_tmds); err_assert_ctrl_reset: reset_control_assert(hdmi->rst_ctrl); -err_disable_ddc_en: - gpiod_set_value(hdmi->ddc_en, 0); +err_disable_regulator: regulator_disable(hdmi->regulator); -err_unref_ddc_en: - if (hdmi->ddc_en) - gpiod_put(hdmi->ddc_en); return ret; } @@ -264,11 +222,7 @@ static void sun8i_dw_hdmi_unbind(struct device *dev, struct device *master, sun8i_hdmi_phy_deinit(hdmi->phy); clk_disable_unprepare(hdmi->clk_tmds); reset_control_assert(hdmi->rst_ctrl); - gpiod_set_value(hdmi->ddc_en, 0); regulator_disable(hdmi->regulator); - - if (hdmi->ddc_en) - gpiod_put(hdmi->ddc_en); } static const struct component_ops sun8i_dw_hdmi_ops = { 
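The sun4i_drv probe hunk above pairs its new comment with an explicit DMA configuration: the display engines could address 40 bits, but the driver only ever programs 32-bit addresses, so the mask is narrowed to match and the segment-size limit is lifted so large contiguous buffers need not be split. Below is a minimal, hypothetical probe fragment showing the same pattern (function name invented for illustration; note that dma_set_mask_and_coherent() can fail and is usually worth checking, which the hunk above does not do):

    #include <linux/dma-mapping.h>
    #include <linux/limits.h>
    #include <linux/platform_device.h>

    /* Illustrative sketch only: restrict streaming and coherent DMA to
     * 32-bit addresses because the driver never programs more, then
     * allow arbitrarily large scatter-gather segments.
     */
    static int example_probe(struct platform_device *pdev)
    {
            int ret;

            ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
            if (ret)
                    return ret;

            return dma_set_max_seg_size(&pdev->dev, UINT_MAX);
    }

The same stretch of the diff also carries a one-character sun4i_layer fix worth noting: a computed sun4i_backend_format_is_supported() result was being discarded instead of returned, so the function fell through to the frontend check even when no frontend exists.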
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h index bffe1b9cd3dc..9ad09522947a 100644 --- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h +++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h @@ -9,7 +9,6 @@ #include <drm/bridge/dw_hdmi.h> #include <drm/drm_encoder.h> #include <linux/clk.h> -#include <linux/gpio/consumer.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> #include <linux/reset.h> @@ -193,7 +192,6 @@ struct sun8i_dw_hdmi { struct regulator *regulator; const struct sun8i_dw_hdmi_quirks *quirks; struct reset_control *rst_ctrl; - struct gpio_desc *ddc_en; }; extern struct platform_driver sun8i_hdmi_phy_driver; diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 75d308ec173d..406e9c324e76 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -109,11 +109,11 @@ void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo, return; spin_lock(&bo->bdev->lru_lock); - if (bo->bulk_move && bo->resource) - ttm_lru_bulk_move_del(bo->bulk_move, bo->resource); + if (bo->resource) + ttm_resource_del_bulk_move(bo->resource, bo); bo->bulk_move = bulk; - if (bo->bulk_move && bo->resource) - ttm_lru_bulk_move_add(bo->bulk_move, bo->resource); + if (bo->resource) + ttm_resource_add_bulk_move(bo->resource, bo); spin_unlock(&bo->bdev->lru_lock); } EXPORT_SYMBOL(ttm_bo_set_bulk_move); @@ -689,8 +689,11 @@ void ttm_bo_pin(struct ttm_buffer_object *bo) { dma_resv_assert_held(bo->base.resv); WARN_ON_ONCE(!kref_read(&bo->kref)); - if (!(bo->pin_count++) && bo->bulk_move && bo->resource) - ttm_lru_bulk_move_del(bo->bulk_move, bo->resource); + spin_lock(&bo->bdev->lru_lock); + if (bo->resource) + ttm_resource_del_bulk_move(bo->resource, bo); + ++bo->pin_count; + spin_unlock(&bo->bdev->lru_lock); } EXPORT_SYMBOL(ttm_bo_pin); @@ -707,8 +710,11 @@ void ttm_bo_unpin(struct ttm_buffer_object *bo) if (WARN_ON_ONCE(!bo->pin_count)) return; - if (!(--bo->pin_count) && bo->bulk_move && bo->resource) - ttm_lru_bulk_move_add(bo->bulk_move, bo->resource); + spin_lock(&bo->bdev->lru_lock); + --bo->pin_count; + if (bo->resource) + ttm_resource_add_bulk_move(bo->resource, bo); + spin_unlock(&bo->bdev->lru_lock); } EXPORT_SYMBOL(ttm_bo_unpin); diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c index a0562ab386f5..e7147e304637 100644 --- a/drivers/gpu/drm/ttm/ttm_device.c +++ b/drivers/gpu/drm/ttm/ttm_device.c @@ -156,8 +156,12 @@ int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx, ttm_resource_manager_for_each_res(man, &cursor, res) { struct ttm_buffer_object *bo = res->bo; - uint32_t num_pages = PFN_UP(bo->base.size); + uint32_t num_pages; + if (!bo) + continue; + + num_pages = PFN_UP(bo->base.size); ret = ttm_bo_swapout(bo, ctx, gfp_flags); /* ttm_bo_swapout has dropped the lru_lock */ if (!ret) diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c index 65889b3caf50..20f9adcc3235 100644 --- a/drivers/gpu/drm/ttm/ttm_resource.c +++ b/drivers/gpu/drm/ttm/ttm_resource.c @@ -91,8 +91,8 @@ static void ttm_lru_bulk_move_pos_tail(struct ttm_lru_bulk_move_pos *pos, } /* Add the resource to a bulk_move cursor */ -void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk, - struct ttm_resource *res) +static void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk, + struct ttm_resource *res) { struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res); @@ -105,8 +105,8 @@ void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk, } /* Remove 
the resource from a bulk_move range */ -void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk, - struct ttm_resource *res) +static void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk, + struct ttm_resource *res) { struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res); @@ -122,6 +122,22 @@ void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk, } } +/* Add the resource to a bulk move if the BO is configured for it */ +void ttm_resource_add_bulk_move(struct ttm_resource *res, + struct ttm_buffer_object *bo) +{ + if (bo->bulk_move && !bo->pin_count) + ttm_lru_bulk_move_add(bo->bulk_move, res); +} + +/* Remove the resource from a bulk move if the BO is configured for it */ +void ttm_resource_del_bulk_move(struct ttm_resource *res, + struct ttm_buffer_object *bo) +{ + if (bo->bulk_move && !bo->pin_count) + ttm_lru_bulk_move_del(bo->bulk_move, res); +} + /* Move a resource to the LRU or bulk tail */ void ttm_resource_move_to_lru_tail(struct ttm_resource *res) { @@ -169,15 +185,14 @@ void ttm_resource_init(struct ttm_buffer_object *bo, res->bus.is_iomem = false; res->bus.caching = ttm_cached; res->bo = bo; - INIT_LIST_HEAD(&res->lru); man = ttm_manager_type(bo->bdev, place->mem_type); spin_lock(&bo->bdev->lru_lock); - man->usage += res->num_pages << PAGE_SHIFT; - if (bo->bulk_move) - ttm_lru_bulk_move_add(bo->bulk_move, res); + if (bo->pin_count) + list_add_tail(&res->lru, &bo->bdev->pinned); else - ttm_resource_move_to_lru_tail(res); + list_add_tail(&res->lru, &man->lru[bo->priority]); + man->usage += res->num_pages << PAGE_SHIFT; spin_unlock(&bo->bdev->lru_lock); } EXPORT_SYMBOL(ttm_resource_init); @@ -210,8 +225,16 @@ int ttm_resource_alloc(struct ttm_buffer_object *bo, { struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, place->mem_type); + int ret; + + ret = man->func->alloc(man, bo, place, res_ptr); + if (ret) + return ret; - return man->func->alloc(man, bo, place, res_ptr); + spin_lock(&bo->bdev->lru_lock); + ttm_resource_add_bulk_move(*res_ptr, bo); + spin_unlock(&bo->bdev->lru_lock); + return 0; } void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res) @@ -221,12 +244,9 @@ void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res) if (!*res) return; - if (bo->bulk_move) { - spin_lock(&bo->bdev->lru_lock); - ttm_lru_bulk_move_del(bo->bulk_move, *res); - spin_unlock(&bo->bdev->lru_lock); - } - + spin_lock(&bo->bdev->lru_lock); + ttm_resource_del_bulk_move(*res, bo); + spin_unlock(&bo->bdev->lru_lock); man = ttm_manager_type(bo->bdev, (*res)->mem_type); man->func->free(man, *res); *res = NULL; diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c index 49c0f2ac868b..b8d856312846 100644 --- a/drivers/gpu/drm/vc4/vc4_bo.c +++ b/drivers/gpu/drm/vc4/vc4_bo.c @@ -248,6 +248,9 @@ void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo) { struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev); + if (WARN_ON_ONCE(vc4->is_vc5)) + return; + mutex_lock(&vc4->purgeable.lock); list_add_tail(&bo->size_head, &vc4->purgeable.list); vc4->purgeable.num++; @@ -259,6 +262,9 @@ static void vc4_bo_remove_from_purgeable_pool_locked(struct vc4_bo *bo) { struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev); + if (WARN_ON_ONCE(vc4->is_vc5)) + return; + /* list_del_init() is used here because the caller might release * the purgeable lock in order to acquire the madv one and update the * madv status. 
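The ttm_bo_pin()/ttm_bo_unpin() and ttm_resource hunks above share one theme: the pin count and the resource's LRU or bulk-move membership are now only changed together under bdev->lru_lock, so no observer can see the counter and the list state disagree. A reduced sketch of that invariant follows, with invented names (this is deliberately not the TTM source, which funnels the list side through the ttm_resource_add_bulk_move()/ttm_resource_del_bulk_move() helpers introduced above):

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct example_obj {
            spinlock_t *lru_lock;          /* shared, like bdev->lru_lock */
            unsigned int pin_count;
            struct list_head lru;
    };

    /* Pinning removes the object from normal LRU handling; unpinning
     * restores it. Both the counter update and the list move happen
     * under the same lock, so they are atomic with respect to any
     * reader that also takes lru_lock.
     */
    static void example_pin(struct example_obj *o, struct list_head *pinned)
    {
            spin_lock(o->lru_lock);
            if (o->pin_count++ == 0)
                    list_move_tail(&o->lru, pinned);
            spin_unlock(o->lru_lock);
    }

    static void example_unpin(struct example_obj *o, struct list_head *lru)
    {
            spin_lock(o->lru_lock);
            if (--o->pin_count == 0)
                    list_move_tail(&o->lru, lru);
            spin_unlock(o->lru_lock);
    }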
@@ -387,6 +393,9 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size) struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_bo *bo; + if (WARN_ON_ONCE(vc4->is_vc5)) + return ERR_PTR(-ENODEV); + bo = kzalloc(sizeof(*bo), GFP_KERNEL); if (!bo) return ERR_PTR(-ENOMEM); @@ -413,6 +422,9 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size, struct drm_gem_cma_object *cma_obj; struct vc4_bo *bo; + if (WARN_ON_ONCE(vc4->is_vc5)) + return ERR_PTR(-ENODEV); + if (size == 0) return ERR_PTR(-EINVAL); @@ -471,19 +483,20 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size, return bo; } -int vc4_dumb_create(struct drm_file *file_priv, - struct drm_device *dev, - struct drm_mode_create_dumb *args) +int vc4_bo_dumb_create(struct drm_file *file_priv, + struct drm_device *dev, + struct drm_mode_create_dumb *args) { - int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); + struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_bo *bo = NULL; int ret; - if (args->pitch < min_pitch) - args->pitch = min_pitch; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; - if (args->size < args->pitch * args->height) - args->size = args->pitch * args->height; + ret = vc4_dumb_fixup_args(args); + if (ret) + return ret; bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_DUMB); if (IS_ERR(bo)) @@ -601,8 +614,12 @@ static void vc4_bo_cache_time_work(struct work_struct *work) int vc4_bo_inc_usecnt(struct vc4_bo *bo) { + struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev); int ret; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + /* Fast path: if the BO is already retained by someone, no need to * check the madv status. */ @@ -637,6 +654,11 @@ int vc4_bo_inc_usecnt(struct vc4_bo *bo) void vc4_bo_dec_usecnt(struct vc4_bo *bo) { + struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev); + + if (WARN_ON_ONCE(vc4->is_vc5)) + return; + /* Fast path: if the BO is still retained by someone, no need to test * the madv value. 
*/ @@ -756,6 +778,9 @@ int vc4_create_bo_ioctl(struct drm_device *dev, void *data, struct vc4_bo *bo = NULL; int ret; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + ret = vc4_grab_bin_bo(vc4, vc4file); if (ret) return ret; @@ -779,9 +804,13 @@ int vc4_create_bo_ioctl(struct drm_device *dev, void *data, int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { + struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_vc4_mmap_bo *args = data; struct drm_gem_object *gem_obj; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + gem_obj = drm_gem_object_lookup(file_priv, args->handle); if (!gem_obj) { DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle); @@ -805,6 +834,9 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data, struct vc4_bo *bo = NULL; int ret; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + if (args->size == 0) return -EINVAL; @@ -875,11 +907,15 @@ fail: int vc4_set_tiling_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { + struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_vc4_set_tiling *args = data; struct drm_gem_object *gem_obj; struct vc4_bo *bo; bool t_format; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + if (args->flags != 0) return -EINVAL; @@ -918,10 +954,14 @@ int vc4_set_tiling_ioctl(struct drm_device *dev, void *data, int vc4_get_tiling_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { + struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_vc4_get_tiling *args = data; struct drm_gem_object *gem_obj; struct vc4_bo *bo; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + if (args->flags != 0 || args->modifier != 0) return -EINVAL; @@ -948,6 +988,9 @@ int vc4_bo_cache_init(struct drm_device *dev) struct vc4_dev *vc4 = to_vc4_dev(dev); int i; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + /* Create the initial set of BO labels that the kernel will * use. This lets us avoid a bunch of string reallocation in * the kernel's draw and BO allocation paths. @@ -1007,6 +1050,9 @@ int vc4_label_bo_ioctl(struct drm_device *dev, void *data, struct drm_gem_object *gem_obj; int ret = 0, label; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + if (!args->len) return -EINVAL; diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 59b20c8f132b..9355213dc883 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -256,7 +256,7 @@ static u32 vc4_get_fifo_full_level(struct vc4_crtc *vc4_crtc, u32 format) * Removing 1 from the FIFO full level however * seems to completely remove that issue. */ - if (!vc4->hvs->hvs5) + if (!vc4->is_vc5) return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX - 1; return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX; @@ -389,7 +389,7 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_encoder *encode if (is_dsi) CRTC_WRITE(PV_HACT_ACT, mode->hdisplay * pixel_rep); - if (vc4->hvs->hvs5) + if (vc4->is_vc5) CRTC_WRITE(PV_MUX_CFG, VC4_SET_FIELD(PV_MUX_CFG_RGB_PIXEL_MUX_MODE_NO_SWAP, PV_MUX_CFG_RGB_PIXEL_MUX_MODE)); @@ -775,17 +775,18 @@ struct vc4_async_flip_state { struct drm_framebuffer *old_fb; struct drm_pending_vblank_event *event; - struct vc4_seqno_cb cb; + union { + struct dma_fence_cb fence; + struct vc4_seqno_cb seqno; + } cb; }; /* Called when the V3D execution for the BO being flipped to is done, so that * we can actually update the plane's address to point to it. 
*/ static void -vc4_async_page_flip_complete(struct vc4_seqno_cb *cb) +vc4_async_page_flip_complete(struct vc4_async_flip_state *flip_state) { - struct vc4_async_flip_state *flip_state = - container_of(cb, struct vc4_async_flip_state, cb); struct drm_crtc *crtc = flip_state->crtc; struct drm_device *dev = crtc->dev; struct drm_plane *plane = crtc->primary; @@ -802,59 +803,96 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb) drm_crtc_vblank_put(crtc); drm_framebuffer_put(flip_state->fb); - /* Decrement the BO usecnt in order to keep the inc/dec calls balanced - * when the planes are updated through the async update path. - * FIXME: we should move to generic async-page-flip when it's - * available, so that we can get rid of this hand-made cleanup_fb() - * logic. - */ - if (flip_state->old_fb) { - struct drm_gem_cma_object *cma_bo; - struct vc4_bo *bo; + if (flip_state->old_fb) + drm_framebuffer_put(flip_state->old_fb); + + kfree(flip_state); +} + +static void vc4_async_page_flip_seqno_complete(struct vc4_seqno_cb *cb) +{ + struct vc4_async_flip_state *flip_state = + container_of(cb, struct vc4_async_flip_state, cb.seqno); + struct vc4_bo *bo = NULL; - cma_bo = drm_fb_cma_get_gem_obj(flip_state->old_fb, 0); + if (flip_state->old_fb) { + struct drm_gem_cma_object *cma_bo = + drm_fb_cma_get_gem_obj(flip_state->old_fb, 0); bo = to_vc4_bo(&cma_bo->base); - vc4_bo_dec_usecnt(bo); - drm_framebuffer_put(flip_state->old_fb); } - kfree(flip_state); + vc4_async_page_flip_complete(flip_state); + + /* + * Decrement the BO usecnt in order to keep the inc/dec + * calls balanced when the planes are updated through + * the async update path. + * + * FIXME: we should move to generic async-page-flip when + * it's available, so that we can get rid of this + * hand-made cleanup_fb() logic. + */ + if (bo) + vc4_bo_dec_usecnt(bo); } -/* Implements async (non-vblank-synced) page flips. - * - * The page flip ioctl needs to return immediately, so we grab the - * modeset semaphore on the pipe, and queue the address update for - * when V3D is done with the BO being flipped to. - */ -static int vc4_async_page_flip(struct drm_crtc *crtc, - struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event, - uint32_t flags) +static void vc4_async_page_flip_fence_complete(struct dma_fence *fence, + struct dma_fence_cb *cb) { - struct drm_device *dev = crtc->dev; - struct drm_plane *plane = crtc->primary; - int ret = 0; - struct vc4_async_flip_state *flip_state; + struct vc4_async_flip_state *flip_state = + container_of(cb, struct vc4_async_flip_state, cb.fence); + + vc4_async_page_flip_complete(flip_state); + dma_fence_put(fence); +} + +static int vc4_async_set_fence_cb(struct drm_device *dev, + struct vc4_async_flip_state *flip_state) +{ + struct drm_framebuffer *fb = flip_state->fb; struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(fb, 0); - struct vc4_bo *bo = to_vc4_bo(&cma_bo->base); + struct vc4_dev *vc4 = to_vc4_dev(dev); + struct dma_fence *fence; + int ret; - /* Increment the BO usecnt here, so that we never end up with an - * unbalanced number of vc4_bo_{dec,inc}_usecnt() calls when the - * plane is later updated through the non-async path. - * FIXME: we should move to generic async-page-flip when it's - * available, so that we can get rid of this hand-made prepare_fb() - * logic. 
- */ - ret = vc4_bo_inc_usecnt(bo); + if (!vc4->is_vc5) { + struct vc4_bo *bo = to_vc4_bo(&cma_bo->base); + + return vc4_queue_seqno_cb(dev, &flip_state->cb.seqno, bo->seqno, + vc4_async_page_flip_seqno_complete); + } + + ret = dma_resv_get_singleton(cma_bo->base.resv, DMA_RESV_USAGE_READ, &fence); if (ret) return ret; + /* If there's no fence, complete the page flip immediately */ + if (!fence) { + vc4_async_page_flip_fence_complete(fence, &flip_state->cb.fence); + return 0; + } + + /* If the fence has already been completed, complete the page flip */ + if (dma_fence_add_callback(fence, &flip_state->cb.fence, + vc4_async_page_flip_fence_complete)) + vc4_async_page_flip_fence_complete(fence, &flip_state->cb.fence); + + return 0; +} + +static int +vc4_async_page_flip_common(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t flags) +{ + struct drm_device *dev = crtc->dev; + struct drm_plane *plane = crtc->primary; + struct vc4_async_flip_state *flip_state; + flip_state = kzalloc(sizeof(*flip_state), GFP_KERNEL); - if (!flip_state) { - vc4_bo_dec_usecnt(bo); + if (!flip_state) return -ENOMEM; - } drm_framebuffer_get(fb); flip_state->fb = fb; @@ -881,23 +919,79 @@ static int vc4_async_page_flip(struct drm_crtc *crtc, */ drm_atomic_set_fb_for_plane(plane->state, fb); - vc4_queue_seqno_cb(dev, &flip_state->cb, bo->seqno, - vc4_async_page_flip_complete); + vc4_async_set_fence_cb(dev, flip_state); /* Driver takes ownership of state on successful async commit. */ return 0; } +/* Implements async (non-vblank-synced) page flips. + * + * The page flip ioctl needs to return immediately, so we grab the + * modeset semaphore on the pipe, and queue the address update for + * when V3D is done with the BO being flipped to. + */ +static int vc4_async_page_flip(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t flags) +{ + struct drm_device *dev = crtc->dev; + struct vc4_dev *vc4 = to_vc4_dev(dev); + struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(fb, 0); + struct vc4_bo *bo = to_vc4_bo(&cma_bo->base); + int ret; + + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + + /* + * Increment the BO usecnt here, so that we never end up with an + * unbalanced number of vc4_bo_{dec,inc}_usecnt() calls when the + * plane is later updated through the non-async path. + * + * FIXME: we should move to generic async-page-flip when + * it's available, so that we can get rid of this + * hand-made prepare_fb() logic. 
+ */ + ret = vc4_bo_inc_usecnt(bo); + if (ret) + return ret; + + ret = vc4_async_page_flip_common(crtc, fb, event, flags); + if (ret) { + vc4_bo_dec_usecnt(bo); + return ret; + } + + return 0; +} + +static int vc5_async_page_flip(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t flags) +{ + return vc4_async_page_flip_common(crtc, fb, event, flags); +} + int vc4_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_pending_vblank_event *event, uint32_t flags, struct drm_modeset_acquire_ctx *ctx) { - if (flags & DRM_MODE_PAGE_FLIP_ASYNC) - return vc4_async_page_flip(crtc, fb, event, flags); - else + if (flags & DRM_MODE_PAGE_FLIP_ASYNC) { + struct drm_device *dev = crtc->dev; + struct vc4_dev *vc4 = to_vc4_dev(dev); + + if (vc4->is_vc5) + return vc5_async_page_flip(crtc, fb, event, flags); + else + return vc4_async_page_flip(crtc, fb, event, flags); + } else { return drm_atomic_helper_page_flip(crtc, fb, event, flags, ctx); + } } struct drm_crtc_state *vc4_crtc_duplicate_state(struct drm_crtc *crtc) @@ -1149,7 +1243,7 @@ int vc4_crtc_init(struct drm_device *drm, struct vc4_crtc *vc4_crtc, crtc_funcs, NULL); drm_crtc_helper_add(crtc, crtc_helper_funcs); - if (!vc4->hvs->hvs5) { + if (!vc4->is_vc5) { drm_mode_crtc_set_gamma_size(crtc, ARRAY_SIZE(vc4_crtc->lut_r)); drm_crtc_enable_color_mgmt(crtc, 0, false, crtc->gamma_size); diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index 162bc18e7497..0f0f0263e744 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -63,6 +63,32 @@ void __iomem *vc4_ioremap_regs(struct platform_device *pdev, int index) return map; } +int vc4_dumb_fixup_args(struct drm_mode_create_dumb *args) +{ + int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); + + if (args->pitch < min_pitch) + args->pitch = min_pitch; + + if (args->size < args->pitch * args->height) + args->size = args->pitch * args->height; + + return 0; +} + +static int vc5_dumb_create(struct drm_file *file_priv, + struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + int ret; + + ret = vc4_dumb_fixup_args(args); + if (ret) + return ret; + + return drm_gem_cma_dumb_create_internal(file_priv, dev, args); +} + static int vc4_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { @@ -73,6 +99,9 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data, if (args->pad != 0) return -EINVAL; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + if (!vc4->v3d) return -ENODEV; @@ -116,11 +145,16 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data, static int vc4_open(struct drm_device *dev, struct drm_file *file) { + struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_file *vc4file; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + vc4file = kzalloc(sizeof(*vc4file), GFP_KERNEL); if (!vc4file) return -ENOMEM; + vc4file->dev = vc4; vc4_perfmon_open_file(vc4file); file->driver_priv = vc4file; @@ -132,6 +166,9 @@ static void vc4_close(struct drm_device *dev, struct drm_file *file) struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_file *vc4file = file->driver_priv; + if (WARN_ON_ONCE(vc4->is_vc5)) + return; + if (vc4file->bin_bo_used) vc4_v3d_bin_bo_put(vc4); @@ -160,7 +197,7 @@ static const struct drm_ioctl_desc vc4_drm_ioctls[] = { DRM_IOCTL_DEF_DRV(VC4_PERFMON_GET_VALUES, vc4_perfmon_get_values_ioctl, DRM_RENDER_ALLOW), }; -static struct drm_driver vc4_drm_driver = { +static const struct drm_driver vc4_drm_driver = { 
.driver_features = (DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_GEM | @@ -175,7 +212,7 @@ static struct drm_driver vc4_drm_driver = { .gem_create_object = vc4_create_object, - DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(vc4_dumb_create), + DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(vc4_bo_dumb_create), .ioctls = vc4_drm_ioctls, .num_ioctls = ARRAY_SIZE(vc4_drm_ioctls), @@ -189,6 +226,27 @@ static struct drm_driver vc4_drm_driver = { .patchlevel = DRIVER_PATCHLEVEL, }; +static const struct drm_driver vc5_drm_driver = { + .driver_features = (DRIVER_MODESET | + DRIVER_ATOMIC | + DRIVER_GEM), + +#if defined(CONFIG_DEBUG_FS) + .debugfs_init = vc4_debugfs_init, +#endif + + DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(vc5_dumb_create), + + .fops = &vc4_drm_fops, + + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, + .patchlevel = DRIVER_PATCHLEVEL, +}; + static void vc4_match_add_drivers(struct device *dev, struct component_match **match, struct platform_driver *const *drivers, @@ -212,42 +270,49 @@ static void vc4_match_add_drivers(struct device *dev, static int vc4_drm_bind(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); + const struct drm_driver *driver; struct rpi_firmware *firmware = NULL; struct drm_device *drm; struct vc4_dev *vc4; struct device_node *node; struct drm_crtc *crtc; + bool is_vc5; int ret = 0; dev->coherent_dma_mask = DMA_BIT_MASK(32); - /* If VC4 V3D is missing, don't advertise render nodes. */ - node = of_find_matching_node_and_match(NULL, vc4_v3d_dt_match, NULL); - if (!node || !of_device_is_available(node)) - vc4_drm_driver.driver_features &= ~DRIVER_RENDER; - of_node_put(node); + is_vc5 = of_device_is_compatible(dev->of_node, "brcm,bcm2711-vc5"); + if (is_vc5) + driver = &vc5_drm_driver; + else + driver = &vc4_drm_driver; - vc4 = devm_drm_dev_alloc(dev, &vc4_drm_driver, struct vc4_dev, base); + vc4 = devm_drm_dev_alloc(dev, driver, struct vc4_dev, base); if (IS_ERR(vc4)) return PTR_ERR(vc4); + vc4->is_vc5 = is_vc5; drm = &vc4->base; platform_set_drvdata(pdev, drm); INIT_LIST_HEAD(&vc4->debugfs_list); - mutex_init(&vc4->bin_bo_lock); + if (!is_vc5) { + mutex_init(&vc4->bin_bo_lock); - ret = vc4_bo_cache_init(drm); - if (ret) - return ret; + ret = vc4_bo_cache_init(drm); + if (ret) + return ret; + } ret = drmm_mode_config_init(drm); if (ret) return ret; - ret = vc4_gem_init(drm); - if (ret) - return ret; + if (!is_vc5) { + ret = vc4_gem_init(drm); + if (ret) + return ret; + } node = of_find_compatible_node(NULL, NULL, "raspberrypi,bcm2835-firmware"); if (node) { @@ -258,7 +323,7 @@ static int vc4_drm_bind(struct device *dev) return -EPROBE_DEFER; } - ret = drm_aperture_remove_framebuffers(false, &vc4_drm_driver); + ret = drm_aperture_remove_framebuffers(false, driver); if (ret) return ret; diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h index 15e0c2ac3940..93fd55b9e99e 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.h +++ b/drivers/gpu/drm/vc4/vc4_drv.h @@ -48,6 +48,8 @@ enum vc4_kernel_bo_type { * done. This way, only events related to a specific job will be counted. */ struct vc4_perfmon { + struct vc4_dev *dev; + /* Tracks the number of users of the perfmon, when this counter reaches * zero the perfmon is destroyed. 
*/ @@ -74,6 +76,8 @@ struct vc4_perfmon { struct vc4_dev { struct drm_device base; + bool is_vc5; + unsigned int irq; struct vc4_hvs *hvs; @@ -316,6 +320,7 @@ struct vc4_v3d { }; struct vc4_hvs { + struct vc4_dev *vc4; struct platform_device *pdev; void __iomem *regs; u32 __iomem *dlist; @@ -333,9 +338,6 @@ struct vc4_hvs { struct drm_mm_node mitchell_netravali_filter; struct debugfs_regset32 regset; - - /* HVS version 5 flag, therefore requires updated dlist structures */ - bool hvs5; }; struct vc4_plane { @@ -580,6 +582,8 @@ to_vc4_crtc_state(struct drm_crtc_state *crtc_state) #define VC4_REG32(reg) { .name = #reg, .offset = reg } struct vc4_exec_info { + struct vc4_dev *dev; + /* Sequence number for this bin/render job. */ uint64_t seqno; @@ -701,6 +705,8 @@ struct vc4_exec_info { * released when the DRM file is closed should be placed here. */ struct vc4_file { + struct vc4_dev *dev; + struct { struct idr idr; struct mutex lock; @@ -814,9 +820,9 @@ struct vc4_validated_shader_info { struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size); struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size, bool from_cache, enum vc4_kernel_bo_type type); -int vc4_dumb_create(struct drm_file *file_priv, - struct drm_device *dev, - struct drm_mode_create_dumb *args); +int vc4_bo_dumb_create(struct drm_file *file_priv, + struct drm_device *dev, + struct drm_mode_create_dumb *args); int vc4_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data, @@ -885,6 +891,7 @@ static inline void vc4_debugfs_add_regset32(struct drm_device *drm, /* vc4_drv.c */ void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index); +int vc4_dumb_fixup_args(struct drm_mode_create_dumb *args); /* vc4_dpi.c */ extern struct platform_driver vc4_dpi_driver; diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index 9eaf304fc20d..fe10d9c3fff8 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c @@ -76,6 +76,9 @@ vc4_get_hang_state_ioctl(struct drm_device *dev, void *data, u32 i; int ret = 0; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + if (!vc4->v3d) { DRM_DEBUG("VC4_GET_HANG_STATE with no VC4 V3D probed\n"); return -ENODEV; @@ -386,6 +389,9 @@ vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns, unsigned long timeout_expire; DEFINE_WAIT(wait); + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + if (vc4->finished_seqno >= seqno) return 0; @@ -468,6 +474,9 @@ vc4_submit_next_bin_job(struct drm_device *dev) struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_exec_info *exec; + if (WARN_ON_ONCE(vc4->is_vc5)) + return; + again: exec = vc4_first_bin_job(vc4); if (!exec) @@ -513,6 +522,9 @@ vc4_submit_next_render_job(struct drm_device *dev) if (!exec) return; + if (WARN_ON_ONCE(vc4->is_vc5)) + return; + /* A previous RCL may have written to one of our textures, and * our full cache flush at bin time may have occurred before * that RCL completed. 
Flush the texture cache now, but not @@ -531,6 +543,9 @@ vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec) struct vc4_dev *vc4 = to_vc4_dev(dev); bool was_empty = list_empty(&vc4->render_job_list); + if (WARN_ON_ONCE(vc4->is_vc5)) + return; + list_move_tail(&exec->head, &vc4->render_job_list); if (was_empty) vc4_submit_next_render_job(dev); @@ -997,6 +1012,9 @@ vc4_job_handle_completed(struct vc4_dev *vc4) unsigned long irqflags; struct vc4_seqno_cb *cb, *cb_temp; + if (WARN_ON_ONCE(vc4->is_vc5)) + return; + spin_lock_irqsave(&vc4->job_lock, irqflags); while (!list_empty(&vc4->job_done_list)) { struct vc4_exec_info *exec = @@ -1033,6 +1051,9 @@ int vc4_queue_seqno_cb(struct drm_device *dev, struct vc4_dev *vc4 = to_vc4_dev(dev); unsigned long irqflags; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + cb->func = func; INIT_WORK(&cb->work, vc4_seqno_cb_work); @@ -1083,8 +1104,12 @@ int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { + struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_vc4_wait_seqno *args = data; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno, &args->timeout_ns); } @@ -1093,11 +1118,15 @@ int vc4_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { + struct vc4_dev *vc4 = to_vc4_dev(dev); int ret; struct drm_vc4_wait_bo *args = data; struct drm_gem_object *gem_obj; struct vc4_bo *bo; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + if (args->pad != 0) return -EINVAL; @@ -1144,6 +1173,9 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data, args->shader_rec_size, args->bo_handle_count); + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + if (!vc4->v3d) { DRM_DEBUG("VC4_SUBMIT_CL with no VC4 V3D probed\n"); return -ENODEV; @@ -1167,6 +1199,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data, DRM_ERROR("malloc failure on exec struct\n"); return -ENOMEM; } + exec->dev = vc4; ret = vc4_v3d_pm_get(vc4); if (ret) { @@ -1276,6 +1309,9 @@ int vc4_gem_init(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + vc4->dma_fence_context = dma_fence_context_alloc(1); INIT_LIST_HEAD(&vc4->bin_job_list); @@ -1321,11 +1357,15 @@ static void vc4_gem_destroy(struct drm_device *dev, void *unused) int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { + struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_vc4_gem_madvise *args = data; struct drm_gem_object *gem_obj; struct vc4_bo *bo; int ret; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + switch (args->madv) { case VC4_MADV_DONTNEED: case VC4_MADV_WILLNEED: diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 823d812f4982..ce9d16666d91 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -1481,7 +1481,7 @@ vc4_hdmi_encoder_compute_mode_clock(const struct drm_display_mode *mode, unsigned int bpc, enum vc4_hdmi_output_format fmt) { - unsigned long long clock = mode->clock * 1000; + unsigned long long clock = mode->clock * 1000ULL; if (mode->flags & DRM_MODE_FLAG_DBLCLK) clock = clock * 2; diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c index 2a58fc421cf6..ba2c8e5a9b64 100644 --- a/drivers/gpu/drm/vc4/vc4_hvs.c +++ b/drivers/gpu/drm/vc4/vc4_hvs.c @@ -220,10 +220,11 @@ u8 vc4_hvs_get_fifo_frame_count(struct vc4_hvs *hvs, unsigned int fifo) int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, 
unsigned int output) { + struct vc4_dev *vc4 = hvs->vc4; u32 reg; int ret; - if (!hvs->hvs5) + if (!vc4->is_vc5) return output; switch (output) { @@ -273,6 +274,7 @@ int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, unsigned int output) static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc, struct drm_display_mode *mode, bool oneshot) { + struct vc4_dev *vc4 = hvs->vc4; struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); struct vc4_crtc_state *vc4_crtc_state = to_vc4_crtc_state(crtc->state); unsigned int chan = vc4_crtc_state->assigned_channel; @@ -291,7 +293,7 @@ static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc, */ dispctrl = SCALER_DISPCTRLX_ENABLE; - if (!hvs->hvs5) + if (!vc4->is_vc5) dispctrl |= VC4_SET_FIELD(mode->hdisplay, SCALER_DISPCTRLX_WIDTH) | VC4_SET_FIELD(mode->vdisplay, @@ -312,7 +314,7 @@ static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc, HVS_WRITE(SCALER_DISPBKGNDX(chan), dispbkgndx | SCALER_DISPBKGND_AUTOHS | - ((!hvs->hvs5) ? SCALER_DISPBKGND_GAMMA : 0) | + ((!vc4->is_vc5) ? SCALER_DISPBKGND_GAMMA : 0) | (interlace ? SCALER_DISPBKGND_INTERLACE : 0)); /* Reload the LUT, since the SRAMs would have been disabled if @@ -617,11 +619,9 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) if (!hvs) return -ENOMEM; + hvs->vc4 = vc4; hvs->pdev = pdev; - if (of_device_is_compatible(pdev->dev.of_node, "brcm,bcm2711-hvs")) - hvs->hvs5 = true; - hvs->regs = vc4_ioremap_regs(pdev, 0); if (IS_ERR(hvs->regs)) return PTR_ERR(hvs->regs); @@ -630,7 +630,7 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) hvs->regset.regs = hvs_regs; hvs->regset.nregs = ARRAY_SIZE(hvs_regs); - if (hvs->hvs5) { + if (vc4->is_vc5) { hvs->core_clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(hvs->core_clk)) { dev_err(&pdev->dev, "Couldn't get core clock\n"); @@ -644,7 +644,7 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) } } - if (!hvs->hvs5) + if (!vc4->is_vc5) hvs->dlist = hvs->regs + SCALER_DLIST_START; else hvs->dlist = hvs->regs + SCALER5_DLIST_START; @@ -665,7 +665,7 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) * between planes when they don't overlap on the screen, but * for now we just allocate globally. 
*/ - if (!hvs->hvs5) + if (!vc4->is_vc5) /* 48k words of 2x12-bit pixels */ drm_mm_init(&hvs->lbm_mm, 0, 48 * 1024); else diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c index 4342fb43e8c1..2eacfb6773d2 100644 --- a/drivers/gpu/drm/vc4/vc4_irq.c +++ b/drivers/gpu/drm/vc4/vc4_irq.c @@ -265,6 +265,9 @@ vc4_irq_enable(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); + if (WARN_ON_ONCE(vc4->is_vc5)) + return; + if (!vc4->v3d) return; @@ -279,6 +282,9 @@ vc4_irq_disable(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); + if (WARN_ON_ONCE(vc4->is_vc5)) + return; + if (!vc4->v3d) return; @@ -296,8 +302,12 @@ vc4_irq_disable(struct drm_device *dev) int vc4_irq_install(struct drm_device *dev, int irq) { + struct vc4_dev *vc4 = to_vc4_dev(dev); int ret; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + if (irq == IRQ_NOTCONNECTED) return -ENOTCONN; @@ -316,6 +326,9 @@ void vc4_irq_uninstall(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); + if (WARN_ON_ONCE(vc4->is_vc5)) + return; + vc4_irq_disable(dev); free_irq(vc4->irq, dev); } @@ -326,6 +339,9 @@ void vc4_irq_reset(struct drm_device *dev) struct vc4_dev *vc4 = to_vc4_dev(dev); unsigned long irqflags; + if (WARN_ON_ONCE(vc4->is_vc5)) + return; + /* Acknowledge any stale IRQs. */ V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS); diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index c169bd72e53b..893d831b24aa 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c @@ -393,7 +393,7 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state) old_hvs_state->fifo_state[channel].pending_commit = NULL; } - if (vc4->hvs->hvs5) { + if (vc4->is_vc5) { unsigned long state_rate = max(old_hvs_state->core_clock_rate, new_hvs_state->core_clock_rate); unsigned long core_rate = max_t(unsigned long, @@ -412,7 +412,7 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state) vc4_ctm_commit(vc4, state); - if (vc4->hvs->hvs5) + if (vc4->is_vc5) vc5_hvs_pv_muxing_commit(vc4, state); else vc4_hvs_pv_muxing_commit(vc4, state); @@ -430,7 +430,7 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state) drm_atomic_helper_cleanup_planes(dev, state); - if (vc4->hvs->hvs5) { + if (vc4->is_vc5) { drm_dbg(dev, "Running the core clock at %lu Hz\n", new_hvs_state->core_clock_rate); @@ -479,8 +479,12 @@ static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev, struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd) { + struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_mode_fb_cmd2 mode_cmd_local; + if (WARN_ON_ONCE(vc4->is_vc5)) + return ERR_PTR(-ENODEV); + /* If the user didn't specify a modifier, use the * vc4_set_tiling_ioctl() state for the BO. */ @@ -997,11 +1001,15 @@ static const struct drm_mode_config_funcs vc4_mode_funcs = { .fb_create = vc4_fb_create, }; +static const struct drm_mode_config_funcs vc5_mode_funcs = { + .atomic_check = vc4_atomic_check, + .atomic_commit = drm_atomic_helper_commit, + .fb_create = drm_gem_fb_create, +}; + int vc4_kms_load(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); - bool is_vc5 = of_device_is_compatible(dev->dev->of_node, - "brcm,bcm2711-vc5"); int ret; /* @@ -1009,7 +1017,7 @@ int vc4_kms_load(struct drm_device *dev) * the BCM2711, but the load tracker computations are used for * the core clock rate calculation. */ - if (!is_vc5) { + if (!vc4->is_vc5) { /* Start with the load tracker enabled. Can be * disabled through the debugfs load_tracker file. 
*/ @@ -1025,7 +1033,7 @@ int vc4_kms_load(struct drm_device *dev) return ret; } - if (is_vc5) { + if (vc4->is_vc5) { dev->mode_config.max_width = 7680; dev->mode_config.max_height = 7680; } else { @@ -1033,7 +1041,7 @@ int vc4_kms_load(struct drm_device *dev) dev->mode_config.max_height = 2048; } - dev->mode_config.funcs = &vc4_mode_funcs; + dev->mode_config.funcs = vc4->is_vc5 ? &vc5_mode_funcs : &vc4_mode_funcs; dev->mode_config.helper_private = &vc4_mode_config_helpers; dev->mode_config.preferred_depth = 24; dev->mode_config.async_page_flip = true; diff --git a/drivers/gpu/drm/vc4/vc4_perfmon.c b/drivers/gpu/drm/vc4/vc4_perfmon.c index 18abc06335c1..c7f5adb6bcf8 100644 --- a/drivers/gpu/drm/vc4/vc4_perfmon.c +++ b/drivers/gpu/drm/vc4/vc4_perfmon.c @@ -17,13 +17,27 @@ void vc4_perfmon_get(struct vc4_perfmon *perfmon) { + struct vc4_dev *vc4 = perfmon->dev; + + if (WARN_ON_ONCE(vc4->is_vc5)) + return; + if (perfmon) refcount_inc(&perfmon->refcnt); } void vc4_perfmon_put(struct vc4_perfmon *perfmon) { - if (perfmon && refcount_dec_and_test(&perfmon->refcnt)) + struct vc4_dev *vc4; + + if (!perfmon) + return; + + vc4 = perfmon->dev; + if (WARN_ON_ONCE(vc4->is_vc5)) + return; + + if (refcount_dec_and_test(&perfmon->refcnt)) kfree(perfmon); } @@ -32,6 +46,9 @@ void vc4_perfmon_start(struct vc4_dev *vc4, struct vc4_perfmon *perfmon) unsigned int i; u32 mask; + if (WARN_ON_ONCE(vc4->is_vc5)) + return; + if (WARN_ON_ONCE(!perfmon || vc4->active_perfmon)) return; @@ -49,6 +66,9 @@ void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon, { unsigned int i; + if (WARN_ON_ONCE(vc4->is_vc5)) + return; + if (WARN_ON_ONCE(!vc4->active_perfmon || perfmon != vc4->active_perfmon)) return; @@ -64,8 +84,12 @@ void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon, struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id) { + struct vc4_dev *vc4 = vc4file->dev; struct vc4_perfmon *perfmon; + if (WARN_ON_ONCE(vc4->is_vc5)) + return NULL; + mutex_lock(&vc4file->perfmon.lock); perfmon = idr_find(&vc4file->perfmon.idr, id); vc4_perfmon_get(perfmon); @@ -76,8 +100,14 @@ struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id) void vc4_perfmon_open_file(struct vc4_file *vc4file) { + struct vc4_dev *vc4 = vc4file->dev; + + if (WARN_ON_ONCE(vc4->is_vc5)) + return; + mutex_init(&vc4file->perfmon.lock); idr_init_base(&vc4file->perfmon.idr, VC4_PERFMONID_MIN); + vc4file->dev = vc4; } static int vc4_perfmon_idr_del(int id, void *elem, void *data) @@ -91,6 +121,11 @@ static int vc4_perfmon_idr_del(int id, void *elem, void *data) void vc4_perfmon_close_file(struct vc4_file *vc4file) { + struct vc4_dev *vc4 = vc4file->dev; + + if (WARN_ON_ONCE(vc4->is_vc5)) + return; + mutex_lock(&vc4file->perfmon.lock); idr_for_each(&vc4file->perfmon.idr, vc4_perfmon_idr_del, NULL); idr_destroy(&vc4file->perfmon.idr); @@ -107,6 +142,9 @@ int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data, unsigned int i; int ret; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + if (!vc4->v3d) { DRM_DEBUG("Creating perfmon no VC4 V3D probed\n"); return -ENODEV; @@ -127,6 +165,7 @@ int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data, GFP_KERNEL); if (!perfmon) return -ENOMEM; + perfmon->dev = vc4; for (i = 0; i < req->ncounters; i++) perfmon->events[i] = req->events[i]; @@ -157,6 +196,9 @@ int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data, struct drm_vc4_perfmon_destroy *req = data; struct vc4_perfmon *perfmon; + if (WARN_ON_ONCE(vc4->is_vc5)) + return 
-ENODEV; + if (!vc4->v3d) { DRM_DEBUG("Destroying perfmon no VC4 V3D probed\n"); return -ENODEV; @@ -182,6 +224,9 @@ int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data, struct vc4_perfmon *perfmon; int ret; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + if (!vc4->v3d) { DRM_DEBUG("Getting perfmon no VC4 V3D probed\n"); return -ENODEV; diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index b3438f4a81ce..1e866dc00ac3 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -489,10 +489,10 @@ static u32 vc4_lbm_size(struct drm_plane_state *state) } /* Align it to 64 or 128 (hvs5) bytes */ - lbm = roundup(lbm, vc4->hvs->hvs5 ? 128 : 64); + lbm = roundup(lbm, vc4->is_vc5 ? 128 : 64); /* Each "word" of the LBM memory contains 2 or 4 (hvs5) pixels */ - lbm /= vc4->hvs->hvs5 ? 4 : 2; + lbm /= vc4->is_vc5 ? 4 : 2; return lbm; } @@ -608,7 +608,7 @@ static int vc4_plane_allocate_lbm(struct drm_plane_state *state) ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm, &vc4_state->lbm, lbm_size, - vc4->hvs->hvs5 ? 64 : 32, + vc4->is_vc5 ? 64 : 32, 0, 0); spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags); @@ -917,7 +917,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane, mix_plane_alpha = state->alpha != DRM_BLEND_ALPHA_OPAQUE && fb->format->has_alpha; - if (!vc4->hvs->hvs5) { + if (!vc4->is_vc5) { /* Control word */ vc4_dlist_write(vc4_state, SCALER_CTL0_VALID | @@ -1321,6 +1321,10 @@ static int vc4_plane_atomic_async_check(struct drm_plane *plane, old_vc4_state = to_vc4_plane_state(plane->state); new_vc4_state = to_vc4_plane_state(new_plane_state); + + if (!new_vc4_state->hw_dlist) + return -EINVAL; + if (old_vc4_state->dlist_count != new_vc4_state->dlist_count || old_vc4_state->pos0_offset != new_vc4_state->pos0_offset || old_vc4_state->pos2_offset != new_vc4_state->pos2_offset || @@ -1385,6 +1389,13 @@ static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = { .atomic_async_update = vc4_plane_atomic_async_update, }; +static const struct drm_plane_helper_funcs vc5_plane_helper_funcs = { + .atomic_check = vc4_plane_atomic_check, + .atomic_update = vc4_plane_atomic_update, + .atomic_async_check = vc4_plane_atomic_async_check, + .atomic_async_update = vc4_plane_atomic_async_update, +}; + static bool vc4_format_mod_supported(struct drm_plane *plane, uint32_t format, uint64_t modifier) @@ -1453,14 +1464,13 @@ static const struct drm_plane_funcs vc4_plane_funcs = { struct drm_plane *vc4_plane_init(struct drm_device *dev, enum drm_plane_type type) { + struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_plane *plane = NULL; struct vc4_plane *vc4_plane; u32 formats[ARRAY_SIZE(hvs_formats)]; int num_formats = 0; int ret = 0; unsigned i; - bool hvs5 = of_device_is_compatible(dev->dev->of_node, - "brcm,bcm2711-vc5"); static const uint64_t modifiers[] = { DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED, DRM_FORMAT_MOD_BROADCOM_SAND128, @@ -1476,7 +1486,7 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev, return ERR_PTR(-ENOMEM); for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) { - if (!hvs_formats[i].hvs5_only || hvs5) { + if (!hvs_formats[i].hvs5_only || vc4->is_vc5) { formats[num_formats] = hvs_formats[i].drm; num_formats++; } @@ -1490,7 +1500,10 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev, if (ret) return ERR_PTR(ret); - drm_plane_helper_add(plane, &vc4_plane_helper_funcs); + if (vc4->is_vc5) + drm_plane_helper_add(plane, &vc5_plane_helper_funcs); + else + drm_plane_helper_add(plane, 
&vc4_plane_helper_funcs); drm_plane_create_alpha_property(plane); drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0, diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c index 3c918eeaf56e..f6b7dc3df08c 100644 --- a/drivers/gpu/drm/vc4/vc4_render_cl.c +++ b/drivers/gpu/drm/vc4/vc4_render_cl.c @@ -593,11 +593,15 @@ vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec, int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec) { + struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_rcl_setup setup = {0}; struct drm_vc4_submit_cl *args = exec->args; bool has_bin = args->bin_cl_size != 0; int ret; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + if (args->min_x_tile > args->max_x_tile || args->min_y_tile > args->max_y_tile) { DRM_DEBUG("Bad render tile set (%d,%d)-(%d,%d)\n", diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c index 7bb3067f8425..cc714dcfe1f2 100644 --- a/drivers/gpu/drm/vc4/vc4_v3d.c +++ b/drivers/gpu/drm/vc4/vc4_v3d.c @@ -127,6 +127,9 @@ static int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused) int vc4_v3d_pm_get(struct vc4_dev *vc4) { + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + mutex_lock(&vc4->power_lock); if (vc4->power_refcount++ == 0) { int ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev); @@ -145,6 +148,9 @@ vc4_v3d_pm_get(struct vc4_dev *vc4) void vc4_v3d_pm_put(struct vc4_dev *vc4) { + if (WARN_ON_ONCE(vc4->is_vc5)) + return; + mutex_lock(&vc4->power_lock); if (--vc4->power_refcount == 0) { pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev); @@ -172,6 +178,9 @@ int vc4_v3d_get_bin_slot(struct vc4_dev *vc4) uint64_t seqno = 0; struct vc4_exec_info *exec; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + try_again: spin_lock_irqsave(&vc4->job_lock, irqflags); slot = ffs(~vc4->bin_alloc_used); @@ -316,6 +325,9 @@ int vc4_v3d_bin_bo_get(struct vc4_dev *vc4, bool *used) { int ret = 0; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + mutex_lock(&vc4->bin_bo_lock); if (used && *used) @@ -348,6 +360,9 @@ static void bin_bo_release(struct kref *ref) void vc4_v3d_bin_bo_put(struct vc4_dev *vc4) { + if (WARN_ON_ONCE(vc4->is_vc5)) + return; + mutex_lock(&vc4->bin_bo_lock); kref_put(&vc4->bin_bo_kref, bin_bo_release); mutex_unlock(&vc4->bin_bo_lock); diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c index eec76af49f04..2feba55bcef7 100644 --- a/drivers/gpu/drm/vc4/vc4_validate.c +++ b/drivers/gpu/drm/vc4/vc4_validate.c @@ -105,9 +105,13 @@ size_is_lt(uint32_t width, uint32_t height, int cpp) struct drm_gem_cma_object * vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex) { + struct vc4_dev *vc4 = exec->dev; struct drm_gem_cma_object *obj; struct vc4_bo *bo; + if (WARN_ON_ONCE(vc4->is_vc5)) + return NULL; + if (hindex >= exec->bo_count) { DRM_DEBUG("BO index %d greater than BO count %d\n", hindex, exec->bo_count); @@ -160,10 +164,14 @@ vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo, uint32_t offset, uint8_t tiling_format, uint32_t width, uint32_t height, uint8_t cpp) { + struct vc4_dev *vc4 = exec->dev; uint32_t aligned_width, aligned_height, stride, size; uint32_t utile_w = utile_width(cpp); uint32_t utile_h = utile_height(cpp); + if (WARN_ON_ONCE(vc4->is_vc5)) + return false; + /* The shaded vertex format stores signed 12.4 fixed point * (-2048,2047) offsets from the viewport center, so we should * never have a render target larger than 4096. 
The texture @@ -482,10 +490,14 @@ vc4_validate_bin_cl(struct drm_device *dev, void *unvalidated, struct vc4_exec_info *exec) { + struct vc4_dev *vc4 = to_vc4_dev(dev); uint32_t len = exec->args->bin_cl_size; uint32_t dst_offset = 0; uint32_t src_offset = 0; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + while (src_offset < len) { void *dst_pkt = validated + dst_offset; void *src_pkt = unvalidated + src_offset; @@ -926,9 +938,13 @@ int vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec) { + struct vc4_dev *vc4 = to_vc4_dev(dev); uint32_t i; int ret = 0; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + for (i = 0; i < exec->shader_state_count; i++) { ret = validate_gl_shader_rec(dev, exec, &exec->shader_state[i]); if (ret) diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c index 7cf82b071de2..e315aeb5fef5 100644 --- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c +++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c @@ -778,6 +778,7 @@ vc4_handle_branch_target(struct vc4_shader_validation_state *validation_state) struct vc4_validated_shader_info * vc4_validate_shader(struct drm_gem_cma_object *shader_obj) { + struct vc4_dev *vc4 = to_vc4_dev(shader_obj->base.dev); bool found_shader_end = false; int shader_end_ip = 0; uint32_t last_thread_switch_ip = -3; @@ -785,6 +786,9 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj) struct vc4_validated_shader_info *validated_shader = NULL; struct vc4_shader_validation_state validation_state; + if (WARN_ON_ONCE(vc4->is_vc5)) + return NULL; + memset(&validation_state, 0, sizeof(validation_state)); validation_state.shader = shader_obj->vaddr; validation_state.max_ip = shader_obj->base.size / sizeof(uint64_t); diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c index 5a5bf4e5b717..e31554d7139f 100644 --- a/drivers/gpu/drm/xen/xen_drm_front_gem.c +++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c @@ -71,7 +71,7 @@ static int xen_drm_front_gem_object_mmap(struct drm_gem_object *gem_obj, * the whole buffer. 
*/ vma->vm_flags &= ~VM_PFNMAP; - vma->vm_flags |= VM_MIXEDMAP; + vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND; vma->vm_pgoff = 0; /* diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c index 978ee2aab2d4..e0bc73124196 100644 --- a/drivers/hid/hid-hyperv.c +++ b/drivers/hid/hid-hyperv.c @@ -199,7 +199,8 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device, if (!input_device->hid_desc) goto cleanup; - input_device->report_desc_size = desc->desc[0].wDescriptorLength; + input_device->report_desc_size = le16_to_cpu( + desc->desc[0].wDescriptorLength); if (input_device->report_desc_size == 0) { input_device->dev_info_status = -EINVAL; goto cleanup; @@ -217,7 +218,7 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device, memcpy(input_device->report_desc, ((unsigned char *)desc) + desc->bLength, - desc->desc[0].wDescriptorLength); + le16_to_cpu(desc->desc[0].wDescriptorLength)); /* Send the ack */ memset(&ack, 0, sizeof(struct mousevsc_prt_msg)); diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index b60f13481bdc..5b120402d405 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -21,6 +21,7 @@ #include <linux/cpu.h> #include <linux/hyperv.h> #include <asm/mshyperv.h> +#include <linux/sched/isolation.h> #include "hyperv_vmbus.h" @@ -638,6 +639,7 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) */ if (newchannel->offermsg.offer.sub_channel_index == 0) { mutex_unlock(&vmbus_connection.channel_mutex); + cpus_read_unlock(); /* * Don't call free_channel(), because newchannel->kobj * is not initialized yet. @@ -728,16 +730,20 @@ static void init_vp_index(struct vmbus_channel *channel) u32 i, ncpu = num_online_cpus(); cpumask_var_t available_mask; struct cpumask *allocated_mask; + const struct cpumask *hk_mask = housekeeping_cpumask(HK_TYPE_MANAGED_IRQ); u32 target_cpu; int numa_node; if (!perf_chn || - !alloc_cpumask_var(&available_mask, GFP_KERNEL)) { + !alloc_cpumask_var(&available_mask, GFP_KERNEL) || + cpumask_empty(hk_mask)) { /* * If the channel is not a performance critical * channel, bind it to VMBUS_CONNECT_CPU. * In case alloc_cpumask_var() fails, bind it to * VMBUS_CONNECT_CPU. + * If all the cpus are isolated, bind it to + * VMBUS_CONNECT_CPU. */ channel->target_cpu = VMBUS_CONNECT_CPU; if (perf_chn) @@ -758,17 +764,19 @@ static void init_vp_index(struct vmbus_channel *channel) } allocated_mask = &hv_context.hv_numa_map[numa_node]; - if (cpumask_equal(allocated_mask, cpumask_of_node(numa_node))) { +retry: + cpumask_xor(available_mask, allocated_mask, cpumask_of_node(numa_node)); + cpumask_and(available_mask, available_mask, hk_mask); + + if (cpumask_empty(available_mask)) { /* * We have cycled through all the CPUs in the node; * reset the allocated map. */ cpumask_clear(allocated_mask); + goto retry; } - cpumask_xor(available_mask, allocated_mask, - cpumask_of_node(numa_node)); - target_cpu = cpumask_first(available_mask); cpumask_set_cpu(target_cpu, allocated_mask); diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c index c698592b83e4..d35b60c06114 100644 --- a/drivers/hv/hv_kvp.c +++ b/drivers/hv/hv_kvp.c @@ -394,7 +394,7 @@ kvp_send_key(struct work_struct *dummy) in_msg = kvp_transaction.kvp_msg; /* - * The key/value strings sent from the host are encoded in + * The key/value strings sent from the host are encoded * in utf16; convert it to utf8 strings. * The host assures us that the utf16 strings will not exceed * the max lengths specified. 
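A note on the vmbus channel_mgmt.c hunk above: the fix replaces a one-shot cpumask_equal() test with a retry loop. Candidate CPUs are the XOR of the per-NUMA-node allocated map against the node mask, intersected with the managed-IRQ housekeeping mask; only when that intersection is empty is the allocated map reset and the selection retried. A minimal userspace sketch of the same selection logic, with uint64_t bitmasks standing in for struct cpumask (function and variable names here are illustrative, not from the patch):

#include <stdint.h>
#include <stdio.h>

/* Pick the first CPU in the node that is unallocated and also a
 * housekeeping CPU; reset the allocated map once the node is exhausted.
 * The caller must guarantee node_mask & hk_mask is nonempty, just as
 * init_vp_index() above falls back to VMBUS_CONNECT_CPU when the
 * housekeeping mask is empty. */
static int pick_target_cpu(uint64_t node_mask, uint64_t *allocated,
			   uint64_t hk_mask)
{
	uint64_t available;

retry:
	available = (*allocated ^ node_mask) & hk_mask;
	if (!available) {
		/* Cycled through all housekeeping CPUs in the node. */
		*allocated = 0;
		goto retry;
	}

	int cpu = __builtin_ctzll(available);	/* lowest set bit */
	*allocated |= 1ULL << cpu;
	return cpu;
}

int main(void)
{
	uint64_t allocated = 0;

	/* CPUs 0-3 in the node; CPU 2 is isolated (not housekeeping). */
	for (int i = 0; i < 5; i++)
		printf("target_cpu = %d\n",
		       pick_target_cpu(0x0f, &allocated, 0x0b));
	return 0;
}

This prints 0, 1, 3, then wraps back to 0 and 1, never choosing the isolated CPU 2 — the same behavior the patch gives init_vp_index().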
We will however, reserve room diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 714d549b7b46..547ae334e5cd 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -21,6 +21,7 @@ #include <linux/kernel_stat.h> #include <linux/clockchips.h> #include <linux/cpu.h> +#include <linux/sched/isolation.h> #include <linux/sched/task_stack.h> #include <linux/delay.h> @@ -1770,6 +1771,9 @@ static ssize_t target_cpu_store(struct vmbus_channel *channel, if (target_cpu >= nr_cpumask_bits) return -EINVAL; + if (!cpumask_test_cpu(target_cpu, housekeeping_cpumask(HK_TYPE_MANAGED_IRQ))) + return -EINVAL; + /* No CPUs should come up or down during this. */ cpus_read_lock(); diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c index 57e11b2bab74..3633ab691662 100644 --- a/drivers/hwmon/asus-ec-sensors.c +++ b/drivers/hwmon/asus-ec-sensors.c @@ -259,7 +259,7 @@ static const struct ec_board_info board_info[] = { }, { .board_names = { - "ROG CROSSHAIR VIII FORMULA" + "ROG CROSSHAIR VIII FORMULA", "ROG CROSSHAIR VIII HERO", "ROG CROSSHAIR VIII HERO (WI-FI)", }, diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c index d78f4bebc718..ea070b91e5b9 100644 --- a/drivers/hwmon/occ/common.c +++ b/drivers/hwmon/occ/common.c @@ -1228,10 +1228,15 @@ EXPORT_SYMBOL_GPL(occ_setup); void occ_shutdown(struct occ *occ) { + mutex_lock(&occ->lock); + occ_shutdown_sysfs(occ); if (occ->hwmon) hwmon_device_unregister(occ->hwmon); + occ->hwmon = NULL; + + mutex_unlock(&occ->lock); } EXPORT_SYMBOL_GPL(occ_shutdown); diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c index e7d316b1401a..c023b691441e 100644 --- a/drivers/i2c/busses/i2c-designware-common.c +++ b/drivers/i2c/busses/i2c-designware-common.c @@ -477,9 +477,6 @@ int i2c_dw_prepare_clk(struct dw_i2c_dev *dev, bool prepare) { int ret; - if (IS_ERR(dev->clk)) - return PTR_ERR(dev->clk); - if (prepare) { /* Optional interface clock */ ret = clk_prepare_enable(dev->pclk); diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 70ade5306e45..ba043b547393 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -320,8 +320,17 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) goto exit_reset; } - dev->clk = devm_clk_get(&pdev->dev, NULL); - if (!i2c_dw_prepare_clk(dev, true)) { + dev->clk = devm_clk_get_optional(&pdev->dev, NULL); + if (IS_ERR(dev->clk)) { + ret = PTR_ERR(dev->clk); + goto exit_reset; + } + + ret = i2c_dw_prepare_clk(dev, true); + if (ret) + goto exit_reset; + + if (dev->clk) { u64 clk_khz; dev->get_clk_rate_khz = i2c_dw_get_clk_rate_khz; diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c index bdecb78bfc26..8e6985354fd5 100644 --- a/drivers/i2c/busses/i2c-mt65xx.c +++ b/drivers/i2c/busses/i2c-mt65xx.c @@ -1420,17 +1420,22 @@ static int mtk_i2c_probe(struct platform_device *pdev) if (ret < 0) { dev_err(&pdev->dev, "Request I2C IRQ %d fail\n", irq); - return ret; + goto err_bulk_unprepare; } i2c_set_adapdata(&i2c->adap, i2c); ret = i2c_add_adapter(&i2c->adap); if (ret) - return ret; + goto err_bulk_unprepare; platform_set_drvdata(pdev, i2c); return 0; + +err_bulk_unprepare: + clk_bulk_unprepare(I2C_MT65XX_CLK_MAX, i2c->clocks); + + return ret; } static int mtk_i2c_remove(struct platform_device *pdev) diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c index 
5960ccde6574..aede9d551130 100644 --- a/drivers/i2c/busses/i2c-npcm7xx.c +++ b/drivers/i2c/busses/i2c-npcm7xx.c @@ -2372,8 +2372,7 @@ static struct platform_driver npcm_i2c_bus_driver = { static int __init npcm_i2c_init(void) { npcm_i2c_debugfs_dir = debugfs_create_dir("npcm_i2c", NULL); - platform_driver_register(&npcm_i2c_bus_driver); - return 0; + return platform_driver_register(&npcm_i2c_bus_driver); } module_init(npcm_i2c_init); diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index b9bb94bd0f67..424ef470223d 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -115,6 +115,18 @@ static unsigned int mwait_substates __initdata; #define flg2MWAIT(flags) (((flags) >> 24) & 0xFF) #define MWAIT2flg(eax) ((eax & 0xFF) << 24) +static __always_inline int __intel_idle(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) +{ + struct cpuidle_state *state = &drv->states[index]; + unsigned long eax = flg2MWAIT(state->flags); + unsigned long ecx = 1; /* break on interrupt flag */ + + mwait_idle_with_hints(eax, ecx); + + return index; +} + /** * intel_idle - Ask the processor to enter the given idle state. * @dev: cpuidle device of the target CPU. @@ -132,16 +144,19 @@ static unsigned int mwait_substates __initdata; static __cpuidle int intel_idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { - struct cpuidle_state *state = &drv->states[index]; - unsigned long eax = flg2MWAIT(state->flags); - unsigned long ecx = 1; /* break on interrupt flag */ + return __intel_idle(dev, drv, index); +} - if (state->flags & CPUIDLE_FLAG_IRQ_ENABLE) - local_irq_enable(); +static __cpuidle int intel_idle_irq(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) +{ + int ret; - mwait_idle_with_hints(eax, ecx); + raw_local_irq_enable(); + ret = __intel_idle(dev, drv, index); + raw_local_irq_disable(); - return index; + return ret; } /** @@ -1801,6 +1816,9 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv) /* Structure copy. 
*/ drv->states[drv->state_count] = cpuidle_state_table[cstate]; + if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_IRQ_ENABLE) + drv->states[drv->state_count].enter = intel_idle_irq; + if ((disabled_states_mask & BIT(drv->state_count)) || ((icpu->use_acpi || force_use_acpi) && intel_idle_off_by_default(mwait_hint) && diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c index 4f73bc827eec..9c9e98578667 100644 --- a/drivers/iio/accel/bma180.c +++ b/drivers/iio/accel/bma180.c @@ -1006,11 +1006,12 @@ static int bma180_probe(struct i2c_client *client, data->trig->ops = &bma180_trigger_ops; iio_trigger_set_drvdata(data->trig, indio_dev); - indio_dev->trig = iio_trigger_get(data->trig); ret = iio_trigger_register(data->trig); if (ret) goto err_trigger_free; + + indio_dev->trig = iio_trigger_get(data->trig); } ret = iio_triggered_buffer_setup(indio_dev, NULL, diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c index ac74cdcd2bc8..748b35c2f0c3 100644 --- a/drivers/iio/accel/kxcjk-1013.c +++ b/drivers/iio/accel/kxcjk-1013.c @@ -1554,12 +1554,12 @@ static int kxcjk1013_probe(struct i2c_client *client, data->dready_trig->ops = &kxcjk1013_trigger_ops; iio_trigger_set_drvdata(data->dready_trig, indio_dev); - indio_dev->trig = data->dready_trig; - iio_trigger_get(indio_dev->trig); ret = iio_trigger_register(data->dready_trig); if (ret) goto err_poweroff; + indio_dev->trig = iio_trigger_get(data->dready_trig); + data->motion_trig->ops = &kxcjk1013_trigger_ops; iio_trigger_set_drvdata(data->motion_trig, indio_dev); ret = iio_trigger_register(data->motion_trig); diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c index 912a447e6310..c7d9ca96dbaa 100644 --- a/drivers/iio/accel/mma8452.c +++ b/drivers/iio/accel/mma8452.c @@ -1511,10 +1511,14 @@ static int mma8452_reset(struct i2c_client *client) int i; int ret; - ret = i2c_smbus_write_byte_data(client, MMA8452_CTRL_REG2, + /* + * Find on fxls8471, after config reset bit, it reset immediately, + * and will not give ACK, so here do not check the return value. + * The following code will read the reset register, and check whether + * this reset works. 
+ */ + i2c_smbus_write_byte_data(client, MMA8452_CTRL_REG2, MMA8452_CTRL_REG2_RST); - if (ret < 0) - return ret; for (i = 0; i < 10; i++) { usleep_range(100, 200); @@ -1557,11 +1561,13 @@ static int mma8452_probe(struct i2c_client *client, mutex_init(&data->lock); data->chip_info = device_get_match_data(&client->dev); - if (!data->chip_info && id) { - data->chip_info = &mma_chip_info_table[id->driver_data]; - } else { - dev_err(&client->dev, "unknown device model\n"); - return -ENODEV; + if (!data->chip_info) { + if (id) { + data->chip_info = &mma_chip_info_table[id->driver_data]; + } else { + dev_err(&client->dev, "unknown device model\n"); + return -ENODEV; + } } ret = iio_read_mount_matrix(&client->dev, &data->orientation); diff --git a/drivers/iio/accel/mxc4005.c b/drivers/iio/accel/mxc4005.c index b3afbf064915..df600d2917c0 100644 --- a/drivers/iio/accel/mxc4005.c +++ b/drivers/iio/accel/mxc4005.c @@ -456,8 +456,6 @@ static int mxc4005_probe(struct i2c_client *client, data->dready_trig->ops = &mxc4005_trigger_ops; iio_trigger_set_drvdata(data->dready_trig, indio_dev); - indio_dev->trig = data->dready_trig; - iio_trigger_get(indio_dev->trig); ret = devm_iio_trigger_register(&client->dev, data->dready_trig); if (ret) { @@ -465,6 +463,8 @@ static int mxc4005_probe(struct i2c_client *client, "failed to register trigger\n"); return ret; } + + indio_dev->trig = iio_trigger_get(data->dready_trig); } return devm_iio_device_register(&client->dev, indio_dev); diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c index a73e3c2d212f..a9e655e69eaa 100644 --- a/drivers/iio/adc/adi-axi-adc.c +++ b/drivers/iio/adc/adi-axi-adc.c @@ -322,16 +322,19 @@ static struct adi_axi_adc_client *adi_axi_adc_attach_client(struct device *dev) if (!try_module_get(cl->dev->driver->owner)) { mutex_unlock(&registered_clients_lock); + of_node_put(cln); return ERR_PTR(-ENODEV); } get_device(cl->dev); cl->info = info; mutex_unlock(&registered_clients_lock); + of_node_put(cln); return cl; } mutex_unlock(&registered_clients_lock); + of_node_put(cln); return ERR_PTR(-EPROBE_DEFER); } diff --git a/drivers/iio/adc/aspeed_adc.c b/drivers/iio/adc/aspeed_adc.c index 0793d2474cdc..9341e0e0eb55 100644 --- a/drivers/iio/adc/aspeed_adc.c +++ b/drivers/iio/adc/aspeed_adc.c @@ -186,6 +186,7 @@ static int aspeed_adc_set_trim_data(struct iio_dev *indio_dev) return -EOPNOTSUPP; } scu = syscon_node_to_regmap(syscon); + of_node_put(syscon); if (IS_ERR(scu)) { dev_warn(data->dev, "Failed to get syscon regmap\n"); return -EOPNOTSUPP; } diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c index a4b8be5b8f88..580361bd9849 100644 --- a/drivers/iio/adc/axp288_adc.c +++ b/drivers/iio/adc/axp288_adc.c @@ -196,6 +196,14 @@ static const struct dmi_system_id axp288_adc_ts_bias_override[] = { }, .driver_data = (void *)(uintptr_t)AXP288_ADC_TS_BIAS_80UA, }, + { + /* Nuvision Solo 10 Draw */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TMAX"), + DMI_MATCH(DMI_PRODUCT_NAME, "TM101W610L"), + }, + .driver_data = (void *)(uintptr_t)AXP288_ADC_TS_BIAS_80UA, + }, {} }; diff --git a/drivers/iio/adc/rzg2l_adc.c b/drivers/iio/adc/rzg2l_adc.c index 7585144b9715..5b09a93fdf34 100644 --- a/drivers/iio/adc/rzg2l_adc.c +++ b/drivers/iio/adc/rzg2l_adc.c @@ -334,11 +334,15 @@ static int rzg2l_adc_parse_properties(struct platform_device *pdev, struct rzg2l i = 0; device_for_each_child_node(&pdev->dev, fwnode) { ret = fwnode_property_read_u32(fwnode, "reg", &channel); - if (ret) + if (ret) { + fwnode_handle_put(fwnode); return ret; + } - if
(channel >= RZG2L_ADC_MAX_CHANNELS) + if (channel >= RZG2L_ADC_MAX_CHANNELS) { + fwnode_handle_put(fwnode); return -EINVAL; + } chan_array[i].type = IIO_VOLTAGE; chan_array[i].indexed = 1; diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c index 142656232157..3efb8c404ccc 100644 --- a/drivers/iio/adc/stm32-adc-core.c +++ b/drivers/iio/adc/stm32-adc-core.c @@ -64,6 +64,7 @@ struct stm32_adc_priv; * @max_clk_rate_hz: maximum analog clock rate (Hz, from datasheet) * @has_syscfg: SYSCFG capability flags * @num_irqs: number of interrupt lines + * @num_adcs: maximum number of ADC instances in the common registers */ struct stm32_adc_priv_cfg { const struct stm32_adc_common_regs *regs; @@ -71,6 +72,7 @@ struct stm32_adc_priv_cfg { u32 max_clk_rate_hz; unsigned int has_syscfg; unsigned int num_irqs; + unsigned int num_adcs; }; /** @@ -352,7 +354,7 @@ static void stm32_adc_irq_handler(struct irq_desc *desc) * before invoking the interrupt handler (e.g. call ISR only for * IRQ-enabled ADCs). */ - for (i = 0; i < priv->cfg->num_irqs; i++) { + for (i = 0; i < priv->cfg->num_adcs; i++) { if ((status & priv->cfg->regs->eoc_msk[i] && stm32_adc_eoc_enabled(priv, i)) || (status & priv->cfg->regs->ovr_msk[i])) @@ -792,6 +794,7 @@ static const struct stm32_adc_priv_cfg stm32f4_adc_priv_cfg = { .clk_sel = stm32f4_adc_clk_sel, .max_clk_rate_hz = 36000000, .num_irqs = 1, + .num_adcs = 3, }; static const struct stm32_adc_priv_cfg stm32h7_adc_priv_cfg = { @@ -800,14 +803,16 @@ static const struct stm32_adc_priv_cfg stm32h7_adc_priv_cfg = { .max_clk_rate_hz = 36000000, .has_syscfg = HAS_VBOOSTER, .num_irqs = 1, + .num_adcs = 2, }; static const struct stm32_adc_priv_cfg stm32mp1_adc_priv_cfg = { .regs = &stm32h7_adc_common_regs, .clk_sel = stm32h7_adc_clk_sel, - .max_clk_rate_hz = 40000000, + .max_clk_rate_hz = 36000000, .has_syscfg = HAS_VBOOSTER | HAS_ANASWVDD, .num_irqs = 2, + .num_adcs = 2, }; static const struct of_device_id stm32_adc_of_match[] = { diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c index a68ecbda6480..11ef873d6453 100644 --- a/drivers/iio/adc/stm32-adc.c +++ b/drivers/iio/adc/stm32-adc.c @@ -1365,7 +1365,7 @@ static int stm32_adc_read_raw(struct iio_dev *indio_dev, else ret = -EINVAL; - if (mask == IIO_CHAN_INFO_PROCESSED && adc->vrefint.vrefint_cal) + if (mask == IIO_CHAN_INFO_PROCESSED) *val = STM32_ADC_VREFINT_VOLTAGE * adc->vrefint.vrefint_cal / *val; iio_device_release_direct_mode(indio_dev); @@ -1407,7 +1407,6 @@ static irqreturn_t stm32_adc_threaded_isr(int irq, void *data) struct stm32_adc *adc = iio_priv(indio_dev); const struct stm32_adc_regspec *regs = adc->cfg->regs; u32 status = stm32_adc_readl(adc, regs->isr_eoc.reg); - u32 mask = stm32_adc_readl(adc, regs->ier_eoc.reg); /* Check ovr status right now, as ovr mask should be already disabled */ if (status & regs->isr_ovr.mask) { @@ -1422,11 +1421,6 @@ static irqreturn_t stm32_adc_threaded_isr(int irq, void *data) return IRQ_HANDLED; } - if (!(status & mask)) - dev_err_ratelimited(&indio_dev->dev, - "Unexpected IRQ: IER=0x%08x, ISR=0x%08x\n", - mask, status); - return IRQ_NONE; } @@ -1436,10 +1430,6 @@ static irqreturn_t stm32_adc_isr(int irq, void *data) struct stm32_adc *adc = iio_priv(indio_dev); const struct stm32_adc_regspec *regs = adc->cfg->regs; u32 status = stm32_adc_readl(adc, regs->isr_eoc.reg); - u32 mask = stm32_adc_readl(adc, regs->ier_eoc.reg); - - if (!(status & mask)) - return IRQ_WAKE_THREAD; if (status & regs->isr_ovr.mask) { /* @@ -1979,10 +1969,10 @@ static int 
stm32_adc_populate_int_ch(struct iio_dev *indio_dev, const char *ch_n for (i = 0; i < STM32_ADC_INT_CH_NB; i++) { if (!strncmp(stm32_adc_ic[i].name, ch_name, STM32_ADC_CH_SZ)) { - adc->int_ch[i] = chan; - - if (stm32_adc_ic[i].idx != STM32_ADC_INT_CH_VREFINT) - continue; + if (stm32_adc_ic[i].idx != STM32_ADC_INT_CH_VREFINT) { + adc->int_ch[i] = chan; + break; + } /* Get calibration data for vrefint channel */ ret = nvmem_cell_read_u16(&indio_dev->dev, "vrefint", &vrefint); @@ -1990,10 +1980,15 @@ static int stm32_adc_populate_int_ch(struct iio_dev *indio_dev, const char *ch_n return dev_err_probe(indio_dev->dev.parent, ret, "nvmem access error\n"); } - if (ret == -ENOENT) - dev_dbg(&indio_dev->dev, "vrefint calibration not found\n"); - else - adc->vrefint.vrefint_cal = vrefint; + if (ret == -ENOENT) { + dev_dbg(&indio_dev->dev, "vrefint calibration not found. Skip vrefint channel\n"); + return ret; + } else if (!vrefint) { + dev_dbg(&indio_dev->dev, "Null vrefint calibration value. Skip vrefint channel\n"); + return -ENOENT; + } + adc->int_ch[i] = chan; + adc->vrefint.vrefint_cal = vrefint; } } @@ -2030,7 +2025,9 @@ static int stm32_adc_generic_chan_init(struct iio_dev *indio_dev, } strncpy(adc->chan_name[val], name, STM32_ADC_CH_SZ); ret = stm32_adc_populate_int_ch(indio_dev, name, val); - if (ret) + if (ret == -ENOENT) + continue; + else if (ret) goto err; } else if (ret != -EINVAL) { dev_err(&indio_dev->dev, "Invalid label %d\n", ret); diff --git a/drivers/iio/adc/ti-ads131e08.c b/drivers/iio/adc/ti-ads131e08.c index 0c2025a22575..80a09817c119 100644 --- a/drivers/iio/adc/ti-ads131e08.c +++ b/drivers/iio/adc/ti-ads131e08.c @@ -739,7 +739,7 @@ static int ads131e08_alloc_channels(struct iio_dev *indio_dev) device_for_each_child_node(dev, node) { ret = fwnode_property_read_u32(node, "reg", &channel); if (ret) - return ret; + goto err_child_out; ret = fwnode_property_read_u32(node, "ti,gain", &tmp); if (ret) { @@ -747,7 +747,7 @@ static int ads131e08_alloc_channels(struct iio_dev *indio_dev) } else { ret = ads131e08_pga_gain_to_field_value(st, tmp); if (ret < 0) - return ret; + goto err_child_out; channel_config[i].pga_gain = tmp; } @@ -758,7 +758,7 @@ static int ads131e08_alloc_channels(struct iio_dev *indio_dev) } else { ret = ads131e08_validate_channel_mux(st, tmp); if (ret) - return ret; + goto err_child_out; channel_config[i].mux = tmp; } @@ -784,6 +784,10 @@ static int ads131e08_alloc_channels(struct iio_dev *indio_dev) st->channel_config = channel_config; return 0; + +err_child_out: + fwnode_handle_put(node); + return ret; } static void ads131e08_regulator_disable(void *data) diff --git a/drivers/iio/adc/xilinx-ams.c b/drivers/iio/adc/xilinx-ams.c index a55396c1f8b2..a7687706012d 100644 --- a/drivers/iio/adc/xilinx-ams.c +++ b/drivers/iio/adc/xilinx-ams.c @@ -1409,7 +1409,7 @@ static int ams_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) - return ret; + return irq; ret = devm_request_irq(&pdev->dev, irq, &ams_irq, 0, "ams-irq", indio_dev); diff --git a/drivers/iio/afe/iio-rescale.c b/drivers/iio/afe/iio-rescale.c index c6cf709f0f05..6949d2151025 100644 --- a/drivers/iio/afe/iio-rescale.c +++ b/drivers/iio/afe/iio-rescale.c @@ -277,7 +277,7 @@ static int rescale_configure_channel(struct device *dev, chan->ext_info = rescale->ext_info; chan->type = rescale->cfg->type; - if (iio_channel_has_info(schan, IIO_CHAN_INFO_RAW) || + if (iio_channel_has_info(schan, IIO_CHAN_INFO_RAW) && iio_channel_has_info(schan, IIO_CHAN_INFO_SCALE)) { dev_info(dev, "using 
raw+scale source channel\n"); } else if (iio_channel_has_info(schan, IIO_CHAN_INFO_PROCESSED)) { diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c index 847194fa1e46..80ef1aa9aae3 100644 --- a/drivers/iio/chemical/ccs811.c +++ b/drivers/iio/chemical/ccs811.c @@ -499,11 +499,11 @@ static int ccs811_probe(struct i2c_client *client, data->drdy_trig->ops = &ccs811_trigger_ops; iio_trigger_set_drvdata(data->drdy_trig, indio_dev); - indio_dev->trig = data->drdy_trig; - iio_trigger_get(indio_dev->trig); ret = iio_trigger_register(data->drdy_trig); if (ret) goto err_poweroff; + + indio_dev->trig = iio_trigger_get(data->drdy_trig); } ret = iio_triggered_buffer_setup(indio_dev, NULL, diff --git a/drivers/iio/frequency/admv1014.c b/drivers/iio/frequency/admv1014.c index a7994f8e6b9b..1aac5665b5de 100644 --- a/drivers/iio/frequency/admv1014.c +++ b/drivers/iio/frequency/admv1014.c @@ -700,8 +700,10 @@ static int admv1014_init(struct admv1014_state *st) ADMV1014_DET_EN_MSK; enable_reg = FIELD_PREP(ADMV1014_P1DB_COMPENSATION_MSK, st->p1db_comp ? 3 : 0) | - FIELD_PREP(ADMV1014_IF_AMP_PD_MSK, !(st->input_mode)) | - FIELD_PREP(ADMV1014_BB_AMP_PD_MSK, st->input_mode) | + FIELD_PREP(ADMV1014_IF_AMP_PD_MSK, + (st->input_mode == ADMV1014_IF_MODE) ? 0 : 1) | + FIELD_PREP(ADMV1014_BB_AMP_PD_MSK, + (st->input_mode == ADMV1014_IF_MODE) ? 1 : 0) | FIELD_PREP(ADMV1014_DET_EN_MSK, st->det_en); return __admv1014_spi_update_bits(st, ADMV1014_REG_ENABLE, enable_reg_msk, enable_reg); diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c index 4f19dc7ffe57..5908a96ca8af 100644 --- a/drivers/iio/gyro/mpu3050-core.c +++ b/drivers/iio/gyro/mpu3050-core.c @@ -875,6 +875,7 @@ static int mpu3050_power_up(struct mpu3050 *mpu3050) ret = regmap_update_bits(mpu3050->map, MPU3050_PWR_MGM, MPU3050_PWR_MGM_SLEEP, 0); if (ret) { + regulator_bulk_disable(ARRAY_SIZE(mpu3050->regs), mpu3050->regs); dev_err(mpu3050->dev, "error setting power mode\n"); return ret; } diff --git a/drivers/iio/humidity/hts221_buffer.c b/drivers/iio/humidity/hts221_buffer.c index f29692b9d2db..66b32413cf5e 100644 --- a/drivers/iio/humidity/hts221_buffer.c +++ b/drivers/iio/humidity/hts221_buffer.c @@ -135,9 +135,12 @@ int hts221_allocate_trigger(struct iio_dev *iio_dev) iio_trigger_set_drvdata(hw->trig, iio_dev); hw->trig->ops = &hts221_trigger_ops; + + err = devm_iio_trigger_register(hw->dev, hw->trig); + iio_dev->trig = iio_trigger_get(hw->trig); - return devm_iio_trigger_register(hw->dev, hw->trig); + return err; } static int hts221_buffer_preenable(struct iio_dev *iio_dev) diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600.h b/drivers/iio/imu/inv_icm42600/inv_icm42600.h index c0f5059b13b3..995a9dc06521 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600.h +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600.h @@ -17,6 +17,7 @@ #include "inv_icm42600_buffer.h" enum inv_icm42600_chip { + INV_CHIP_INVALID, INV_CHIP_ICM42600, INV_CHIP_ICM42602, INV_CHIP_ICM42605, diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c index 86858da9cc38..ca85fccc9839 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c @@ -565,7 +565,7 @@ int inv_icm42600_core_probe(struct regmap *regmap, int chip, int irq, bool open_drain; int ret; - if (chip < 0 || chip >= INV_CHIP_NB) { + if (chip <= INV_CHIP_INVALID || chip >= INV_CHIP_NB) { dev_err(dev, "invalid chip = %d\n", chip); return -ENODEV; } diff --git 
a/drivers/iio/magnetometer/yamaha-yas530.c b/drivers/iio/magnetometer/yamaha-yas530.c index 9ff7b0e56cf6..b2bc637150bf 100644 --- a/drivers/iio/magnetometer/yamaha-yas530.c +++ b/drivers/iio/magnetometer/yamaha-yas530.c @@ -639,7 +639,7 @@ static int yas532_get_calibration_data(struct yas5xx *yas5xx) dev_dbg(yas5xx->dev, "calibration data: %*ph\n", 14, data); /* Sanity check, is this all zeroes? */ - if (memchr_inv(data, 0x00, 13)) { + if (memchr_inv(data, 0x00, 13) == NULL) { if (!(data[13] & BIT(7))) dev_warn(yas5xx->dev, "calibration is blank!\n"); } diff --git a/drivers/iio/proximity/sx9324.c b/drivers/iio/proximity/sx9324.c index 70c37f664f6d..63fbcaa4cac8 100644 --- a/drivers/iio/proximity/sx9324.c +++ b/drivers/iio/proximity/sx9324.c @@ -885,6 +885,9 @@ sx9324_get_default_reg(struct device *dev, int idx, break; ret = device_property_read_u32_array(dev, prop, pin_defs, ARRAY_SIZE(pin_defs)); + if (ret) + break; + for (pin = 0; pin < SX9324_NUM_PINS; pin++) raw |= (pin_defs[pin] << (2 * pin)) & SX9324_REG_AFE_PH0_PIN_MASK(pin); diff --git a/drivers/iio/test/Kconfig b/drivers/iio/test/Kconfig index 56ca0ad7e77a..4c66c3f18c34 100644 --- a/drivers/iio/test/Kconfig +++ b/drivers/iio/test/Kconfig @@ -6,7 +6,7 @@ # Keep in alphabetical order config IIO_RESCALE_KUNIT_TEST bool "Test IIO rescale conversion functions" - depends on KUNIT=y && !IIO_RESCALE + depends on KUNIT=y && IIO_RESCALE=y default KUNIT_ALL_TESTS help If you want to run tests on the iio-rescale code say Y here. diff --git a/drivers/iio/test/Makefile b/drivers/iio/test/Makefile index f15ae0a6394f..880360f8d02c 100644 --- a/drivers/iio/test/Makefile +++ b/drivers/iio/test/Makefile @@ -4,6 +4,6 @@ # # Keep in alphabetical order -obj-$(CONFIG_IIO_RESCALE_KUNIT_TEST) += iio-test-rescale.o ../afe/iio-rescale.o +obj-$(CONFIG_IIO_RESCALE_KUNIT_TEST) += iio-test-rescale.o obj-$(CONFIG_IIO_TEST_FORMAT) += iio-test-format.o CFLAGS_iio-test-format.o += $(DISABLE_STRUCTLEAK_PLUGIN) diff --git a/drivers/iio/trigger/iio-trig-sysfs.c b/drivers/iio/trigger/iio-trig-sysfs.c index f1a8704e6cc1..d6c5e9644738 100644 --- a/drivers/iio/trigger/iio-trig-sysfs.c +++ b/drivers/iio/trigger/iio-trig-sysfs.c @@ -190,6 +190,7 @@ static int iio_sysfs_trigger_remove(int id) } iio_trigger_unregister(t->trig); + irq_work_sync(&t->work); iio_trigger_free(t->trig); list_del(&t->l); diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig index 505a032e2786..9dcf3f51f2dd 100644 --- a/drivers/input/joystick/Kconfig +++ b/drivers/input/joystick/Kconfig @@ -402,6 +402,7 @@ config JOYSTICK_N64 config JOYSTICK_SENSEHAT tristate "Raspberry Pi Sense HAT joystick" depends on INPUT && I2C + depends on HAS_IOMEM select MFD_SIMPLE_MFD_I2C help Say Y here if you want to enable the driver for the diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c index cbb1599a520e..480476121c01 100644 --- a/drivers/input/misc/soc_button_array.c +++ b/drivers/input/misc/soc_button_array.c @@ -85,13 +85,13 @@ static const struct dmi_system_id dmi_use_low_level_irq[] = { }, { /* - * Lenovo Yoga Tab2 1051L, something messes with the home-button + * Lenovo Yoga Tab2 1051F/1051L, something messes with the home-button * IRQ settings, leading to a non working home-button. 
*/ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "60073"), - DMI_MATCH(DMI_PRODUCT_VERSION, "1051L"), + DMI_MATCH(DMI_PRODUCT_VERSION, "1051"), }, }, {} /* Terminating entry */ diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c index 59a14505b9cd..ca150618d32f 100644 --- a/drivers/input/mouse/bcm5974.c +++ b/drivers/input/mouse/bcm5974.c @@ -942,17 +942,22 @@ static int bcm5974_probe(struct usb_interface *iface, if (!dev->tp_data) goto err_free_bt_buffer; - if (dev->bt_urb) + if (dev->bt_urb) { usb_fill_int_urb(dev->bt_urb, udev, usb_rcvintpipe(udev, cfg->bt_ep), dev->bt_data, dev->cfg.bt_datalen, bcm5974_irq_button, dev, 1); + dev->bt_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + } + usb_fill_int_urb(dev->tp_urb, udev, usb_rcvintpipe(udev, cfg->tp_ep), dev->tp_data, dev->cfg.tp_datalen, bcm5974_irq_trackpad, dev, 1); + dev->tp_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + /* create bcm5974 device */ usb_make_path(udev, dev->phys, sizeof(dev->phys)); strlcat(dev->phys, "/input0", sizeof(dev->phys)); diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 8fdb84b3642b..1d42084d0276 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -987,7 +987,7 @@ static const struct of_device_id ipmmu_of_ids[] = { .compatible = "renesas,ipmmu-r8a779a0", .data = &ipmmu_features_rcar_gen4, }, { - .compatible = "renesas,rcar-gen4-ipmmu", + .compatible = "renesas,rcar-gen4-ipmmu-vmsa", .data = &ipmmu_features_rcar_gen4, }, { /* Terminator */ diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 4ab1038b5482..1f23a6be7d88 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -298,7 +298,7 @@ config XTENSA_MX config XILINX_INTC bool "Xilinx Interrupt Controller IP" - depends on MICROBLAZE || ARCH_ZYNQ || ARCH_ZYNQMP + depends on OF select IRQ_DOMAIN help Support for the Xilinx Interrupt Controller IP core. 
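Worth noting for the soc_button_array hunk above: DMI_MATCH() is a substring comparison (dmi_matches() uses strstr(); only DMI_EXACT_MATCH() requires equality), so shortening the product-version token from "1051L" to "1051" lets a single table entry cover both the 1051F and 1051L variants. A kernel-style sketch of the idiom — the table and helper names below are illustrative, not taken from the patch:

#include <linux/dmi.h>

/* Matches any Yoga Tab2 1051 variant: "1051" is a substring of
 * "1051F", "1051L", and so on. */
static const struct dmi_system_id yoga_tab2_1051_ids[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_NAME, "60073"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "1051"),
		},
	},
	{} /* Terminating entry */
};

static bool is_yoga_tab2_1051(void)
{
	/* dmi_check_system() returns the number of matching entries. */
	return dmi_check_system(yoga_tab2_1051_ids) > 0;
}

The flip side of substring matching is over-matching: a token that is too short can catch unrelated models, which is why such entries usually keep a second discriminator like the product name above.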
diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c index 12dd48727a15..5ac83185ff47 100644 --- a/drivers/irqchip/irq-apple-aic.c +++ b/drivers/irqchip/irq-apple-aic.c @@ -1035,6 +1035,7 @@ static void build_fiq_affinity(struct aic_irq_chip *ic, struct device_node *aff) continue; cpu = of_cpu_node_to_id(cpu_node); + of_node_put(cpu_node); if (WARN_ON(cpu < 0)) continue; @@ -1143,6 +1144,7 @@ static int __init aic_of_ic_init(struct device_node *node, struct device_node *p for_each_child_of_node(affs, chld) build_fiq_affinity(irqc, chld); } + of_node_put(affs); set_handle_irq(aic_handle_irq); set_handle_fiq(aic_handle_fiq); diff --git a/drivers/irqchip/irq-gic-realview.c b/drivers/irqchip/irq-gic-realview.c index b4c1924f0255..38fab02ffe9d 100644 --- a/drivers/irqchip/irq-gic-realview.c +++ b/drivers/irqchip/irq-gic-realview.c @@ -57,6 +57,7 @@ realview_gic_of_init(struct device_node *node, struct device_node *parent) /* The PB11MPCore GIC needs to be configured in the syscon */ map = syscon_node_to_regmap(np); + of_node_put(np); if (!IS_ERR(map)) { /* new irq mode with no DCC */ regmap_write(map, REALVIEW_SYS_LOCK_OFFSET, diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 2be8dea6b6b0..5c1cf907ee68 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -1932,7 +1932,7 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node) gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL); if (!gic_data.ppi_descs) - return; + goto out_put_node; nr_parts = of_get_child_count(parts_node); @@ -1973,12 +1973,15 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node) continue; cpu = of_cpu_node_to_id(cpu_node); - if (WARN_ON(cpu < 0)) + if (WARN_ON(cpu < 0)) { + of_node_put(cpu_node); continue; + } pr_cont("%pOF[%d] ", cpu_node, cpu); cpumask_set_cpu(cpu, &part->mask); + of_node_put(cpu_node); } pr_cont("}\n"); diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c index aed88857d90f..8d05d8bcf56f 100644 --- a/drivers/irqchip/irq-loongson-liointc.c +++ b/drivers/irqchip/irq-loongson-liointc.c @@ -39,6 +39,12 @@ #define LIOINTC_ERRATA_IRQ 10 +#if defined(CONFIG_MIPS) +#define liointc_core_id get_ebase_cpunum() +#else +#define liointc_core_id get_csr_cpuid() +#endif + struct liointc_handler_data { struct liointc_priv *priv; u32 parent_int_map; @@ -57,7 +63,7 @@ static void liointc_chained_handle_irq(struct irq_desc *desc) struct liointc_handler_data *handler = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); struct irq_chip_generic *gc = handler->priv->gc; - int core = cpu_logical_map(smp_processor_id()) % LIOINTC_NUM_CORES; + int core = liointc_core_id % LIOINTC_NUM_CORES; u32 pending; chained_irq_enter(chip, desc); diff --git a/drivers/irqchip/irq-realtek-rtl.c b/drivers/irqchip/irq-realtek-rtl.c index 50a56820c99b..56bf502d9c67 100644 --- a/drivers/irqchip/irq-realtek-rtl.c +++ b/drivers/irqchip/irq-realtek-rtl.c @@ -134,9 +134,9 @@ static int __init map_interrupts(struct device_node *node, struct irq_domain *do if (!cpu_ictl) return -EINVAL; ret = of_property_read_u32(cpu_ictl, "#interrupt-cells", &tmp); + of_node_put(cpu_ictl); if (ret || tmp != 1) return -EINVAL; - of_node_put(cpu_ictl); cpu_int = be32_to_cpup(imap + 2); if (cpu_int > 7 || cpu_int < 2) diff --git a/drivers/irqchip/irq-uniphier-aidet.c b/drivers/irqchip/irq-uniphier-aidet.c index 89121b39be26..716b1bb88bf2 
100644 --- a/drivers/irqchip/irq-uniphier-aidet.c +++ b/drivers/irqchip/irq-uniphier-aidet.c @@ -237,6 +237,7 @@ static const struct of_device_id uniphier_aidet_match[] = { { .compatible = "socionext,uniphier-ld11-aidet" }, { .compatible = "socionext,uniphier-ld20-aidet" }, { .compatible = "socionext,uniphier-pxs3-aidet" }, + { .compatible = "socionext,uniphier-nx1-aidet" }, { /* sentinel */ } }; diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index d21648a923ea..c954ff91870e 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -33,6 +33,14 @@ struct dm_kobject_holder { * access their members! */ +/* + * For mempools pre-allocation at the table loading time. + */ +struct dm_md_mempools { + struct bio_set bs; + struct bio_set io_bs; +}; + struct mapped_device { struct mutex suspend_lock; @@ -110,8 +118,7 @@ struct mapped_device { /* * io objects are allocated from here. */ - struct bio_set io_bs; - struct bio_set bs; + struct dm_md_mempools *mempools; /* kobject and completion */ struct dm_kobject_holder kobj_holder; @@ -265,6 +272,7 @@ struct dm_io { atomic_t io_count; struct mapped_device *md; + struct bio *split_bio; /* The three fields represent mapped part of original bio */ struct bio *orig_bio; unsigned int sector_offset; /* offset to end of orig_bio */ diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c index 1f6bf152b3c7..e92c1afc3677 100644 --- a/drivers/md/dm-era-target.c +++ b/drivers/md/dm-era-target.c @@ -1400,7 +1400,7 @@ static void start_worker(struct era *era) static void stop_worker(struct era *era) { atomic_set(&era->suspended, 1); - flush_workqueue(era->wq); + drain_workqueue(era->wq); } /*---------------------------------------------------------------- @@ -1570,6 +1570,12 @@ static void era_postsuspend(struct dm_target *ti) } stop_worker(era); + + r = metadata_commit(era->md); + if (r) { + DMERR("%s: metadata_commit failed", __func__); + /* FIXME: fail mode */ + } } static int era_preresume(struct dm_target *ti) diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c index 06f328928a7f..0c6620e7b7bf 100644 --- a/drivers/md/dm-log.c +++ b/drivers/md/dm-log.c @@ -415,8 +415,7 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti, /* * Work out how many "unsigned long"s we need to hold the bitset. 
*/ - bitset_size = dm_round_up(region_count, - sizeof(*lc->clean_bits) << BYTE_SHIFT); + bitset_size = dm_round_up(region_count, BITS_PER_LONG); bitset_size >>= BYTE_SHIFT; lc->bitset_uint32_count = bitset_size / sizeof(*lc->clean_bits); @@ -616,7 +615,7 @@ static int disk_resume(struct dm_dirty_log *log) log_clear_bit(lc, lc->clean_bits, i); /* clear any old bits -- device has shrunk */ - for (i = lc->region_count; i % (sizeof(*lc->clean_bits) << BYTE_SHIFT); i++) + for (i = lc->region_count; i % BITS_PER_LONG; i++) log_clear_bit(lc, lc->clean_bits, i); /* copy clean across to sync */ diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 5e41fbae3f6b..9526ccbedafb 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -3725,7 +3725,7 @@ static int raid_message(struct dm_target *ti, unsigned int argc, char **argv, if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) { if (mddev->sync_thread) { set_bit(MD_RECOVERY_INTR, &mddev->recovery); - md_reap_sync_thread(mddev, false); + md_reap_sync_thread(mddev); } } else if (decipher_sync_action(mddev, mddev->recovery) != st_idle) return -EBUSY; diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 6087cdcaad46..a83b98a8d2a9 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -319,7 +319,7 @@ static int setup_clone(struct request *clone, struct request *rq, { int r; - r = blk_rq_prep_clone(clone, rq, &tio->md->bs, gfp_mask, + r = blk_rq_prep_clone(clone, rq, &tio->md->mempools->bs, gfp_mask, dm_rq_bio_constructor, tio); if (r) return r; diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 0e833a154b31..bd539afbfe88 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1038,17 +1038,6 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device * return 0; } -void dm_table_free_md_mempools(struct dm_table *t) -{ - dm_free_md_mempools(t->mempools); - t->mempools = NULL; -} - -struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t) -{ - return t->mempools; -} - static int setup_indexes(struct dm_table *t) { int i; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index dfb0a551bd88..2b75f1ef7386 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -136,14 +136,6 @@ static int get_swap_bios(void) return latch; } -/* - * For mempools pre-allocation at the table loading time. 
- */ -struct dm_md_mempools { - struct bio_set bs; - struct bio_set io_bs; -}; - struct table_device { struct list_head list; refcount_t count; @@ -563,6 +555,10 @@ static void dm_start_io_acct(struct dm_io *io, struct bio *clone) unsigned long flags; /* Can afford locking given DM_TIO_IS_DUPLICATE_BIO */ spin_lock_irqsave(&io->lock, flags); + if (dm_io_flagged(io, DM_IO_ACCOUNTED)) { + spin_unlock_irqrestore(&io->lock, flags); + return; + } dm_io_set_flag(io, DM_IO_ACCOUNTED); spin_unlock_irqrestore(&io->lock, flags); } @@ -581,7 +577,7 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) struct dm_target_io *tio; struct bio *clone; - clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->io_bs); + clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->mempools->io_bs); /* Set default bdev, but target must bio_set_dev() before issuing IO */ clone->bi_bdev = md->disk->part0; @@ -598,6 +594,7 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) atomic_set(&io->io_count, 2); this_cpu_inc(*md->pending_io); io->orig_bio = bio; + io->split_bio = NULL; io->md = md; spin_lock_init(&io->lock); io->start_time = jiffies; @@ -628,7 +625,8 @@ static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti, } else { struct mapped_device *md = ci->io->md; - clone = bio_alloc_clone(NULL, ci->bio, gfp_mask, &md->bs); + clone = bio_alloc_clone(NULL, ci->bio, gfp_mask, + &md->mempools->bs); if (!clone) return NULL; /* Set default bdev, but target must bio_set_dev() before issuing IO */ @@ -718,18 +716,18 @@ static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU) } static inline struct dm_table *dm_get_live_table_bio(struct mapped_device *md, - int *srcu_idx, struct bio *bio) + int *srcu_idx, unsigned bio_opf) { - if (bio->bi_opf & REQ_NOWAIT) + if (bio_opf & REQ_NOWAIT) return dm_get_live_table_fast(md); else return dm_get_live_table(md, srcu_idx); } static inline void dm_put_live_table_bio(struct mapped_device *md, int srcu_idx, - struct bio *bio) + unsigned bio_opf) { - if (bio->bi_opf & REQ_NOWAIT) + if (bio_opf & REQ_NOWAIT) dm_put_live_table_fast(md); else dm_put_live_table(md, srcu_idx); @@ -890,7 +888,7 @@ static void dm_io_complete(struct dm_io *io) { blk_status_t io_error; struct mapped_device *md = io->md; - struct bio *bio = io->orig_bio; + struct bio *bio = io->split_bio ? 
io->split_bio : io->orig_bio; if (io->status == BLK_STS_DM_REQUEUE) { unsigned long flags; @@ -942,9 +940,11 @@ static void dm_io_complete(struct dm_io *io) if (io_error == BLK_STS_AGAIN) { /* io_uring doesn't handle BLK_STS_AGAIN (yet) */ queue_io(md, bio); + return; } } - return; + if (io_error == BLK_STS_DM_REQUEUE) + return; } if (bio_is_flush_with_data(bio)) { @@ -1023,23 +1023,19 @@ static void clone_endio(struct bio *bio) struct dm_io *io = tio->io; struct mapped_device *md = io->md; - if (likely(bio->bi_bdev != md->disk->part0)) { - struct request_queue *q = bdev_get_queue(bio->bi_bdev); - - if (unlikely(error == BLK_STS_TARGET)) { - if (bio_op(bio) == REQ_OP_DISCARD && - !bdev_max_discard_sectors(bio->bi_bdev)) - disable_discard(md); - else if (bio_op(bio) == REQ_OP_WRITE_ZEROES && - !q->limits.max_write_zeroes_sectors) - disable_write_zeroes(md); - } - - if (static_branch_unlikely(&zoned_enabled) && - unlikely(blk_queue_is_zoned(q))) - dm_zone_endio(io, bio); + if (unlikely(error == BLK_STS_TARGET)) { + if (bio_op(bio) == REQ_OP_DISCARD && + !bdev_max_discard_sectors(bio->bi_bdev)) + disable_discard(md); + else if (bio_op(bio) == REQ_OP_WRITE_ZEROES && + !bdev_write_zeroes_sectors(bio->bi_bdev)) + disable_write_zeroes(md); } + if (static_branch_unlikely(&zoned_enabled) && + unlikely(blk_queue_is_zoned(bdev_get_queue(bio->bi_bdev)))) + dm_zone_endio(io, bio); + if (endio) { int r = endio(ti, bio, &error); switch (r) { @@ -1620,7 +1616,12 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci) ti = dm_table_find_target(ci->map, ci->sector); if (unlikely(!ti)) return BLK_STS_IOERR; - else if (unlikely(ci->is_abnormal_io)) + + if (unlikely((ci->bio->bi_opf & REQ_NOWAIT) != 0) && + unlikely(!dm_target_supports_nowait(ti->type))) + return BLK_STS_NOTSUPP; + + if (unlikely(ci->is_abnormal_io)) return __process_abnormal_io(ci, ti); /* @@ -1693,9 +1694,11 @@ static void dm_split_and_process_bio(struct mapped_device *md, * Remainder must be passed to submit_bio_noacct() so it gets handled * *after* bios already submitted have been completely processed. 
*/ - bio_trim(bio, io->sectors, ci.sector_count); - trace_block_split(bio, bio->bi_iter.bi_sector); - bio_inc_remaining(bio); + WARN_ON_ONCE(!dm_io_flagged(io, DM_IO_WAS_SPLIT)); + io->split_bio = bio_split(bio, io->sectors, GFP_NOIO, + &md->queue->bio_split); + bio_chain(io->split_bio, bio); + trace_block_split(io->split_bio, bio->bi_iter.bi_sector); submit_bio_noacct(bio); out: /* @@ -1722,8 +1725,9 @@ static void dm_submit_bio(struct bio *bio) struct mapped_device *md = bio->bi_bdev->bd_disk->private_data; int srcu_idx; struct dm_table *map; + unsigned bio_opf = bio->bi_opf; - map = dm_get_live_table_bio(md, &srcu_idx, bio); + map = dm_get_live_table_bio(md, &srcu_idx, bio_opf); /* If suspended, or map not yet available, queue this IO for later */ if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) || @@ -1739,7 +1743,7 @@ static void dm_submit_bio(struct bio *bio) dm_split_and_process_bio(md, map, bio); out: - dm_put_live_table_bio(md, srcu_idx, bio); + dm_put_live_table_bio(md, srcu_idx, bio_opf); } static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob, @@ -1876,8 +1880,7 @@ static void cleanup_mapped_device(struct mapped_device *md) { if (md->wq) destroy_workqueue(md->wq); - bioset_exit(&md->bs); - bioset_exit(&md->io_bs); + dm_free_md_mempools(md->mempools); if (md->dax_dev) { dax_remove_host(md->disk); @@ -2049,48 +2052,6 @@ static void free_dev(struct mapped_device *md) kvfree(md); } -static int __bind_mempools(struct mapped_device *md, struct dm_table *t) -{ - struct dm_md_mempools *p = dm_table_get_md_mempools(t); - int ret = 0; - - if (dm_table_bio_based(t)) { - /* - * The md may already have mempools that need changing. - * If so, reload bioset because front_pad may have changed - * because a different table was loaded. - */ - bioset_exit(&md->bs); - bioset_exit(&md->io_bs); - - } else if (bioset_initialized(&md->bs)) { - /* - * There's no need to reload with request-based dm - * because the size of front_pad doesn't change. - * Note for future: If you are to reload bioset, - * prep-ed requests in the queue may refer - * to bio from the old bioset, so you must walk - * through the queue to unprep. - */ - goto out; - } - - BUG_ON(!p || - bioset_initialized(&md->bs) || - bioset_initialized(&md->io_bs)); - - ret = bioset_init_from_src(&md->bs, &p->bs); - if (ret) - goto out; - ret = bioset_init_from_src(&md->io_bs, &p->io_bs); - if (ret) - bioset_exit(&md->bs); -out: - /* mempool bind completed, no longer need any mempools in the table */ - dm_table_free_md_mempools(t); - return ret; -} - /* * Bind a table to the device. */ @@ -2144,12 +2105,28 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, * immutable singletons - used to optimize dm_mq_queue_rq. */ md->immutable_target = dm_table_get_immutable_target(t); - } - ret = __bind_mempools(md, t); - if (ret) { - old_map = ERR_PTR(ret); - goto out; + /* + * There is no need to reload with request-based dm because the + * size of front_pad doesn't change. + * + * Note for future: If you are to reload bioset, prep-ed + * requests in the queue may refer to bio from the old bioset, + * so you must walk through the queue to unprep. + */ + if (!md->mempools) { + md->mempools = t->mempools; + t->mempools = NULL; + } + } else { + /* + * The md may already have mempools that need changing. + * If so, reload bioset because front_pad may have changed + * because a different table was loaded. 
+ */ + dm_free_md_mempools(md->mempools); + md->mempools = t->mempools; + t->mempools = NULL; } ret = dm_table_set_restrictions(t, md->queue, limits); diff --git a/drivers/md/dm.h b/drivers/md/dm.h index 3f89664fea01..a8405ce305a9 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -71,8 +71,6 @@ struct dm_target *dm_table_get_immutable_target(struct dm_table *t); struct dm_target *dm_table_get_wildcard_target(struct dm_table *t); bool dm_table_bio_based(struct dm_table *t); bool dm_table_request_based(struct dm_table *t); -void dm_table_free_md_mempools(struct dm_table *t); -struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t); void dm_lock_md_type(struct mapped_device *md); void dm_unlock_md_type(struct mapped_device *md); diff --git a/drivers/md/md.c b/drivers/md/md.c index 8273ac5eef06..c7ecb0bffda0 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -4831,7 +4831,7 @@ action_store(struct mddev *mddev, const char *page, size_t len) flush_workqueue(md_misc_wq); if (mddev->sync_thread) { set_bit(MD_RECOVERY_INTR, &mddev->recovery); - md_reap_sync_thread(mddev, true); + md_reap_sync_thread(mddev); } mddev_unlock(mddev); } @@ -6197,7 +6197,7 @@ static void __md_stop_writes(struct mddev *mddev) flush_workqueue(md_misc_wq); if (mddev->sync_thread) { set_bit(MD_RECOVERY_INTR, &mddev->recovery); - md_reap_sync_thread(mddev, true); + md_reap_sync_thread(mddev); } del_timer_sync(&mddev->safemode_timer); @@ -9303,7 +9303,7 @@ void md_check_recovery(struct mddev *mddev) * ->spare_active and clear saved_raid_disk */ set_bit(MD_RECOVERY_INTR, &mddev->recovery); - md_reap_sync_thread(mddev, true); + md_reap_sync_thread(mddev); clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); @@ -9338,7 +9338,7 @@ void md_check_recovery(struct mddev *mddev) goto unlock; } if (mddev->sync_thread) { - md_reap_sync_thread(mddev, true); + md_reap_sync_thread(mddev); goto unlock; } /* Set RUNNING before clearing NEEDED to avoid @@ -9411,18 +9411,14 @@ void md_check_recovery(struct mddev *mddev) } EXPORT_SYMBOL(md_check_recovery); -void md_reap_sync_thread(struct mddev *mddev, bool reconfig_mutex_held) +void md_reap_sync_thread(struct mddev *mddev) { struct md_rdev *rdev; sector_t old_dev_sectors = mddev->dev_sectors; bool is_reshaped = false; - if (reconfig_mutex_held) - mddev_unlock(mddev); /* resync has finished, collect result */ md_unregister_thread(&mddev->sync_thread); - if (reconfig_mutex_held) - mddev_lock_nointr(mddev); if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && mddev->degraded != mddev->raid_disks) { diff --git a/drivers/md/md.h b/drivers/md/md.h index 5f62c46ac2d3..cf2cbb17acbd 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -719,7 +719,7 @@ extern struct md_thread *md_register_thread( extern void md_unregister_thread(struct md_thread **threadp); extern void md_wakeup_thread(struct md_thread *thread); extern void md_check_recovery(struct mddev *mddev); -extern void md_reap_sync_thread(struct mddev *mddev, bool reconfig_mutex_held); +extern void md_reap_sync_thread(struct mddev *mddev); extern int mddev_init_writes_pending(struct mddev *mddev); extern bool md_write_start(struct mddev *mddev, struct bio *bi); extern void md_write_inc(struct mddev *mddev, struct bio *bi); diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c index 973e2e06f19c..0a2e4806b1ec 100644 --- a/drivers/md/raid5-ppl.c +++ b/drivers/md/raid5-ppl.c 
@@ -629,9 +629,9 @@ static void ppl_do_flush(struct ppl_io_unit *io) if (bdev) { struct bio *bio; - bio = bio_alloc_bioset(bdev, 0, GFP_NOIO, + bio = bio_alloc_bioset(bdev, 0, REQ_OP_WRITE | REQ_PREFLUSH, - &ppl_conf->flush_bs); + GFP_NOIO, &ppl_conf->flush_bs); bio->bi_private = io; bio->bi_end_io = ppl_flush_endio; diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig index b7800b37af78..ac1a411648d8 100644 --- a/drivers/memory/Kconfig +++ b/drivers/memory/Kconfig @@ -105,6 +105,7 @@ config TI_EMIF config OMAP_GPMC tristate "Texas Instruments OMAP SoC GPMC driver" depends on OF_ADDRESS + depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST select GPIOLIB help This driver is for the General Purpose Memory Controller (GPMC) diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c index 86a3d34f418e..4c5154e0bf00 100644 --- a/drivers/memory/mtk-smi.c +++ b/drivers/memory/mtk-smi.c @@ -404,13 +404,16 @@ static int mtk_smi_device_link_common(struct device *dev, struct device **com_de of_node_put(smi_com_node); if (smi_com_pdev) { /* smi common is the supplier, Make sure it is ready before */ - if (!platform_get_drvdata(smi_com_pdev)) + if (!platform_get_drvdata(smi_com_pdev)) { + put_device(&smi_com_pdev->dev); return -EPROBE_DEFER; + } smi_com_dev = &smi_com_pdev->dev; link = device_link_add(dev, smi_com_dev, DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS); if (!link) { dev_err(dev, "Unable to link smi-common dev\n"); + put_device(&smi_com_pdev->dev); return -ENODEV; } *com_dev = smi_com_dev; diff --git a/drivers/memory/samsung/exynos5422-dmc.c b/drivers/memory/samsung/exynos5422-dmc.c index 4733e7898ffe..c491cd549644 100644 --- a/drivers/memory/samsung/exynos5422-dmc.c +++ b/drivers/memory/samsung/exynos5422-dmc.c @@ -1187,33 +1187,39 @@ static int of_get_dram_timings(struct exynos5_dmc *dmc) dmc->timing_row = devm_kmalloc_array(dmc->dev, TIMING_COUNT, sizeof(u32), GFP_KERNEL); - if (!dmc->timing_row) - return -ENOMEM; + if (!dmc->timing_row) { + ret = -ENOMEM; + goto put_node; + } dmc->timing_data = devm_kmalloc_array(dmc->dev, TIMING_COUNT, sizeof(u32), GFP_KERNEL); - if (!dmc->timing_data) - return -ENOMEM; + if (!dmc->timing_data) { + ret = -ENOMEM; + goto put_node; + } dmc->timing_power = devm_kmalloc_array(dmc->dev, TIMING_COUNT, sizeof(u32), GFP_KERNEL); - if (!dmc->timing_power) - return -ENOMEM; + if (!dmc->timing_power) { + ret = -ENOMEM; + goto put_node; + } dmc->timings = of_lpddr3_get_ddr_timings(np_ddr, dmc->dev, DDR_TYPE_LPDDR3, &dmc->timings_arr_size); if (!dmc->timings) { - of_node_put(np_ddr); dev_warn(dmc->dev, "could not get timings from DT\n"); - return -EINVAL; + ret = -EINVAL; + goto put_node; } dmc->min_tck = of_lpddr3_get_min_tck(np_ddr, dmc->dev); if (!dmc->min_tck) { - of_node_put(np_ddr); dev_warn(dmc->dev, "could not get tck from DT\n"); - return -EINVAL; + ret = -EINVAL; + goto put_node; } /* Sorted array of OPPs with frequency ascending */ @@ -1227,13 +1233,14 @@ static int of_get_dram_timings(struct exynos5_dmc *dmc) clk_period_ps); } - of_node_put(np_ddr); /* Take the highest frequency's timings as 'bypass' */ dmc->bypass_timing_row = dmc->timing_row[idx - 1]; dmc->bypass_timing_data = dmc->timing_data[idx - 1]; dmc->bypass_timing_power = dmc->timing_power[idx - 1]; +put_node: + of_node_put(np_ddr); return ret; } diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c index d6cd5537126c..69f9b0336410 100644 --- a/drivers/misc/atmel-ssc.c +++ b/drivers/misc/atmel-ssc.c @@ -232,9 +232,9 @@ static int ssc_probe(struct 
platform_device *pdev) clk_disable_unprepare(ssc->clk); ssc->irq = platform_get_irq(pdev, 0); - if (!ssc->irq) { + if (ssc->irq < 0) { dev_dbg(&pdev->dev, "could not get irq\n"); - return -ENXIO; + return ssc->irq; } mutex_lock(&user_lock); diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c index 749cc5a46d13..b1e76030cafd 100644 --- a/drivers/misc/cardreader/rts5261.c +++ b/drivers/misc/cardreader/rts5261.c @@ -407,6 +407,8 @@ static void rts5261_init_from_hw(struct rtsx_pcr *pcr) // default setting_reg1 = PCR_SETTING_REG1; setting_reg2 = PCR_SETTING_REG2; + } else { + return; } pci_read_config_dword(pdev, setting_reg2, &lval2); diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c index 8d169a35cf13..c9c56fd194c1 100644 --- a/drivers/misc/eeprom/at25.c +++ b/drivers/misc/eeprom/at25.c @@ -79,6 +79,11 @@ static int at25_ee_read(void *priv, unsigned int offset, { struct at25_data *at25 = priv; char *buf = val; + size_t max_chunk = spi_max_transfer_size(at25->spi); + size_t num_msgs = DIV_ROUND_UP(count, max_chunk); + size_t nr_bytes = 0; + unsigned int msg_offset; + size_t msg_count; u8 *cp; ssize_t status; struct spi_transfer t[2]; @@ -92,54 +97,59 @@ static int at25_ee_read(void *priv, unsigned int offset, if (unlikely(!count)) return -EINVAL; - cp = at25->command; + msg_offset = (unsigned int)offset; + msg_count = min(count, max_chunk); + while (num_msgs) { + cp = at25->command; - instr = AT25_READ; - if (at25->chip.flags & EE_INSTR_BIT3_IS_ADDR) - if (offset >= BIT(at25->addrlen * 8)) - instr |= AT25_INSTR_BIT3; + instr = AT25_READ; + if (at25->chip.flags & EE_INSTR_BIT3_IS_ADDR) + if (msg_offset >= BIT(at25->addrlen * 8)) + instr |= AT25_INSTR_BIT3; - mutex_lock(&at25->lock); + mutex_lock(&at25->lock); - *cp++ = instr; - - /* 8/16/24-bit address is written MSB first */ - switch (at25->addrlen) { - default: /* case 3 */ - *cp++ = offset >> 16; - fallthrough; - case 2: - *cp++ = offset >> 8; - fallthrough; - case 1: - case 0: /* can't happen: for better code generation */ - *cp++ = offset >> 0; - } + *cp++ = instr; - spi_message_init(&m); - memset(t, 0, sizeof(t)); + /* 8/16/24-bit address is written MSB first */ + switch (at25->addrlen) { + default: /* case 3 */ + *cp++ = msg_offset >> 16; + fallthrough; + case 2: + *cp++ = msg_offset >> 8; + fallthrough; + case 1: + case 0: /* can't happen: for better code generation */ + *cp++ = msg_offset >> 0; + } - t[0].tx_buf = at25->command; - t[0].len = at25->addrlen + 1; - spi_message_add_tail(&t[0], &m); + spi_message_init(&m); + memset(t, 0, sizeof(t)); - t[1].rx_buf = buf; - t[1].len = count; - spi_message_add_tail(&t[1], &m); + t[0].tx_buf = at25->command; + t[0].len = at25->addrlen + 1; + spi_message_add_tail(&t[0], &m); - /* - * Read it all at once. - * - * REVISIT that's potentially a problem with large chips, if - * other devices on the bus need to be accessed regularly or - * this chip is clocked very slowly. 
- */ - status = spi_sync(at25->spi, &m); - dev_dbg(&at25->spi->dev, "read %zu bytes at %d --> %zd\n", - count, offset, status); + t[1].rx_buf = buf + nr_bytes; + t[1].len = msg_count; + spi_message_add_tail(&t[1], &m); - mutex_unlock(&at25->lock); - return status; + status = spi_sync(at25->spi, &m); + + mutex_unlock(&at25->lock); + + if (status) + return status; + + --num_msgs; + msg_offset += msg_count; + nr_bytes += msg_count; + } + + dev_dbg(&at25->spi->dev, "read %zu bytes at %d\n", + count, offset); + return 0; } /* Read extra registers as ID or serial number */ @@ -190,6 +200,7 @@ ATTRIBUTE_GROUPS(sernum); static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count) { struct at25_data *at25 = priv; + size_t maxsz = spi_max_transfer_size(at25->spi); const char *buf = val; int status = 0; unsigned buf_size; @@ -253,6 +264,8 @@ static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count) segment = buf_size - (offset % buf_size); if (segment > count) segment = count; + if (segment > maxsz) + segment = maxsz; memcpy(cp, buf, segment); status = spi_write(at25->spi, bounce, segment + at25->addrlen + 1); diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index cebcca6d6d3e..cf2b8261da14 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c @@ -1351,7 +1351,8 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) if (dev->dev_state != MEI_DEV_INIT_CLIENTS || dev->hbm_state != MEI_HBM_CAP_SETUP) { - if (dev->dev_state == MEI_DEV_POWER_DOWN) { + if (dev->dev_state == MEI_DEV_POWER_DOWN || + dev->dev_state == MEI_DEV_POWERING_DOWN) { dev_dbg(dev->dev, "hbm: capabilities response: on shutdown, ignoring\n"); return 0; } diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index 64ce3f830262..15e8e2b322b1 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -109,6 +109,8 @@ #define MEI_DEV_ID_ADP_P 0x51E0 /* Alder Lake Point P */ #define MEI_DEV_ID_ADP_N 0x54E0 /* Alder Lake Point N */ +#define MEI_DEV_ID_RPL_S 0x7A68 /* Raptor Lake Point S */ + /* * MEI HW Section */ diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c index 9870bf717979..befa491e3344 100644 --- a/drivers/misc/mei/hw-me.c +++ b/drivers/misc/mei/hw-me.c @@ -1154,6 +1154,8 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable) ret = mei_me_d0i3_exit_sync(dev); if (ret) return ret; + } else { + hw->pg_state = MEI_PG_OFF; } } diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 33e58821e478..5435604327a7 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -116,6 +116,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_N, MEI_ME_PCH15_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)}, + /* required last entry */ {0, } }; diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 1259ca22d625..f4a1281658db 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -1499,8 +1499,7 @@ void mmc_blk_cqe_recovery(struct mmc_queue *mq) err = mmc_cqe_recovery(host); if (err) mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY); - else - mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY); + mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY); pr_debug("%s: CQE recovery done\n", mmc_hostname(host)); } diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c index 195dc897188b..9da4489dc345 100644 --- 
a/drivers/mmc/host/mtk-sd.c +++ b/drivers/mmc/host/mtk-sd.c @@ -1356,7 +1356,7 @@ static void msdc_data_xfer_next(struct msdc_host *host, struct mmc_request *mrq) msdc_request_done(host, mrq); } -static bool msdc_data_xfer_done(struct msdc_host *host, u32 events, +static void msdc_data_xfer_done(struct msdc_host *host, u32 events, struct mmc_request *mrq, struct mmc_data *data) { struct mmc_command *stop; @@ -1376,7 +1376,7 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events, spin_unlock_irqrestore(&host->lock, flags); if (done) - return true; + return; stop = data->stop; if (check_data || (stop && stop->error)) { @@ -1385,12 +1385,15 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events, sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_STOP, 1); + ret = readl_poll_timeout_atomic(host->base + MSDC_DMA_CTRL, val, + !(val & MSDC_DMA_CTRL_STOP), 1, 20000); + if (ret) + dev_dbg(host->dev, "DMA stop timed out\n"); + ret = readl_poll_timeout_atomic(host->base + MSDC_DMA_CFG, val, !(val & MSDC_DMA_CFG_STS), 1, 20000); - if (ret) { - dev_dbg(host->dev, "DMA stop timed out\n"); - return false; - } + if (ret) + dev_dbg(host->dev, "DMA inactive timed out\n"); sdr_clr_bits(host->base + MSDC_INTEN, data_ints_mask); dev_dbg(host->dev, "DMA stop\n"); @@ -1415,9 +1418,7 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events, } msdc_data_xfer_next(host, mrq); - done = true; } - return done; } static void msdc_set_buswidth(struct msdc_host *host, u32 width) @@ -2416,6 +2417,9 @@ static void msdc_cqe_disable(struct mmc_host *mmc, bool recovery) if (recovery) { sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_STOP, 1); + if (WARN_ON(readl_poll_timeout(host->base + MSDC_DMA_CTRL, val, + !(val & MSDC_DMA_CTRL_STOP), 1, 3000))) + return; if (WARN_ON(readl_poll_timeout(host->base + MSDC_DMA_CFG, val, !(val & MSDC_DMA_CFG_STS), 1, 3000))) return; diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c index 1499a64ec3aa..f13c08db3da5 100644 --- a/drivers/mmc/host/sdhci-pci-gli.c +++ b/drivers/mmc/host/sdhci-pci-gli.c @@ -982,6 +982,9 @@ static int gl9763e_runtime_resume(struct sdhci_pci_chip *chip) struct sdhci_host *host = slot->host; u16 clock; + if (host->mmc->ios.power_mode != MMC_POWER_ON) + return 0; + clock = sdhci_readw(host, SDHCI_CLOCK_CONTROL); clock |= SDHCI_CLOCK_PLL_EN; diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c index 92c20cb8074a..0d4d343dbb77 100644 --- a/drivers/mmc/host/sdhci-pci-o2micro.c +++ b/drivers/mmc/host/sdhci-pci-o2micro.c @@ -152,6 +152,8 @@ static int sdhci_o2_get_cd(struct mmc_host *mmc) if (!(sdhci_readw(host, O2_PLL_DLL_WDT_CONTROL1) & O2_PLL_LOCK_STATUS)) sdhci_o2_enable_internal_clock(host); + else + sdhci_o2_wait_card_detect_stable(host); return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT); } diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index 0b68d05846e1..889e40329956 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -890,7 +890,7 @@ static int gpmi_nfc_compute_timings(struct gpmi_nand_data *this, hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) | BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) | BF_GPMI_TIMING0_DATA_SETUP(data_setup_cycles); - hw->timing1 = BF_GPMI_TIMING1_BUSY_TIMEOUT(busy_timeout_cycles * 4096); + hw->timing1 = BF_GPMI_TIMING1_BUSY_TIMEOUT(DIV_ROUND_UP(busy_timeout_cycles, 4096)); /* * 
Derive NFC ideal delay from {3}: diff --git a/drivers/mtd/nand/raw/nand_ids.c b/drivers/mtd/nand/raw/nand_ids.c index 88c2440b47d8..dacc5529b3df 100644 --- a/drivers/mtd/nand/raw/nand_ids.c +++ b/drivers/mtd/nand/raw/nand_ids.c @@ -29,9 +29,6 @@ struct nand_flash_dev nand_flash_ids[] = { {"TC58NVG0S3E 1G 3.3V 8-bit", { .id = {0x98, 0xd1, 0x90, 0x15, 0x76, 0x14, 0x01, 0x00} }, SZ_2K, SZ_128, SZ_128K, 0, 8, 64, NAND_ECC_INFO(1, SZ_512), }, - {"TC58NVG0S3HTA00 1G 3.3V 8-bit", - { .id = {0x98, 0xf1, 0x80, 0x15} }, - SZ_2K, SZ_128, SZ_128K, 0, 4, 128, NAND_ECC_INFO(8, SZ_512), }, {"TC58NVG2S0F 4G 3.3V 8-bit", { .id = {0x98, 0xdc, 0x90, 0x26, 0x76, 0x15, 0x01, 0x08} }, SZ_4K, SZ_512, SZ_256K, 0, 8, 224, NAND_ECC_INFO(4, SZ_512) }, diff --git a/drivers/mtd/spi-nor/controllers/hisi-sfc.c b/drivers/mtd/spi-nor/controllers/hisi-sfc.c index 94a969185ceb..5070d72835ec 100644 --- a/drivers/mtd/spi-nor/controllers/hisi-sfc.c +++ b/drivers/mtd/spi-nor/controllers/hisi-sfc.c @@ -237,7 +237,7 @@ static int hisi_spi_nor_dma_transfer(struct spi_nor *nor, loff_t start_off, reg = readl(host->regbase + FMC_CFG); reg &= ~(FMC_CFG_OP_MODE_MASK | SPI_NOR_ADDR_MODE_MASK); reg |= FMC_CFG_OP_MODE_NORMAL; - reg |= (nor->addr_width == 4) ? SPI_NOR_ADDR_MODE_4BYTES + reg |= (nor->addr_nbytes == 4) ? SPI_NOR_ADDR_MODE_4BYTES : SPI_NOR_ADDR_MODE_3BYTES; writel(reg, host->regbase + FMC_CFG); diff --git a/drivers/mtd/spi-nor/controllers/nxp-spifi.c b/drivers/mtd/spi-nor/controllers/nxp-spifi.c index 9032b9ab2eaf..ab3990e6ac25 100644 --- a/drivers/mtd/spi-nor/controllers/nxp-spifi.c +++ b/drivers/mtd/spi-nor/controllers/nxp-spifi.c @@ -203,7 +203,7 @@ static ssize_t nxp_spifi_write(struct spi_nor *nor, loff_t to, size_t len, SPIFI_CMD_DATALEN(len) | SPIFI_CMD_FIELDFORM_ALL_SERIAL | SPIFI_CMD_OPCODE(nor->program_opcode) | - SPIFI_CMD_FRAMEFORM(spifi->nor.addr_width + 1); + SPIFI_CMD_FRAMEFORM(spifi->nor.addr_nbytes + 1); writel(cmd, spifi->io_base + SPIFI_CMD); for (i = 0; i < len; i++) @@ -230,7 +230,7 @@ static int nxp_spifi_erase(struct spi_nor *nor, loff_t offs) cmd = SPIFI_CMD_FIELDFORM_ALL_SERIAL | SPIFI_CMD_OPCODE(nor->erase_opcode) | - SPIFI_CMD_FRAMEFORM(spifi->nor.addr_width + 1); + SPIFI_CMD_FRAMEFORM(spifi->nor.addr_nbytes + 1); writel(cmd, spifi->io_base + SPIFI_CMD); return nxp_spifi_wait_for_cmd(spifi); @@ -252,12 +252,12 @@ static int nxp_spifi_setup_memory_cmd(struct nxp_spifi *spifi) } /* Memory mode supports address length between 1 and 4 */ - if (spifi->nor.addr_width < 1 || spifi->nor.addr_width > 4) + if (spifi->nor.addr_nbytes < 1 || spifi->nor.addr_nbytes > 4) return -EINVAL; spifi->mcmd |= SPIFI_CMD_OPCODE(spifi->nor.read_opcode) | SPIFI_CMD_INTLEN(spifi->nor.read_dummy / 8) | - SPIFI_CMD_FRAMEFORM(spifi->nor.addr_width + 1); + SPIFI_CMD_FRAMEFORM(spifi->nor.addr_nbytes + 1); return 0; } diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c index 502967c76c5f..f2c64006f8d7 100644 --- a/drivers/mtd/spi-nor/core.c +++ b/drivers/mtd/spi-nor/core.c @@ -38,7 +38,7 @@ */ #define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ) -#define SPI_NOR_MAX_ADDR_WIDTH 4 +#define SPI_NOR_MAX_ADDR_NBYTES 4 #define SPI_NOR_SRST_SLEEP_MIN 200 #define SPI_NOR_SRST_SLEEP_MAX 400 @@ -177,7 +177,7 @@ int spi_nor_controller_ops_write_reg(struct spi_nor *nor, u8 opcode, static int spi_nor_controller_ops_erase(struct spi_nor *nor, loff_t offs) { - if (spi_nor_protocol_is_dtr(nor->write_proto)) + if (spi_nor_protocol_is_dtr(nor->reg_proto)) return -EOPNOTSUPP; return nor->controller_ops->erase(nor, offs); @@ -198,7 
+198,7 @@ static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from, { struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0), - SPI_MEM_OP_ADDR(nor->addr_width, from, 0), + SPI_MEM_OP_ADDR(nor->addr_nbytes, from, 0), SPI_MEM_OP_DUMMY(nor->read_dummy, 0), SPI_MEM_OP_DATA_IN(len, buf, 0)); bool usebouncebuf; @@ -262,7 +262,7 @@ static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to, { struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0), - SPI_MEM_OP_ADDR(nor->addr_width, to, 0), + SPI_MEM_OP_ADDR(nor->addr_nbytes, to, 0), SPI_MEM_OP_NO_DUMMY, SPI_MEM_OP_DATA_OUT(len, buf, 0)); ssize_t nbytes; @@ -972,7 +972,7 @@ static int spi_nor_erase_chip(struct spi_nor *nor) if (nor->spimem) { struct spi_mem_op op = SPI_NOR_CHIP_ERASE_OP; - spi_nor_spimem_setup_op(nor, &op, nor->write_proto); + spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); ret = spi_mem_exec_op(nor->spimem, &op); } else { @@ -1113,9 +1113,9 @@ int spi_nor_erase_sector(struct spi_nor *nor, u32 addr) if (nor->spimem) { struct spi_mem_op op = SPI_NOR_SECTOR_ERASE_OP(nor->erase_opcode, - nor->addr_width, addr); + nor->addr_nbytes, addr); - spi_nor_spimem_setup_op(nor, &op, nor->write_proto); + spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); return spi_mem_exec_op(nor->spimem, &op); } else if (nor->controller_ops->erase) { @@ -1126,13 +1126,13 @@ int spi_nor_erase_sector(struct spi_nor *nor, u32 addr) * Default implementation, if driver doesn't have a specialized HW * control */ - for (i = nor->addr_width - 1; i >= 0; i--) { + for (i = nor->addr_nbytes - 1; i >= 0; i--) { nor->bouncebuf[i] = addr & 0xff; addr >>= 8; } return spi_nor_controller_ops_write_reg(nor, nor->erase_opcode, - nor->bouncebuf, nor->addr_width); + nor->bouncebuf, nor->addr_nbytes); } /** @@ -2249,43 +2249,43 @@ static int spi_nor_default_setup(struct spi_nor *nor, return 0; } -static int spi_nor_set_addr_width(struct spi_nor *nor) +static int spi_nor_set_addr_nbytes(struct spi_nor *nor) { - if (nor->addr_width) { - /* already configured from SFDP */ + if (nor->params->addr_nbytes) { + nor->addr_nbytes = nor->params->addr_nbytes; } else if (nor->read_proto == SNOR_PROTO_8_8_8_DTR) { /* * In 8D-8D-8D mode, one byte takes half a cycle to transfer. So - * in this protocol an odd address width cannot be used because + * in this protocol an odd addr_nbytes cannot be used because * then the address phase would only span a cycle and a half. * Half a cycle would be left over. We would then have to start * the dummy phase in the middle of a cycle and so too the data * phase, and we will end the transaction with half a cycle left * over. * - * Force all 8D-8D-8D flashes to use an address width of 4 to + * Force all 8D-8D-8D flashes to use an addr_nbytes of 4 to * avoid this situation. 
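The comment's arithmetic is easy to check: in 8D-8D-8D (octal DTR) mode the 8-bit bus moves one byte per clock edge, i.e. per half cycle, so only an even number of address bytes ends the address phase on a cycle boundary. A quick standalone illustration (not driver code):

	#include <stdio.h>

	int main(void)
	{
		/* octal DTR: one byte per half cycle (both clock edges used) */
		for (int addr_nbytes = 1; addr_nbytes <= 4; addr_nbytes++)
			printf("%d address byte(s) -> %.1f cycles%s\n",
			       addr_nbytes, addr_nbytes / 2.0,
			       addr_nbytes % 2 ? " (half a cycle left over)" : "");
		return 0;
	}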
*/ - nor->addr_width = 4; - } else if (nor->info->addr_width) { - nor->addr_width = nor->info->addr_width; + nor->addr_nbytes = 4; + } else if (nor->info->addr_nbytes) { + nor->addr_nbytes = nor->info->addr_nbytes; } else { - nor->addr_width = 3; + nor->addr_nbytes = 3; } - if (nor->addr_width == 3 && nor->params->size > 0x1000000) { + if (nor->addr_nbytes == 3 && nor->params->size > 0x1000000) { /* enable 4-byte addressing if the device exceeds 16MiB */ - nor->addr_width = 4; + nor->addr_nbytes = 4; } - if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) { - dev_dbg(nor->dev, "address width is too large: %u\n", - nor->addr_width); + if (nor->addr_nbytes > SPI_NOR_MAX_ADDR_NBYTES) { + dev_dbg(nor->dev, "The number of address bytes is too large: %u\n", + nor->addr_nbytes); return -EINVAL; } /* Set 4byte opcodes when possible. */ - if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES && + if (nor->addr_nbytes == 4 && nor->flags & SNOR_F_4B_OPCODES && !(nor->flags & SNOR_F_HAS_4BAIT)) spi_nor_set_4byte_opcodes(nor); @@ -2304,7 +2304,7 @@ static int spi_nor_setup(struct spi_nor *nor, if (ret) return ret; - return spi_nor_set_addr_width(nor); + return spi_nor_set_addr_nbytes(nor); } /** @@ -2382,12 +2382,7 @@ static void spi_nor_no_sfdp_init_params(struct spi_nor *nor) */ erase_mask = 0; i = 0; - if (no_sfdp_flags & SECT_4K_PMC) { - erase_mask |= BIT(i); - spi_nor_set_erase_type(&map->erase_type[i], 4096u, - SPINOR_OP_BE_4K_PMC); - i++; - } else if (no_sfdp_flags & SECT_4K) { + if (no_sfdp_flags & SECT_4K) { erase_mask |= BIT(i); spi_nor_set_erase_type(&map->erase_type[i], 4096u, SPINOR_OP_BE_4K); @@ -2497,7 +2492,6 @@ static void spi_nor_sfdp_init_params_deprecated(struct spi_nor *nor) if (spi_nor_parse_sfdp(nor)) { memcpy(nor->params, &sfdp_params, sizeof(*nor->params)); - nor->addr_width = 0; nor->flags &= ~SNOR_F_4B_OPCODES; } } @@ -2718,7 +2712,7 @@ static int spi_nor_init(struct spi_nor *nor) nor->flags & SNOR_F_SWP_IS_VOLATILE)) spi_nor_try_unlock_all(nor); - if (nor->addr_width == 4 && + if (nor->addr_nbytes == 4 && nor->read_proto != SNOR_PROTO_8_8_8_DTR && !(nor->flags & SNOR_F_4B_OPCODES)) { /* @@ -2730,7 +2724,7 @@ static int spi_nor_init(struct spi_nor *nor) */ WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET, "enabling reset hack; may not recover from unexpected reboots\n"); - nor->params->set_4byte_addr_mode(nor, true); + return nor->params->set_4byte_addr_mode(nor, true); } return 0; @@ -2845,7 +2839,7 @@ static void spi_nor_put_device(struct mtd_info *mtd) void spi_nor_restore(struct spi_nor *nor) { /* restore the addressing mode */ - if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) && + if (nor->addr_nbytes == 4 && !(nor->flags & SNOR_F_4B_OPCODES) && nor->flags & SNOR_F_BROKEN_RESET) nor->params->set_4byte_addr_mode(nor, false); @@ -2989,7 +2983,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, * - select op codes for (Fast) Read, Page Program and Sector Erase. * - set the number of dummy cycles (mode cycles + wait states). * - set the SPI protocols for register and memory accesses. - * - set the address width. + * - set the number of address bytes. 
*/ ret = spi_nor_setup(nor, hwcaps); if (ret) @@ -3030,7 +3024,7 @@ static int spi_nor_create_read_dirmap(struct spi_nor *nor) { struct spi_mem_dirmap_info info = { .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0), - SPI_MEM_OP_ADDR(nor->addr_width, 0, 0), + SPI_MEM_OP_ADDR(nor->addr_nbytes, 0, 0), SPI_MEM_OP_DUMMY(nor->read_dummy, 0), SPI_MEM_OP_DATA_IN(0, NULL, 0)), .offset = 0, @@ -3061,7 +3055,7 @@ static int spi_nor_create_write_dirmap(struct spi_nor *nor) { struct spi_mem_dirmap_info info = { .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0), - SPI_MEM_OP_ADDR(nor->addr_width, 0, 0), + SPI_MEM_OP_ADDR(nor->addr_nbytes, 0, 0), SPI_MEM_OP_NO_DUMMY, SPI_MEM_OP_DATA_OUT(0, NULL, 0)), .offset = 0, diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h index 3f841ec36e56..85b0cf254e97 100644 --- a/drivers/mtd/spi-nor/core.h +++ b/drivers/mtd/spi-nor/core.h @@ -84,9 +84,9 @@ SPI_MEM_OP_NO_DUMMY, \ SPI_MEM_OP_NO_DATA) -#define SPI_NOR_SECTOR_ERASE_OP(opcode, addr_width, addr) \ +#define SPI_NOR_SECTOR_ERASE_OP(opcode, addr_nbytes, addr) \ SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 0), \ - SPI_MEM_OP_ADDR(addr_width, addr, 0), \ + SPI_MEM_OP_ADDR(addr_nbytes, addr, 0), \ SPI_MEM_OP_NO_DUMMY, \ SPI_MEM_OP_NO_DATA) @@ -340,6 +340,11 @@ struct spi_nor_otp { * @writesize Minimal writable flash unit size. Defaults to 1. Set to * ECC unit size for ECC-ed flashes. * @page_size: the page size of the SPI NOR flash memory. + * @addr_nbytes: number of address bytes to send. + * @addr_mode_nbytes: number of address bytes of current address mode. Useful + * when the flash operates with 4B opcodes but needs the + * internal address mode for opcodes that don't have a 4B + * opcode correspondent. * @rdsr_dummy: dummy cycles needed for Read Status Register command * in octal DTR mode. * @rdsr_addr_nbytes: dummy address bytes needed for Read Status Register @@ -372,6 +377,8 @@ struct spi_nor_flash_parameter { u64 size; u32 writesize; u32 page_size; + u8 addr_nbytes; + u8 addr_mode_nbytes; u8 rdsr_dummy; u8 rdsr_addr_nbytes; @@ -429,7 +436,7 @@ struct spi_nor_fixups { * isn't necessarily called a "sector" by the vendor. * @n_sectors: the number of sectors. * @page_size: the flash's page size. - * @addr_width: the flash's address width. + * @addr_nbytes: number of address bytes to send. * * @parse_sfdp: true when flash supports SFDP tables. The false value has no * meaning. If one wants to skip the SFDP tables, one should @@ -457,7 +464,6 @@ struct spi_nor_fixups { * flags are used together with the SPI_NOR_SKIP_SFDP flag. * SPI_NOR_SKIP_SFDP: skip parsing of SFDP tables. * SECT_4K: SPINOR_OP_BE_4K works uniformly. - * SECT_4K_PMC: SPINOR_OP_BE_4K_PMC works uniformly. * SPI_NOR_DUAL_READ: flash supports Dual Read. * SPI_NOR_QUAD_READ: flash supports Quad Read. * SPI_NOR_OCTAL_READ: flash supports Octal Read. 
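The 16 MiB threshold in spi_nor_set_addr_nbytes() above is simply the reach of a 3-byte address: 2^24 bytes. A user-space sketch of the selection rule (min_addr_nbytes() is a made-up name for illustration):

	#include <stdint.h>
	#include <stdio.h>

	/* Three address bytes cover 2^24 = 16 MiB; larger parts need four. */
	static int min_addr_nbytes(uint64_t flash_size)
	{
		return flash_size > (1ULL << 24) ? 4 : 3;
	}

	int main(void)
	{
		printf("8 MiB  -> %d address bytes\n", min_addr_nbytes(8ULL << 20));  /* 3 */
		printf("16 MiB -> %d address bytes\n", min_addr_nbytes(16ULL << 20)); /* 3 */
		printf("32 MiB -> %d address bytes\n", min_addr_nbytes(32ULL << 20)); /* 4 */
		return 0;
	}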
@@ -488,7 +494,7 @@ struct flash_info { unsigned sector_size; u16 n_sectors; u16 page_size; - u16 addr_width; + u8 addr_nbytes; bool parse_sfdp; u16 flags; @@ -505,7 +511,6 @@ struct flash_info { u8 no_sfdp_flags; #define SPI_NOR_SKIP_SFDP BIT(0) #define SECT_4K BIT(1) -#define SECT_4K_PMC BIT(2) #define SPI_NOR_DUAL_READ BIT(3) #define SPI_NOR_QUAD_READ BIT(4) #define SPI_NOR_OCTAL_READ BIT(5) @@ -550,11 +555,11 @@ struct flash_info { .n_sectors = (_n_sectors), \ .page_size = 256, \ -#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width) \ +#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_nbytes) \ .sector_size = (_sector_size), \ .n_sectors = (_n_sectors), \ .page_size = (_page_size), \ - .addr_width = (_addr_width), \ + .addr_nbytes = (_addr_nbytes), \ .flags = SPI_NOR_NO_ERASE | SPI_NOR_NO_FR, \ #define OTP_INFO(_len, _n_regions, _base, _offset) \ diff --git a/drivers/mtd/spi-nor/debugfs.c b/drivers/mtd/spi-nor/debugfs.c index eaf84f7a0676..df76cb5de3f9 100644 --- a/drivers/mtd/spi-nor/debugfs.c +++ b/drivers/mtd/spi-nor/debugfs.c @@ -86,7 +86,7 @@ static int spi_nor_params_show(struct seq_file *s, void *data) seq_printf(s, "size\t\t%s\n", buf); seq_printf(s, "write size\t%u\n", params->writesize); seq_printf(s, "page size\t%u\n", params->page_size); - seq_printf(s, "address width\t%u\n", nor->addr_width); + seq_printf(s, "address nbytes\t%u\n", nor->addr_nbytes); seq_puts(s, "flags\t\t"); spi_nor_print_flags(s, nor->flags, snor_f_names, sizeof(snor_f_names)); diff --git a/drivers/mtd/spi-nor/esmt.c b/drivers/mtd/spi-nor/esmt.c index 79e2408f4998..fcc3b0e7cda9 100644 --- a/drivers/mtd/spi-nor/esmt.c +++ b/drivers/mtd/spi-nor/esmt.c @@ -13,7 +13,7 @@ static const struct flash_info esmt_nor_parts[] = { { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64) FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) NO_SFDP_FLAGS(SECT_4K) }, - { "f25l32qa", INFO(0x8c4116, 0, 64 * 1024, 64) + { "f25l32qa-2s", INFO(0x8c4116, 0, 64 * 1024, 64) FLAGS(SPI_NOR_HAS_LOCK) NO_SFDP_FLAGS(SECT_4K) }, { "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128) diff --git a/drivers/mtd/spi-nor/issi.c b/drivers/mtd/spi-nor/issi.c index c012bc2486e1..89a66a19d754 100644 --- a/drivers/mtd/spi-nor/issi.c +++ b/drivers/mtd/spi-nor/issi.c @@ -14,13 +14,13 @@ is25lp256_post_bfpt_fixups(struct spi_nor *nor, const struct sfdp_bfpt *bfpt) { /* - * IS25LP256 supports 4B opcodes, but the BFPT advertises a - * BFPT_DWORD1_ADDRESS_BYTES_3_ONLY address width. - * Overwrite the address width advertised by the BFPT. + * IS25LP256 supports 4B opcodes, but the BFPT advertises + * BFPT_DWORD1_ADDRESS_BYTES_3_ONLY. + * Overwrite the number of address bytes advertised by the BFPT. 
*/ if ((bfpt->dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) == BFPT_DWORD1_ADDRESS_BYTES_3_ONLY) - nor->addr_width = 4; + nor->params->addr_nbytes = 4; return 0; } @@ -29,6 +29,21 @@ static const struct spi_nor_fixups is25lp256_fixups = { .post_bfpt = is25lp256_post_bfpt_fixups, }; +static void pm25lv_nor_late_init(struct spi_nor *nor) +{ + struct spi_nor_erase_map *map = &nor->params->erase_map; + int i; + + /* The PM25LV series has a different 4k sector erase opcode */ + for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) + if (map->erase_type[i].size == 4096) + map->erase_type[i].opcode = SPINOR_OP_BE_4K_PMC; +} + +static const struct spi_nor_fixups pm25lv_nor_fixups = { + .late_init = pm25lv_nor_late_init, +}; + static const struct flash_info issi_nor_parts[] = { /* ISSI */ { "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2) @@ -62,9 +77,13 @@ static const struct flash_info issi_nor_parts[] = { /* PMC */ { "pm25lv512", INFO(0, 0, 32 * 1024, 2) - NO_SFDP_FLAGS(SECT_4K_PMC) }, + NO_SFDP_FLAGS(SECT_4K) + .fixups = &pm25lv_nor_fixups + }, { "pm25lv010", INFO(0, 0, 32 * 1024, 4) - NO_SFDP_FLAGS(SECT_4K_PMC) }, + NO_SFDP_FLAGS(SECT_4K) + .fixups = &pm25lv_nor_fixups + }, { "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64) NO_SFDP_FLAGS(SECT_4K) }, }; diff --git a/drivers/mtd/spi-nor/micron-st.c b/drivers/mtd/spi-nor/micron-st.c index a96f74e0f568..3c9681a3f7a3 100644 --- a/drivers/mtd/spi-nor/micron-st.c +++ b/drivers/mtd/spi-nor/micron-st.c @@ -399,8 +399,16 @@ static int micron_st_nor_ready(struct spi_nor *nor) return sr_ready; ret = micron_st_nor_read_fsr(nor, nor->bouncebuf); - if (ret) - return ret; + if (ret) { + /* + * Some controllers, such as Intel SPI, do not support low + * level operations such as reading the flag status + * register. They only expose small amount of high level + * operations to the software. If this is the case we use + * only the status register value. + */ + return ret == -EOPNOTSUPP ? 
sr_ready : ret; + } if (nor->bouncebuf[0] & (FSR_E_ERR | FSR_P_ERR)) { if (nor->bouncebuf[0] & FSR_E_ERR) diff --git a/drivers/mtd/spi-nor/otp.c b/drivers/mtd/spi-nor/otp.c index fa63d8571218..00ab0d2d6d2f 100644 --- a/drivers/mtd/spi-nor/otp.c +++ b/drivers/mtd/spi-nor/otp.c @@ -35,13 +35,13 @@ */ int spi_nor_otp_read_secr(struct spi_nor *nor, loff_t addr, size_t len, u8 *buf) { - u8 addr_width, read_opcode, read_dummy; + u8 addr_nbytes, read_opcode, read_dummy; struct spi_mem_dirmap_desc *rdesc; enum spi_nor_protocol read_proto; int ret; read_opcode = nor->read_opcode; - addr_width = nor->addr_width; + addr_nbytes = nor->addr_nbytes; read_dummy = nor->read_dummy; read_proto = nor->read_proto; rdesc = nor->dirmap.rdesc; @@ -54,7 +54,7 @@ int spi_nor_otp_read_secr(struct spi_nor *nor, loff_t addr, size_t len, u8 *buf) ret = spi_nor_read_data(nor, addr, len, buf); nor->read_opcode = read_opcode; - nor->addr_width = addr_width; + nor->addr_nbytes = addr_nbytes; nor->read_dummy = read_dummy; nor->read_proto = read_proto; nor->dirmap.rdesc = rdesc; @@ -85,11 +85,11 @@ int spi_nor_otp_write_secr(struct spi_nor *nor, loff_t addr, size_t len, { enum spi_nor_protocol write_proto; struct spi_mem_dirmap_desc *wdesc; - u8 addr_width, program_opcode; + u8 addr_nbytes, program_opcode; int ret, written; program_opcode = nor->program_opcode; - addr_width = nor->addr_width; + addr_nbytes = nor->addr_nbytes; write_proto = nor->write_proto; wdesc = nor->dirmap.wdesc; @@ -113,7 +113,7 @@ int spi_nor_otp_write_secr(struct spi_nor *nor, loff_t addr, size_t len, out: nor->program_opcode = program_opcode; - nor->addr_width = addr_width; + nor->addr_nbytes = addr_nbytes; nor->write_proto = write_proto; nor->dirmap.wdesc = wdesc; diff --git a/drivers/mtd/spi-nor/sfdp.c b/drivers/mtd/spi-nor/sfdp.c index a5211543d30d..2257f1b4c2e2 100644 --- a/drivers/mtd/spi-nor/sfdp.c +++ b/drivers/mtd/spi-nor/sfdp.c @@ -134,7 +134,7 @@ struct sfdp_4bait { /** * spi_nor_read_raw() - raw read of serial flash memory. read_opcode, - * addr_width and read_dummy members of the struct spi_nor + * addr_nbytes and read_dummy members of the struct spi_nor * should be previously * set. 
* @nor: pointer to a 'struct spi_nor' @@ -178,21 +178,21 @@ static int spi_nor_read_raw(struct spi_nor *nor, u32 addr, size_t len, u8 *buf) static int spi_nor_read_sfdp(struct spi_nor *nor, u32 addr, size_t len, void *buf) { - u8 addr_width, read_opcode, read_dummy; + u8 addr_nbytes, read_opcode, read_dummy; int ret; read_opcode = nor->read_opcode; - addr_width = nor->addr_width; + addr_nbytes = nor->addr_nbytes; read_dummy = nor->read_dummy; nor->read_opcode = SPINOR_OP_RDSFDP; - nor->addr_width = 3; + nor->addr_nbytes = 3; nor->read_dummy = 8; ret = spi_nor_read_raw(nor, addr, len, buf); nor->read_opcode = read_opcode; - nor->addr_width = addr_width; + nor->addr_nbytes = addr_nbytes; nor->read_dummy = read_dummy; return ret; @@ -462,11 +462,13 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor, switch (bfpt.dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) { case BFPT_DWORD1_ADDRESS_BYTES_3_ONLY: case BFPT_DWORD1_ADDRESS_BYTES_3_OR_4: - nor->addr_width = 3; + params->addr_nbytes = 3; + params->addr_mode_nbytes = 3; break; case BFPT_DWORD1_ADDRESS_BYTES_4_ONLY: - nor->addr_width = 4; + params->addr_nbytes = 4; + params->addr_mode_nbytes = 4; break; default: @@ -637,12 +639,12 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor, } /** - * spi_nor_smpt_addr_width() - return the address width used in the + * spi_nor_smpt_addr_nbytes() - return the number of address bytes used in the * configuration detection command. * @nor: pointer to a 'struct spi_nor' * @settings: configuration detection command descriptor, dword1 */ -static u8 spi_nor_smpt_addr_width(const struct spi_nor *nor, const u32 settings) +static u8 spi_nor_smpt_addr_nbytes(const struct spi_nor *nor, const u32 settings) { switch (settings & SMPT_CMD_ADDRESS_LEN_MASK) { case SMPT_CMD_ADDRESS_LEN_0: @@ -653,7 +655,7 @@ static u8 spi_nor_smpt_addr_width(const struct spi_nor *nor, const u32 settings) return 4; case SMPT_CMD_ADDRESS_LEN_USE_CURRENT: default: - return nor->addr_width; + return nor->params->addr_mode_nbytes; } } @@ -690,7 +692,7 @@ static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt, u32 addr; int err; u8 i; - u8 addr_width, read_opcode, read_dummy; + u8 addr_nbytes, read_opcode, read_dummy; u8 read_data_mask, map_id; /* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */ @@ -698,7 +700,7 @@ static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt, if (!buf) return ERR_PTR(-ENOMEM); - addr_width = nor->addr_width; + addr_nbytes = nor->addr_nbytes; read_dummy = nor->read_dummy; read_opcode = nor->read_opcode; @@ -709,7 +711,7 @@ static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt, break; read_data_mask = SMPT_CMD_READ_DATA(smpt[i]); - nor->addr_width = spi_nor_smpt_addr_width(nor, smpt[i]); + nor->addr_nbytes = spi_nor_smpt_addr_nbytes(nor, smpt[i]); nor->read_dummy = spi_nor_smpt_read_dummy(nor, smpt[i]); nor->read_opcode = SMPT_CMD_OPCODE(smpt[i]); addr = smpt[i + 1]; @@ -756,7 +758,7 @@ static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt, /* fall through */ out: kfree(buf); - nor->addr_width = addr_width; + nor->addr_nbytes = addr_nbytes; nor->read_dummy = read_dummy; nor->read_opcode = read_opcode; return ret; @@ -1044,7 +1046,7 @@ static int spi_nor_parse_4bait(struct spi_nor *nor, /* * We need at least one 4-byte op code per read, program and erase * operation; the .read(), .write() and .erase() hooks share the - * nor->addr_width value. + * nor->addr_nbytes value. 
*/ if (!read_hwcaps || !pp_hwcaps || !erase_mask) goto out; @@ -1098,7 +1100,7 @@ static int spi_nor_parse_4bait(struct spi_nor *nor, * Spansion memory. However this quirk is no longer needed with new * SFDP compliant memories. */ - nor->addr_width = 4; + params->addr_nbytes = 4; nor->flags |= SNOR_F_4B_OPCODES | SNOR_F_HAS_4BAIT; /* fall through */ diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c index 43cd6cd92537..0150049007be 100644 --- a/drivers/mtd/spi-nor/spansion.c +++ b/drivers/mtd/spi-nor/spansion.c @@ -14,6 +14,8 @@ #define SPINOR_OP_CLSR 0x30 /* Clear status register 1 */ #define SPINOR_OP_RD_ANY_REG 0x65 /* Read any register */ #define SPINOR_OP_WR_ANY_REG 0x71 /* Write any register */ +#define SPINOR_REG_CYPRESS_CFR1V 0x00800002 +#define SPINOR_REG_CYPRESS_CFR1V_QUAD_EN BIT(1) /* Quad Enable */ #define SPINOR_REG_CYPRESS_CFR2V 0x00800003 #define SPINOR_REG_CYPRESS_CFR2V_MEMLAT_11_24 0xb #define SPINOR_REG_CYPRESS_CFR3V 0x00800004 @@ -114,6 +116,150 @@ static int cypress_nor_octal_dtr_dis(struct spi_nor *nor) } /** + * cypress_nor_quad_enable_volatile() - enable Quad I/O mode in volatile + * register. + * @nor: pointer to a 'struct spi_nor' + * + * It is recommended to update volatile registers in the field application due + * to a risk of the non-volatile registers corruption by power interrupt. This + * function sets Quad Enable bit in CFR1 volatile. If users set the Quad Enable + * bit in the CFR1 non-volatile in advance (typically by a Flash programmer + * before mounting Flash on PCB), the Quad Enable bit in the CFR1 volatile is + * also set during Flash power-up. + * + * Return: 0 on success, -errno otherwise. + */ +static int cypress_nor_quad_enable_volatile(struct spi_nor *nor) +{ + struct spi_mem_op op; + u8 addr_mode_nbytes = nor->params->addr_mode_nbytes; + u8 cfr1v_written; + int ret; + + op = (struct spi_mem_op) + CYPRESS_NOR_RD_ANY_REG_OP(addr_mode_nbytes, + SPINOR_REG_CYPRESS_CFR1V, + nor->bouncebuf); + + ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto); + if (ret) + return ret; + + if (nor->bouncebuf[0] & SPINOR_REG_CYPRESS_CFR1V_QUAD_EN) + return 0; + + /* Update the Quad Enable bit. */ + nor->bouncebuf[0] |= SPINOR_REG_CYPRESS_CFR1V_QUAD_EN; + op = (struct spi_mem_op) + CYPRESS_NOR_WR_ANY_REG_OP(addr_mode_nbytes, + SPINOR_REG_CYPRESS_CFR1V, 1, + nor->bouncebuf); + ret = spi_nor_write_any_volatile_reg(nor, &op, nor->reg_proto); + if (ret) + return ret; + + cfr1v_written = nor->bouncebuf[0]; + + /* Read back and check it. */ + op = (struct spi_mem_op) + CYPRESS_NOR_RD_ANY_REG_OP(addr_mode_nbytes, + SPINOR_REG_CYPRESS_CFR1V, + nor->bouncebuf); + ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto); + if (ret) + return ret; + + if (nor->bouncebuf[0] != cfr1v_written) { + dev_err(nor->dev, "CFR1: Read back test failed\n"); + return -EIO; + } + + return 0; +} + +/** + * cypress_nor_set_page_size() - Set page size which corresponds to the flash + * configuration. + * @nor: pointer to a 'struct spi_nor' + * + * The BFPT table advertises a 512B or 256B page size depending on part but the + * page size is actually configurable (with the default being 256B). Read from + * CFR3V[4] and set the correct size. + * + * Return: 0 on success, -errno otherwise. 
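cypress_nor_quad_enable_volatile() above is a read-modify-write with read-back verification on a volatile register. Reduced to its skeleton, with fake accessors standing in for the CFR1V register ops (none of these names come from the driver):

	#include <stdint.h>
	#include <stdio.h>

	#define QUAD_EN (1u << 1)

	static uint8_t fake_cfr1v;	/* stands in for the volatile register */

	static uint8_t reg_read(void) { return fake_cfr1v; }
	static void reg_write(uint8_t v) { fake_cfr1v = v; }

	/* Set a config bit only if clear, then read back to verify it stuck. */
	static int set_quad_enable(void)
	{
		uint8_t v = reg_read();

		if (v & QUAD_EN)
			return 0;		/* already enabled */

		reg_write(v | QUAD_EN);
		if (!(reg_read() & QUAD_EN))
			return -1;		/* read-back test failed */

		return 0;
	}

	int main(void)
	{
		printf("ret=%d reg=0x%02x\n", set_quad_enable(), fake_cfr1v);
		return 0;
	}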
+ */ +static int cypress_nor_set_page_size(struct spi_nor *nor) +{ + struct spi_mem_op op = + CYPRESS_NOR_RD_ANY_REG_OP(3, SPINOR_REG_CYPRESS_CFR3V, + nor->bouncebuf); + int ret; + + ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto); + if (ret) + return ret; + + if (nor->bouncebuf[0] & SPINOR_REG_CYPRESS_CFR3V_PGSZ) + nor->params->page_size = 512; + else + nor->params->page_size = 256; + + return 0; +} + +static int +s25hx_t_post_bfpt_fixup(struct spi_nor *nor, + const struct sfdp_parameter_header *bfpt_header, + const struct sfdp_bfpt *bfpt) +{ + /* Replace Quad Enable with volatile version */ + nor->params->quad_enable = cypress_nor_quad_enable_volatile; + + return cypress_nor_set_page_size(nor); +} + +static void s25hx_t_post_sfdp_fixup(struct spi_nor *nor) +{ + struct spi_nor_erase_type *erase_type = + nor->params->erase_map.erase_type; + unsigned int i; + + /* + * In some parts, 3byte erase opcodes are advertised by 4BAIT. + * Convert them to 4byte erase opcodes. + */ + for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) { + switch (erase_type[i].opcode) { + case SPINOR_OP_SE: + erase_type[i].opcode = SPINOR_OP_SE_4B; + break; + case SPINOR_OP_BE_4K: + erase_type[i].opcode = SPINOR_OP_BE_4K_4B; + break; + default: + break; + } + } +} + +static void s25hx_t_late_init(struct spi_nor *nor) +{ + struct spi_nor_flash_parameter *params = nor->params; + + /* Fast Read 4B requires mode cycles */ + params->reads[SNOR_CMD_READ_FAST].num_mode_clocks = 8; + + /* The writesize should be ECC data unit size */ + params->writesize = 16; +} + +static struct spi_nor_fixups s25hx_t_fixups = { + .post_bfpt = s25hx_t_post_bfpt_fixup, + .post_sfdp = s25hx_t_post_sfdp_fixup, + .late_init = s25hx_t_late_init, +}; + +/** * cypress_nor_octal_dtr_enable() - Enable octal DTR on Cypress flashes. * @nor: pointer to a 'struct spi_nor' * @enable: whether to enable or disable Octal DTR @@ -167,28 +313,7 @@ static int s28hs512t_post_bfpt_fixup(struct spi_nor *nor, const struct sfdp_parameter_header *bfpt_header, const struct sfdp_bfpt *bfpt) { - /* - * The BFPT table advertises a 512B page size but the page size is - * actually configurable (with the default being 256B). Read from - * CFR3V[4] and set the correct size. 
- */ - struct spi_mem_op op = - CYPRESS_NOR_RD_ANY_REG_OP(3, SPINOR_REG_CYPRESS_CFR3V, - nor->bouncebuf); - int ret; - - spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); - - ret = spi_mem_exec_op(nor->spimem, &op); - if (ret) - return ret; - - if (nor->bouncebuf[0] & SPINOR_REG_CYPRESS_CFR3V_PGSZ) - nor->params->page_size = 512; - else - nor->params->page_size = 256; - - return 0; + return cypress_nor_set_page_size(nor); } static const struct spi_nor_fixups s28hs512t_fixups = { @@ -310,6 +435,22 @@ static const struct flash_info spansion_nor_parts[] = { { "s25fl256l", INFO(0x016019, 0, 64 * 1024, 512) NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) FIXUP_FLAGS(SPI_NOR_4B_OPCODES) }, + { "s25hl512t", INFO6(0x342a1a, 0x0f0390, 256 * 1024, 256) + PARSE_SFDP + MFR_FLAGS(USE_CLSR) + .fixups = &s25hx_t_fixups }, + { "s25hl01gt", INFO6(0x342a1b, 0x0f0390, 256 * 1024, 512) + PARSE_SFDP + MFR_FLAGS(USE_CLSR) + .fixups = &s25hx_t_fixups }, + { "s25hs512t", INFO6(0x342b1a, 0x0f0390, 256 * 1024, 256) + PARSE_SFDP + MFR_FLAGS(USE_CLSR) + .fixups = &s25hx_t_fixups }, + { "s25hs01gt", INFO6(0x342b1b, 0x0f0390, 256 * 1024, 512) + PARSE_SFDP + MFR_FLAGS(USE_CLSR) + .fixups = &s25hx_t_fixups }, { "cy15x104q", INFO6(0x042cc2, 0x7f7f7f, 512 * 1024, 1) FLAGS(SPI_NOR_NO_ERASE) }, { "s28hs512t", INFO(0x345b1a, 0, 256 * 1024, 256) diff --git a/drivers/mtd/spi-nor/xilinx.c b/drivers/mtd/spi-nor/xilinx.c index 1d2f5db047bd..5723157739fc 100644 --- a/drivers/mtd/spi-nor/xilinx.c +++ b/drivers/mtd/spi-nor/xilinx.c @@ -31,7 +31,7 @@ .sector_size = (8 * (_page_size)), \ .n_sectors = (_n_sectors), \ .page_size = (_page_size), \ - .addr_width = 3, \ + .addr_nbytes = 3, \ .flags = SPI_NOR_NO_FR /* Xilinx S3AN share MFR with Atmel SPI NOR */ diff --git a/drivers/net/amt.c b/drivers/net/amt.c index ebee5f07a208..be2719a3ba70 100644 --- a/drivers/net/amt.c +++ b/drivers/net/amt.c @@ -51,6 +51,7 @@ static char *status_str[] = { }; static char *type_str[] = { + "", /* Type 0 is not defined */ "AMT_MSG_DISCOVERY", "AMT_MSG_ADVERTISEMENT", "AMT_MSG_REQUEST", @@ -2220,8 +2221,7 @@ static bool amt_advertisement_handler(struct amt_dev *amt, struct sk_buff *skb) struct amt_header_advertisement *amta; int hdr_size; - hdr_size = sizeof(*amta) - sizeof(struct amt_header); - + hdr_size = sizeof(*amta) + sizeof(struct udphdr); if (!pskb_may_pull(skb, hdr_size)) return true; @@ -2251,19 +2251,27 @@ static bool amt_multicast_data_handler(struct amt_dev *amt, struct sk_buff *skb) struct ethhdr *eth; struct iphdr *iph; + hdr_size = sizeof(*amtmd) + sizeof(struct udphdr); + if (!pskb_may_pull(skb, hdr_size)) + return true; + amtmd = (struct amt_header_mcast_data *)(udp_hdr(skb) + 1); if (amtmd->reserved || amtmd->version) return true; - hdr_size = sizeof(*amtmd) + sizeof(struct udphdr); if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_IP), false)) return true; + skb_reset_network_header(skb); skb_push(skb, sizeof(*eth)); skb_reset_mac_header(skb); skb_pull(skb, sizeof(*eth)); eth = eth_hdr(skb); + + if (!pskb_may_pull(skb, sizeof(*iph))) + return true; iph = ip_hdr(skb); + if (iph->version == 4) { if (!ipv4_is_multicast(iph->daddr)) return true; @@ -2274,6 +2282,9 @@ static bool amt_multicast_data_handler(struct amt_dev *amt, struct sk_buff *skb) } else if (iph->version == 6) { struct ipv6hdr *ip6h; + if (!pskb_may_pull(skb, sizeof(*ip6h))) + return true; + ip6h = ipv6_hdr(skb); if (!ipv6_addr_is_multicast(&ip6h->daddr)) return true; @@ -2306,8 +2317,7 @@ static bool amt_membership_query_handler(struct amt_dev *amt, struct 
iphdr *iph; int hdr_size, len; - hdr_size = sizeof(*amtmq) - sizeof(struct amt_header); - + hdr_size = sizeof(*amtmq) + sizeof(struct udphdr); if (!pskb_may_pull(skb, hdr_size)) return true; @@ -2315,22 +2325,27 @@ static bool amt_membership_query_handler(struct amt_dev *amt, if (amtmq->reserved || amtmq->version) return true; - hdr_size = sizeof(*amtmq) + sizeof(struct udphdr) - sizeof(*eth); + hdr_size -= sizeof(*eth); if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_TEB), false)) return true; + oeth = eth_hdr(skb); skb_reset_mac_header(skb); skb_pull(skb, sizeof(*eth)); skb_reset_network_header(skb); eth = eth_hdr(skb); + if (!pskb_may_pull(skb, sizeof(*iph))) + return true; + iph = ip_hdr(skb); if (iph->version == 4) { - if (!ipv4_is_multicast(iph->daddr)) - return true; if (!pskb_may_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS + sizeof(*ihv3))) return true; + if (!ipv4_is_multicast(iph->daddr)) + return true; + ihv3 = skb_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS); skb_reset_transport_header(skb); skb_push(skb, sizeof(*iph) + AMT_IPHDR_OPTS); @@ -2345,15 +2360,17 @@ static bool amt_membership_query_handler(struct amt_dev *amt, ip_eth_mc_map(iph->daddr, eth->h_dest); #if IS_ENABLED(CONFIG_IPV6) } else if (iph->version == 6) { - struct ipv6hdr *ip6h = ipv6_hdr(skb); struct mld2_query *mld2q; + struct ipv6hdr *ip6h; - if (!ipv6_addr_is_multicast(&ip6h->daddr)) - return true; if (!pskb_may_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS + sizeof(*mld2q))) return true; + ip6h = ipv6_hdr(skb); + if (!ipv6_addr_is_multicast(&ip6h->daddr)) + return true; + mld2q = skb_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS); skb_reset_transport_header(skb); skb_push(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS); @@ -2389,23 +2406,23 @@ static bool amt_update_handler(struct amt_dev *amt, struct sk_buff *skb) { struct amt_header_membership_update *amtmu; struct amt_tunnel_list *tunnel; - struct udphdr *udph; struct ethhdr *eth; struct iphdr *iph; - int len; + int len, hdr_size; iph = ip_hdr(skb); - udph = udp_hdr(skb); - if (__iptunnel_pull_header(skb, sizeof(*udph), skb->protocol, - false, false)) + hdr_size = sizeof(*amtmu) + sizeof(struct udphdr); + if (!pskb_may_pull(skb, hdr_size)) return true; - amtmu = (struct amt_header_membership_update *)skb->data; + amtmu = (struct amt_header_membership_update *)(udp_hdr(skb) + 1); if (amtmu->reserved || amtmu->version) return true; - skb_pull(skb, sizeof(*amtmu)); + if (iptunnel_pull_header(skb, hdr_size, skb->protocol, false)) + return true; + skb_reset_network_header(skb); list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list) { @@ -2426,6 +2443,9 @@ static bool amt_update_handler(struct amt_dev *amt, struct sk_buff *skb) return true; report: + if (!pskb_may_pull(skb, sizeof(*iph))) + return true; + iph = ip_hdr(skb); if (iph->version == 4) { if (ip_mc_check_igmp(skb)) { @@ -2679,7 +2699,8 @@ static int amt_rcv(struct sock *sk, struct sk_buff *skb) amt = rcu_dereference_sk_user_data(sk); if (!amt) { err = true; - goto drop; + kfree_skb(skb); + goto out; } skb->dev = amt->dev; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index f85372adf042..6ba4c83fe5fc 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -3684,9 +3684,11 @@ re_arm: if (!rtnl_trylock()) return; - if (should_notify_peers) + if (should_notify_peers) { + bond->send_peer_notif--; call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev); + } if (should_notify_rtnl) { bond_slave_state_notify(bond); bond_slave_link_notify(bond); diff --git 
a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c index 8af4def38a98..e531b93f3cb2 100644 --- a/drivers/net/dsa/lantiq_gswip.c +++ b/drivers/net/dsa/lantiq_gswip.c @@ -2070,8 +2070,10 @@ static int gswip_gphy_fw_list(struct gswip_priv *priv, for_each_available_child_of_node(gphy_fw_list_np, gphy_fw_np) { err = gswip_gphy_fw_probe(priv, &priv->gphy_fw[i], gphy_fw_np, i); - if (err) + if (err) { + of_node_put(gphy_fw_np); goto remove_gphy; + } i++; } diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c index 7b37d45bc9fb..d94150d8f3f4 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.c +++ b/drivers/net/dsa/mv88e6xxx/serdes.c @@ -50,22 +50,25 @@ static int mv88e6390_serdes_write(struct mv88e6xxx_chip *chip, } static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, - u16 ctrl, u16 status, u16 lpa, + u16 bmsr, u16 lpa, u16 status, struct phylink_link_state *state) { + state->link = false; + + /* If the BMSR reports that the link had failed, report this to + * phylink. + */ + if (!(bmsr & BMSR_LSTATUS)) + return 0; + state->link = !!(status & MV88E6390_SGMII_PHY_STATUS_LINK); + state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE); if (status & MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID) { /* The Spped and Duplex Resolved register is 1 if AN is enabled * and complete, or if AN is disabled. So with disabled AN we - * still get here on link up. But we want to set an_complete - * only if AN was enabled, thus we look at BMCR_ANENABLE. - * (According to 802.3-2008 section 22.2.4.2.10, we should be - * able to get this same value from BMSR_ANEGCAPABLE, but tests - * show that these Marvell PHYs don't conform to this part of - * the specificaion - BMSR_ANEGCAPABLE is simply always 1.) + * still get here on link up. */ - state->an_complete = !!(ctrl & BMCR_ANENABLE); state->duplex = status & MV88E6390_SGMII_PHY_STATUS_DUPLEX_FULL ? 
DUPLEX_FULL : DUPLEX_HALF; @@ -191,12 +194,12 @@ int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port, int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, int lane, struct phylink_link_state *state) { - u16 lpa, status, ctrl; + u16 bmsr, lpa, status; int err; - err = mv88e6352_serdes_read(chip, MII_BMCR, &ctrl); + err = mv88e6352_serdes_read(chip, MII_BMSR, &bmsr); if (err) { - dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err); + dev_err(chip->dev, "can't read Serdes PHY BMSR: %d\n", err); return err; } @@ -212,7 +215,7 @@ int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, return err; } - return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state); + return mv88e6xxx_serdes_pcs_get_state(chip, bmsr, lpa, status, state); } int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port, @@ -918,13 +921,13 @@ int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port, static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip, int port, int lane, struct phylink_link_state *state) { - u16 lpa, status, ctrl; + u16 bmsr, lpa, status; int err; err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, - MV88E6390_SGMII_BMCR, &ctrl); + MV88E6390_SGMII_BMSR, &bmsr); if (err) { - dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err); + dev_err(chip->dev, "can't read Serdes PHY BMSR: %d\n", err); return err; } @@ -942,7 +945,7 @@ static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip, return err; } - return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state); + return mv88e6xxx_serdes_pcs_get_state(chip, bmsr, lpa, status, state); } static int mv88e6390_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip, diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index 2727d3169c25..1cbb05b0323f 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -2334,6 +2334,7 @@ static int qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu) { struct qca8k_priv *priv = ds->priv; + int ret; /* We only have a general MTU setting. * DSA always sets the CPU port's MTU to the largest MTU of the slave @@ -2344,8 +2345,27 @@ qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu) if (!dsa_is_cpu_port(ds, port)) return 0; + /* To change the MAX_FRAME_SIZE the cpu ports must be off or + * the switch panics. + * Turn off both cpu ports before applying the new value to prevent + * this.
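The qca8k comment above describes a quiesce-update-restore sequence: both CPU ports are taken down, MAX_FRAME_SIZE is written, and only the ports that were running are brought back (the restore half follows in the hunk below). A compact user-space model of that pattern (port numbers and helpers are illustrative):

	#include <stdio.h>

	static unsigned int port_enabled_map = (1u << 0) | (1u << 6);

	static void port_set_status(int port, int on)
	{
		printf("port %d -> %s\n", port, on ? "on" : "off");
	}

	/* Quiesce the CPU ports, apply the register write, then restore
	 * only the ports that were enabled beforehand. */
	static int change_max_frame_size(int new_mtu)
	{
		static const int cpu_ports[] = { 0, 6 };
		int i;

		for (i = 0; i < 2; i++)
			if (port_enabled_map & (1u << cpu_ports[i]))
				port_set_status(cpu_ports[i], 0);

		/* + 18 models ETH_HLEN (14) + ETH_FCS_LEN (4) */
		printf("write MAX_FRAME_SIZE = %d\n", new_mtu + 18);

		for (i = 0; i < 2; i++)
			if (port_enabled_map & (1u << cpu_ports[i]))
				port_set_status(cpu_ports[i], 1);

		return 0;
	}

	int main(void)
	{
		return change_max_frame_size(1500);
	}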
+ */ + if (priv->port_enabled_map & BIT(0)) + qca8k_port_set_status(priv, 0, 0); + + if (priv->port_enabled_map & BIT(6)) + qca8k_port_set_status(priv, 6, 0); + /* Include L2 header / FCS length */ - return qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu + ETH_HLEN + ETH_FCS_LEN); + ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu + ETH_HLEN + ETH_FCS_LEN); + + if (priv->port_enabled_map & BIT(0)) + qca8k_port_set_status(priv, 0, 1); + + if (priv->port_enabled_map & BIT(6)) + qca8k_port_set_status(priv, 6, 1); + + return ret; } static int diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h index 04408e11402a..ec58d0e80a70 100644 --- a/drivers/net/dsa/qca8k.h +++ b/drivers/net/dsa/qca8k.h @@ -15,7 +15,7 @@ #define QCA8K_ETHERNET_MDIO_PRIORITY 7 #define QCA8K_ETHERNET_PHY_PRIORITY 6 -#define QCA8K_ETHERNET_TIMEOUT 100 +#define QCA8K_ETHERNET_TIMEOUT 5 #define QCA8K_NUM_PORTS 7 #define QCA8K_NUM_CPU_PORTS 2 diff --git a/drivers/net/dsa/realtek/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c index 3bb42a9f236d..769f672e9128 100644 --- a/drivers/net/dsa/realtek/rtl8365mb.c +++ b/drivers/net/dsa/realtek/rtl8365mb.c @@ -955,35 +955,21 @@ static int rtl8365mb_ext_config_forcemode(struct realtek_priv *priv, int port, return 0; } -static bool rtl8365mb_phy_mode_supported(struct dsa_switch *ds, int port, - phy_interface_t interface) -{ - int ext_int; - - ext_int = rtl8365mb_extint_port_map[port]; - - if (ext_int < 0 && - (interface == PHY_INTERFACE_MODE_NA || - interface == PHY_INTERFACE_MODE_INTERNAL || - interface == PHY_INTERFACE_MODE_GMII)) - /* Internal PHY */ - return true; - else if ((ext_int >= 1) && - phy_interface_mode_is_rgmii(interface)) - /* Extension MAC */ - return true; - - return false; -} - static void rtl8365mb_phylink_get_caps(struct dsa_switch *ds, int port, struct phylink_config *config) { - if (dsa_is_user_port(ds, port)) + if (dsa_is_user_port(ds, port)) { __set_bit(PHY_INTERFACE_MODE_INTERNAL, config->supported_interfaces); - else if (dsa_is_cpu_port(ds, port)) + + /* GMII is the default interface mode for phylib, so + * we have to support it for ports with integrated PHY. 
+ */ + __set_bit(PHY_INTERFACE_MODE_GMII, + config->supported_interfaces); + } else if (dsa_is_cpu_port(ds, port)) { phy_interface_set_rgmii(config->supported_interfaces); + } config->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD; @@ -996,12 +982,6 @@ static void rtl8365mb_phylink_mac_config(struct dsa_switch *ds, int port, struct realtek_priv *priv = ds->priv; int ret; - if (!rtl8365mb_phy_mode_supported(ds, port, state->interface)) { - dev_err(priv->dev, "phy mode %s is unsupported on port %d\n", - phy_modes(state->interface), port); - return; - } - if (mode != MLO_AN_PHY && mode != MLO_AN_FIXED) { dev_err(priv->dev, "port %d supports only conventional PHY or fixed-link\n", diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index a3816264c35c..8c5828582c21 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -163,7 +163,8 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id) mdio = mdiobus_alloc(); if (mdio == NULL) { netdev_err(dev, "Error allocating MDIO bus\n"); - return -ENOMEM; + ret = -ENOMEM; + goto put_node; } mdio->name = ALTERA_TSE_RESOURCE_NAME; @@ -180,6 +181,7 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id) mdio->id); goto out_free_mdio; } + of_node_put(mdio_node); if (netif_msg_drv(priv)) netdev_info(dev, "MDIO bus %s: created\n", mdio->id); @@ -189,6 +191,8 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id) out_free_mdio: mdiobus_free(mdio); mdio = NULL; +put_node: + of_node_put(mdio_node); return ret; } diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index c6f003975621..d5f2c6989221 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -820,7 +820,7 @@ static int au1000_rx(struct net_device *dev) pr_cont("\n"); } } - prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE); + prxd->buff_stat = lower_32_bits(pDB->dma_addr) | RX_DMA_ENABLE; aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1); wmb(); /* drain writebuffer */ @@ -996,7 +996,7 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev) ps->tx_packets++; ps->tx_bytes += ptxd->len; - ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE; + ptxd->buff_stat = lower_32_bits(pDB->dma_addr) | TX_DMA_ENABLE; wmb(); /* drain writebuffer */ dev_kfree_skb(skb); aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1); @@ -1131,9 +1131,9 @@ static int au1000_probe(struct platform_device *pdev) /* Allocate the data buffers * Snooping works fine with eth on all au1xxx */ - aup->vaddr = (u32)dma_alloc_coherent(&pdev->dev, MAX_BUF_SIZE * - (NUM_TX_BUFFS + NUM_RX_BUFFS), - &aup->dma_addr, 0); + aup->vaddr = dma_alloc_coherent(&pdev->dev, MAX_BUF_SIZE * + (NUM_TX_BUFFS + NUM_RX_BUFFS), + &aup->dma_addr, 0); if (!aup->vaddr) { dev_err(&pdev->dev, "failed to allocate data buffers\n"); err = -ENOMEM; @@ -1234,8 +1234,8 @@ static int au1000_probe(struct platform_device *pdev) for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) { pDB->pnext = pDBfree; pDBfree = pDB; - pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i); - pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr); + pDB->vaddr = aup->vaddr + MAX_BUF_SIZE * i; + pDB->dma_addr = aup->dma_addr + MAX_BUF_SIZE * i; pDB++; } aup->pDBfree = pDBfree; @@ -1246,7 +1246,7 @@ static int au1000_probe(struct platform_device *pdev) if (!pDB) goto err_out; - 
aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr; + aup->rx_dma_ring[i]->buff_stat = lower_32_bits(pDB->dma_addr); aup->rx_db_inuse[i] = pDB; } @@ -1255,7 +1255,7 @@ static int au1000_probe(struct platform_device *pdev) if (!pDB) goto err_out; - aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr; + aup->tx_dma_ring[i]->buff_stat = lower_32_bits(pDB->dma_addr); aup->tx_dma_ring[i]->len = 0; aup->tx_db_inuse[i] = pDB; } @@ -1310,7 +1310,7 @@ err_remap2: iounmap(aup->mac); err_remap1: dma_free_coherent(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS), - (void *)aup->vaddr, aup->dma_addr); + aup->vaddr, aup->dma_addr); err_vaddr: free_netdev(dev); err_alloc: @@ -1343,7 +1343,7 @@ static int au1000_remove(struct platform_device *pdev) au1000_ReleaseDB(aup, aup->tx_db_inuse[i]); dma_free_coherent(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS), - (void *)aup->vaddr, aup->dma_addr); + aup->vaddr, aup->dma_addr); iounmap(aup->macdma); iounmap(aup->mac); diff --git a/drivers/net/ethernet/amd/au1000_eth.h b/drivers/net/ethernet/amd/au1000_eth.h index e3a3ed29db61..2489c2f4fd8a 100644 --- a/drivers/net/ethernet/amd/au1000_eth.h +++ b/drivers/net/ethernet/amd/au1000_eth.h @@ -106,8 +106,8 @@ struct au1000_private { struct mac_reg *mac; /* mac registers */ u32 *enable; /* address of MAC Enable Register */ void __iomem *macdma; /* base of MAC DMA port */ - u32 vaddr; /* virtual address of rx/tx buffers */ - dma_addr_t dma_addr; /* dma address of rx/tx buffers */ + void *vaddr; /* virtual address of rx/tx buffers */ + dma_addr_t dma_addr; /* dma address of rx/tx buffers */ spinlock_t lock; /* Serialise access to device */ diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index a3593290886f..4d46780fad13 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -2784,7 +2784,7 @@ void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx) netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest); netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source); - netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto)); + netdev_dbg(netdev, "Protocol: %#06x\n", ntohs(eth->h_proto)); for (i = 0; i < skb->len; i += 32) { unsigned int len = min(skb->len - i, 32U); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c index 4ebd2410185a..4d790a89fe77 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c @@ -338,7 +338,7 @@ static int xgbe_platform_probe(struct platform_device *pdev) * the PHY resources listed last */ phy_memnum = xgbe_resource_count(pdev, IORESOURCE_MEM) - 3; - phy_irqnum = xgbe_resource_count(pdev, IORESOURCE_IRQ) - 1; + phy_irqnum = platform_irq_count(pdev) - 1; dma_irqnum = 1; dma_irqend = phy_irqnum; } else { @@ -348,7 +348,7 @@ static int xgbe_platform_probe(struct platform_device *pdev) phy_memnum = 0; phy_irqnum = 0; dma_irqnum = 1; - dma_irqend = xgbe_resource_count(pdev, IORESOURCE_IRQ); + dma_irqend = platform_irq_count(pdev); } /* Obtain the mmio areas for the device */ diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c index 086739e4f40a..9b83d5361699 100644 --- a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c +++ b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c @@ -234,6 +234,7 @@ struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac) np = 
of_get_child_by_name(core->dev.of_node, "mdio"); err = of_mdiobus_register(mii_bus, np); + of_node_put(np); if (err) { dev_err(&core->dev, "Registration of mii bus failed\n"); goto err_free_bus; diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c index e6f48786949c..02bd3cf9a260 100644 --- a/drivers/net/ethernet/broadcom/bgmac-bcma.c +++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c @@ -332,7 +332,6 @@ static void bgmac_remove(struct bcma_device *core) bcma_mdio_mii_unregister(bgmac->mii_bus); bgmac_enet_remove(bgmac); bcma_set_drvdata(core, NULL); - kfree(bgmac); } static struct bcma_driver bgmac_bcma_driver = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 8a3a446219f7..94f80e1c4020 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -769,6 +769,7 @@ struct hnae3_tc_info { u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */ u16 tqp_count[HNAE3_MAX_TC]; u16 tqp_offset[HNAE3_MAX_TC]; + u8 max_tc; /* Total number of TCs */ u8 num_tc; /* Total number of enabled TCs */ bool mqprio_active; }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 6d20974519fe..4c7988e308a2 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -1129,7 +1129,7 @@ hns3_is_ringparam_changed(struct net_device *ndev, if (old_ringparam->tx_desc_num == new_ringparam->tx_desc_num && old_ringparam->rx_desc_num == new_ringparam->rx_desc_num && old_ringparam->rx_buf_len == new_ringparam->rx_buf_len) { - netdev_info(ndev, "ringparam not changed\n"); + netdev_info(ndev, "descriptor number and rx buffer length not changed\n"); return false; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 1ebad0e50e6a..fae79764dc44 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -3268,7 +3268,7 @@ static int hclge_tp_port_init(struct hclge_dev *hdev) static int hclge_update_port_info(struct hclge_dev *hdev) { struct hclge_mac *mac = &hdev->hw.mac; - int speed = HCLGE_MAC_SPEED_UNKNOWN; + int speed; int ret; /* get the port info from SFP cmd if not copper port */ @@ -3279,10 +3279,13 @@ static int hclge_update_port_info(struct hclge_dev *hdev) if (!hdev->support_sfp_query) return 0; - if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) + if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { + speed = mac->speed; ret = hclge_get_sfp_info(hdev, mac); - else + } else { + speed = HCLGE_MAC_SPEED_UNKNOWN; ret = hclge_get_sfp_speed(hdev, &speed); + } if (ret == -EOPNOTSUPP) { hdev->support_sfp_query = false; @@ -3294,6 +3297,8 @@ static int hclge_update_port_info(struct hclge_dev *hdev) if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { if (mac->speed_type == QUERY_ACTIVE_SPEED) { hclge_update_port_capability(hdev, mac); + if (mac->speed != speed) + (void)hclge_tm_port_shaper_cfg(hdev); return 0; } return hclge_cfg_mac_speed_dup(hdev, mac->speed, @@ -3376,6 +3381,12 @@ static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf, link_state_old = vport->vf_info.link_state; vport->vf_info.link_state = link_state; + /* return success directly if the VF is not alive; the VF will + * query the link state itself when it starts work.
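The guard added above follows a push/pull convention: the new state is always stored, but only pushed to a VF that is alive, since a dormant VF queries the state itself when it starts; on a failed push the old value is rolled back. A small standalone model (struct vport here and push_to_vf() are invented for the sketch):

	#include <stdbool.h>
	#include <stdio.h>

	struct vport {
		bool alive;
		int link_state;
	};

	static int push_to_vf(struct vport *v)
	{
		printf("push link_state=%d\n", v->link_state);
		return 0;	/* pretend the mailbox write succeeded */
	}

	/* Store the new state; push it only to a live VF, rolling back
	 * on a failed push. */
	static int set_vf_link_state(struct vport *v, int state)
	{
		int old = v->link_state;
		int ret;

		v->link_state = state;
		if (!v->alive)
			return 0;	/* VF pulls the state when it starts */

		ret = push_to_vf(v);
		if (ret)
			v->link_state = old;

		return ret;
	}

	int main(void)
	{
		struct vport v = { .alive = false, .link_state = 0 };

		set_vf_link_state(&v, 1);	/* stored, not pushed */
		v.alive = true;
		set_vf_link_state(&v, 2);	/* stored and pushed */
		return 0;
	}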
+ */ + if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) + return 0; + ret = hclge_push_vf_link_status(vport); if (ret) { vport->vf_info.link_state = link_state_old; @@ -10117,6 +10128,7 @@ static int hclge_modify_port_base_vlan_tag(struct hclge_vport *vport, if (ret) return ret; + vport->port_base_vlan_cfg.tbl_sta = false; /* remove old VLAN tag */ if (old_info->vlan_tag == 0) ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 1f87a8a3fe32..2f33b036a47a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -282,8 +282,8 @@ static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev, return hclge_cmd_send(&hdev->hw, &desc, 1); } -static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev, - u16 qs_id, u8 pri) +static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev, u16 qs_id, u8 pri, + bool link_vld) { struct hclge_qs_to_pri_link_cmd *map; struct hclge_desc desc; @@ -294,7 +294,7 @@ static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev, map->qs_id = cpu_to_le16(qs_id); map->priority = pri; - map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK; + map->link_vld = link_vld ? HCLGE_TM_QS_PRI_LINK_VLD_MSK : 0; return hclge_cmd_send(&hdev->hw, &desc, 1); } @@ -420,7 +420,7 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev, return hclge_cmd_send(&hdev->hw, &desc, 1); } -static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev) +int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev) { struct hclge_port_shapping_cmd *shap_cfg_cmd; struct hclge_shaper_ir_para ir_para; @@ -642,11 +642,13 @@ static void hclge_tm_update_kinfo_rss_size(struct hclge_vport *vport) * one tc for VF for simplicity. VF's vport_id is non zero. */ if (vport->vport_id) { + kinfo->tc_info.max_tc = 1; kinfo->tc_info.num_tc = 1; vport->qs_offset = HNAE3_MAX_TC + vport->vport_id - HCLGE_VF_VPORT_START_NUM; vport_max_rss_size = hdev->vf_rss_size_max; } else { + kinfo->tc_info.max_tc = hdev->tc_max; kinfo->tc_info.num_tc = min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc); vport->qs_offset = 0; @@ -679,7 +681,9 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) kinfo->num_tqps = hclge_vport_get_tqp_num(vport); vport->dwrr = 100; /* 100 percent as init */ vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit; - hdev->rss_cfg.rss_size = kinfo->rss_size; + + if (vport->vport_id == PF_VPORT_ID) + hdev->rss_cfg.rss_size = kinfo->rss_size; /* when enable mqprio, the tc_info has been updated. 
*/ if (kinfo->tc_info.mqprio_active) @@ -714,14 +718,22 @@ static void hclge_tm_vport_info_update(struct hclge_dev *hdev) static void hclge_tm_tc_info_init(struct hclge_dev *hdev) { - u8 i; + u8 i, tc_sch_mode; + u32 bw_limit; + + for (i = 0; i < hdev->tc_max; i++) { + if (i < hdev->tm_info.num_tc) { + tc_sch_mode = HCLGE_SCH_MODE_DWRR; + bw_limit = hdev->tm_info.pg_info[0].bw_limit; + } else { + tc_sch_mode = HCLGE_SCH_MODE_SP; + bw_limit = 0; + } - for (i = 0; i < hdev->tm_info.num_tc; i++) { hdev->tm_info.tc_info[i].tc_id = i; - hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR; + hdev->tm_info.tc_info[i].tc_sch_mode = tc_sch_mode; hdev->tm_info.tc_info[i].pgid = 0; - hdev->tm_info.tc_info[i].bw_limit = - hdev->tm_info.pg_info[0].bw_limit; + hdev->tm_info.tc_info[i].bw_limit = bw_limit; } for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) @@ -926,10 +938,13 @@ static int hclge_tm_pri_q_qs_cfg_tc_base(struct hclge_dev *hdev) for (k = 0; k < hdev->num_alloc_vport; k++) { struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo; - for (i = 0; i < kinfo->tc_info.num_tc; i++) { + for (i = 0; i < kinfo->tc_info.max_tc; i++) { + u8 pri = i < kinfo->tc_info.num_tc ? i : 0; + bool link_vld = i < kinfo->tc_info.num_tc; + ret = hclge_tm_qs_to_pri_map_cfg(hdev, vport[k].qs_offset + i, - i); + pri, link_vld); if (ret) return ret; } @@ -949,7 +964,7 @@ static int hclge_tm_pri_q_qs_cfg_vnet_base(struct hclge_dev *hdev) for (i = 0; i < HNAE3_MAX_TC; i++) { ret = hclge_tm_qs_to_pri_map_cfg(hdev, vport[k].qs_offset + i, - k); + k, true); if (ret) return ret; } @@ -989,33 +1004,39 @@ static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev) { u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate; struct hclge_shaper_ir_para ir_para; - u32 shaper_para; + u32 shaper_para_c, shaper_para_p; int ret; u32 i; - for (i = 0; i < hdev->tm_info.num_tc; i++) { + for (i = 0; i < hdev->tc_max; i++) { u32 rate = hdev->tm_info.tc_info[i].bw_limit; - ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI, - &ir_para, max_tm_rate); - if (ret) - return ret; + if (rate) { + ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI, + &ir_para, max_tm_rate); + if (ret) + return ret; + + shaper_para_c = hclge_tm_get_shapping_para(0, 0, 0, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + shaper_para_p = hclge_tm_get_shapping_para(ir_para.ir_b, + ir_para.ir_u, + ir_para.ir_s, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + } else { + shaper_para_c = 0; + shaper_para_p = 0; + } - shaper_para = hclge_tm_get_shapping_para(0, 0, 0, - HCLGE_SHAPER_BS_U_DEF, - HCLGE_SHAPER_BS_S_DEF); ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i, - shaper_para, rate); + shaper_para_c, rate); if (ret) return ret; - shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, - ir_para.ir_u, - ir_para.ir_s, - HCLGE_SHAPER_BS_U_DEF, - HCLGE_SHAPER_BS_S_DEF); ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i, - shaper_para, rate); + shaper_para_p, rate); if (ret) return ret; } @@ -1125,7 +1146,7 @@ static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev) int ret; u32 i, k; - for (i = 0; i < hdev->tm_info.num_tc; i++) { + for (i = 0; i < hdev->tc_max; i++) { pg_info = &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid]; dwrr = pg_info->tc_dwrr[i]; @@ -1135,9 +1156,15 @@ static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev) return ret; for (k = 0; k < hdev->num_alloc_vport; k++) { + struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo; + + if (i >= kinfo->tc_info.max_tc) + continue; + 
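+ /* Queue sets for TCs at or above num_tc stay mapped but carry a zero DWRR weight. */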
+ dwrr = i < kinfo->tc_info.num_tc ? vport[k].dwrr : 0; ret = hclge_tm_qs_weight_cfg( hdev, vport[k].qs_offset + i, - vport[k].dwrr); + dwrr); if (ret) return ret; } @@ -1303,6 +1330,7 @@ static int hclge_tm_schd_mode_tc_base_cfg(struct hclge_dev *hdev, u8 pri_id) { struct hclge_vport *vport = hdev->vport; int ret; + u8 mode; u16 i; ret = hclge_tm_pri_schd_mode_cfg(hdev, pri_id); @@ -1310,9 +1338,16 @@ static int hclge_tm_schd_mode_tc_base_cfg(struct hclge_dev *hdev, u8 pri_id) return ret; for (i = 0; i < hdev->num_alloc_vport; i++) { + struct hnae3_knic_private_info *kinfo = &vport[i].nic.kinfo; + + if (pri_id >= kinfo->tc_info.max_tc) + continue; + + mode = pri_id < kinfo->tc_info.num_tc ? HCLGE_SCH_MODE_DWRR : + HCLGE_SCH_MODE_SP; ret = hclge_tm_qs_schd_mode_cfg(hdev, vport[i].qs_offset + pri_id, - HCLGE_SCH_MODE_DWRR); + mode); if (ret) return ret; } @@ -1353,7 +1388,7 @@ static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev) u8 i; if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) { - for (i = 0; i < hdev->tm_info.num_tc; i++) { + for (i = 0; i < hdev->tc_max; i++) { ret = hclge_tm_schd_mode_tc_base_cfg(hdev, i); if (ret) return ret; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h index 619cc30a2dfc..d943943912f7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -237,6 +237,7 @@ int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr); void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats); void hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats); int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate); +int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev); int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num); int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num); int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority, diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c index 60ae8bfc5f69..1749d26f4bef 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c @@ -43,9 +43,7 @@ static bool check_image_valid(struct hinic_devlink_priv *priv, const u8 *buf, for (i = 0; i < fw_image->fw_info.fw_section_cnt; i++) { len += fw_image->fw_section_info[i].fw_section_len; - memcpy(&host_image->image_section_info[i], - &fw_image->fw_section_info[i], - sizeof(struct fw_section_info_st)); + host_image->image_section_info[i] = fw_image->fw_section_info[i]; } if (len != fw_image->fw_len || diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 610f00cbaff9..19704f5c8291 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -2586,15 +2586,16 @@ static void i40e_diag_test(struct net_device *netdev, set_bit(__I40E_TESTING, pf->state); + if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || + test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) { + dev_warn(&pf->pdev->dev, + "Cannot start offline testing when PF is in reset state.\n"); + goto skip_ol_tests; + } + if (i40e_active_vfs(pf) || i40e_active_vmdqs(pf)) { dev_warn(&pf->pdev->dev, "Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n"); - data[I40E_ETH_TEST_REG] = 1; - data[I40E_ETH_TEST_EEPROM] = 1; - 
data[I40E_ETH_TEST_INTR] = 1; - data[I40E_ETH_TEST_LINK] = 1; - eth_test->flags |= ETH_TEST_FL_FAILED; - clear_bit(__I40E_TESTING, pf->state); goto skip_ol_tests; } @@ -2641,9 +2642,17 @@ static void i40e_diag_test(struct net_device *netdev, data[I40E_ETH_TEST_INTR] = 0; } -skip_ol_tests: - netif_info(pf, drv, netdev, "testing finished\n"); + return; + +skip_ol_tests: + data[I40E_ETH_TEST_REG] = 1; + data[I40E_ETH_TEST_EEPROM] = 1; + data[I40E_ETH_TEST_INTR] = 1; + data[I40E_ETH_TEST_LINK] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + clear_bit(__I40E_TESTING, pf->state); + netif_info(pf, drv, netdev, "testing failed\n"); } static void i40e_get_wol(struct net_device *netdev, diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 332a608dbaa6..72576bb3e94d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -8542,6 +8542,11 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi, return -EOPNOTSUPP; } + if (!tc) { + dev_err(&pf->pdev->dev, "Unable to add filter because of invalid destination"); + return -EINVAL; + } + if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) return -EBUSY; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 2606e8f0f19b..033ea71763e3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -2282,7 +2282,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) } if (vf->adq_enabled) { - for (i = 0; i < I40E_MAX_VF_VSI; i++) + for (i = 0; i < vf->num_tc; i++) num_qps_all += vf->ch[i].num_qps; if (num_qps_all != qci->num_queue_pairs) { aq_ret = I40E_ERR_PARAM; diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 7dfcf78b57fb..f3ecb3bca33d 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -984,7 +984,7 @@ struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter, list_add_tail(&f->list, &adapter->mac_filter_list); f->add = true; f->is_new_mac = true; - f->is_primary = false; + f->is_primary = ether_addr_equal(macaddr, adapter->hw.mac.addr); adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; } else { f->remove = false; diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 1e71b70f0e52..70335f6e8524 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -2190,6 +2190,42 @@ ice_setup_autoneg(struct ice_port_info *p, struct ethtool_link_ksettings *ks, } /** + * ice_set_phy_type_from_speed - set phy_types based on speeds + * and advertised modes + * @ks: ethtool link ksettings struct + * @phy_type_low: pointer to the lower part of phy_type + * @phy_type_high: pointer to the higher part of phy_type + * @adv_link_speed: targeted link speeds bitmap + */ +static void +ice_set_phy_type_from_speed(const struct ethtool_link_ksettings *ks, + u64 *phy_type_low, u64 *phy_type_high, + u16 adv_link_speed) +{ + /* Handle 1000M speed in a special way because ice_update_phy_type + * enables all link modes, but having mixed copper and optical + * standards is not supported. 
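+ * For example, advertising 1000baseT_Full must select only 1000BASE-T
+ * and 1G SGMII, never the optical 1000BASE-SX/LX types.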
+ */ + adv_link_speed &= ~ICE_AQ_LINK_SPEED_1000MB; + + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 1000baseT_Full)) + *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_T | + ICE_PHY_TYPE_LOW_1G_SGMII; + + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 1000baseKX_Full)) + *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_KX; + + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 1000baseX_Full)) + *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_SX | + ICE_PHY_TYPE_LOW_1000BASE_LX; + + ice_update_phy_type(phy_type_low, phy_type_high, adv_link_speed); +} + +/** * ice_set_link_ksettings - Set Speed and Duplex * @netdev: network interface device structure * @ks: ethtool ksettings @@ -2320,7 +2356,8 @@ ice_set_link_ksettings(struct net_device *netdev, adv_link_speed = curr_link_speed; /* Convert the advertise link speeds to their corresponded PHY_TYPE */ - ice_update_phy_type(&phy_type_low, &phy_type_high, adv_link_speed); + ice_set_phy_type_from_speed(ks, &phy_type_low, &phy_type_high, + adv_link_speed); if (!autoneg_changed && adv_link_speed == curr_link_speed) { netdev_info(netdev, "Nothing changed, exiting without setting anything.\n"); @@ -3470,6 +3507,16 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch) new_rx = ch->combined_count + ch->rx_count; new_tx = ch->combined_count + ch->tx_count; + if (new_rx < vsi->tc_cfg.numtc) { + netdev_err(dev, "Cannot set less Rx channels, than Traffic Classes you have (%u)\n", + vsi->tc_cfg.numtc); + return -EINVAL; + } + if (new_tx < vsi->tc_cfg.numtc) { + netdev_err(dev, "Cannot set less Tx channels, than Traffic Classes you have (%u)\n", + vsi->tc_cfg.numtc); + return -EINVAL; + } if (new_rx > ice_get_max_rxq(pf)) { netdev_err(dev, "Maximum allowed Rx channels is %d\n", ice_get_max_rxq(pf)); diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 454e01ae09b9..f7f9c973ec54 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -909,7 +909,7 @@ static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt) * @vsi: the VSI being configured * @ctxt: VSI context structure */ -static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) +static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) { u16 offset = 0, qmap = 0, tx_count = 0, pow = 0; u16 num_txq_per_tc, num_rxq_per_tc; @@ -982,7 +982,18 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) else vsi->num_rxq = num_rxq_per_tc; + if (vsi->num_rxq > vsi->alloc_rxq) { + dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n", + vsi->num_rxq, vsi->alloc_rxq); + return -EINVAL; + } + vsi->num_txq = tx_count; + if (vsi->num_txq > vsi->alloc_txq) { + dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n", + vsi->num_txq, vsi->alloc_txq); + return -EINVAL; + } if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) { dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. 
Hence making them equal\n"); @@ -1000,6 +1011,8 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) */ ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]); ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq); + + return 0; } /** @@ -1187,7 +1200,10 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi) if (vsi->type == ICE_VSI_CHNL) { ice_chnl_vsi_setup_q_map(vsi, ctxt); } else { - ice_vsi_setup_q_map(vsi, ctxt); + ret = ice_vsi_setup_q_map(vsi, ctxt); + if (ret) + goto out; + if (!init_vsi) /* means VSI being updated */ /* must to indicate which section of VSI context are * being modified @@ -3464,7 +3480,7 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc) * * Prepares VSI tc_config to have queue configurations based on MQPRIO options. */ -static void +static int ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt, u8 ena_tc) { @@ -3513,7 +3529,18 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt, /* Set actual Tx/Rx queue pairs */ vsi->num_txq = offset + qcount_tx; + if (vsi->num_txq > vsi->alloc_txq) { + dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n", + vsi->num_txq, vsi->alloc_txq); + return -EINVAL; + } + vsi->num_rxq = offset + qcount_rx; + if (vsi->num_rxq > vsi->alloc_rxq) { + dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n", + vsi->num_rxq, vsi->alloc_rxq); + return -EINVAL; + } /* Setup queue TC[0].qmap for given VSI context */ ctxt->info.tc_mapping[0] = cpu_to_le16(qmap); @@ -3531,6 +3558,8 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt, dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_rxq = %d\n", vsi->num_rxq); dev_dbg(ice_pf_to_dev(vsi->back), "all_numtc %u, all_enatc: 0x%04x, tc_cfg.numtc %u\n", vsi->all_numtc, vsi->all_enatc, vsi->tc_cfg.numtc); + + return 0; } /** @@ -3580,9 +3609,12 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) if (vsi->type == ICE_VSI_PF && test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) - ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc); + ret = ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc); else - ice_vsi_setup_q_map(vsi, ctx); + ret = ice_vsi_setup_q_map(vsi, ctx); + + if (ret) + goto out; /* must to indicate which section of VSI context are being modified */ ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index e1cae253412c..c1ac2f746714 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -5763,25 +5763,38 @@ static netdev_features_t ice_fix_features(struct net_device *netdev, netdev_features_t features) { struct ice_netdev_priv *np = netdev_priv(netdev); - netdev_features_t supported_vlan_filtering; - netdev_features_t requested_vlan_filtering; - struct ice_vsi *vsi = np->vsi; - - requested_vlan_filtering = features & NETIF_VLAN_FILTERING_FEATURES; - - /* make sure supported_vlan_filtering works for both SVM and DVM */ - supported_vlan_filtering = NETIF_F_HW_VLAN_CTAG_FILTER; - if (ice_is_dvm_ena(&vsi->back->hw)) - supported_vlan_filtering |= NETIF_F_HW_VLAN_STAG_FILTER; - - if (requested_vlan_filtering && - requested_vlan_filtering != supported_vlan_filtering) { - if (requested_vlan_filtering & NETIF_F_HW_VLAN_CTAG_FILTER) { - netdev_warn(netdev, "cannot support requested VLAN filtering settings, enabling all supported VLAN filtering settings\n"); - features 
|= supported_vlan_filtering; + netdev_features_t req_vlan_fltr, cur_vlan_fltr; + bool cur_ctag, cur_stag, req_ctag, req_stag; + + cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES; + cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER; + cur_stag = cur_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER; + + req_vlan_fltr = features & NETIF_VLAN_FILTERING_FEATURES; + req_ctag = req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER; + req_stag = req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER; + + if (req_vlan_fltr != cur_vlan_fltr) { + if (ice_is_dvm_ena(&np->vsi->back->hw)) { + if (req_ctag && req_stag) { + features |= NETIF_VLAN_FILTERING_FEATURES; + } else if (!req_ctag && !req_stag) { + features &= ~NETIF_VLAN_FILTERING_FEATURES; + } else if ((!cur_ctag && req_ctag && !cur_stag) || + (!cur_stag && req_stag && !cur_ctag)) { + features |= NETIF_VLAN_FILTERING_FEATURES; + netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been enabled for both types.\n"); + } else if ((cur_ctag && !req_ctag && cur_stag) || + (cur_stag && !req_stag && cur_ctag)) { + features &= ~NETIF_VLAN_FILTERING_FEATURES; + netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been disabled for both types.\n"); + } } else { - netdev_warn(netdev, "cannot support requested VLAN filtering settings, clearing all supported VLAN filtering settings\n"); - features &= ~supported_vlan_filtering; + if (req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER) + netdev_warn(netdev, "cannot support requested 802.1ad filtering setting in SVM mode\n"); + + if (req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER) + features |= NETIF_F_HW_VLAN_CTAG_FILTER; } } diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index 662947c882e8..ef9344ef0d8e 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -2271,7 +2271,7 @@ static int ice_ptp_init_tx_e822(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port) { tx->quad = port / ICE_PORTS_PER_QUAD; - tx->quad_offset = tx->quad * INDEX_PER_PORT; + tx->quad_offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT; tx->len = INDEX_PER_PORT; return ice_ptp_alloc_tx_tracker(tx); diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h index afd048d69959..10e396abf130 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp.h @@ -49,6 +49,37 @@ struct ice_perout_channel { * To allow multiple ports to access the shared register block independently, * the blocks are split up so that indexes are assigned to each port based on * hardware logical port number. + * + * The timestamp blocks are handled differently for E810- and E822-based + * devices. In E810 devices, each port has its own block of timestamps, while in + * E822 there is a need to logically break the block of registers into smaller + * chunks based on the port number to avoid collisions. 
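+ *
+ * A sketch of the resulting index arithmetic (this mirrors the
+ * ice_ptp_init_tx_e822() change above; the constants are the driver's own):
+ *
+ *	quad        = port / ICE_PORTS_PER_QUAD;
+ *	quad_offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT;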
+ *
+ * Example for port 5 in E810:
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * |register|register|register|register|register|register|register|register|
+ * | block  | block  | block  | block  | block  | block  | block  | block  |
+ * |  for   |  for   |  for   |  for   |  for   |  for   |  for   |  for   |
+ * | port 0 | port 1 | port 2 | port 3 | port 4 | port 5 | port 6 | port 7 |
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ *                                                  ^^
+ *                                                  ||
+ *                                                  |--- quad offset is always 0
+ *                                                  ---- quad number
+ *
+ * Example for port 5 in E822:
+ * +-----------------------------+-----------------------------+
+ * |  register block for quad 0  |  register block for quad 1  |
+ * |+------+------+------+------+|+------+------+------+------+|
+ * ||port 0|port 1|port 2|port 3|||port 0|port 1|port 2|port 3||
+ * |+------+------+------+------+|+------+------+------+------+|
+ * +-----------------------------+-------^---------------------+
+ *                ^                      |
+ *                |                      --- quad offset*
+ *                ---- quad number
+ *
+ * * PHY port 5 is port 1 in quad 1
+ *
 */ /** diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c index 0a0c55fb8699..b803f2ab3cc7 100644 --- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c @@ -524,6 +524,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) */ fltr->rid = rule_added.rid; fltr->rule_id = rule_added.rule_id; + fltr->dest_id = rule_added.vsi_handle; exit: kfree(list); @@ -993,7 +994,9 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi, n_proto_key = ntohs(match.key->n_proto); n_proto_mask = ntohs(match.mask->n_proto); - if (n_proto_key == ETH_P_ALL || n_proto_key == 0) { + if (n_proto_key == ETH_P_ALL || n_proto_key == 0 || + fltr->tunnel_type == TNL_GTPU || + fltr->tunnel_type == TNL_GTPC) { n_proto_key = 0; n_proto_mask = 0; } else { diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c index cd8e6b50968c..7adf9ddf129e 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c @@ -504,6 +504,11 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags) } if (ice_is_vf_disabled(vf)) { + vsi = ice_get_vf_vsi(vf); + if (WARN_ON(!vsi)) + return -EINVAL; + ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id); + ice_vsi_stop_all_rx_rings(vsi); dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n", vf->vf_id); return 0; } diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c index 1d9b84c3937a..4547bc1f7cee 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@ -1569,35 +1569,27 @@ error_param: */ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) { - enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct virtchnl_vsi_queue_config_info *qci = (struct virtchnl_vsi_queue_config_info *)msg; struct virtchnl_queue_pair_info *qpi; struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; - int i, q_idx; + int i = -1, q_idx; - if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) goto error_param; - } - if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) goto error_param; - } vsi = ice_get_vf_vsi(vf); - if (!vsi)
{ - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + if (!vsi) goto error_param; - } if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF || qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) { dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n", vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)); - v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -1610,7 +1602,6 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) !ice_vc_isvalid_ring_len(qpi->txq.ring_len) || !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) || !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -1620,7 +1611,6 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) * for selected "vsi" */ if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -1630,14 +1620,13 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) vsi->tx_rings[i]->count = qpi->txq.ring_len; /* Disable any existing queue first */ - if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx)) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx)) goto error_param; - } /* Configure a queue with the requested settings */ if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure TX queue %d\n", + vf->vf_id, i); goto error_param; } } @@ -1651,17 +1640,13 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) if (qpi->rxq.databuffer_size != 0 && (qpi->rxq.databuffer_size > ((16 * 1024) - 128) || - qpi->rxq.databuffer_size < 1024)) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + qpi->rxq.databuffer_size < 1024)) goto error_param; - } vsi->rx_buf_len = qpi->rxq.databuffer_size; vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len; if (qpi->rxq.max_pkt_size > max_frame_size || - qpi->rxq.max_pkt_size < 64) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + qpi->rxq.max_pkt_size < 64) goto error_param; - } vsi->max_frame = qpi->rxq.max_pkt_size; /* add space for the port VLAN since the VF driver is @@ -1672,16 +1657,30 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) vsi->max_frame += VLAN_HLEN; if (ice_vsi_cfg_single_rxq(vsi, q_idx)) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure RX queue %d\n", + vf->vf_id, i); goto error_param; } } } + /* send the response to the VF */ + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, + VIRTCHNL_STATUS_SUCCESS, NULL, 0); error_param: + /* disable whatever we can */ + for (; i >= 0; i--) { + if (ice_vsi_ctrl_one_rx_ring(vsi, false, i, true)) + dev_err(ice_pf_to_dev(pf), "VF-%d could not disable RX queue %d\n", + vf->vf_id, i); + if (ice_vf_vsi_dis_single_txq(vf, vsi, i)) + dev_err(ice_pf_to_dev(pf), "VF-%d could not disable TX queue %d\n", + vf->vf_id, i); + } + /* send the response to the VF */ - return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret, - NULL, 0); + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, + VIRTCHNL_STATUS_ERR_PARAM, NULL, 0); } /** diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 68be2976f539..c5f04c40284b 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -4819,8 +4819,11 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring) while (i != tx_ring->next_to_use) { union e1000_adv_tx_desc *eop_desc, 
*tx_desc; - /* Free all the Tx ring sk_buffs */ - dev_kfree_skb_any(tx_buffer->skb); + /* Free all the Tx ring sk_buffs or xdp frames */ + if (tx_buffer->type == IGB_TYPE_SKB) + dev_kfree_skb_any(tx_buffer->skb); + else + xdp_return_frame(tx_buffer->xdpf); /* unmap skb header data */ dma_unmap_single(tx_ring->dev, @@ -9898,11 +9901,10 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) struct e1000_hw *hw = &adapter->hw; u32 dmac_thr; u16 hwm; + u32 reg; if (hw->mac.type > e1000_82580) { if (adapter->flags & IGB_FLAG_DMAC) { - u32 reg; - /* force threshold to 0. */ wr32(E1000_DMCTXTH, 0); @@ -9935,7 +9937,6 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) /* Disable BMC-to-OS Watchdog Enable */ if (hw->mac.type != e1000_i354) reg &= ~E1000_DMACR_DC_BMC2OSW_EN; - wr32(E1000_DMACR, reg); /* no lower threshold to disable @@ -9952,12 +9953,12 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) */ wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE - (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6); + } - /* make low power state decision controlled - * by DMA coal - */ + if (hw->mac.type >= e1000_i210 || + (adapter->flags & IGB_FLAG_DMAC)) { reg = rd32(E1000_PCIEMISC); - reg &= ~E1000_PCIEMISC_LX_DECISION; + reg |= E1000_PCIEMISC_LX_DECISION; wr32(E1000_PCIEMISC, reg); } /* endif adapter->dmac is not disabled */ } else if (hw->mac.type == e1000_82580) { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 7f11c0a8e7a9..d4e63f0644c3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -1184,9 +1184,9 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter, switch (xcast_mode) { case IXGBEVF_XCAST_MODE_NONE: - disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | + disable = IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; - enable = 0; + enable = IXGBE_VMOLR_BAM; break; case IXGBEVF_XCAST_MODE_MULTI: disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; @@ -1208,9 +1208,9 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter, return -EPERM; } - disable = 0; + disable = IXGBE_VMOLR_VPE; enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | - IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; + IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE; break; default: return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c index bc614a4def9e..3f60a80e34c8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c @@ -1390,7 +1390,8 @@ static int otx2vf_get_link_ksettings(struct net_device *netdev, static const struct ethtool_ops otx2vf_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | - ETHTOOL_COALESCE_MAX_FRAMES, + ETHTOOL_COALESCE_MAX_FRAMES | + ETHTOOL_COALESCE_USE_ADAPTIVE, .supported_ring_params = ETHTOOL_RING_USE_RX_BUF_LEN | ETHTOOL_RING_USE_CQE_SIZE, .get_link = otx2_get_link, diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index b3b3c079a0fa..59c9a10f83ba 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -899,6 +899,17 @@ static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd, return true; } +static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask) +{ + unsigned int size = 
mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH); + unsigned long data; + + data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN, + get_order(size)); + + return (void *)data; +} + /* the qdma core needs scratch memory to be setup */ static int mtk_init_fq_dma(struct mtk_eth *eth) { @@ -1467,7 +1478,10 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, goto release_desc; /* alloc new buffer */ - new_data = napi_alloc_frag(ring->frag_size); + if (ring->frag_size <= PAGE_SIZE) + new_data = napi_alloc_frag(ring->frag_size); + else + new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC); if (unlikely(!new_data)) { netdev->stats.rx_dropped++; goto release_desc; @@ -1914,7 +1928,10 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) return -ENOMEM; for (i = 0; i < rx_dma_size; i++) { - ring->data[i] = netdev_alloc_frag(ring->frag_size); + if (ring->frag_size <= PAGE_SIZE) + ring->data[i] = netdev_alloc_frag(ring->frag_size); + else + ring->data[i] = mtk_max_lro_buf_alloc(GFP_KERNEL); if (!ring->data[i]) return -ENOMEM; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index ed5038d98ef6..6400a827173c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -2110,7 +2110,7 @@ static int mlx4_en_get_module_eeprom(struct net_device *dev, en_err(priv, "mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n", i, offset, ee->len - i, ret); - return 0; + return ret; } i += ret; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index 0eb9d74547f8..50422b56a64d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -579,17 +579,6 @@ static void *pci_get_other_drvdata(struct device *this, struct device *other) return pci_get_drvdata(to_pci_dev(other)); } -static int next_phys_dev(struct device *dev, const void *data) -{ - struct mlx5_core_dev *mdev, *this = (struct mlx5_core_dev *)data; - - mdev = pci_get_other_drvdata(this->device, dev); - if (!mdev) - return 0; - - return _next_phys_dev(mdev, data); -} - static int next_phys_dev_lag(struct device *dev, const void *data) { struct mlx5_core_dev *mdev, *this = (struct mlx5_core_dev *)data; @@ -624,13 +613,6 @@ static struct mlx5_core_dev *mlx5_get_next_dev(struct mlx5_core_dev *dev, } /* Must be called with intf_mutex held */ -struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev) -{ - lockdep_assert_held(&mlx5_intf_mutex); - return mlx5_get_next_dev(dev, &next_phys_dev); -} - -/* Must be called with intf_mutex held */ struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev) { lockdep_assert_held(&mlx5_intf_mutex); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index eae9aa9c0811..978a2bb8e122 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -675,6 +675,9 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work) if (!tracer->owner) return; + if (unlikely(!tracer->str_db.loaded)) + goto arm; + block_count = tracer->buff.size / TRACER_BLOCK_SIZE_BYTE; start_offset = tracer->buff.consumer_index * TRACER_BLOCK_SIZE_BYTE; @@ -732,6 +735,7 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work) &tmp_trace_block[TRACES_PER_BLOCK - 1]); } +arm: 
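+	/* Re-arm in both cases: even when the string DB is not loaded yet
+	 * (the goto above), so that later TRACES_AVAILABLE events are not
+	 * dropped now that the event handler below queues work unconditionally. */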
mlx5_fw_tracer_arm(dev); } @@ -1136,8 +1140,7 @@ static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void queue_work(tracer->work_queue, &tracer->ownership_change_work); break; case MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE: - if (likely(tracer->str_db.loaded)) - queue_work(tracer->work_queue, &tracer->handle_traces_work); + queue_work(tracer->work_queue, &tracer->handle_traces_work); break; default: mlx5_core_dbg(dev, "FWTracer: Event with unrecognized subtype: sub_type %d\n", diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index 68364484a435..3c1edfa33aa7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -565,7 +565,8 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev, static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params) { bool lro_en = params->packet_merge.type == MLX5E_PACKET_MERGE_LRO; - bool ro = MLX5_CAP_GEN(mdev, relaxed_ordering_write); + bool ro = pcie_relaxed_ordering_enabled(mdev->pdev) && + MLX5_CAP_GEN(mdev, relaxed_ordering_write); return ro && lro_en ? MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c index 43a536cb81db..c0f409c195bf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c @@ -38,11 +38,12 @@ void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc) { + bool ro_pci_enable = pcie_relaxed_ordering_enabled(mdev->pdev); bool ro_write = MLX5_CAP_GEN(mdev, relaxed_ordering_write); bool ro_read = MLX5_CAP_GEN(mdev, relaxed_ordering_read); - MLX5_SET(mkc, mkc, relaxed_ordering_read, ro_read); - MLX5_SET(mkc, mkc, relaxed_ordering_write, ro_write); + MLX5_SET(mkc, mkc, relaxed_ordering_read, ro_pci_enable && ro_read); + MLX5_SET(mkc, mkc, relaxed_ordering_write, ro_pci_enable && ro_write); } static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index eb90e79388f1..f797fd97d305 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -950,6 +950,13 @@ err_event_reg: return err; } +static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv) +{ + mlx5e_rep_tc_netdevice_event_unregister(rpriv); + mlx5e_rep_bond_cleanup(rpriv); + mlx5e_rep_tc_cleanup(rpriv); +} + static int mlx5e_init_rep_tx(struct mlx5e_priv *priv) { struct mlx5e_rep_priv *rpriv = priv->ppriv; @@ -961,42 +968,36 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv) return err; } - err = mlx5e_tc_ht_init(&rpriv->tc_ht); - if (err) - goto err_ht_init; - if (rpriv->rep->vport == MLX5_VPORT_UPLINK) { err = mlx5e_init_uplink_rep_tx(rpriv); if (err) goto err_init_tx; } + err = mlx5e_tc_ht_init(&rpriv->tc_ht); + if (err) + goto err_ht_init; + return 0; -err_init_tx: - mlx5e_tc_ht_cleanup(&rpriv->tc_ht); err_ht_init: + if (rpriv->rep->vport == MLX5_VPORT_UPLINK) + mlx5e_cleanup_uplink_rep_tx(rpriv); +err_init_tx: mlx5e_destroy_tises(priv); return err; } -static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv) -{ - mlx5e_rep_tc_netdevice_event_unregister(rpriv); - mlx5e_rep_bond_cleanup(rpriv); - mlx5e_rep_tc_cleanup(rpriv); -} - static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv) { 
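	/* Tear down in the reverse of the new init order: tc_ht first, uplink rep TX state next, TISes last. */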
struct mlx5e_rep_priv *rpriv = priv->ppriv; - mlx5e_destroy_tises(priv); + mlx5e_tc_ht_cleanup(&rpriv->tc_ht); if (rpriv->rep->vport == MLX5_VPORT_UPLINK) mlx5e_cleanup_uplink_rep_tx(rpriv); - mlx5e_tc_ht_cleanup(&rpriv->tc_ht); + mlx5e_destroy_tises(priv); } static void mlx5e_rep_enable(struct mlx5e_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 217cac29057f..2ce3728576d1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -2690,9 +2690,6 @@ static int mlx5_esw_offloads_devcom_event(int event, switch (event) { case ESW_OFFLOADS_DEVCOM_PAIR: - if (mlx5_get_next_phys_dev(esw->dev) != peer_esw->dev) - break; - if (mlx5_eswitch_vport_match_metadata_enabled(esw) != mlx5_eswitch_vport_match_metadata_enabled(peer_esw)) break; @@ -2744,6 +2741,9 @@ static void esw_offloads_devcom_init(struct mlx5_eswitch *esw) if (!MLX5_CAP_ESW(esw->dev, merged_eswitch)) return; + if (!mlx5_is_lag_supported(esw->dev)) + return; + mlx5_devcom_register_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS, mlx5_esw_offloads_devcom_event, @@ -2761,6 +2761,9 @@ static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) if (!MLX5_CAP_ESW(esw->dev, merged_eswitch)) return; + if (!mlx5_is_lag_supported(esw->dev)) + return; + mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS, ESW_OFFLOADS_DEVCOM_UNPAIR, esw); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index fdcf7f529330..21e5c709b2d3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1574,9 +1574,22 @@ static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte, return NULL; } -static bool check_conflicting_actions(u32 action1, u32 action2) +static bool check_conflicting_actions_vlan(const struct mlx5_fs_vlan *vlan0, + const struct mlx5_fs_vlan *vlan1) { - u32 xored_actions = action1 ^ action2; + return vlan0->ethtype != vlan1->ethtype || + vlan0->vid != vlan1->vid || + vlan0->prio != vlan1->prio; +} + +static bool check_conflicting_actions(const struct mlx5_flow_act *act1, + const struct mlx5_flow_act *act2) +{ + u32 action1 = act1->action; + u32 action2 = act2->action; + u32 xored_actions; + + xored_actions = action1 ^ action2; /* if one rule only wants to count, it's ok */ if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT || @@ -1593,6 +1606,22 @@ static bool check_conflicting_actions(u32 action1, u32 action2) MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2)) return true; + if (action1 & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT && + act1->pkt_reformat != act2->pkt_reformat) + return true; + + if (action1 & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR && + act1->modify_hdr != act2->modify_hdr) + return true; + + if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH && + check_conflicting_actions_vlan(&act1->vlan[0], &act2->vlan[0])) + return true; + + if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 && + check_conflicting_actions_vlan(&act1->vlan[1], &act2->vlan[1])) + return true; + return false; } @@ -1600,7 +1629,7 @@ static int check_conflicting_ftes(struct fs_fte *fte, const struct mlx5_flow_context *flow_context, const struct mlx5_flow_act *flow_act) { - if (check_conflicting_actions(flow_act->action, fte->action.action)) { + if (check_conflicting_actions(flow_act, &fte->action)) { mlx5_core_warn(get_dev(&fte->node), "Found two FTEs with conflicting 
actions\n"); return -EEXIST; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index 552b6e26e701..2a8fc547eb37 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -783,7 +783,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) { struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev; - struct lag_tracker tracker; + struct lag_tracker tracker = { }; bool do_bond, roce_lag; int err; int i; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h index 72f70fad4641..c81b173156d2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h @@ -74,6 +74,16 @@ struct mlx5_lag { struct lag_mpesw lag_mpesw; }; +static inline bool mlx5_is_lag_supported(struct mlx5_core_dev *dev) +{ + if (!MLX5_CAP_GEN(dev, vport_group_manager) || + !MLX5_CAP_GEN(dev, lag_master) || + MLX5_CAP_GEN(dev, num_lag_ports) < 2 || + MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS) + return false; + return true; +} + static inline struct mlx5_lag * mlx5_lag_dev(struct mlx5_core_dev *dev) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 484cb1e4fc7f..9cc7afea2758 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -209,7 +209,6 @@ int mlx5_attach_device(struct mlx5_core_dev *dev); void mlx5_detach_device(struct mlx5_core_dev *dev); int mlx5_register_device(struct mlx5_core_dev *dev); void mlx5_unregister_device(struct mlx5_core_dev *dev); -struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev); struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev); void mlx5_dev_list_lock(void); void mlx5_dev_list_unlock(void); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h index a68d931090dd..15c8d4de8350 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h @@ -8,8 +8,8 @@ #include "spectrum.h" enum mlxsw_sp_counter_sub_pool_id { - MLXSW_SP_COUNTER_SUB_POOL_FLOW, MLXSW_SP_COUNTER_SUB_POOL_RIF, + MLXSW_SP_COUNTER_SUB_POOL_FLOW, }; int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp, diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c index 443a5d6eb57b..7c31a46195b2 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c +++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c @@ -507,6 +507,11 @@ nfp_fl_calc_key_layers_sz(struct nfp_fl_key_ls in_key_ls, uint16_t *map) key_size += sizeof(struct nfp_flower_ipv6); } + if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_QINQ) { + map[FLOW_PAY_QINQ] = key_size; + key_size += sizeof(struct nfp_flower_vlan); + } + if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GRE) { map[FLOW_PAY_GRE] = key_size; if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) @@ -515,11 +520,6 @@ nfp_fl_calc_key_layers_sz(struct nfp_fl_key_ls in_key_ls, uint16_t *map) key_size += sizeof(struct nfp_flower_ipv4_gre_tun); } - if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_QINQ) { - map[FLOW_PAY_QINQ] = key_size; - key_size += sizeof(struct nfp_flower_vlan); - } - if ((in_key_ls.key_layer & NFP_FLOWER_LAYER_VXLAN) 
|| (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GENEVE)) { map[FLOW_PAY_UDP_TUN] = key_size; @@ -758,6 +758,17 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry) } } + if (NFP_FLOWER_LAYER2_QINQ & key_layer.key_layer_two) { + offset = key_map[FLOW_PAY_QINQ]; + key = kdata + offset; + msk = mdata + offset; + for (i = 0; i < _CT_TYPE_MAX; i++) { + nfp_flower_compile_vlan((struct nfp_flower_vlan *)key, + (struct nfp_flower_vlan *)msk, + rules[i]); + } + } + if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_GRE) { offset = key_map[FLOW_PAY_GRE]; key = kdata + offset; @@ -798,17 +809,6 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry) } } - if (NFP_FLOWER_LAYER2_QINQ & key_layer.key_layer_two) { - offset = key_map[FLOW_PAY_QINQ]; - key = kdata + offset; - msk = mdata + offset; - for (i = 0; i < _CT_TYPE_MAX; i++) { - nfp_flower_compile_vlan((struct nfp_flower_vlan *)key, - (struct nfp_flower_vlan *)msk, - rules[i]); - } - } - if (key_layer.key_layer & NFP_FLOWER_LAYER_VXLAN || key_layer.key_layer_two & NFP_FLOWER_LAYER2_GENEVE) { offset = key_map[FLOW_PAY_UDP_TUN]; diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c index 193a167a6762..e01430139b6d 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c @@ -625,6 +625,14 @@ int nfp_flower_compile_flow_match(struct nfp_app *app, msk += sizeof(struct nfp_flower_ipv6); } + if (NFP_FLOWER_LAYER2_QINQ & key_ls->key_layer_two) { + nfp_flower_compile_vlan((struct nfp_flower_vlan *)ext, + (struct nfp_flower_vlan *)msk, + rule); + ext += sizeof(struct nfp_flower_vlan); + msk += sizeof(struct nfp_flower_vlan); + } + if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GRE) { if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) { struct nfp_flower_ipv6_gre_tun *gre_match; @@ -660,14 +668,6 @@ int nfp_flower_compile_flow_match(struct nfp_app *app, } } - if (NFP_FLOWER_LAYER2_QINQ & key_ls->key_layer_two) { - nfp_flower_compile_vlan((struct nfp_flower_vlan *)ext, - (struct nfp_flower_vlan *)msk, - rule); - ext += sizeof(struct nfp_flower_vlan); - msk += sizeof(struct nfp_flower_vlan); - } - if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN || key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) { if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c index 54af30961351..6eeeb0fda91f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c @@ -15,7 +15,7 @@ #include "nfp_net_sriov.h" static int -nfp_net_sriov_check(struct nfp_app *app, int vf, u16 cap, const char *msg) +nfp_net_sriov_check(struct nfp_app *app, int vf, u16 cap, const char *msg, bool warn) { u16 cap_vf; @@ -24,12 +24,14 @@ nfp_net_sriov_check(struct nfp_app *app, int vf, u16 cap, const char *msg) cap_vf = readw(app->pf->vfcfg_tbl2 + NFP_NET_VF_CFG_MB_CAP); if ((cap_vf & cap) != cap) { - nfp_warn(app->pf->cpp, "ndo_set_vf_%s not supported\n", msg); + if (warn) + nfp_warn(app->pf->cpp, "ndo_set_vf_%s not supported\n", msg); return -EOPNOTSUPP; } if (vf < 0 || vf >= app->pf->num_vfs) { - nfp_warn(app->pf->cpp, "invalid VF id %d\n", vf); + if (warn) + nfp_warn(app->pf->cpp, "invalid VF id %d\n", vf); return -EINVAL; } @@ -65,7 +67,7 @@ int nfp_app_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) unsigned int vf_offset; int err; - err = 
nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_MAC, "mac"); + err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_MAC, "mac", true); if (err) return err; @@ -101,7 +103,7 @@ int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, u32 vlan_tag; int err; - err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN, "vlan"); + err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN, "vlan", true); if (err) return err; @@ -115,7 +117,7 @@ int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, } /* Check if fw supports or not */ - err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto"); + err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto", true); if (err) is_proto_sup = false; @@ -149,7 +151,7 @@ int nfp_app_set_vf_rate(struct net_device *netdev, int vf, u32 vf_offset, ratevalue; int err; - err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_RATE, "rate"); + err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_RATE, "rate", true); if (err) return err; @@ -181,7 +183,7 @@ int nfp_app_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable) int err; err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_SPOOF, - "spoofchk"); + "spoofchk", true); if (err) return err; @@ -205,7 +207,7 @@ int nfp_app_set_vf_trust(struct net_device *netdev, int vf, bool enable) int err; err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_TRUST, - "trust"); + "trust", true); if (err) return err; @@ -230,7 +232,7 @@ int nfp_app_set_vf_link_state(struct net_device *netdev, int vf, int err; err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_LINK_STATE, - "link_state"); + "link_state", true); if (err) return err; @@ -265,7 +267,7 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf, u8 flags; int err; - err = nfp_net_sriov_check(app, vf, 0, ""); + err = nfp_net_sriov_check(app, vf, 0, "", true); if (err) return err; @@ -285,13 +287,13 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf, ivi->vlan = FIELD_GET(NFP_NET_VF_CFG_VLAN_VID, vlan_tag); ivi->qos = FIELD_GET(NFP_NET_VF_CFG_VLAN_QOS, vlan_tag); - if (!nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto")) + if (!nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto", false)) ivi->vlan_proto = htons(FIELD_GET(NFP_NET_VF_CFG_VLAN_PROT, vlan_tag)); ivi->spoofchk = FIELD_GET(NFP_NET_VF_CFG_CTRL_SPOOF, flags); ivi->trusted = FIELD_GET(NFP_NET_VF_CFG_CTRL_TRUST, flags); ivi->linkstate = FIELD_GET(NFP_NET_VF_CFG_CTRL_LINK_STATE, flags); - err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_RATE, "rate"); + err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_RATE, "rate", false); if (!err) { rate = readl(app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_RATE); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c index f9f80933e0c9..38fe77d1035e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c @@ -1072,13 +1072,11 @@ static int intel_eth_pci_probe(struct pci_dev *pdev, ret = stmmac_dvr_probe(&pdev->dev, plat, &res); if (ret) { - goto err_dvr_probe; + goto err_alloc_irq; } return 0; -err_dvr_probe: - pci_free_irq_vectors(pdev); err_alloc_irq: clk_disable_unprepare(plat->stmmac_clk); clk_unregister_fixed_rate(plat->stmmac_clk); diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h 
index 4225efbeda3d..f2e2261b4b7d 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet.h +++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h @@ -547,6 +547,57 @@ static inline void axienet_iow(struct axienet_local *lp, off_t offset, iowrite32(value, lp->regs + offset); } +/** + * axienet_dma_out32 - Memory mapped Axi DMA register write. + * @lp: Pointer to axienet local structure + * @reg: Address offset from the base address of the Axi DMA core + * @value: Value to be written into the Axi DMA register + * + * This function writes the desired value into the corresponding Axi DMA + * register. + */ + +static inline void axienet_dma_out32(struct axienet_local *lp, + off_t reg, u32 value) +{ + iowrite32(value, lp->dma_regs + reg); +} + +#if defined(CONFIG_64BIT) && defined(iowrite64) +/** + * axienet_dma_out64 - Memory mapped Axi DMA register write. + * @lp: Pointer to axienet local structure + * @reg: Address offset from the base address of the Axi DMA core + * @value: Value to be written into the Axi DMA register + * + * This function writes the desired value into the corresponding Axi DMA + * register. + */ +static inline void axienet_dma_out64(struct axienet_local *lp, + off_t reg, u64 value) +{ + iowrite64(value, lp->dma_regs + reg); +} + +static inline void axienet_dma_out_addr(struct axienet_local *lp, off_t reg, + dma_addr_t addr) +{ + if (lp->features & XAE_FEATURE_DMA_64BIT) + axienet_dma_out64(lp, reg, addr); + else + axienet_dma_out32(lp, reg, lower_32_bits(addr)); +} + +#else /* CONFIG_64BIT */ + +static inline void axienet_dma_out_addr(struct axienet_local *lp, off_t reg, + dma_addr_t addr) +{ + axienet_dma_out32(lp, reg, lower_32_bits(addr)); +} + +#endif /* CONFIG_64BIT */ + /* Function prototypes visible in xilinx_axienet_mdio.c for other files */ int axienet_mdio_enable(struct axienet_local *lp); void axienet_mdio_disable(struct axienet_local *lp); diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 93c9f305bba4..1760930ec0c4 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -133,30 +133,6 @@ static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg) return ioread32(lp->dma_regs + reg); } -/** - * axienet_dma_out32 - Memory mapped Axi DMA register write. - * @lp: Pointer to axienet local structure - * @reg: Address offset from the base address of the Axi DMA core - * @value: Value to be written into the Axi DMA register - * - * This function writes the desired value into the corresponding Axi DMA - * register. 
- */ -static inline void axienet_dma_out32(struct axienet_local *lp, - off_t reg, u32 value) -{ - iowrite32(value, lp->dma_regs + reg); -} - -static void axienet_dma_out_addr(struct axienet_local *lp, off_t reg, - dma_addr_t addr) -{ - axienet_dma_out32(lp, reg, lower_32_bits(addr)); - - if (lp->features & XAE_FEATURE_DMA_64BIT) - axienet_dma_out32(lp, reg + 4, upper_32_bits(addr)); -} - static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr, struct axidma_bd *desc) { @@ -2061,6 +2037,11 @@ static int axienet_probe(struct platform_device *pdev) iowrite32(0x0, desc); } } + if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) { + dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n"); + ret = -EINVAL; + goto cleanup_clk; + } ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width)); if (ret) { diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index 45c3c4a1101b..9fb567524220 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -99,6 +99,7 @@ struct sixpack { unsigned int rx_count; unsigned int rx_count_cooked; + spinlock_t rxlock; int mtu; /* Our mtu (to spot changes!) */ int buffsize; /* Max buffers sizes */ @@ -565,6 +566,7 @@ static int sixpack_open(struct tty_struct *tty) sp->dev = dev; spin_lock_init(&sp->lock); + spin_lock_init(&sp->rxlock); refcount_set(&sp->refcnt, 1); init_completion(&sp->dead); @@ -913,6 +915,7 @@ static void decode_std_command(struct sixpack *sp, unsigned char cmd) sp->led_state = 0x60; /* fill trailing bytes with zeroes */ sp->tty->ops->write(sp->tty, &sp->led_state, 1); + spin_lock_bh(&sp->rxlock); rest = sp->rx_count; if (rest != 0) for (i = rest; i <= 3; i++) @@ -930,6 +933,7 @@ static void decode_std_command(struct sixpack *sp, unsigned char cmd) sp_bump(sp, 0); } sp->rx_count_cooked = 0; + spin_unlock_bh(&sp->rxlock); } break; case SIXP_TX_URUN: printk(KERN_DEBUG "6pack: TX underrun\n"); @@ -959,8 +963,11 @@ sixpack_decode(struct sixpack *sp, const unsigned char *pre_rbuff, int count) decode_prio_command(sp, inbyte); else if ((inbyte & SIXP_STD_CMD_MASK) != 0) decode_std_command(sp, inbyte); - else if ((sp->status & SIXP_RX_DCD_MASK) == SIXP_RX_DCD_MASK) + else if ((sp->status & SIXP_RX_DCD_MASK) == SIXP_RX_DCD_MASK) { + spin_lock_bh(&sp->rxlock); decode_data(sp, inbyte); + spin_unlock_bh(&sp->rxlock); + } } } diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c index a8db1a19011b..c7047f5d7a9b 100644 --- a/drivers/net/phy/aquantia_main.c +++ b/drivers/net/phy/aquantia_main.c @@ -34,6 +34,8 @@ #define MDIO_AN_VEND_PROV 0xc400 #define MDIO_AN_VEND_PROV_1000BASET_FULL BIT(15) #define MDIO_AN_VEND_PROV_1000BASET_HALF BIT(14) +#define MDIO_AN_VEND_PROV_5000BASET_FULL BIT(11) +#define MDIO_AN_VEND_PROV_2500BASET_FULL BIT(10) #define MDIO_AN_VEND_PROV_DOWNSHIFT_EN BIT(4) #define MDIO_AN_VEND_PROV_DOWNSHIFT_MASK GENMASK(3, 0) #define MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT 4 @@ -231,9 +233,20 @@ static int aqr_config_aneg(struct phy_device *phydev) phydev->advertising)) reg |= MDIO_AN_VEND_PROV_1000BASET_HALF; + /* Handle the case when the 2.5G and 5G speeds are not advertised */ + if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, + phydev->advertising)) reg |= MDIO_AN_VEND_PROV_2500BASET_FULL; + + if (linkmode_test_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT, + phydev->advertising)) + reg |= MDIO_AN_VEND_PROV_5000BASET_FULL; + ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV,
MDIO_AN_VEND_PROV_1000BASET_HALF | - MDIO_AN_VEND_PROV_1000BASET_FULL, reg); + MDIO_AN_VEND_PROV_1000BASET_FULL | + MDIO_AN_VEND_PROV_2500BASET_FULL | + MDIO_AN_VEND_PROV_5000BASET_FULL, reg); if (ret < 0) return ret; if (ret > 0) diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index 6a467e7817a6..59fe356942b5 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -2072,6 +2072,8 @@ static struct phy_driver at803x_driver[] = { /* ATHEROS AR9331 */ PHY_ID_MATCH_EXACT(ATH9331_PHY_ID), .name = "Qualcomm Atheros AR9331 built-in PHY", + .probe = at803x_probe, + .remove = at803x_remove, .suspend = at803x_suspend, .resume = at803x_resume, .flags = PHY_POLL_CABLE_TEST, @@ -2087,6 +2089,8 @@ static struct phy_driver at803x_driver[] = { /* Qualcomm Atheros QCA9561 */ PHY_ID_MATCH_EXACT(QCA9561_PHY_ID), .name = "Qualcomm Atheros QCA9561 built-in PHY", + .probe = at803x_probe, + .remove = at803x_remove, .suspend = at803x_suspend, .resume = at803x_resume, .flags = PHY_POLL_CABLE_TEST, @@ -2151,6 +2155,8 @@ static struct phy_driver at803x_driver[] = { PHY_ID_MATCH_EXACT(QCA8081_PHY_ID), .name = "Qualcomm QCA8081", .flags = PHY_POLL_CABLE_TEST, + .probe = at803x_probe, + .remove = at803x_remove, .config_intr = at803x_config_intr, .handle_interrupt = at803x_handle_interrupt, .get_tunable = at803x_get_tunable, diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index 8561f2d4443b..13dafe7a29bd 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -137,6 +137,7 @@ #define DP83867_DOWNSHIFT_2_COUNT 2 #define DP83867_DOWNSHIFT_4_COUNT 4 #define DP83867_DOWNSHIFT_8_COUNT 8 +#define DP83867_SGMII_AUTONEG_EN BIT(7) /* CFG3 bits */ #define DP83867_CFG3_INT_OE BIT(7) @@ -855,6 +856,32 @@ static int dp83867_phy_reset(struct phy_device *phydev) DP83867_PHYCR_FORCE_LINK_GOOD, 0); } +static void dp83867_link_change_notify(struct phy_device *phydev) +{ + /* There is a limitation in DP83867 PHY device where SGMII AN is + * only triggered once after the device is booted up. Even after the + * PHY TPI is down and up again, SGMII AN is not triggered and + * hence no new in-band message from PHY to MAC side SGMII. + * This could cause an issue during power up, when PHY is up prior + * to MAC. At this condition, once MAC side SGMII is up, MAC side + * SGMII wouldn't receive new in-band message from TI PHY with + * correct link status, speed and duplex info. + * Thus, a SW solution is implemented here to retrigger SGMII Auto-Neg + * whenever there is a link change.
+ */ + if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { + int val = 0; + + val = phy_clear_bits(phydev, DP83867_CFG2, + DP83867_SGMII_AUTONEG_EN); + if (val < 0) + return; + + phy_set_bits(phydev, DP83867_CFG2, + DP83867_SGMII_AUTONEG_EN); + } +} + static struct phy_driver dp83867_driver[] = { { .phy_id = DP83867_PHY_ID, @@ -879,6 +906,8 @@ static struct phy_driver dp83867_driver[] = { .suspend = genphy_suspend, .resume = genphy_resume, + + .link_change_notify = dp83867_link_change_notify, }, }; module_phy_driver(dp83867_driver); diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 58d602985877..8a2dbe849866 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -1046,7 +1046,6 @@ int __init mdio_bus_init(void) return ret; } -EXPORT_SYMBOL_GPL(mdio_bus_init); #if IS_ENABLED(CONFIG_PHYLIB) void mdio_bus_exit(void) diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index 1b54684b68a0..96d3c40932d8 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -110,7 +110,7 @@ static int smsc_phy_config_init(struct phy_device *phydev) struct smsc_phy_priv *priv = phydev->priv; int rc; - if (!priv->energy_enable) + if (!priv->energy_enable || phydev->irq != PHY_POLL) return 0; rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS); @@ -210,6 +210,8 @@ static int lan95xx_config_aneg_ext(struct phy_device *phydev) * response on link pulses to detect presence of plugged Ethernet cable. * The Energy Detect Power-Down mode is enabled again in the end of procedure to * save approximately 220 mW of power if cable is unplugged. + * The workaround is only applicable to poll mode. Energy Detect Power-Down may + * not be used in interrupt mode lest link change detection becomes unreliable. */ static int lan87xx_read_status(struct phy_device *phydev) { @@ -217,7 +219,7 @@ static int lan87xx_read_status(struct phy_device *phydev) int err = genphy_read_status(phydev); - if (!phydev->link && priv->energy_enable) { + if (!phydev->link && priv->energy_enable && phydev->irq == PHY_POLL) { /* Disable EDPD to wake up PHY */ int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS); if (rc < 0) diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index 7a8c11a26eb5..4704ed6f00ef 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -1750,7 +1750,7 @@ static const struct driver_info ax88179_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1763,7 +1763,7 @@ static const struct driver_info ax88178a_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1776,7 +1776,7 @@ static const struct driver_info cypress_GX3_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1789,7 +1789,7 @@ static const struct driver_info dlink_dub1312_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup 
= ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1802,7 +1802,7 @@ static const struct driver_info sitecom_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1815,7 +1815,7 @@ static const struct driver_info samsung_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1828,7 +1828,7 @@ static const struct driver_info lenovo_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1841,7 +1841,7 @@ static const struct driver_info belkin_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1854,7 +1854,7 @@ static const struct driver_info toshiba_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1867,7 +1867,7 @@ static const struct driver_info mct_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1880,7 +1880,7 @@ static const struct driver_info at_umc2000_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1893,7 +1893,7 @@ static const struct driver_info at_umc200_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1906,7 +1906,7 @@ static const struct driver_info at_umc2000sp_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 466da01ba2e3..2cb833b3006a 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -312,6 +312,7 @@ static bool veth_skb_is_eligible_for_gro(const struct net_device *dev, static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) { struct veth_priv *rcv_priv, *priv = netdev_priv(dev); + struct netdev_queue *queue = NULL; struct veth_rq *rq = NULL; struct net_device *rcv; int length = skb->len; @@ -329,6 +330,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) rxq = skb_get_queue_mapping(skb); if (rxq < rcv->real_num_rx_queues) { rq = &rcv_priv->rq[rxq]; + queue = 
netdev_get_tx_queue(dev, rxq); /* The napi pointer is available when an XDP program is * attached or when GRO is enabled @@ -340,6 +342,8 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) skb_tx_timestamp(skb); if (likely(veth_forward_skb(rcv, skb, rq, use_napi) == NET_RX_SUCCESS)) { + if (queue) + txq_trans_cond_update(queue); if (!use_napi) dev_lstats_add(dev, length); } else { diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index db05b5e930be..969a67970e71 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -2768,7 +2768,6 @@ static const struct ethtool_ops virtnet_ethtool_ops = { static void virtnet_freeze_down(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; - int i; /* Make sure no work handler is accessing the device */ flush_work(&vi->config_work); @@ -2776,14 +2775,8 @@ static void virtnet_freeze_down(struct virtio_device *vdev) netif_tx_lock_bh(vi->dev); netif_device_detach(vi->dev); netif_tx_unlock_bh(vi->dev); - cancel_delayed_work_sync(&vi->refill); - - if (netif_running(vi->dev)) { - for (i = 0; i < vi->max_queue_pairs; i++) { - napi_disable(&vi->rq[i].napi); - virtnet_napi_tx_disable(&vi->sq[i].napi); - } - } + if (netif_running(vi->dev)) + virtnet_close(vi->dev); } static int init_vqs(struct virtnet_info *vi); @@ -2791,7 +2784,7 @@ static int init_vqs(struct virtnet_info *vi); static int virtnet_restore_up(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; - int err, i; + int err; err = init_vqs(vi); if (err) @@ -2800,15 +2793,9 @@ static int virtnet_restore_up(struct virtio_device *vdev) virtio_device_ready(vdev); if (netif_running(vi->dev)) { - for (i = 0; i < vi->curr_queue_pairs; i++) - if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) - schedule_delayed_work(&vi->refill, 0); - - for (i = 0; i < vi->max_queue_pairs; i++) { - virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); - virtnet_napi_tx_enable(vi, vi->sq[i].vq, - &vi->sq[i].napi); - } + err = virtnet_open(vi->dev); + if (err) + return err; } netif_tx_lock_bh(vi->dev); diff --git a/drivers/nfc/nfcmrvl/usb.c b/drivers/nfc/nfcmrvl/usb.c index a99aedff795d..ea7309453096 100644 --- a/drivers/nfc/nfcmrvl/usb.c +++ b/drivers/nfc/nfcmrvl/usb.c @@ -388,13 +388,25 @@ static void nfcmrvl_play_deferred(struct nfcmrvl_usb_drv_data *drv_data) int err; while ((urb = usb_get_from_anchor(&drv_data->deferred))) { + usb_anchor_urb(urb, &drv_data->tx_anchor); + err = usb_submit_urb(urb, GFP_ATOMIC); - if (err) + if (err) { + kfree(urb->setup_packet); + usb_unanchor_urb(urb); + usb_free_urb(urb); break; + } drv_data->tx_in_flight++; + usb_free_urb(urb); + } + + /* Clean up the remaining deferred urbs.
*/ + while ((urb = usb_get_from_anchor(&drv_data->deferred))) { + kfree(urb->setup_packet); + usb_free_urb(urb); } - usb_scuttle_anchored_urbs(&drv_data->deferred); } static int nfcmrvl_resume(struct usb_interface *intf) diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c index 7e213f8ddc98..df8d27cf2956 100644 --- a/drivers/nfc/st21nfca/se.c +++ b/drivers/nfc/st21nfca/se.c @@ -300,6 +300,8 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host, int r = 0; struct device *dev = &hdev->ndev->dev; struct nfc_evt_transaction *transaction; + u32 aid_len; + u8 params_len; pr_debug("connectivity gate event: %x\n", event); @@ -308,43 +310,48 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host, r = nfc_se_connectivity(hdev->ndev, host); break; case ST21NFCA_EVT_TRANSACTION: - /* - * According to specification etsi 102 622 + /* According to specification etsi 102 622 * 11.2.2.4 EVT_TRANSACTION Table 52 * Description Tag Length * AID 81 5 to 16 * PARAMETERS 82 0 to 255 + * + * The key differences are aid storage length is variably sized + * in the packet, but fixed in nfc_evt_transaction, and that the aid_len + * is u8 in the packet, but u32 in the structure, and the tags in + * the packet are not included in nfc_evt_transaction. + * + * size in bytes: 1 1 5-16 1 1 0-255 + * offset: 0 1 2 aid_len + 2 aid_len + 3 aid_len + 4 + * member name: aid_tag(M) aid_len aid params_tag(M) params_len params + * example: 0x81 5-16 X 0x82 0-255 X */ - if (skb->len < NFC_MIN_AID_LENGTH + 2 && - skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG) + if (skb->len < 2 || skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG) return -EPROTO; - transaction = devm_kzalloc(dev, skb->len - 2, GFP_KERNEL); - if (!transaction) - return -ENOMEM; - - transaction->aid_len = skb->data[1]; + aid_len = skb->data[1]; - /* Checking if the length of the AID is valid */ - if (transaction->aid_len > sizeof(transaction->aid)) - return -EINVAL; + if (skb->len < aid_len + 4 || aid_len > sizeof(transaction->aid)) + return -EPROTO; - memcpy(transaction->aid, &skb->data[2], - transaction->aid_len); + params_len = skb->data[aid_len + 3]; - /* Check next byte is PARAMETERS tag (82) */ - if (skb->data[transaction->aid_len + 2] != - NFC_EVT_TRANSACTION_PARAMS_TAG) + /* Verify PARAMETERS tag is (82), and final check that there is enough + * space in the packet to read everything. 
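/* Editor's aside, not part of the patch: the bounds discipline used in
 * this EVT_TRANSACTION parser, restated as a standalone C program so
 * the checks can be exercised outside the kernel. The tag values come
 * from the layout quoted in the comment above; the sample packet and
 * function names are invented. */
#include <stdint.h>
#include <stdio.h>

#define AID_TAG    0x81
#define PARAMS_TAG 0x82
#define MAX_AID    16

/* Mirrors the order of checks: header, AID length, PARAMETERS tag,
 * then the final "does everything fit" test before any allocation. */
static int parse_evt_transaction(const uint8_t *data, size_t len)
{
	uint32_t aid_len, params_len;

	if (len < 2 || data[0] != AID_TAG)
		return -1;			/* -EPROTO */
	aid_len = data[1];
	if (len < aid_len + 4 || aid_len > MAX_AID)
		return -1;			/* AID must fit, 5..16 bytes */
	if (data[aid_len + 2] != PARAMS_TAG)
		return -1;
	params_len = data[aid_len + 3];
	if (len < aid_len + 4 + params_len)
		return -1;			/* params must fit too */
	return 0;				/* safe to copy aid and params */
}

int main(void)
{
	const uint8_t pkt[] = { 0x81, 5, 1, 2, 3, 4, 5, 0x82, 2, 0xaa, 0xbb };

	printf("%s\n", parse_evt_transaction(pkt, sizeof(pkt)) ? "reject" : "ok");
	return 0;
}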
+ */ + if ((skb->data[aid_len + 2] != NFC_EVT_TRANSACTION_PARAMS_TAG) || + (skb->len < aid_len + 4 + params_len)) return -EPROTO; - transaction->params_len = skb->data[transaction->aid_len + 3]; + transaction = devm_kzalloc(dev, sizeof(*transaction) + params_len, GFP_KERNEL); + if (!transaction) + return -ENOMEM; - /* Total size is allocated (skb->len - 2) minus fixed array members */ - if (transaction->params_len > ((skb->len - 2) - sizeof(struct nfc_evt_transaction))) - return -EINVAL; + transaction->aid_len = aid_len; + transaction->params_len = params_len; - memcpy(transaction->params, skb->data + - transaction->aid_len + 4, transaction->params_len); + memcpy(transaction->aid, &skb->data[2], aid_len); + memcpy(transaction->params, &skb->data[aid_len + 4], params_len); r = nfc_se_transaction(hdev->ndev, host, transaction); break; diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 24165daee3c8..b3d9c29aba1e 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -2546,6 +2546,20 @@ static const struct nvme_core_quirk_entry core_quirks[] = { .vid = 0x1e0f, .mn = "KCD6XVUL6T40", .quirks = NVME_QUIRK_NO_APST, + }, + { + /* + * The external Samsung X5 SSD fails initialization without a + * delay before checking if it is ready and has a whole set of + * other problems. To make this even more interesting, it + * shares the PCI ID with internal Samsung 970 Evo Plus that + * does not need or want these quirks. + */ + .vid = 0x144d, + .mn = "Samsung Portable SSD X5", + .quirks = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | + NVME_QUIRK_NO_DEEPEST_PS | + NVME_QUIRK_IGNORE_DEV_SUBNQN, } }; @@ -3285,8 +3299,8 @@ static ssize_t uuid_show(struct device *dev, struct device_attribute *attr, * we have no UUID set */ if (uuid_is_null(&ids->uuid)) { - printk_ratelimited(KERN_WARNING - "No UUID available providing old NGUID\n"); + dev_warn_ratelimited(dev, + "No UUID available providing old NGUID\n"); return sysfs_emit(buf, "%pU\n", ids->nguid); } return sysfs_emit(buf, "%pU\n", &ids->uuid); @@ -3863,6 +3877,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid, if (ret) { dev_err(ctrl->device, "globally duplicate IDs for nsid %d\n", nsid); + nvme_print_device_info(ctrl); return ret; } diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 9b72b6ecf33c..0da94b233fed 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -503,6 +503,7 @@ struct nvme_ctrl_ops { void (*submit_async_event)(struct nvme_ctrl *ctrl); void (*delete_ctrl)(struct nvme_ctrl *ctrl); int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size); + void (*print_device_info)(struct nvme_ctrl *ctrl); }; /* @@ -548,6 +549,33 @@ static inline struct request *nvme_cid_to_rq(struct blk_mq_tags *tags, return blk_mq_tag_to_rq(tags, nvme_tag_from_cid(command_id)); } +/* + * Return the length of the string without the space padding + */ +static inline int nvme_strlen(char *s, int len) +{ + while (s[len - 1] == ' ') + len--; + return len; +} + +static inline void nvme_print_device_info(struct nvme_ctrl *ctrl) +{ + struct nvme_subsystem *subsys = ctrl->subsys; + + if (ctrl->ops->print_device_info) { + ctrl->ops->print_device_info(ctrl); + return; + } + + dev_err(ctrl->device, + "VID:%04x model:%.*s firmware:%.*s\n", subsys->vendor_id, + nvme_strlen(subsys->model, sizeof(subsys->model)), + subsys->model, nvme_strlen(subsys->firmware_rev, + sizeof(subsys->firmware_rev)), + subsys->firmware_rev); +} + #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS void nvme_fault_inject_init(struct 
nvme_fault_inject *fault_inj, const char *dev_name); diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 48f4f6eb877b..d7b24ee17285 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1334,6 +1334,14 @@ static void nvme_warn_reset(struct nvme_dev *dev, u32 csts) dev_warn(dev->ctrl.device, "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n", csts, result); + + if (csts != ~0) + return; + + dev_warn(dev->ctrl.device, + "Does your device have a faulty power saving mode enabled?\n"); + dev_warn(dev->ctrl.device, + "Try \"nvme_core.default_ps_max_latency_us=0 pcie_aspm=off\" and report a bug\n"); } static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) @@ -2976,6 +2984,21 @@ static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size) return snprintf(buf, size, "%s\n", dev_name(&pdev->dev)); } + +static void nvme_pci_print_device_info(struct nvme_ctrl *ctrl) +{ + struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev); + struct nvme_subsystem *subsys = ctrl->subsys; + + dev_err(ctrl->device, + "VID:DID %04x:%04x model:%.*s firmware:%.*s\n", + pdev->vendor, pdev->device, + nvme_strlen(subsys->model, sizeof(subsys->model)), + subsys->model, nvme_strlen(subsys->firmware_rev, + sizeof(subsys->firmware_rev)), + subsys->firmware_rev); +} + static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { .name = "pcie", .module = THIS_MODULE, @@ -2987,6 +3010,7 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { .free_ctrl = nvme_pci_free_ctrl, .submit_async_event = nvme_pci_submit_async_event, .get_address = nvme_pci_get_address, + .print_device_info = nvme_pci_print_device_info, }; static int nvme_dev_map(struct nvme_dev *dev) @@ -3421,7 +3445,8 @@ static const struct pci_device_id nvme_id_table[] = { { PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */ - .driver_data = NVME_QUIRK_NO_NS_DESC_LIST, }, + .driver_data = NVME_QUIRK_NO_NS_DESC_LIST | + NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | NVME_QUIRK_NO_NS_DESC_LIST, }, @@ -3437,6 +3462,8 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | NVME_QUIRK_DISABLE_WRITE_ZEROES| NVME_QUIRK_IGNORE_DEV_SUBNQN, }, + { PCI_DEVICE(0x1987, 0x5012), /* Phison E12 */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_DEVICE(0x1b4b, 0x1092), /* Lexar 256 GB SSD */ @@ -3447,12 +3474,24 @@ static const struct pci_device_id nvme_id_table[] = { { PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS | NVME_QUIRK_IGNORE_DEV_SUBNQN, }, + { PCI_DEVICE(0x1344, 0x5407), /* Micron Technology Inc NVMe SSD */ + .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN }, { PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x1c5c, 0x174a), /* SK Hynix P31 SSD */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x15b7, 0x2001), /* Sandisk Skyhawk */ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE(0x1d97, 0x2263), /* SPCC */ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x144d, 0xa80b), /* Samsung PM9B1 256G and 512G */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x144d, 
0xa809), /* Samsung MZALQ256HBJD 256G */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x1cc4, 0x6303), /* UMIS RPJTJ512MGE1QDY 512G */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x1cc4, 0x6302), /* UMIS RPJTJ256MGE1QDY 256G */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE(0x2646, 0x2262), /* KINGSTON SKC2000 NVMe SSD */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */ @@ -3463,6 +3502,10 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1e4B, 0x1202), /* MAXIO MAP1202 */ .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1cc1, 0x5350), /* ADATA XPG GAMMIX S50 */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1e49, 0x0041), /* ZHITAI TiPro7000 NVMe SSD */ + .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061), .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065), @@ -3483,10 +3526,6 @@ static const struct pci_device_id nvme_id_table[] = { NVME_QUIRK_128_BYTES_SQES | NVME_QUIRK_SHARED_TAGS | NVME_QUIRK_SKIP_CID_GEN }, - { PCI_DEVICE(0x144d, 0xa808), /* Samsung X5 */ - .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY| - NVME_QUIRK_NO_DEEPEST_PS | - NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, { 0, } }; diff --git a/drivers/platform/mellanox/Kconfig b/drivers/platform/mellanox/Kconfig index 72df4b8f4dd8..09c7829e95c4 100644 --- a/drivers/platform/mellanox/Kconfig +++ b/drivers/platform/mellanox/Kconfig @@ -85,7 +85,7 @@ config NVSW_SN2201 depends on I2C depends on REGMAP_I2C help - This driver provides support for the Nvidia SN2201 platfom. + This driver provides support for the Nvidia SN2201 platform. The SN2201 is a highly integrated for one rack unit system with L3 management switches. It has 48 x 1Gbps RJ45 + 4 x 100G QSFP28 ports in a compact 1RU form factor. The system also including a diff --git a/drivers/platform/mellanox/nvsw-sn2201.c b/drivers/platform/mellanox/nvsw-sn2201.c index 0bcdc7c75007..2923daf63b75 100644 --- a/drivers/platform/mellanox/nvsw-sn2201.c +++ b/drivers/platform/mellanox/nvsw-sn2201.c @@ -326,7 +326,7 @@ static struct resource nvsw_sn2201_lpc_res[] = { }; /* SN2201 I2C platform data. 
*/ -struct mlxreg_core_hotplug_platform_data nvsw_sn2201_i2c_data = { +static struct mlxreg_core_hotplug_platform_data nvsw_sn2201_i2c_data = { .irq = NVSW_SN2201_CPLD_SYSIRQ, }; diff --git a/drivers/platform/mips/Kconfig b/drivers/platform/mips/Kconfig index d421e1482395..6b51ad01f791 100644 --- a/drivers/platform/mips/Kconfig +++ b/drivers/platform/mips/Kconfig @@ -17,7 +17,7 @@ menuconfig MIPS_PLATFORM_DEVICES if MIPS_PLATFORM_DEVICES config CPU_HWMON - tristate "Loongson-3 CPU HWMon Driver" + bool "Loongson-3 CPU HWMon Driver" depends on MACH_LOONGSON64 select HWMON default y diff --git a/drivers/platform/x86/barco-p50-gpio.c b/drivers/platform/x86/barco-p50-gpio.c index 05534287bc26..8dd672339485 100644 --- a/drivers/platform/x86/barco-p50-gpio.c +++ b/drivers/platform/x86/barco-p50-gpio.c @@ -405,11 +405,14 @@ MODULE_DEVICE_TABLE(dmi, dmi_ids); static int __init p50_module_init(void) { struct resource res = DEFINE_RES_IO(P50_GPIO_IO_PORT_BASE, P50_PORT_CMD + 1); + int ret; if (!dmi_first_match(dmi_ids)) return -ENODEV; - platform_driver_register(&p50_gpio_driver); + ret = platform_driver_register(&p50_gpio_driver); + if (ret) + return ret; gpio_pdev = platform_device_register_simple(DRIVER_NAME, PLATFORM_DEVID_NONE, &res, 1); if (IS_ERR(gpio_pdev)) { diff --git a/drivers/platform/x86/gigabyte-wmi.c b/drivers/platform/x86/gigabyte-wmi.c index 1ef606e3ef80..497ad2f64a51 100644 --- a/drivers/platform/x86/gigabyte-wmi.c +++ b/drivers/platform/x86/gigabyte-wmi.c @@ -140,6 +140,7 @@ static u8 gigabyte_wmi_detect_sensor_usability(struct wmi_device *wdev) }} static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = { + DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M DS3H-CF"), DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M S2H V2"), DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE AX V2"), DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE"), @@ -156,6 +157,7 @@ static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = { DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 GAMING X"), DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 I AORUS PRO WIFI"), DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 UD"), + DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z690M AORUS ELITE AX DDR4"), { } }; diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index 667f94bba905..0d8cb22e30df 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -38,6 +38,7 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4"); #define HPWMI_EVENT_GUID "95F24279-4D7B-4334-9387-ACCDC67EF61C" #define HPWMI_BIOS_GUID "5FB7F034-2C63-45e9-BE91-3D44E2C707E4" #define HP_OMEN_EC_THERMAL_PROFILE_OFFSET 0x95 +#define zero_if_sup(tmp) (zero_insize_support?0:sizeof(tmp)) // use when zero insize is required /* DMI board names of devices that should use the omen specific path for * thermal profiles. 
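/* Editor's aside, not part of the patch: a userspace model of the
 * zero_if_sup() idea above -- probe once at init whether the firmware
 * insists on a zero-length input buffer, then size every later query
 * accordingly. fw_query() and FW_RET_INVALID_PARAMETERS are
 * hypothetical stand-ins for hp_wmi_perform_query() and
 * HPWMI_RET_INVALID_PARAMETERS. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define FW_RET_INVALID_PARAMETERS 5	/* hypothetical error code */

static bool zero_insize_support;

#define zero_if_sup(tmp) (zero_insize_support ? 0 : sizeof(tmp))

/* Pretend firmware that rejects any non-zero insize, like newer BIOSes. */
static int fw_query(int query, void *buf, size_t insize, size_t outsize)
{
	(void)query; (void)buf; (void)outsize;
	return insize ? FW_RET_INVALID_PARAMETERS : 0;
}

int main(void)
{
	int tmp = 0, val = 0;

	/* One probe at init time, as hp_wmi_init() now does. */
	if (fw_query(0, &tmp, sizeof(tmp), sizeof(tmp)) ==
	    FW_RET_INVALID_PARAMETERS)
		zero_insize_support = true;

	/* Every later read picks the right insize automatically. */
	printf("query rc = %d\n",
	       fw_query(1, &val, zero_if_sup(val), sizeof(val)));
	return 0;
}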
@@ -220,6 +221,7 @@ static struct input_dev *hp_wmi_input_dev; static struct platform_device *hp_wmi_platform_dev; static struct platform_profile_handler platform_profile_handler; static bool platform_profile_support; +static bool zero_insize_support; static struct rfkill *wifi_rfkill; static struct rfkill *bluetooth_rfkill; @@ -290,14 +292,16 @@ static int hp_wmi_perform_query(int query, enum hp_wmi_command command, struct bios_return *bios_return; union acpi_object *obj = NULL; struct bios_args *args = NULL; - int mid, actual_outsize, ret; + int mid, actual_insize, actual_outsize; size_t bios_args_size; + int ret; mid = encode_outsize_for_pvsz(outsize); if (WARN_ON(mid < 0)) return mid; - bios_args_size = struct_size(args, data, insize); + actual_insize = max(insize, 128); + bios_args_size = struct_size(args, data, actual_insize); args = kmalloc(bios_args_size, GFP_KERNEL); if (!args) return -ENOMEM; @@ -374,7 +378,7 @@ static int hp_wmi_read_int(int query) int val = 0, ret; ret = hp_wmi_perform_query(query, HPWMI_READ, &val, - 0, sizeof(val)); + zero_if_sup(val), sizeof(val)); if (ret) return ret < 0 ? ret : -EINVAL; @@ -410,7 +414,8 @@ static int hp_wmi_get_tablet_mode(void) return -ENODEV; ret = hp_wmi_perform_query(HPWMI_SYSTEM_DEVICE_MODE, HPWMI_READ, - system_device_mode, 0, sizeof(system_device_mode)); + system_device_mode, zero_if_sup(system_device_mode), + sizeof(system_device_mode)); if (ret < 0) return ret; @@ -497,7 +502,7 @@ static int hp_wmi_fan_speed_max_get(void) int val = 0, ret; ret = hp_wmi_perform_query(HPWMI_FAN_SPEED_MAX_GET_QUERY, HPWMI_GM, - &val, 0, sizeof(val)); + &val, zero_if_sup(val), sizeof(val)); if (ret) return ret < 0 ? ret : -EINVAL; @@ -509,7 +514,7 @@ static int __init hp_wmi_bios_2008_later(void) { int state = 0; int ret = hp_wmi_perform_query(HPWMI_FEATURE_QUERY, HPWMI_READ, &state, - 0, sizeof(state)); + zero_if_sup(state), sizeof(state)); if (!ret) return 1; @@ -520,7 +525,7 @@ static int __init hp_wmi_bios_2009_later(void) { u8 state[128]; int ret = hp_wmi_perform_query(HPWMI_FEATURE2_QUERY, HPWMI_READ, &state, - 0, sizeof(state)); + zero_if_sup(state), sizeof(state)); if (!ret) return 1; @@ -598,7 +603,7 @@ static int hp_wmi_rfkill2_refresh(void) int err, i; err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_READ, &state, - 0, sizeof(state)); + zero_if_sup(state), sizeof(state)); if (err) return err; @@ -1007,7 +1012,7 @@ static int __init hp_wmi_rfkill2_setup(struct platform_device *device) int err, i; err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_READ, &state, - 0, sizeof(state)); + zero_if_sup(state), sizeof(state)); if (err) return err < 0 ? 
err : -EINVAL; @@ -1483,11 +1488,15 @@ static int __init hp_wmi_init(void) { int event_capable = wmi_has_guid(HPWMI_EVENT_GUID); int bios_capable = wmi_has_guid(HPWMI_BIOS_GUID); - int err; + int err, tmp = 0; if (!bios_capable && !event_capable) return -ENODEV; + if (hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, HPWMI_READ, &tmp, + sizeof(tmp), sizeof(tmp)) == HPWMI_RET_INVALID_PARAMETERS) + zero_insize_support = true; + if (event_capable) { err = hp_wmi_input_setup(); if (err) diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c index 216d31e3403d..79cff1fc675c 100644 --- a/drivers/platform/x86/intel/hid.c +++ b/drivers/platform/x86/intel/hid.c @@ -122,6 +122,12 @@ static const struct dmi_system_id dmi_vgbs_allow_list[] = { DMI_MATCH(DMI_PRODUCT_NAME, "HP Spectre x360 Convertible 15-df0xxx"), }, }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go"), + }, + }, { } }; diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c index edaf22e5ae98..40183bda7894 100644 --- a/drivers/platform/x86/intel/pmc/core.c +++ b/drivers/platform/x86/intel/pmc/core.c @@ -1912,6 +1912,7 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = { X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, &tgl_reg_map), X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &tgl_reg_map), X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &adl_reg_map), + X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &tgl_reg_map), {} }; diff --git a/drivers/platform/x86/intel/pmt/crashlog.c b/drivers/platform/x86/intel/pmt/crashlog.c index 34daf9df168b..ace1239bc0a0 100644 --- a/drivers/platform/x86/intel/pmt/crashlog.c +++ b/drivers/platform/x86/intel/pmt/crashlog.c @@ -282,7 +282,7 @@ static int pmt_crashlog_probe(struct auxiliary_device *auxdev, auxiliary_set_drvdata(auxdev, priv); for (i = 0; i < intel_vsec_dev->num_resources; i++) { - struct intel_pmt_entry *entry = &priv->entry[i].entry; + struct intel_pmt_entry *entry = &priv->entry[priv->num_entries].entry; ret = intel_pmt_dev_create(entry, &pmt_crashlog_ns, intel_vsec_dev, i); if (ret < 0) diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c index 7dff94a2eb7e..ef6e47d025ca 100644 --- a/drivers/regulator/qcom_smd-regulator.c +++ b/drivers/regulator/qcom_smd-regulator.c @@ -723,19 +723,19 @@ static const struct regulator_desc pms405_pldo600 = { static const struct regulator_desc mp5496_smpa2 = { .linear_ranges = (struct linear_range[]) { - REGULATOR_LINEAR_RANGE(725000, 0, 27, 12500), + REGULATOR_LINEAR_RANGE(600000, 0, 127, 12500), }, .n_linear_ranges = 1, - .n_voltages = 28, + .n_voltages = 128, .ops = &rpm_mp5496_ops, }; static const struct regulator_desc mp5496_ldoa2 = { .linear_ranges = (struct linear_range[]) { - REGULATOR_LINEAR_RANGE(1800000, 0, 60, 25000), + REGULATOR_LINEAR_RANGE(800000, 0, 127, 25000), }, .n_linear_ranges = 1, - .n_voltages = 61, + .n_voltages = 128, .ops = &rpm_mp5496_ops, }; diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index d0eab5700dc5..00684e11976b 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -160,8 +160,8 @@ static void ibmvfc_npiv_logout(struct ibmvfc_host *); static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *); static void ibmvfc_tgt_move_login(struct ibmvfc_target *); -static void ibmvfc_release_sub_crqs(struct ibmvfc_host *); -static void ibmvfc_init_sub_crqs(struct ibmvfc_host *); +static void ibmvfc_dereg_sub_crqs(struct 
ibmvfc_host *); +static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *); static const char *unknown_error = "unknown error"; @@ -917,7 +917,7 @@ static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost) struct vio_dev *vdev = to_vio_dev(vhost->dev); unsigned long flags; - ibmvfc_release_sub_crqs(vhost); + ibmvfc_dereg_sub_crqs(vhost); /* Re-enable the CRQ */ do { @@ -936,7 +936,7 @@ static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost) spin_unlock(vhost->crq.q_lock); spin_unlock_irqrestore(vhost->host->host_lock, flags); - ibmvfc_init_sub_crqs(vhost); + ibmvfc_reg_sub_crqs(vhost); return rc; } @@ -955,7 +955,7 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost) struct vio_dev *vdev = to_vio_dev(vhost->dev); struct ibmvfc_queue *crq = &vhost->crq; - ibmvfc_release_sub_crqs(vhost); + ibmvfc_dereg_sub_crqs(vhost); /* Close the CRQ */ do { @@ -988,7 +988,7 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost) spin_unlock(vhost->crq.q_lock); spin_unlock_irqrestore(vhost->host->host_lock, flags); - ibmvfc_init_sub_crqs(vhost); + ibmvfc_reg_sub_crqs(vhost); return rc; } @@ -5682,6 +5682,8 @@ static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost, queue->cur = 0; queue->fmt = fmt; queue->size = PAGE_SIZE / fmt_size; + + queue->vhost = vhost; return 0; } @@ -5757,9 +5759,6 @@ static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost, ENTER; - if (ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT)) - return -ENOMEM; - rc = h_reg_sub_crq(vdev->unit_address, scrq->msg_token, PAGE_SIZE, &scrq->cookie, &scrq->hw_irq); @@ -5790,7 +5789,6 @@ static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost, } scrq->hwq_id = index; - scrq->vhost = vhost; LEAVE; return 0; @@ -5800,7 +5798,6 @@ irq_failed: rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, scrq->cookie); } while (rtas_busy_delay(rc)); reg_failed: - ibmvfc_free_queue(vhost, scrq); LEAVE; return rc; } @@ -5826,12 +5823,50 @@ static void ibmvfc_deregister_scsi_channel(struct ibmvfc_host *vhost, int index) if (rc) dev_err(dev, "Failed to free sub-crq[%d]: rc=%ld\n", index, rc); - ibmvfc_free_queue(vhost, scrq); + /* Clean out the queue */ + memset(scrq->msgs.crq, 0, PAGE_SIZE); + scrq->cur = 0; + + LEAVE; +} + +static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *vhost) +{ + int i, j; + + ENTER; + if (!vhost->mq_enabled || !vhost->scsi_scrqs.scrqs) + return; + + for (i = 0; i < nr_scsi_hw_queues; i++) { + if (ibmvfc_register_scsi_channel(vhost, i)) { + for (j = i; j > 0; j--) + ibmvfc_deregister_scsi_channel(vhost, j - 1); + vhost->do_enquiry = 0; + return; + } + } + + LEAVE; +} + +static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *vhost) +{ + int i; + + ENTER; + if (!vhost->mq_enabled || !vhost->scsi_scrqs.scrqs) + return; + + for (i = 0; i < nr_scsi_hw_queues; i++) + ibmvfc_deregister_scsi_channel(vhost, i); + LEAVE; } static void ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost) { + struct ibmvfc_queue *scrq; int i, j; ENTER; @@ -5847,30 +5882,41 @@ static void ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost) } for (i = 0; i < nr_scsi_hw_queues; i++) { - if (ibmvfc_register_scsi_channel(vhost, i)) { - for (j = i; j > 0; j--) - ibmvfc_deregister_scsi_channel(vhost, j - 1); + scrq = &vhost->scsi_scrqs.scrqs[i]; + if (ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT)) { + for (j = i; j > 0; j--) { + scrq = &vhost->scsi_scrqs.scrqs[j - 1]; + ibmvfc_free_queue(vhost, scrq); + } kfree(vhost->scsi_scrqs.scrqs); vhost->scsi_scrqs.scrqs = NULL; vhost->scsi_scrqs.active_queues = 0; vhost->do_enquiry = 0; - 
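/* Editor's aside, not part of the patch: the unwind idiom that
 * ibmvfc_reg_sub_crqs() uses above -- on the first registration
 * failure, deregister everything registered so far, newest first, so
 * no channel is leaked. reg()/dereg() are hypothetical stand-ins for
 * the register/deregister channel helpers. */
#include <stdio.h>

#define NQUEUES 4

static int reg(int i)    { printf("reg %d\n", i); return i == 2 ? -1 : 0; }
static void dereg(int i) { printf("dereg %d\n", i); }

int main(void)
{
	int i, j;

	for (i = 0; i < NQUEUES; i++) {
		if (reg(i)) {
			/* j runs i..1 and frees j - 1, so index 0 is
			 * unwound too; the ipr and pmcraid hunks below
			 * fix the same off-by-one with --i >= 0. */
			for (j = i; j > 0; j--)
				dereg(j - 1);
			return 1;
		}
	}
	return 0;
}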
break; + vhost->mq_enabled = 0; + return; } } + ibmvfc_reg_sub_crqs(vhost); + LEAVE; } static void ibmvfc_release_sub_crqs(struct ibmvfc_host *vhost) { + struct ibmvfc_queue *scrq; int i; ENTER; if (!vhost->scsi_scrqs.scrqs) return; - for (i = 0; i < nr_scsi_hw_queues; i++) - ibmvfc_deregister_scsi_channel(vhost, i); + ibmvfc_dereg_sub_crqs(vhost); + + for (i = 0; i < nr_scsi_hw_queues; i++) { + scrq = &vhost->scsi_scrqs.scrqs[i]; + ibmvfc_free_queue(vhost, scrq); + } kfree(vhost->scsi_scrqs.scrqs); vhost->scsi_scrqs.scrqs = NULL; diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h index 3718406e0988..c39a245f43d0 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.h +++ b/drivers/scsi/ibmvscsi/ibmvfc.h @@ -789,6 +789,7 @@ struct ibmvfc_queue { spinlock_t _lock; spinlock_t *q_lock; + struct ibmvfc_host *vhost; struct ibmvfc_event_pool evt_pool; struct list_head sent; struct list_head free; @@ -797,7 +798,6 @@ struct ibmvfc_queue { union ibmvfc_iu cancel_rsp; /* Sub-CRQ fields */ - struct ibmvfc_host *vhost; unsigned long cookie; unsigned long vios_cookie; unsigned long hw_irq; diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 256ec6d08c16..9d01a3e3c26a 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -9795,7 +9795,7 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg) GFP_KERNEL); if (!ioa_cfg->hrrq[i].host_rrq) { - while (--i > 0) + while (--i >= 0) dma_free_coherent(&pdev->dev, sizeof(u32) * ioa_cfg->hrrq[i].size, ioa_cfg->hrrq[i].host_rrq, @@ -10068,7 +10068,7 @@ static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg, ioa_cfg->vectors_info[i].desc, &ioa_cfg->hrrq[i]); if (rc) { - while (--i >= 0) + while (--i > 0) free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]); return rc; diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index b1be0dd0337a..f5d74958b664 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -420,8 +420,6 @@ int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, uint32_t, uint32_t); void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *); -void lpfc_sli4_abort_fcp_cmpl(struct lpfc_hba *h, struct lpfc_iocbq *i, - struct lpfc_wcqe_complete *w); void lpfc_sli_free_hbq(struct lpfc_hba *, struct hbq_dmabuf *); @@ -630,7 +628,7 @@ void lpfc_nvmet_invalidate_host(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp); void lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, - struct lpfc_wcqe_complete *abts_cmpl); + struct lpfc_iocbq *rspiocb); void lpfc_create_multixri_pools(struct lpfc_hba *phba); void lpfc_create_destroy_pools(struct lpfc_hba *phba); void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid); diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 9d36b20fb878..13dfe285493d 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -197,7 +197,7 @@ lpfc_ct_reject_event(struct lpfc_nodelist *ndlp, memset(bpl, 0, sizeof(struct ulp_bde64)); bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys)); bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys)); - bpl->tus.f.bdeFlags = BUFF_TYPE_BLP_64; + bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; bpl->tus.f.bdeSize = (LPFC_CT_PREAMBLE - 4); bpl->tus.w = le32_to_cpu(bpl->tus.w); diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 07f9a6e61e10..3fababb7c181 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -2998,10 +2998,7 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct 
lpfc_iocbq *cmdiocb, ndlp->nlp_DID, ulp_status, ulp_word4); - /* Call NLP_EVT_DEVICE_RM if link is down or LOGO is aborted */ if (lpfc_error_lost_link(ulp_status, ulp_word4)) { - lpfc_disc_state_machine(vport, ndlp, cmdiocb, - NLP_EVT_DEVICE_RM); skip_recovery = 1; goto out; } @@ -3021,18 +3018,10 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, spin_unlock_irq(&ndlp->lock); lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_DEVICE_RM); - lpfc_els_free_iocb(phba, cmdiocb); - lpfc_nlp_put(ndlp); - - /* Presume the node was released. */ - return; + goto out_rsrc_free; } out: - /* Driver is done with the IO. */ - lpfc_els_free_iocb(phba, cmdiocb); - lpfc_nlp_put(ndlp); - /* At this point, the LOGO processing is complete. NOTE: For a * pt2pt topology, we are assuming the NPortID will only change * on link up processing. For a LOGO / PLOGI initiated by the @@ -3059,6 +3048,10 @@ out: ndlp->nlp_DID, ulp_status, ulp_word4, tmo, vport->num_disc_nodes); + + lpfc_els_free_iocb(phba, cmdiocb); + lpfc_nlp_put(ndlp); + lpfc_disc_start(vport); return; } @@ -3075,6 +3068,10 @@ out: lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_DEVICE_RM); } +out_rsrc_free: + /* Driver is done with the I/O. */ + lpfc_els_free_iocb(phba, cmdiocb); + lpfc_nlp_put(ndlp); } /** diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 8511369d2cf8..f024415731ac 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -4487,6 +4487,9 @@ struct wqe_common { #define wqe_sup_SHIFT 6 #define wqe_sup_MASK 0x00000001 #define wqe_sup_WORD word11 +#define wqe_ffrq_SHIFT 6 +#define wqe_ffrq_MASK 0x00000001 +#define wqe_ffrq_WORD word11 #define wqe_wqec_SHIFT 7 #define wqe_wqec_MASK 0x00000001 #define wqe_wqec_WORD word11 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 93b94c64518d..750dd1e9f2cc 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -12188,7 +12188,7 @@ lpfc_sli_enable_msi(struct lpfc_hba *phba) rc = pci_enable_msi(phba->pcidev); if (!rc) lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "0462 PCI enable MSI mode success.\n"); + "0012 PCI enable MSI mode success.\n"); else { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0471 PCI enable MSI mode failed (%d)\n", rc); diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 639f86635127..b86ff9fcdf0c 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -834,7 +834,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_nvmet_invalidate_host(phba, ndlp); if (ndlp->nlp_DID == Fabric_DID) { - if (vport->port_state <= LPFC_FDISC) + if (vport->port_state <= LPFC_FDISC || + vport->fc_flag & FC_PT2PT) goto out; lpfc_linkdown_port(vport); spin_lock_irq(shost->host_lock); diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index 335e90633933..cd10ee6482fc 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -1065,25 +1065,37 @@ lpfc_nvme_io_cmd_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, nCmd->rcv_rsplen = wcqe->parameter; nCmd->status = 0; + /* Get the NVME cmd details for this unique error. 
*/ + cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr; + ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr; + /* Check if this is really an ERSP */ if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN) { lpfc_ncmd->status = IOSTAT_SUCCESS; lpfc_ncmd->result = 0; lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, - "6084 NVME Completion ERSP: " - "xri %x placed x%x\n", - lpfc_ncmd->cur_iocbq.sli4_xritag, - wcqe->total_data_placed); + "6084 NVME FCP_ERR ERSP: " + "xri %x placed x%x opcode x%x cmd_id " + "x%x cqe_status x%x\n", + lpfc_ncmd->cur_iocbq.sli4_xritag, + wcqe->total_data_placed, + cp->sqe.common.opcode, + cp->sqe.common.command_id, + ep->cqe.status); break; } lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6081 NVME Completion Protocol Error: " "xri %x status x%x result x%x " - "placed x%x\n", + "placed x%x opcode x%x cmd_id x%x, " + "cqe_status x%x\n", lpfc_ncmd->cur_iocbq.sli4_xritag, lpfc_ncmd->status, lpfc_ncmd->result, - wcqe->total_data_placed); + wcqe->total_data_placed, + cp->sqe.common.opcode, + cp->sqe.common.command_id, + ep->cqe.status); break; case IOSTAT_LOCAL_REJECT: /* Let fall through to set command final state. */ @@ -1195,7 +1207,8 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport, { struct lpfc_hba *phba = vport->phba; struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd; - struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq); + struct nvme_common_command *sqe; + struct lpfc_iocbq *pwqeq = &lpfc_ncmd->cur_iocbq; union lpfc_wqe128 *wqe = &pwqeq->wqe; uint32_t req_len; @@ -1252,8 +1265,14 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport, cstat->control_requests++; } - if (pnode->nlp_nvme_info & NLP_NVME_NSLER) + if (pnode->nlp_nvme_info & NLP_NVME_NSLER) { bf_set(wqe_erp, &wqe->generic.wqe_com, 1); + sqe = &((struct nvme_fc_cmd_iu *) + nCmd->cmdaddr)->sqe.common; + if (sqe->opcode == nvme_admin_async_event) + bf_set(wqe_ffrq, &wqe->generic.wqe_com, 1); + } + /* * Finish initializing those WQE fields that are independent * of the nvme_cmnd request_buffer @@ -1787,7 +1806,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request. * @phba: Pointer to HBA context object * @cmdiocb: Pointer to command iocb object. - * @abts_cmpl: Pointer to wcqe complete object. + * @rspiocb: Pointer to response iocb object. * * This is the callback function for any NVME FCP IO that was aborted. * @@ -1796,8 +1815,10 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, **/ void lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, - struct lpfc_wcqe_complete *abts_cmpl) + struct lpfc_iocbq *rspiocb) { + struct lpfc_wcqe_complete *abts_cmpl = &rspiocb->wcqe_cmpl; + lpfc_printf_log(phba, KERN_INFO, LOG_NVME, "6145 ABORT_XRI_CN completing on rpi x%x " "original iotag x%x, abort cmd iotag x%x " @@ -1840,6 +1861,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, struct lpfc_nvme_fcpreq_priv *freqpriv; unsigned long flags; int ret_val; + struct nvme_fc_cmd_iu *cp; /* Validate pointers. LLDD fault handling with transport does * have timing races. @@ -1963,10 +1985,16 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, return; } + /* + * Get Command Id from cmd to plug into response. This + * code is not needed in the next NVME Transport drop. 
+ */ + cp = (struct nvme_fc_cmd_iu *)lpfc_nbuf->nvmeCmd->cmdaddr; lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS, "6138 Transport Abort NVME Request Issued for " - "ox_id x%x\n", - nvmereq_wqe->sli4_xritag); + "ox_id x%x nvme opcode x%x nvme cmd_id x%x\n", + nvmereq_wqe->sli4_xritag, cp->sqe.common.opcode, + cp->sqe.common.command_id); return; out_unlock: diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index d43968203248..ba5e4016262e 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -6062,6 +6062,9 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) int status; u32 logit = LOG_FCP; + if (!rport) + return FAILED; + rdata = rport->dd_data; if (!rdata || !rdata->pnode) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, @@ -6140,6 +6143,9 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd) unsigned long flags; DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); + if (!rport) + return FAILED; + rdata = rport->dd_data; if (!rdata || !rdata->pnode) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 6ed696c4602a..80ac3a051c19 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -1930,7 +1930,7 @@ lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total) sync_buf = __lpfc_sli_get_iocbq(phba); if (!sync_buf) { lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, - "6213 No available WQEs for CMF_SYNC_WQE\n"); + "6244 No available WQEs for CMF_SYNC_WQE\n"); ret_val = ENOMEM; goto out_unlock; } @@ -3805,7 +3805,7 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, set_job_ulpword4(cmdiocbp, IOERR_ABORT_REQUESTED); /* - * For SLI4, irsiocb contains + * For SLI4, irspiocb contains * NO_XRI in sli_xritag, it * shall not affect releasing * sgl (xri) process. @@ -3823,7 +3823,7 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, } } } - (cmdiocbp->cmd_cmpl) (phba, cmdiocbp, saveq); + cmdiocbp->cmd_cmpl(phba, cmdiocbp, saveq); } else lpfc_sli_release_iocbq(phba, cmdiocbp); } else { @@ -4063,8 +4063,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED; if (cmdiocbq->cmd_cmpl) { spin_unlock_irqrestore(&phba->hbalock, iflag); - (cmdiocbq->cmd_cmpl)(phba, cmdiocbq, - &rspiocbq); + cmdiocbq->cmd_cmpl(phba, cmdiocbq, &rspiocbq); spin_lock_irqsave(&phba->hbalock, iflag); } break; @@ -10288,7 +10287,7 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, * @flag: Flag indicating if this command can be put into txq. * * __lpfc_sli_issue_fcp_io_s3 is wrapper function to invoke lockless func to - * send an iocb command to an HBA with SLI-4 interface spec. + * send an iocb command to an HBA with SLI-3 interface spec. * * This function takes the hbalock before invoking the lockless version. 
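/* Editor's aside, not part of the patch: the locked-wrapper /
 * lockless-core convention this kernel-doc describes, modeled in
 * userspace. By lpfc (and wider kernel) convention the
 * double-underscore variant assumes its caller already holds the
 * lock; the plain wrapper takes it. Names are hypothetical, not lpfc
 * symbols. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hba_lock = PTHREAD_MUTEX_INITIALIZER;

/* Lockless core: caller must hold hba_lock. */
static int __issue_iocb(int ring, int iocb)
{
	printf("queued iocb %d on ring %d\n", iocb, ring);
	return 0;
}

/* Wrapper: takes the lock, invokes the lockless version, releases it. */
static int issue_iocb(int ring, int iocb)
{
	int rc;

	pthread_mutex_lock(&hba_lock);
	rc = __issue_iocb(ring, iocb);
	pthread_mutex_unlock(&hba_lock);
	return rc;
}

int main(void)
{
	return issue_iocb(0, 42);
}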
* The function will return success after it successfully submit the wqe to @@ -12740,7 +12739,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, cmdiocbq->cmd_cmpl = cmdiocbq->wait_cmd_cmpl; cmdiocbq->wait_cmd_cmpl = NULL; if (cmdiocbq->cmd_cmpl) - (cmdiocbq->cmd_cmpl)(phba, cmdiocbq, NULL); + cmdiocbq->cmd_cmpl(phba, cmdiocbq, NULL); else lpfc_sli_release_iocbq(phba, cmdiocbq); return; @@ -12754,9 +12753,9 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, /* Set the exchange busy flag for task management commands */ if ((cmdiocbq->cmd_flag & LPFC_IO_FCP) && - !(cmdiocbq->cmd_flag & LPFC_IO_LIBDFC)) { + !(cmdiocbq->cmd_flag & LPFC_IO_LIBDFC)) { lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf, - cur_iocbq); + cur_iocbq); if (rspiocbq && (rspiocbq->cmd_flag & LPFC_EXCHANGE_BUSY)) lpfc_cmd->flags |= LPFC_SBUF_XBUSY; else @@ -13896,7 +13895,7 @@ void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba) * @irspiocbq: Pointer to work-queue completion queue entry. * * This routine handles an ELS work-queue completion event and construct - * a pseudo response ELS IODBQ from the SLI4 ELS WCQE for the common + * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common * discovery engine to handle. * * Return: Pointer to the receive IOCBQ, NULL otherwise. @@ -13940,7 +13939,7 @@ lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba, if (bf_get(lpfc_wcqe_c_xb, wcqe)) { spin_lock_irqsave(&phba->hbalock, iflags); - cmdiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY; + irspiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY; spin_unlock_irqrestore(&phba->hbalock, iflags); } @@ -14799,7 +14798,7 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, /* Pass the cmd_iocb and the wcqe to the upper layer */ memcpy(&cmdiocbq->wcqe_cmpl, wcqe, sizeof(struct lpfc_wcqe_complete)); - (cmdiocbq->cmd_cmpl)(phba, cmdiocbq, cmdiocbq); + cmdiocbq->cmd_cmpl(phba, cmdiocbq, cmdiocbq); } else { lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "0375 FCP cmdiocb not callback function " @@ -18956,7 +18955,7 @@ lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport, /* Free iocb created in lpfc_prep_seq */ list_for_each_entry_safe(curr_iocb, next_iocb, - &iocbq->list, list) { + &iocbq->list, list) { list_del_init(&curr_iocb->list); lpfc_sli_release_iocbq(phba, curr_iocb); } diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 4fab79ed58ed..2ab6f7db64d8 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -20,7 +20,7 @@ * included with this package. 
* *******************************************************************/ -#define LPFC_DRIVER_VERSION "14.2.0.3" +#define LPFC_DRIVER_VERSION "14.2.0.4" #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 37d46ae5c61d..9a1ae52bb621 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -5369,6 +5369,7 @@ static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc) Mpi2ConfigReply_t mpi_reply; Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL; Mpi26PCIeIOUnitPage1_t pcie_iounit_pg1; + u16 depth; int sz; int rc = 0; @@ -5380,7 +5381,7 @@ static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc) goto out; /* sas iounit page 1 */ sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData); - sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); + sas_iounit_pg1 = kzalloc(sizeof(Mpi2SasIOUnitPage1_t), GFP_KERNEL); if (!sas_iounit_pg1) { pr_err("%s: failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); @@ -5393,16 +5394,16 @@ static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc) ioc->name, __FILE__, __LINE__, __func__); goto out; } - ioc->max_wideport_qd = - (le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth)) ? - le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth) : - MPT3SAS_SAS_QUEUE_DEPTH; - ioc->max_narrowport_qd = - (le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth)) ? - le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth) : - MPT3SAS_SAS_QUEUE_DEPTH; - ioc->max_sata_qd = (sas_iounit_pg1->SATAMaxQDepth) ? - sas_iounit_pg1->SATAMaxQDepth : MPT3SAS_SATA_QUEUE_DEPTH; + + depth = le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth); + ioc->max_wideport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH); + + depth = le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth); + ioc->max_narrowport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH); + + depth = sas_iounit_pg1->SATAMaxQDepth; + ioc->max_sata_qd = (depth ? 
depth : MPT3SAS_SATA_QUEUE_DEPTH); + /* pcie iounit page 1 */ rc = mpt3sas_config_get_pcie_iounit_pg1(ioc, &mpi_reply, &pcie_iounit_pg1, sizeof(Mpi26PCIeIOUnitPage1_t)); diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index bfce60183a6e..836ddc476764 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c @@ -4031,7 +4031,7 @@ pmcraid_register_interrupt_handler(struct pmcraid_instance *pinstance) return 0; out_unwind: - while (--i > 0) + while (--i >= 0) free_irq(pci_irq_vector(pdev, i), &pinstance->hrrq_vector[i]); pci_free_irq_vectors(pdev); return rc; diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 1f423f723d06..b8a76b89f85a 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -2826,6 +2826,24 @@ static void zbc_open_zone(struct sdebug_dev_info *devip, } } +static inline void zbc_set_zone_full(struct sdebug_dev_info *devip, + struct sdeb_zone_state *zsp) +{ + switch (zsp->z_cond) { + case ZC2_IMPLICIT_OPEN: + devip->nr_imp_open--; + break; + case ZC3_EXPLICIT_OPEN: + devip->nr_exp_open--; + break; + default: + WARN_ONCE(true, "Invalid zone %llu condition %x\n", + zsp->z_start, zsp->z_cond); + break; + } + zsp->z_cond = ZC5_FULL; +} + static void zbc_inc_wp(struct sdebug_dev_info *devip, unsigned long long lba, unsigned int num) { @@ -2838,7 +2856,7 @@ static void zbc_inc_wp(struct sdebug_dev_info *devip, if (zsp->z_type == ZBC_ZTYPE_SWR) { zsp->z_wp += num; if (zsp->z_wp >= zend) - zsp->z_cond = ZC5_FULL; + zbc_set_zone_full(devip, zsp); return; } @@ -2857,7 +2875,7 @@ static void zbc_inc_wp(struct sdebug_dev_info *devip, n = num; } if (zsp->z_wp >= zend) - zsp->z_cond = ZC5_FULL; + zbc_set_zone_full(devip, zsp); num -= n; lba += n; diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 2c0dd64159b0..5d21f07456c6 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -212,7 +212,12 @@ iscsi_create_endpoint(int dd_size) return NULL; mutex_lock(&iscsi_ep_idr_mutex); - id = idr_alloc(&iscsi_ep_idr, ep, 0, -1, GFP_NOIO); + + /* + * First endpoint id should be 1 to comply with user space + * applications (iscsid). + */ + id = idr_alloc(&iscsi_ep_idr, ep, 1, -1, GFP_NOIO); if (id < 0) { mutex_unlock(&iscsi_ep_idr_mutex); printk(KERN_ERR "Could not allocate endpoint ID. 
Error %d.\n", diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 895b56c8f25e..a1a2ac09066f 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -3072,7 +3072,7 @@ static void sd_read_cpr(struct scsi_disk *sdkp) goto out; /* We must have at least a 64B header and one 32B range descriptor */ - vpd_len = get_unaligned_be16(&buffer[2]) + 3; + vpd_len = get_unaligned_be16(&buffer[2]) + 4; if (vpd_len > buf_len || vpd_len < 64 + 32 || (vpd_len & 31)) { sd_printk(KERN_ERR, sdkp, "Invalid Concurrent Positioning Ranges VPD page\n"); diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index ca3530982e52..fe000da11332 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1844,7 +1844,7 @@ static struct scsi_host_template scsi_driver = { .cmd_per_lun = 2048, .this_id = -1, /* Ensure there are no gaps in presented sgls */ - .virt_boundary_mask = PAGE_SIZE-1, + .virt_boundary_mask = HV_HYP_PAGE_SIZE - 1, .no_write_same = 1, .track_queue_depth = 1, .change_queue_depth = storvsc_change_queue_depth, @@ -1895,6 +1895,7 @@ static int storvsc_probe(struct hv_device *device, int target = 0; struct storvsc_device *stor_device; int max_sub_channels = 0; + u32 max_xfer_bytes; /* * We support sub-channels for storage on SCSI and FC controllers. @@ -1968,12 +1969,28 @@ static int storvsc_probe(struct hv_device *device, } /* max cmd length */ host->max_cmd_len = STORVSC_MAX_CMD_LEN; - /* - * set the table size based on the info we got - * from the host. + * Any reasonable Hyper-V configuration should provide + * max_transfer_bytes value aligning to HV_HYP_PAGE_SIZE, + * protecting it from any weird value. + */ + max_xfer_bytes = round_down(stor_device->max_transfer_bytes, HV_HYP_PAGE_SIZE); + /* max_hw_sectors_kb */ + host->max_sectors = max_xfer_bytes >> 9; + /* + * There are 2 requirements for Hyper-V storvsc sgl segments, + * based on which the below calculation for max segments is + * done: + * + * 1. Except for the first and last sgl segment, all sgl segments + * should be align to HV_HYP_PAGE_SIZE, that also means the + * maximum number of segments in a sgl can be calculated by + * dividing the total max transfer length by HV_HYP_PAGE_SIZE. + * + * 2. Except for the first and last, each entry in the SGL must + * have an offset that is a multiple of HV_HYP_PAGE_SIZE. */ - host->sg_tablesize = (stor_device->max_transfer_bytes >> PAGE_SHIFT); + host->sg_tablesize = (max_xfer_bytes >> HV_HYP_PAGE_SHIFT) + 1; /* * For non-IDE disks, the host supports multiple channels. * Set the number of HW queues we are supporting. 
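To make the storvsc sizing above concrete: with a hypothetical max_transfer_bytes of 512 KiB and HV_HYP_PAGE_SIZE of 4 KiB, the rounded value stays 512 KiB, max_sectors becomes 1024 (in 512-byte units), and sg_tablesize becomes 128 + 1 = 129, the extra entry covering a transfer whose first segment is not page-aligned. A compilable sketch of the same arithmetic (the transfer size is an assumed example, not a value from the patch):

#include <stdio.h>

#define HV_HYP_PAGE_SHIFT 12
#define HV_HYP_PAGE_SIZE  (1UL << HV_HYP_PAGE_SHIFT)

/* round down to a power-of-two boundary, as the kernel macro does */
#define round_down(x, y) ((x) & ~((y) - 1))

int main(void)
{
	unsigned long max_transfer_bytes = 512 * 1024; /* hypothetical host value */
	unsigned long max_xfer_bytes = round_down(max_transfer_bytes, HV_HYP_PAGE_SIZE);

	unsigned long max_sectors  = max_xfer_bytes >> 9;	/* 512-byte sectors */
	unsigned long sg_tablesize = (max_xfer_bytes >> HV_HYP_PAGE_SHIFT) + 1;

	printf("max_sectors=%lu sg_tablesize=%lu\n", max_sectors, sg_tablesize);
	/* prints: max_sectors=1024 sg_tablesize=129 */
	return 0;
}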
diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h index 51a82f7803d3..9d16cf925483 100644 --- a/drivers/scsi/vmw_pvscsi.h +++ b/drivers/scsi/vmw_pvscsi.h @@ -331,8 +331,8 @@ struct PVSCSIRingReqDesc { u8 tag; u8 bus; u8 target; - u8 vcpuHint; - u8 unused[59]; + u16 vcpuHint; + u8 unused[58]; } __packed; /* diff --git a/drivers/soc/bcm/brcmstb/pm/pm-arm.c b/drivers/soc/bcm/brcmstb/pm/pm-arm.c index 3cbb165d6e30..70ad0f3dce28 100644 --- a/drivers/soc/bcm/brcmstb/pm/pm-arm.c +++ b/drivers/soc/bcm/brcmstb/pm/pm-arm.c @@ -783,6 +783,7 @@ static int brcmstb_pm_probe(struct platform_device *pdev) } ret = brcmstb_init_sram(dn); + of_node_put(dn); if (ret) { pr_err("error setting up SRAM for PM\n"); return ret; diff --git a/drivers/soc/imx/imx8m-blk-ctrl.c b/drivers/soc/imx/imx8m-blk-ctrl.c index 7f49385ed2f8..7ebc28709e94 100644 --- a/drivers/soc/imx/imx8m-blk-ctrl.c +++ b/drivers/soc/imx/imx8m-blk-ctrl.c @@ -667,7 +667,7 @@ static const struct imx8m_blk_ctrl_domain_data imx8mp_media_blk_ctl_domain_data[ }, [IMX8MP_MEDIABLK_PD_LCDIF_2] = { .name = "mediablk-lcdif-2", - .clk_names = (const char *[]){ "disp1", "apb", "axi", }, + .clk_names = (const char *[]){ "disp2", "apb", "axi", }, .num_clks = 3, .gpc_name = "lcdif2", .rst_mask = BIT(11) | BIT(12) | BIT(24), diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c index a23d4f6329f5..31d778e9d255 100644 --- a/drivers/spi/spi-cadence.c +++ b/drivers/spi/spi-cadence.c @@ -69,6 +69,7 @@ #define CDNS_SPI_BAUD_DIV_SHIFT 3 /* Baud rate divisor shift in CR */ #define CDNS_SPI_SS_SHIFT 10 /* Slave Select field shift in CR */ #define CDNS_SPI_SS0 0x1 /* Slave Select zero */ +#define CDNS_SPI_NOSS 0x3C /* No Slave select */ /* * SPI Interrupt Registers bit Masks @@ -92,9 +93,6 @@ #define CDNS_SPI_ER_ENABLE 0x00000001 /* SPI Enable Bit Mask */ #define CDNS_SPI_ER_DISABLE 0x0 /* SPI Disable Bit Mask */ -/* SPI FIFO depth in bytes */ -#define CDNS_SPI_FIFO_DEPTH 128 - /* Default number of chip select lines */ #define CDNS_SPI_DEFAULT_NUM_CS 4 @@ -110,6 +108,7 @@ * @rx_bytes: Number of bytes requested * @dev_busy: Device busy flag * @is_decoded_cs: Flag for decoder property set or not + * @tx_fifo_depth: Depth of the TX FIFO */ struct cdns_spi { void __iomem *regs; @@ -123,6 +122,7 @@ struct cdns_spi { int rx_bytes; u8 dev_busy; u32 is_decoded_cs; + unsigned int tx_fifo_depth; }; /* Macros for the SPI controller read/write */ @@ -304,7 +304,7 @@ static void cdns_spi_fill_tx_fifo(struct cdns_spi *xspi) { unsigned long trans_cnt = 0; - while ((trans_cnt < CDNS_SPI_FIFO_DEPTH) && + while ((trans_cnt < xspi->tx_fifo_depth) && (xspi->tx_bytes > 0)) { /* When xspi in busy condition, bytes may send failed, @@ -450,20 +450,43 @@ static int cdns_prepare_transfer_hardware(struct spi_master *master) * @master: Pointer to the spi_master structure which provides * information about the controller. * - * This function disables the SPI master controller. + * This function disables the SPI master controller when no slave selected. 
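The vmw_pvscsi change above widens vcpuHint from u8 to u16 while shrinking the pad from 59 to 58 bytes, so the device-visible descriptor size and the offsets of later fields are unchanged. A _Static_assert makes that invariant explicit; the sketch below uses a simplified hypothetical tail of the descriptor, not the full PVSCSI layout:

#include <stdint.h>

struct req_desc_tail {
	uint8_t  tag;
	uint8_t  bus;
	uint8_t  target;
	uint16_t vcpuHint;	/* was uint8_t */
	uint8_t  unused[58];	/* was unused[59] */
} __attribute__((packed));

/* 1 + 1 + 1 + 2 + 58 == 1 + 1 + 1 + 1 + 59: total size is preserved */
_Static_assert(sizeof(struct req_desc_tail) == 63,
	       "device-visible descriptor size must not change");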
* * Return: 0 always */ static int cdns_unprepare_transfer_hardware(struct spi_master *master) { struct cdns_spi *xspi = spi_master_get_devdata(master); + u32 ctrl_reg; - cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_DISABLE); + /* Disable the SPI if slave is deselected */ + ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR); + ctrl_reg = (ctrl_reg & CDNS_SPI_CR_SSCTRL) >> CDNS_SPI_SS_SHIFT; + if (ctrl_reg == CDNS_SPI_NOSS) + cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_DISABLE); return 0; } /** + * cdns_spi_detect_fifo_depth - Detect the FIFO depth of the hardware + * @xspi: Pointer to the cdns_spi structure + * + * The depth of the TX FIFO is a synthesis configuration parameter of the SPI + * IP. The FIFO threshold register is sized so that its maximum value can be the + * FIFO size - 1. This is used to detect the size of the FIFO. + */ +static void cdns_spi_detect_fifo_depth(struct cdns_spi *xspi) +{ + /* The MSBs will get truncated giving us the size of the FIFO */ + cdns_spi_write(xspi, CDNS_SPI_THLD, 0xffff); + xspi->tx_fifo_depth = cdns_spi_read(xspi, CDNS_SPI_THLD) + 1; + + /* Reset to default */ + cdns_spi_write(xspi, CDNS_SPI_THLD, 0x1); +} + +/** * cdns_spi_probe - Probe method for the SPI driver * @pdev: Pointer to the platform_device structure * @@ -535,6 +558,8 @@ static int cdns_spi_probe(struct platform_device *pdev) if (ret < 0) xspi->is_decoded_cs = 0; + cdns_spi_detect_fifo_depth(xspi); + /* SPI controller initializations */ cdns_spi_init_hw(xspi); diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c index e8de4f5017cd..0c79193d9697 100644 --- a/drivers/spi/spi-mem.c +++ b/drivers/spi/spi-mem.c @@ -808,7 +808,7 @@ int spi_mem_poll_status(struct spi_mem *mem, op->data.dir != SPI_MEM_DATA_IN) return -EINVAL; - if (ctlr->mem_ops && ctlr->mem_ops->poll_status) { + if (ctlr->mem_ops && ctlr->mem_ops->poll_status && !mem->spi->cs_gpiod) { ret = spi_mem_access_start(mem); if (ret) return ret; diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c index a08215eb9e14..79242dc5272d 100644 --- a/drivers/spi/spi-rockchip.c +++ b/drivers/spi/spi-rockchip.c @@ -381,15 +381,18 @@ static int rockchip_spi_prepare_irq(struct rockchip_spi *rs, rs->tx_left = rs->tx ? 
xfer->len / rs->n_bytes : 0; rs->rx_left = xfer->len / rs->n_bytes; - if (rs->cs_inactive) - writel_relaxed(INT_RF_FULL | INT_CS_INACTIVE, rs->regs + ROCKCHIP_SPI_IMR); - else - writel_relaxed(INT_RF_FULL, rs->regs + ROCKCHIP_SPI_IMR); + writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR); + spi_enable_chip(rs, true); if (rs->tx_left) rockchip_spi_pio_writer(rs); + if (rs->cs_inactive) + writel_relaxed(INT_RF_FULL | INT_CS_INACTIVE, rs->regs + ROCKCHIP_SPI_IMR); + else + writel_relaxed(INT_RF_FULL, rs->regs + ROCKCHIP_SPI_IMR); + /* 1 means the transfer is in progress */ return 1; } diff --git a/drivers/staging/olpc_dcon/Kconfig b/drivers/staging/olpc_dcon/Kconfig index d1a0dea09ef0..d0ba34cc32f7 100644 --- a/drivers/staging/olpc_dcon/Kconfig +++ b/drivers/staging/olpc_dcon/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 config FB_OLPC_DCON tristate "One Laptop Per Child Display CONtroller support" - depends on OLPC && FB + depends on OLPC && FB && BROKEN depends on I2C depends on GPIO_CS5535 && ACPI select BACKLIGHT_CLASS_DEVICE diff --git a/drivers/staging/r8188eu/core/rtw_xmit.c b/drivers/staging/r8188eu/core/rtw_xmit.c index 3d8e9dea7651..7135d89caac1 100644 --- a/drivers/staging/r8188eu/core/rtw_xmit.c +++ b/drivers/staging/r8188eu/core/rtw_xmit.c @@ -178,8 +178,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter) pxmitpriv->free_xmit_extbuf_cnt = num_xmit_extbuf; - res = rtw_alloc_hwxmits(padapter); - if (res) { + if (rtw_alloc_hwxmits(padapter)) { res = _FAIL; goto exit; } @@ -1483,19 +1482,10 @@ int rtw_alloc_hwxmits(struct adapter *padapter) hwxmits = pxmitpriv->hwxmits; - if (pxmitpriv->hwxmit_entry == 5) { - hwxmits[0] .sta_queue = &pxmitpriv->bm_pending; - hwxmits[1] .sta_queue = &pxmitpriv->vo_pending; - hwxmits[2] .sta_queue = &pxmitpriv->vi_pending; - hwxmits[3] .sta_queue = &pxmitpriv->bk_pending; - hwxmits[4] .sta_queue = &pxmitpriv->be_pending; - } else if (pxmitpriv->hwxmit_entry == 4) { - hwxmits[0] .sta_queue = &pxmitpriv->vo_pending; - hwxmits[1] .sta_queue = &pxmitpriv->vi_pending; - hwxmits[2] .sta_queue = &pxmitpriv->be_pending; - hwxmits[3] .sta_queue = &pxmitpriv->bk_pending; - } else { - } + hwxmits[0].sta_queue = &pxmitpriv->vo_pending; + hwxmits[1].sta_queue = &pxmitpriv->vi_pending; + hwxmits[2].sta_queue = &pxmitpriv->be_pending; + hwxmits[3].sta_queue = &pxmitpriv->bk_pending; return 0; } diff --git a/drivers/staging/r8188eu/os_dep/ioctl_linux.c b/drivers/staging/r8188eu/os_dep/ioctl_linux.c index 1b09462ca908..8dd280e2739a 100644 --- a/drivers/staging/r8188eu/os_dep/ioctl_linux.c +++ b/drivers/staging/r8188eu/os_dep/ioctl_linux.c @@ -403,7 +403,7 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param, if (wep_key_len > 0) { wep_key_len = wep_key_len <= 5 ? 5 : 13; - wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, KeyMaterial); + wep_total_len = wep_key_len + sizeof(*pwep); pwep = kzalloc(wep_total_len, GFP_KERNEL); if (!pwep) goto exit; diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c index ece97e37ac91..30374a820496 100644 --- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c +++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c @@ -90,7 +90,8 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param, if (wep_key_len > 0) { wep_key_len = wep_key_len <= 5 ? 
5 : 13; wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, key_material); - pwep = kzalloc(wep_total_len, GFP_KERNEL); + /* Allocate a full structure to avoid potentially running off the end. */ + pwep = kzalloc(sizeof(*pwep), GFP_KERNEL); if (!pwep) { ret = -ENOMEM; goto exit; @@ -582,7 +583,8 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param, if (wep_key_len > 0) { wep_key_len = wep_key_len <= 5 ? 5 : 13; wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, key_material); - pwep = kzalloc(wep_total_len, GFP_KERNEL); + /* Allocate a full structure to avoid potentially running off the end. */ + pwep = kzalloc(sizeof(*pwep), GFP_KERNEL); if (!pwep) goto exit; diff --git a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c index c7968aecd870..d02de3f0326f 100644 --- a/drivers/tty/goldfish.c +++ b/drivers/tty/goldfish.c @@ -426,7 +426,7 @@ static int goldfish_tty_remove(struct platform_device *pdev) tty_unregister_device(goldfish_tty_driver, qtty->console.index); iounmap(qtty->base); qtty->base = NULL; - free_irq(qtty->irq, pdev); + free_irq(qtty->irq, qtty); tty_port_destroy(&qtty->port); goldfish_tty_current_line_count--; if (goldfish_tty_current_line_count == 0) diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index 137eebdcfda9..fd4d24f61c46 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -455,7 +455,7 @@ static void gsm_hex_dump_bytes(const char *fname, const u8 *data, return; } - prefix = kasprintf(GFP_KERNEL, "%s: ", fname); + prefix = kasprintf(GFP_ATOMIC, "%s: ", fname); if (!prefix) return; print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, 16, 1, data, len, diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 78b6dedc43e6..8f32fe9e149e 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -1517,6 +1517,8 @@ static inline void __stop_tx(struct uart_8250_port *p) unsigned char lsr = serial_in(p, UART_LSR); u64 stop_delay = 0; + p->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS; + if (!(lsr & UART_LSR_THRE)) return; /* diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c index 4733a233bd0c..f8f950641ad9 100644 --- a/drivers/tty/serial/qcom_geni_serial.c +++ b/drivers/tty/serial/qcom_geni_serial.c @@ -1306,6 +1306,7 @@ static const struct uart_ops qcom_geni_console_pops = { .stop_tx = qcom_geni_serial_stop_tx, .start_tx = qcom_geni_serial_start_tx, .stop_rx = qcom_geni_serial_stop_rx, + .start_rx = qcom_geni_serial_start_rx, .set_termios = qcom_geni_serial_set_termios, .startup = qcom_geni_serial_startup, .request_port = qcom_geni_serial_request_port, diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 9a85b41caa0a..338ebadfd44b 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -2214,11 +2214,12 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport) /* * Nothing to do if the console is not suspending * except stop_rx to prevent any asynchronous data - * over RX line. Re-start_rx, when required, is - * done by set_termios in resume sequence + * over RX line. However ensure that we will be + * able to Re-start_rx later. 
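Both ioctl_linux.c fixes above replace a FIELD_OFFSET-based allocation with one sized for the whole structure: sizing the buffer as offsetof(..., key_material) plus the key length can under-allocate whenever the rest of the structure is later read or written, so kzalloc(sizeof(*pwep)) is the safe form. A hedged userspace sketch with a hypothetical layout:

#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <stdint.h>

struct wep_key {
	uint32_t length;
	uint32_t index;
	uint32_t key_len;
	uint8_t  key_material[16];	/* fixed-size tail, as in the driver struct */
};

struct wep_key *alloc_wep(const uint8_t *key, size_t key_len)
{
	/*
	 * Not: malloc(offsetof(struct wep_key, key_material) + key_len),
	 * which is too small once anything touches the full structure.
	 */
	struct wep_key *w = calloc(1, sizeof(*w));

	if (!w || key_len > sizeof(w->key_material)) {
		free(w);
		return NULL;
	}
	w->key_len = (uint32_t)key_len;
	memcpy(w->key_material, key, key_len);
	return w;
}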
*/ if (!console_suspend_enabled && uart_console(uport)) { - uport->ops->stop_rx(uport); + if (uport->ops->start_rx) + uport->ops->stop_rx(uport); goto unlock; } @@ -2310,6 +2311,8 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport) if (console_suspend_enabled) uart_change_pm(state, UART_PM_STATE_ON); uport->ops->set_termios(uport, &termios, NULL); + if (!console_suspend_enabled && uport->ops->start_rx) + uport->ops->start_rx(uport); if (console_suspend_enabled) console_start(uport->cons); } diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index 18e623325887..d2b2720db6ca 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c @@ -581,7 +581,6 @@ void __handle_sysrq(int key, bool check_mask) rcu_sysrq_start(); rcu_read_lock(); - printk_prefer_direct_enter(); /* * Raise the apparent loglevel to maximum so that the sysrq header * is shown to provide the user with positive feedback. We do not @@ -623,7 +622,6 @@ void __handle_sysrq(int key, bool check_mask) pr_cont("\n"); console_loglevel = orig_log_level; } - printk_prefer_direct_exit(); rcu_read_unlock(); rcu_sysrq_end(); diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 01fb4bad86be..ce86d1b790c0 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -748,17 +748,28 @@ static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp) } /** - * ufshcd_utrl_clear - Clear a bit in UTRLCLR register + * ufshcd_utrl_clear() - Clear requests from the controller request list. * @hba: per adapter instance - * @pos: position of the bit to be cleared + * @mask: mask with one bit set for each request to be cleared */ -static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos) +static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 mask) { if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR) - ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR); - else - ufshcd_writel(hba, ~(1 << pos), - REG_UTP_TRANSFER_REQ_LIST_CLEAR); + mask = ~mask; + /* + * From the UFSHCI specification: "UTP Transfer Request List CLear + * Register (UTRLCLR): This field is bit significant. Each bit + * corresponds to a slot in the UTP Transfer Request List, where bit 0 + * corresponds to request slot 0. A bit in this field is set to ‘0’ + * by host software to indicate to the host controller that a transfer + * request slot is cleared. The host controller + * shall free up any resources associated to the request slot + * immediately, and shall set the associated bit in UTRLDBR to ‘0’. The + * host software indicates no change to request slots by setting the + * associated bits in this field to ‘1’. Bits in this field shall only + * be set ‘1’ or ‘0’ by host software when UTRLRSR is set to ‘1’." + */ + ufshcd_writel(hba, ~mask, REG_UTP_TRANSFER_REQ_LIST_CLEAR); } /** @@ -2863,27 +2874,26 @@ static int ufshcd_compose_dev_cmd(struct ufs_hba *hba, return ufshcd_compose_devman_upiu(hba, lrbp); } -static int -ufshcd_clear_cmd(struct ufs_hba *hba, int tag) +/* + * Clear all the requests from the controller for which a bit has been set in + * @mask and wait until the controller confirms that these requests have been + * cleared. 
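Restating the UTRLCLR convention the new ufshcd helper encodes: host software writes 0 in every bit position it wants cleared and 1 everywhere else, so the value written is the complement of the mask of requests to clear, and the quirk inverts that once more for controllers with the broken polarity. A small sketch of just the register-value computation, mirroring the hunk above:

#include <stdint.h>
#include <stdbool.h>

/*
 * Compute the value written to UTRLCLR for @mask, which has one bit set
 * per request slot to clear. Bits written as 0 clear the slot; bits
 * written as 1 mean "no change".
 */
static uint32_t utrlclr_value(uint32_t mask, bool broken_req_list_clr_quirk)
{
	if (broken_req_list_clr_quirk)
		mask = ~mask;	/* quirky controllers use inverted polarity */
	return ~mask;
}

/* utrlclr_value((1u << 3) | (1u << 5), false) == 0xffffffd7: slots 3, 5 clear */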
+ */ +static int ufshcd_clear_cmds(struct ufs_hba *hba, u32 mask) { - int err = 0; unsigned long flags; - u32 mask = 1 << tag; /* clear outstanding transaction before retry */ spin_lock_irqsave(hba->host->host_lock, flags); - ufshcd_utrl_clear(hba, tag); + ufshcd_utrl_clear(hba, mask); spin_unlock_irqrestore(hba->host->host_lock, flags); /* * wait for h/w to clear corresponding bit in door-bell. * max. wait is 1 sec. */ - err = ufshcd_wait_for_register(hba, - REG_UTP_TRANSFER_REQ_DOOR_BELL, - mask, ~mask, 1000, 1000); - - return err; + return ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL, + mask, ~mask, 1000, 1000); } static int @@ -2963,7 +2973,7 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba, err = -ETIMEDOUT; dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n", __func__, lrbp->task_tag); - if (!ufshcd_clear_cmd(hba, lrbp->task_tag)) + if (!ufshcd_clear_cmds(hba, 1U << lrbp->task_tag)) /* successfully cleared the command, retry if needed */ err = -EAGAIN; /* @@ -6958,14 +6968,14 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba, } /** - * ufshcd_eh_device_reset_handler - device reset handler registered to - * scsi layer. + * ufshcd_eh_device_reset_handler() - Reset a single logical unit. * @cmd: SCSI command pointer * * Returns SUCCESS/FAILED */ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd) { + unsigned long flags, pending_reqs = 0, not_cleared = 0; struct Scsi_Host *host; struct ufs_hba *hba; u32 pos; @@ -6984,14 +6994,24 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd) } /* clear the commands that were pending for corresponding LUN */ - for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) { - if (hba->lrb[pos].lun == lun) { - err = ufshcd_clear_cmd(hba, pos); - if (err) - break; - __ufshcd_transfer_req_compl(hba, 1U << pos); - } + spin_lock_irqsave(&hba->outstanding_lock, flags); + for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) + if (hba->lrb[pos].lun == lun) + __set_bit(pos, &pending_reqs); + hba->outstanding_reqs &= ~pending_reqs; + spin_unlock_irqrestore(&hba->outstanding_lock, flags); + + if (ufshcd_clear_cmds(hba, pending_reqs) < 0) { + spin_lock_irqsave(&hba->outstanding_lock, flags); + not_cleared = pending_reqs & + ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); + hba->outstanding_reqs |= not_cleared; + spin_unlock_irqrestore(&hba->outstanding_lock, flags); + + dev_err(hba->dev, "%s: failed to clear requests %#lx\n", + __func__, not_cleared); } + __ufshcd_transfer_req_compl(hba, pending_reqs & ~not_cleared); out: hba->req_abort_count = 0; @@ -7088,7 +7108,7 @@ static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag) goto out; } - err = ufshcd_clear_cmd(hba, tag); + err = ufshcd_clear_cmds(hba, 1U << tag); if (err) dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n", __func__, tag, err); diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c index e45c3d6e1536..794e413800ae 100644 --- a/drivers/usb/cdns3/cdnsp-ring.c +++ b/drivers/usb/cdns3/cdnsp-ring.c @@ -1941,13 +1941,16 @@ int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq) } if (enqd_len + trb_buff_len >= full_len) { - if (need_zero_pkt) - zero_len_trb = !zero_len_trb; - - field &= ~TRB_CHAIN; - field |= TRB_IOC; - more_trbs_coming = false; - preq->td.last_trb = ring->enqueue; + if (need_zero_pkt && !zero_len_trb) { + zero_len_trb = true; + } else { + zero_len_trb = false; + field &= ~TRB_CHAIN; + field |= TRB_IOC; + more_trbs_coming = false; + need_zero_pkt = 
false; + preq->td.last_trb = ring->enqueue; + } } /* Only set interrupt on short packet for OUT endpoints. */ @@ -1962,7 +1965,7 @@ int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq) length_field = TRB_LEN(trb_buff_len) | TRB_TD_SIZE(remainder) | TRB_INTR_TARGET(0); - cdnsp_queue_trb(pdev, ring, more_trbs_coming | zero_len_trb, + cdnsp_queue_trb(pdev, ring, more_trbs_coming, lower_32_bits(send_addr), upper_32_bits(send_addr), length_field, diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index dc6c96e04bcf..3b8bf6daf7d0 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c @@ -1048,6 +1048,9 @@ isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req) struct ci_hdrc *ci = req->context; unsigned long flags; + if (req->status < 0) + return; + if (ci->setaddr) { hw_usb_set_address(ci, ci->address); ci->setaddr = false; diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index f63a27d11fac..3f107a06817d 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -5190,7 +5190,7 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { retval = -EINVAL; - goto error1; + goto error2; } hcd->rsrc_start = res->start; hcd->rsrc_len = resource_size(res); diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index e027c0420dc3..573421984948 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -1644,13 +1644,8 @@ static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc) * This device property is for kernel internal use only and * is expected to be set by the glue code. */ - if (device_property_read_string(dev, "linux,extcon-name", &name) == 0) { - edev = extcon_get_extcon_dev(name); - if (!edev) - return ERR_PTR(-EPROBE_DEFER); - - return edev; - } + if (device_property_read_string(dev, "linux,extcon-name", &name) == 0) + return extcon_get_extcon_dev(name); /* * Try to get an extcon device from the USB PHY controller's "port" diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index ba51de7dd760..6b018048fe2e 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -127,6 +127,7 @@ static const struct property_entry dwc3_pci_intel_phy_charger_detect_properties[ PROPERTY_ENTRY_STRING("dr_mode", "peripheral"), PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"), PROPERTY_ENTRY_BOOL("linux,phy_charger_detect"), + PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"), {} }; diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 00427d108ab9..8716bece1072 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -2976,6 +2976,7 @@ static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep) struct dwc3 *dwc = dep->dwc; u32 mdwidth; int size; + int maxpacket; mdwidth = dwc3_mdwidth(dwc); @@ -2988,21 +2989,24 @@ static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep) else size = DWC31_GTXFIFOSIZ_TXFDEP(size); - /* FIFO Depth is in MDWDITH bytes. Multiply */ - size *= mdwidth; - /* - * To meet performance requirement, a minimum TxFIFO size of 3x - * MaxPacketSize is recommended for endpoints that support burst and a - * minimum TxFIFO size of 2x MaxPacketSize for endpoints that don't - * support burst. Use those numbers and we can calculate the max packet - * limit as below. 
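Stepping back to the UFS error handler above: it moves from clearing commands one tag at a time to a gather-then-clear pattern, collecting the tags that belong to the LUN into a bitmask under the lock, dropping them from outstanding_reqs, then issuing a single clear for the whole mask. A simplified sketch of the gathering step (hypothetical types, locking omitted):

#include <stdint.h>

#define MAX_TAGS 32

struct lrb { int lun; };

/*
 * Build a mask of outstanding request slots that target @lun. In the
 * driver this runs under outstanding_lock; here it is a plain loop.
 */
static uint32_t gather_pending_for_lun(uint32_t outstanding,
				       const struct lrb *lrb, int lun)
{
	uint32_t pending = 0;

	for (int pos = 0; pos < MAX_TAGS; pos++)
		if ((outstanding & (1U << pos)) && lrb[pos].lun == lun)
			pending |= 1U << pos;

	return pending;	/* caller clears these and issues one UTRLCLR write */
}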
+ * maxpacket size is determined as part of the following, after assuming + * a mult value of one maxpacket: + * DWC3 revision 280A and prior: + * fifo_size = mult * (max_packet / mdwidth) + 1; + * maxpacket = mdwidth * (fifo_size - 1); + * + * DWC3 revision 290A and onwards: + * fifo_size = mult * ((max_packet + mdwidth)/mdwidth + 1) + 1 + * maxpacket = mdwidth * ((fifo_size - 1) - 1) - mdwidth; */ - if (dwc->maximum_speed >= USB_SPEED_SUPER) - size /= 3; + if (DWC3_VER_IS_PRIOR(DWC3, 290A)) + maxpacket = mdwidth * (size - 1); else - size /= 2; + maxpacket = mdwidth * ((size - 1) - 1) - mdwidth; + /* Functionally, space for one max packet is sufficient */ + size = min_t(int, maxpacket, 1024); usb_ep_set_maxpacket_limit(&dep->endpoint, size); dep->endpoint.max_streams = 16; diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 4585ee3a444a..e0fa4b186ec6 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -122,8 +122,6 @@ struct ffs_ep { struct usb_endpoint_descriptor *descs[3]; u8 num; - - int status; /* P: epfile->mutex */ }; struct ffs_epfile { @@ -227,6 +225,9 @@ struct ffs_io_data { bool use_sg; struct ffs_data *ffs; + + int status; + struct completion done; }; struct ffs_desc_helper { @@ -707,12 +708,15 @@ static const struct file_operations ffs_ep0_operations = { static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req) { + struct ffs_io_data *io_data = req->context; + ENTER(); - if (req->context) { - struct ffs_ep *ep = _ep->driver_data; - ep->status = req->status ? req->status : req->actual; - complete(req->context); - } + if (req->status) + io_data->status = req->status; + else + io_data->status = req->actual; + + complete(&io_data->done); } static ssize_t ffs_copy_to_iter(void *data, int data_len, struct iov_iter *iter) @@ -1050,7 +1054,6 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) WARN(1, "%s: data_len == -EINVAL\n", __func__); ret = -EINVAL; } else if (!io_data->aio) { - DECLARE_COMPLETION_ONSTACK(done); bool interrupted = false; req = ep->req; @@ -1066,7 +1069,8 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) io_data->buf = data; - req->context = &done; + init_completion(&io_data->done); + req->context = io_data; req->complete = ffs_epfile_io_complete; ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC); @@ -1075,7 +1079,12 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) spin_unlock_irq(&epfile->ffs->eps_lock); - if (wait_for_completion_interruptible(&done)) { + if (wait_for_completion_interruptible(&io_data->done)) { + spin_lock_irq(&epfile->ffs->eps_lock); + if (epfile->ep != ep) { + ret = -ESHUTDOWN; + goto error_lock; + } /* * To avoid race condition with ffs_epfile_io_complete, * dequeue the request first then check @@ -1083,17 +1092,18 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) * condition with req->complete callback. 
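Plugging numbers into the dwc3 comment above, as a small helper: the inputs are illustrative (an mdwidth of 8 bytes and a FIFO depth of 131 entries are assumptions, not values from the patch). On a revision >= 290A core this yields 8 * ((131 - 1) - 1) - 8 = 1024, which the min_t() clamp leaves unchanged:

#include <stdbool.h>

/* Invert the fifo_size formulas quoted in the dwc3 comment above. */
static int dwc3_tx_maxpacket(int fifo_depth, int mdwidth_bytes, bool ver_ge_290a)
{
	int maxpacket;

	if (!ver_ge_290a)
		maxpacket = mdwidth_bytes * (fifo_depth - 1);
	else
		maxpacket = mdwidth_bytes * ((fifo_depth - 1) - 1) - mdwidth_bytes;

	/* functionally, space for one max packet is sufficient */
	return maxpacket < 1024 ? maxpacket : 1024;
}

/* dwc3_tx_maxpacket(131, 8, true) == 1024 */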
*/ usb_ep_dequeue(ep->ep, req); - wait_for_completion(&done); - interrupted = ep->status < 0; + spin_unlock_irq(&epfile->ffs->eps_lock); + wait_for_completion(&io_data->done); + interrupted = io_data->status < 0; } if (interrupted) ret = -EINTR; - else if (io_data->read && ep->status > 0) - ret = __ffs_epfile_read_data(epfile, data, ep->status, + else if (io_data->read && io_data->status > 0) + ret = __ffs_epfile_read_data(epfile, data, io_data->status, &io_data->data); else - ret = ep->status; + ret = io_data->status; goto error_mutex; } else if (!(req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC))) { ret = -ENOMEM; diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c index 6f5d45ef2e39..f51694f29de9 100644 --- a/drivers/usb/gadget/function/u_ether.c +++ b/drivers/usb/gadget/function/u_ether.c @@ -775,9 +775,13 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g, dev->qmult = qmult; snprintf(net->name, sizeof(net->name), "%s%%d", netname); - if (get_ether_addr(dev_addr, addr)) + if (get_ether_addr(dev_addr, addr)) { + net->addr_assign_type = NET_ADDR_RANDOM; dev_warn(&g->dev, "using random %s ethernet address\n", "self"); + } else { + net->addr_assign_type = NET_ADDR_SET; + } eth_hw_addr_set(net, addr); if (get_ether_addr(host_addr, dev->host_mac)) dev_warn(&g->dev, @@ -844,6 +848,10 @@ struct net_device *gether_setup_name_default(const char *netname) eth_random_addr(dev->dev_mac); pr_warn("using random %s ethernet address\n", "self"); + + /* by default we always have a random MAC address */ + net->addr_assign_type = NET_ADDR_RANDOM; + eth_random_addr(dev->host_mac); pr_warn("using random %s ethernet address\n", "host"); @@ -871,7 +879,6 @@ int gether_register_netdev(struct net_device *net) dev = netdev_priv(net); g = dev->gadget; - net->addr_assign_type = NET_ADDR_RANDOM; eth_hw_addr_set(net, dev->dev_mac); status = register_netdev(net); @@ -912,6 +919,7 @@ int gether_set_dev_addr(struct net_device *net, const char *dev_addr) if (get_ether_addr(dev_addr, new_addr)) return -EINVAL; memcpy(dev->dev_mac, new_addr, ETH_ALEN); + net->addr_assign_type = NET_ADDR_SET; return 0; } EXPORT_SYMBOL_GPL(gether_set_dev_addr); diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c index a9bb4553db84..d42bb3346745 100644 --- a/drivers/usb/gadget/function/uvc_video.c +++ b/drivers/usb/gadget/function/uvc_video.c @@ -424,6 +424,9 @@ static void uvcg_video_pump(struct work_struct *work) uvcg_queue_cancel(queue, 0); break; } + + /* Endpoint now owns the request */ + req = NULL; video->req_int_count++; } diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c index 241740024c50..2acece16b890 100644 --- a/drivers/usb/gadget/legacy/raw_gadget.c +++ b/drivers/usb/gadget/legacy/raw_gadget.c @@ -11,6 +11,7 @@ #include <linux/ctype.h> #include <linux/debugfs.h> #include <linux/delay.h> +#include <linux/idr.h> #include <linux/kref.h> #include <linux/miscdevice.h> #include <linux/module.h> @@ -36,6 +37,9 @@ MODULE_LICENSE("GPL"); /*----------------------------------------------------------------------*/ +static DEFINE_IDA(driver_id_numbers); +#define DRIVER_DRIVER_NAME_LENGTH_MAX 32 + #define RAW_EVENT_QUEUE_SIZE 16 struct raw_event_queue { @@ -161,6 +165,9 @@ struct raw_dev { /* Reference to misc device: */ struct device *dev; + /* Make driver names unique */ + int driver_id_number; + /* Protected by lock: */ enum dev_state state; bool gadget_registered; @@ -189,6 +196,7 @@ static struct raw_dev 
*dev_new(void) spin_lock_init(&dev->lock); init_completion(&dev->ep0_done); raw_event_queue_init(&dev->queue); + dev->driver_id_number = -1; return dev; } @@ -199,6 +207,9 @@ static void dev_free(struct kref *kref) kfree(dev->udc_name); kfree(dev->driver.udc_name); + kfree(dev->driver.driver.name); + if (dev->driver_id_number >= 0) + ida_free(&driver_id_numbers, dev->driver_id_number); if (dev->req) { if (dev->ep0_urb_queued) usb_ep_dequeue(dev->gadget->ep0, dev->req); @@ -419,9 +430,11 @@ out_put: static int raw_ioctl_init(struct raw_dev *dev, unsigned long value) { int ret = 0; + int driver_id_number; struct usb_raw_init arg; char *udc_driver_name; char *udc_device_name; + char *driver_driver_name; unsigned long flags; if (copy_from_user(&arg, (void __user *)value, sizeof(arg))) @@ -440,36 +453,43 @@ static int raw_ioctl_init(struct raw_dev *dev, unsigned long value) return -EINVAL; } + driver_id_number = ida_alloc(&driver_id_numbers, GFP_KERNEL); + if (driver_id_number < 0) + return driver_id_number; + + driver_driver_name = kmalloc(DRIVER_DRIVER_NAME_LENGTH_MAX, GFP_KERNEL); + if (!driver_driver_name) { + ret = -ENOMEM; + goto out_free_driver_id_number; + } + snprintf(driver_driver_name, DRIVER_DRIVER_NAME_LENGTH_MAX, + DRIVER_NAME ".%d", driver_id_number); + udc_driver_name = kmalloc(UDC_NAME_LENGTH_MAX, GFP_KERNEL); - if (!udc_driver_name) - return -ENOMEM; + if (!udc_driver_name) { + ret = -ENOMEM; + goto out_free_driver_driver_name; + } ret = strscpy(udc_driver_name, &arg.driver_name[0], UDC_NAME_LENGTH_MAX); - if (ret < 0) { - kfree(udc_driver_name); - return ret; - } + if (ret < 0) + goto out_free_udc_driver_name; ret = 0; udc_device_name = kmalloc(UDC_NAME_LENGTH_MAX, GFP_KERNEL); if (!udc_device_name) { - kfree(udc_driver_name); - return -ENOMEM; + ret = -ENOMEM; + goto out_free_udc_driver_name; } ret = strscpy(udc_device_name, &arg.device_name[0], UDC_NAME_LENGTH_MAX); - if (ret < 0) { - kfree(udc_driver_name); - kfree(udc_device_name); - return ret; - } + if (ret < 0) + goto out_free_udc_device_name; ret = 0; spin_lock_irqsave(&dev->lock, flags); if (dev->state != STATE_DEV_OPENED) { dev_dbg(dev->dev, "fail, device is not opened\n"); - kfree(udc_driver_name); - kfree(udc_device_name); ret = -EINVAL; goto out_unlock; } @@ -484,14 +504,25 @@ static int raw_ioctl_init(struct raw_dev *dev, unsigned long value) dev->driver.suspend = gadget_suspend; dev->driver.resume = gadget_resume; dev->driver.reset = gadget_reset; - dev->driver.driver.name = DRIVER_NAME; + dev->driver.driver.name = driver_driver_name; dev->driver.udc_name = udc_device_name; dev->driver.match_existing_only = 1; + dev->driver_id_number = driver_id_number; dev->state = STATE_DEV_INITIALIZED; + spin_unlock_irqrestore(&dev->lock, flags); + return ret; out_unlock: spin_unlock_irqrestore(&dev->lock, flags); +out_free_udc_device_name: + kfree(udc_device_name); +out_free_udc_driver_name: + kfree(udc_driver_name); +out_free_driver_driver_name: + kfree(driver_driver_name); +out_free_driver_id_number: + ida_free(&driver_id_numbers, driver_id_number); return ret; } diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c index 6117ae8e7242..cea10cdb83ae 100644 --- a/drivers/usb/gadget/udc/lpc32xx_udc.c +++ b/drivers/usb/gadget/udc/lpc32xx_udc.c @@ -3016,6 +3016,7 @@ static int lpc32xx_udc_probe(struct platform_device *pdev) } udc->isp1301_i2c_client = isp1301_get_client(isp1301_node); + of_node_put(isp1301_node); if (!udc->isp1301_i2c_client) { return -EPROBE_DEFER; } diff --git 
a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index c54f2bc23d3f..0fdc014c9401 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -652,7 +652,7 @@ struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd) * It will release and re-aquire the lock while calling ACPI * method. */ -static void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd, +void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd, u16 index, bool on, unsigned long *flags) __must_hold(&xhci->lock) { diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index fac9492a8bda..dce6c0ec8d34 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -61,6 +61,8 @@ #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI 0x461e #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_XHCI 0x464e #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI 0x51ed +#define PCI_DEVICE_ID_INTEL_RAPTOR_LAKE_XHCI 0xa71e +#define PCI_DEVICE_ID_INTEL_METEOR_LAKE_XHCI 0x7ec0 #define PCI_DEVICE_ID_AMD_RENOIR_XHCI 0x1639 #define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9 @@ -269,7 +271,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_XHCI || - pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI)) + pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI || + pdev->device == PCI_DEVICE_ID_INTEL_RAPTOR_LAKE_XHCI || + pdev->device == PCI_DEVICE_ID_INTEL_METEOR_LAKE_XHCI)) xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW; if (pdev->vendor == PCI_VENDOR_ID_ETRON && diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index f0ab63138016..65858f607437 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -611,15 +611,37 @@ static int xhci_init(struct usb_hcd *hcd) static int xhci_run_finished(struct xhci_hcd *xhci) { + unsigned long flags; + u32 temp; + + /* + * Enable interrupts before starting the host (xhci 4.2 and 5.5.2). 
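Looking back at the raw_gadget change above: it makes each instance register a unique gadget-driver name by pairing an IDA-allocated number with a formatted string, and releases both on teardown. A hedged kernel-style sketch of that pattern (identifier names hypothetical):

#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/slab.h>

static DEFINE_IDA(example_id_numbers);

/*
 * Allocate "rawdev.N" with a unique N. On teardown the caller must
 * kfree() the returned name and ida_free() the id, as dev_free() does
 * in the patch above.
 */
static char *example_unique_name(int *id_out)
{
	char *name;
	int id = ida_alloc(&example_id_numbers, GFP_KERNEL);

	if (id < 0)
		return NULL;

	name = kasprintf(GFP_KERNEL, "rawdev.%d", id);
	if (!name) {
		ida_free(&example_id_numbers, id);
		return NULL;
	}
	*id_out = id;
	return name;
}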
+ * Protect the short window before host is running with a lock + */ + spin_lock_irqsave(&xhci->lock, flags); + + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable interrupts"); + temp = readl(&xhci->op_regs->command); + temp |= (CMD_EIE); + writel(temp, &xhci->op_regs->command); + + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable primary interrupter"); + temp = readl(&xhci->ir_set->irq_pending); + writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending); + if (xhci_start(xhci)) { xhci_halt(xhci); + spin_unlock_irqrestore(&xhci->lock, flags); return -ENODEV; } + xhci->cmd_ring_state = CMD_RING_STATE_RUNNING; if (xhci->quirks & XHCI_NEC_HOST) xhci_ring_cmd_db(xhci); + spin_unlock_irqrestore(&xhci->lock, flags); + return 0; } @@ -668,19 +690,6 @@ int xhci_run(struct usb_hcd *hcd) temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK; writel(temp, &xhci->ir_set->irq_control); - /* Set the HCD state before we enable the irqs */ - temp = readl(&xhci->op_regs->command); - temp |= (CMD_EIE); - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// Enable interrupts, cmd = 0x%x.", temp); - writel(temp, &xhci->op_regs->command); - - temp = readl(&xhci->ir_set->irq_pending); - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// Enabling event ring interrupter %p by writing 0x%x to irq_pending", - xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp)); - writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending); - if (xhci->quirks & XHCI_NEC_HOST) { struct xhci_command *command; @@ -782,6 +791,8 @@ static void xhci_stop(struct usb_hcd *hcd) void xhci_shutdown(struct usb_hcd *hcd) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); + unsigned long flags; + int i; if (xhci->quirks & XHCI_SPURIOUS_REBOOT) usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev)); @@ -797,12 +808,21 @@ void xhci_shutdown(struct usb_hcd *hcd) del_timer_sync(&xhci->shared_hcd->rh_timer); } - spin_lock_irq(&xhci->lock); + spin_lock_irqsave(&xhci->lock, flags); xhci_halt(xhci); + + /* Power off USB2 ports*/ + for (i = 0; i < xhci->usb2_rhub.num_ports; i++) + xhci_set_port_power(xhci, xhci->main_hcd, i, false, &flags); + + /* Power off USB3 ports*/ + for (i = 0; i < xhci->usb3_rhub.num_ports; i++) + xhci_set_port_power(xhci, xhci->shared_hcd, i, false, &flags); + /* Workaround for spurious wakeups at shutdown with HSW */ if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) xhci_reset(xhci, XHCI_RESET_SHORT_USEC); - spin_unlock_irq(&xhci->lock); + spin_unlock_irqrestore(&xhci->lock, flags); xhci_cleanup_msix(xhci); @@ -1107,7 +1127,6 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) { u32 command, temp = 0; struct usb_hcd *hcd = xhci_to_hcd(xhci); - struct usb_hcd *secondary_hcd; int retval = 0; bool comp_timer_running = false; bool pending_portevent = false; @@ -1214,23 +1233,19 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) * first with the primary HCD, and then with the secondary HCD. * If we don't do the same, the host will never be started. 
*/ - if (!usb_hcd_is_primary_hcd(hcd)) - secondary_hcd = hcd; - else - secondary_hcd = xhci->shared_hcd; - xhci_dbg(xhci, "Initialize the xhci_hcd\n"); - retval = xhci_init(hcd->primary_hcd); + retval = xhci_init(hcd); if (retval) return retval; comp_timer_running = true; xhci_dbg(xhci, "Start the primary HCD\n"); - retval = xhci_run(hcd->primary_hcd); - if (!retval && secondary_hcd) { + retval = xhci_run(hcd); + if (!retval && xhci->shared_hcd) { xhci_dbg(xhci, "Start the secondary HCD\n"); - retval = xhci_run(secondary_hcd); + retval = xhci_run(xhci->shared_hcd); } + hcd->state = HC_STATE_SUSPENDED; if (xhci->shared_hcd) xhci->shared_hcd->state = HC_STATE_SUSPENDED; diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 0bd76c94a4b1..28aaf031f9a8 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -2196,6 +2196,8 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, int xhci_hub_status_data(struct usb_hcd *hcd, char *buf); int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1); struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd); +void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd, u16 index, + bool on, unsigned long *flags); void xhci_hc_died(struct xhci_hcd *xhci); diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c index a7b3c15957ba..feba2a8d1233 100644 --- a/drivers/usb/serial/io_ti.c +++ b/drivers/usb/serial/io_ti.c @@ -166,6 +166,7 @@ static const struct usb_device_id edgeport_2port_id_table[] = { { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8S) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416B) }, + { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_E5805A) }, { } }; @@ -204,6 +205,7 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8S) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416B) }, + { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_E5805A) }, { } }; diff --git a/drivers/usb/serial/io_usbvend.h b/drivers/usb/serial/io_usbvend.h index 52cbc353051f..9a6f742ad3ab 100644 --- a/drivers/usb/serial/io_usbvend.h +++ b/drivers/usb/serial/io_usbvend.h @@ -212,6 +212,7 @@ // // Definitions for other product IDs #define ION_DEVICE_ID_MT4X56USB 0x1403 // OEM device +#define ION_DEVICE_ID_E5805A 0x1A01 // OEM device (rebranded Edgeport/4) #define GENERATION_ID_FROM_USB_PRODUCT_ID(ProductId) \ diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index e60425bbf537..de59fa919540 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -252,10 +252,12 @@ static void option_instat_callback(struct urb *urb); #define QUECTEL_PRODUCT_EG95 0x0195 #define QUECTEL_PRODUCT_BG96 0x0296 #define QUECTEL_PRODUCT_EP06 0x0306 +#define QUECTEL_PRODUCT_EM05G 0x030a #define QUECTEL_PRODUCT_EM12 0x0512 #define QUECTEL_PRODUCT_RM500Q 0x0800 #define QUECTEL_PRODUCT_EC200S_CN 0x6002 #define QUECTEL_PRODUCT_EC200T 0x6026 +#define QUECTEL_PRODUCT_RM500K 0x7001 #define CMOTECH_VENDOR_ID 0x16d8 #define CMOTECH_PRODUCT_6001 0x6001 @@ -432,6 +434,8 @@ static void option_instat_callback(struct urb *urb); #define CINTERION_PRODUCT_CLS8 0x00b0 #define CINTERION_PRODUCT_MV31_MBIM 0x00b3 #define CINTERION_PRODUCT_MV31_RMNET 0x00b7 +#define CINTERION_PRODUCT_MV31_2_MBIM 0x00b8 +#define CINTERION_PRODUCT_MV31_2_RMNET 0x00b9 #define 
CINTERION_PRODUCT_MV32_WA 0x00f1 #define CINTERION_PRODUCT_MV32_WB 0x00f2 @@ -1132,6 +1136,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff), .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) }, + { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G, 0xff), + .driver_info = RSVD(6) | ZLP }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff), .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) }, @@ -1145,6 +1151,7 @@ static const struct usb_device_id option_ids[] = { .driver_info = ZLP }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, @@ -1277,6 +1284,7 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1231, 0xff), /* Telit LE910Cx (RNDIS) */ .driver_info = NCTRL(2) | RSVD(3) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x1250, 0xff, 0x00, 0x00) }, /* Telit LE910Cx (rmnet) */ { USB_DEVICE(TELIT_VENDOR_ID, 0x1260), .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, { USB_DEVICE(TELIT_VENDOR_ID, 0x1261), @@ -1979,6 +1987,10 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(3)}, { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff), .driver_info = RSVD(0)}, + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_2_MBIM, 0xff), + .driver_info = RSVD(3)}, + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_2_RMNET, 0xff), + .driver_info = RSVD(0)}, { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WA, 0xff), .driver_info = RSVD(3)}, { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WB, 0xff), diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 3506c47e1eef..40b1ab3d284d 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -436,22 +436,27 @@ static int pl2303_detect_type(struct usb_serial *serial) break; case 0x200: switch (bcdDevice) { - case 0x100: + case 0x100: /* GC */ case 0x105: + return TYPE_HXN; + case 0x300: /* GT / TA */ + if (pl2303_supports_hx_status(serial)) + return TYPE_TA; + fallthrough; case 0x305: + case 0x400: /* GL */ case 0x405: + return TYPE_HXN; + case 0x500: /* GE / TB */ + if (pl2303_supports_hx_status(serial)) + return TYPE_TB; + fallthrough; + case 0x505: + case 0x600: /* GS */ case 0x605: - /* - * Assume it's an HXN-type if the device doesn't - * support the old read request value. 
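The reshaped pl2303 switch here leans on two idioms: grouping bcdDevice values per chip family, and using the fallthrough annotation so a failed pl2303_supports_hx_status() probe drops a shared ID into the HXN bucket. A compact, hedged illustration of the probe-then-fallthrough shape (types and return values simplified, not the driver's full table):

enum chip { TYPE_HXN, TYPE_TA, TYPE_TB };

#define fallthrough __attribute__((__fallthrough__))

static enum chip classify(int bcd, int (*probe_hx_status)(void))
{
	switch (bcd) {
	case 0x300:			/* GT / TA share an ID */
		if (probe_hx_status())
			return TYPE_TA;	/* probe succeeded: older TA part */
		fallthrough;		/* probe failed: treat as HXN */
	case 0x400:
		return TYPE_HXN;
	case 0x500:			/* GE / TB share an ID */
		if (probe_hx_status())
			return TYPE_TB;
		fallthrough;
	default:
		return TYPE_HXN;
	}
}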
- */ - if (!pl2303_supports_hx_status(serial)) - return TYPE_HXN; - break; - case 0x300: - return TYPE_TA; - case 0x500: - return TYPE_TB; + case 0x700: /* GR */ + case 0x705: + return TYPE_HXN; } break; } diff --git a/drivers/usb/typec/tcpm/Kconfig b/drivers/usb/typec/tcpm/Kconfig index 557f392fe24d..073fd2ea5e0b 100644 --- a/drivers/usb/typec/tcpm/Kconfig +++ b/drivers/usb/typec/tcpm/Kconfig @@ -56,7 +56,6 @@ config TYPEC_WCOVE tristate "Intel WhiskeyCove PMIC USB Type-C PHY driver" depends on ACPI depends on MFD_INTEL_PMC_BXT - depends on INTEL_SOC_PMIC depends on BXT_WC_PMIC_OPREGION help This driver adds support for USB Type-C on Intel Broxton platforms diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index b7a955479156..1b6d46b86f81 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -107,7 +107,7 @@ struct mlx5_vdpa_virtqueue { /* Resources for implementing the notification channel from the device * to the driver. fwqp is the firmware end of an RC connection; the - * other end is vqqp used by the driver. cq is is where completions are + * other end is vqqp used by the driver. cq is where completions are * reported. */ struct mlx5_vdpa_cq cq; @@ -1814,12 +1814,13 @@ static virtio_net_ctrl_ack handle_ctrl_vlan(struct mlx5_vdpa_dev *mvdev, u8 cmd) id = mlx5vdpa16_to_cpu(mvdev, vlan); mac_vlan_del(ndev, ndev->config.mac, id, true); + status = VIRTIO_NET_OK; break; default: - break; -} + break; + } -return status; + return status; } static void mlx5_cvq_kick_handler(struct work_struct *work) diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c index d503848b3b6e..776ad7496f53 100644 --- a/drivers/vdpa/vdpa_user/vduse_dev.c +++ b/drivers/vdpa/vdpa_user/vduse_dev.c @@ -1345,9 +1345,9 @@ static int vduse_create_dev(struct vduse_dev_config *config, dev->minor = ret; dev->msg_timeout = VDUSE_MSG_DEFAULT_TIMEOUT; - dev->dev = device_create(vduse_class, NULL, - MKDEV(MAJOR(vduse_major), dev->minor), - dev, "%s", config->name); + dev->dev = device_create_with_groups(vduse_class, NULL, + MKDEV(MAJOR(vduse_major), dev->minor), + dev, vduse_dev_groups, "%s", config->name); if (IS_ERR(dev->dev)) { ret = PTR_ERR(dev->dev); goto err_dev; @@ -1596,7 +1596,6 @@ static int vduse_init(void) return PTR_ERR(vduse_class); vduse_class->devnode = vduse_devnode; - vduse_class->dev_groups = vduse_dev_groups; ret = alloc_chrdev_region(&vduse_major, 0, VDUSE_DEV_MAX, "vduse"); if (ret) diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index 935a1d0ddb97..5ad2596c6e8a 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -499,6 +499,8 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd, ops->set_vq_ready(vdpa, idx, s.num); return 0; case VHOST_VDPA_GET_VRING_GROUP: + if (!ops->get_vq_group) + return -EOPNOTSUPP; s.index = idx; s.num = ops->get_vq_group(vdpa, idx); if (s.num >= vdpa->ngroups) diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c index 14e2043d7685..eab55accf381 100644 --- a/drivers/vhost/vringh.c +++ b/drivers/vhost/vringh.c @@ -292,7 +292,7 @@ __vringh_iov(struct vringh *vrh, u16 i, int (*copy)(const struct vringh *vrh, void *dst, const void *src, size_t len)) { - int err, count = 0, up_next, desc_max; + int err, count = 0, indirect_count = 0, up_next, desc_max; struct vring_desc desc, *descs; struct vringh_range range = { -1ULL, 0 }, slowrange; bool slow = false; @@ -349,7 +349,12 @@ __vringh_iov(struct vringh *vrh, u16 i, continue; } - if 
(count++ == vrh->vring.num) { + if (up_next == -1) + count++; + else + indirect_count++; + + if (count > vrh->vring.num || indirect_count > desc_max) { vringh_bad("Descriptor loop in %p", descs); err = -ELOOP; goto fail; @@ -411,6 +416,7 @@ __vringh_iov(struct vringh *vrh, u16 i, i = return_from_indirect(vrh, &up_next, &descs, &desc_max); slow = false; + indirect_count = 0; } else break; } diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c index fa23bf0247b0..bd4dc97d4d34 100644 --- a/drivers/video/console/sticore.c +++ b/drivers/video/console/sticore.c @@ -1148,6 +1148,7 @@ int sti_call(const struct sti_struct *sti, unsigned long func, return ret; } +#if defined(CONFIG_FB_STI) /* check if given fb_info is the primary device */ int fb_is_primary_device(struct fb_info *info) { @@ -1163,6 +1164,7 @@ int fb_is_primary_device(struct fb_info *info) return (sti->info == info); } EXPORT_SYMBOL(fb_is_primary_device); +#endif MODULE_AUTHOR("Philipp Rumpf, Helge Deller, Thomas Bogendoerfer"); MODULE_DESCRIPTION("Core STI driver for HP's NGLE series graphics cards in HP PARISC machines"); diff --git a/drivers/video/fbdev/au1100fb.c b/drivers/video/fbdev/au1100fb.c index 52f731a61482..519313b8bb00 100644 --- a/drivers/video/fbdev/au1100fb.c +++ b/drivers/video/fbdev/au1100fb.c @@ -560,8 +560,7 @@ int au1100fb_drv_suspend(struct platform_device *dev, pm_message_t state) /* Blank the LCD */ au1100fb_fb_blank(VESA_POWERDOWN, &fbdev->info); - if (fbdev->lcdclk) - clk_disable(fbdev->lcdclk); + clk_disable(fbdev->lcdclk); memcpy(&fbregs, fbdev->regs, sizeof(struct au1100fb_regs)); @@ -577,8 +576,7 @@ int au1100fb_drv_resume(struct platform_device *dev) memcpy(fbdev->regs, &fbregs, sizeof(struct au1100fb_regs)); - if (fbdev->lcdclk) - clk_enable(fbdev->lcdclk); + clk_enable(fbdev->lcdclk); /* Unblank the LCD */ au1100fb_fb_blank(VESA_NO_BLANKING, &fbdev->info); diff --git a/drivers/video/fbdev/cirrusfb.c b/drivers/video/fbdev/cirrusfb.c index 3d47c347b897..51e072c03e1c 100644 --- a/drivers/video/fbdev/cirrusfb.c +++ b/drivers/video/fbdev/cirrusfb.c @@ -2184,12 +2184,6 @@ static struct pci_driver cirrusfb_pci_driver = { .id_table = cirrusfb_pci_table, .probe = cirrusfb_pci_register, .remove = cirrusfb_pci_unregister, -#ifdef CONFIG_PM -#if 0 - .suspend = cirrusfb_pci_suspend, - .resume = cirrusfb_pci_resume, -#endif -#endif }; #endif /* CONFIG_PCI */ diff --git a/drivers/video/fbdev/intelfb/intelfbdrv.c b/drivers/video/fbdev/intelfb/intelfbdrv.c index a9579964eaba..5647fca8c49a 100644 --- a/drivers/video/fbdev/intelfb/intelfbdrv.c +++ b/drivers/video/fbdev/intelfb/intelfbdrv.c @@ -472,7 +472,7 @@ static int intelfb_pci_register(struct pci_dev *pdev, struct fb_info *info; struct intelfb_info *dinfo; int i, err, dvo; - int aperture_size, stolen_size; + int aperture_size, stolen_size = 0; struct agp_kern_info gtt_info; int agp_memtype; const char *s; @@ -571,7 +571,7 @@ static int intelfb_pci_register(struct pci_dev *pdev, return -ENODEV; } - if (intelfbhw_get_memory(pdev, &aperture_size,&stolen_size)) { + if (intelfbhw_get_memory(pdev, &aperture_size, &stolen_size)) { cleanup(dinfo); return -ENODEV; } diff --git a/drivers/video/fbdev/intelfb/intelfbhw.c b/drivers/video/fbdev/intelfb/intelfbhw.c index 57aff7450bce..2086e06532ee 100644 --- a/drivers/video/fbdev/intelfb/intelfbhw.c +++ b/drivers/video/fbdev/intelfb/intelfbhw.c @@ -201,13 +201,11 @@ int intelfbhw_get_memory(struct pci_dev *pdev, int *aperture_size, case PCI_DEVICE_ID_INTEL_945GME: case PCI_DEVICE_ID_INTEL_965G: case 
PCI_DEVICE_ID_INTEL_965GM: - /* 915, 945 and 965 chipsets support a 256MB aperture. - Aperture size is determined by inspected the - base address of the aperture. */ - if (pci_resource_start(pdev, 2) & 0x08000000) - *aperture_size = MB(128); - else - *aperture_size = MB(256); + /* + * 915, 945 and 965 chipsets support 64MB, 128MB or 256MB + * aperture. Determine size from PCI resource length. + */ + *aperture_size = pci_resource_len(pdev, 2); break; default: if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M) diff --git a/drivers/video/fbdev/omap/sossi.c b/drivers/video/fbdev/omap/sossi.c index c90eb8ca58af..66aff6cd1df0 100644 --- a/drivers/video/fbdev/omap/sossi.c +++ b/drivers/video/fbdev/omap/sossi.c @@ -359,7 +359,7 @@ static void sossi_set_bits_per_cycle(int bpc) int bus_pick_count, bus_pick_width; /* - * We set explicitly the the bus_pick_count as well, although + * We set explicitly the bus_pick_count as well, although * with remapping/reordering disabled it will be calculated by HW * as (32 / bus_pick_width). */ diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c index 6fbfeb01b315..170463a7e1f4 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c @@ -143,7 +143,7 @@ int hdmi_phy_configure(struct hdmi_phy_data *phy, unsigned long hfbitclk, /* * In OMAP5+, the HFBITCLK must be divided by 2 before issuing the * HDMI_PHYPWRCMD_LDOON command. - */ + */ if (phy_feat->bist_ctrl) REG_FLD_MOD(phy->base, HDMI_TXPHY_BIST_CONTROL, 1, 11, 11); diff --git a/drivers/video/fbdev/pxa3xx-gcu.c b/drivers/video/fbdev/pxa3xx-gcu.c index 043cc8f9ef1c..c3cd1e1cc01b 100644 --- a/drivers/video/fbdev/pxa3xx-gcu.c +++ b/drivers/video/fbdev/pxa3xx-gcu.c @@ -381,7 +381,7 @@ pxa3xx_gcu_write(struct file *file, const char *buff, struct pxa3xx_gcu_batch *buffer; struct pxa3xx_gcu_priv *priv = to_pxa3xx_gcu_priv(file); - int words = count / 4; + size_t words = count / 4; /* Does not need to be atomic. There's a lock in user space, * but anyhow, this is just for statistics. */ diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c index 2c198561c338..f96ce8801be4 100644 --- a/drivers/video/fbdev/simplefb.c +++ b/drivers/video/fbdev/simplefb.c @@ -237,8 +237,7 @@ static int simplefb_clocks_get(struct simplefb_par *par, if (IS_ERR(clock)) { if (PTR_ERR(clock) == -EPROBE_DEFER) { while (--i >= 0) { - if (par->clks[i]) - clk_put(par->clks[i]); + clk_put(par->clks[i]); } kfree(par->clks); return -EPROBE_DEFER; diff --git a/drivers/video/fbdev/skeletonfb.c b/drivers/video/fbdev/skeletonfb.c index bcacfb6934fa..d119b1d08007 100644 --- a/drivers/video/fbdev/skeletonfb.c +++ b/drivers/video/fbdev/skeletonfb.c @@ -96,7 +96,7 @@ static const struct fb_fix_screeninfo xxxfb_fix = { /* * Modern graphical hardware not only supports pipelines but some - * also support multiple monitors where each display can have its + * also support multiple monitors where each display can have * its own unique data. In this case each display could be * represented by a separate framebuffer device thus a separate * struct fb_info. 
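Two of the fixes in this region hinge on the same unwind idiom: the earlier pmcraid change needs `while (--i >= 0)` because `while (--i > 0)` never releases entry 0, and simplefb's loop can drop its NULL check because clk_put() already tolerates a NULL pointer. A self-contained sketch of the correct partial-failure unwind:

#include <stdio.h>

#define N 4

static int setup(int i) { return i == 2 ? -1 : 0; }	/* fail on the 3rd item */
static void undo(int i) { printf("undo %d\n", i); }

int main(void)
{
	int i;

	for (i = 0; i < N; i++)
		if (setup(i) < 0)
			goto unwind;
	return 0;

unwind:
	/* --i skips the entry that failed; >= 0 (not > 0) also undoes entry 0 */
	while (--i >= 0)
		undo(i);
	return 1;	/* prints: undo 1, then undo 0 */
}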
Now the struct xxx_par represents the graphics @@ -838,9 +838,9 @@ static void xxxfb_remove(struct pci_dev *dev) * * See Documentation/driver-api/pm/devices.rst for more information */ -static int xxxfb_suspend(struct pci_dev *dev, pm_message_t msg) +static int xxxfb_suspend(struct device *dev) { - struct fb_info *info = pci_get_drvdata(dev); + struct fb_info *info = dev_get_drvdata(dev); struct xxxfb_par *par = info->par; /* suspend here */ @@ -853,9 +853,9 @@ static int xxxfb_suspend(struct pci_dev *dev, pm_message_t msg) * * See Documentation/driver-api/pm/devices.rst for more information */ -static int xxxfb_resume(struct pci_dev *dev) +static int xxxfb_resume(struct device *dev) { - struct fb_info *info = pci_get_drvdata(dev); + struct fb_info *info = dev_get_drvdata(dev); struct xxxfb_par *par = info->par; /* resume here */ @@ -873,14 +873,15 @@ static const struct pci_device_id xxxfb_id_table[] = { { 0, } }; +static SIMPLE_DEV_PM_OPS(xxxfb_pm_ops, xxxfb_suspend, xxxfb_resume); + /* For PCI drivers */ static struct pci_driver xxxfb_driver = { .name = "xxxfb", .id_table = xxxfb_id_table, .probe = xxxfb_probe, .remove = xxxfb_remove, - .suspend = xxxfb_suspend, /* optional but recommended */ - .resume = xxxfb_resume, /* optional but recommended */ + .driver.pm = xxxfb_pm_ops, /* optional but recommended */ }; MODULE_DEVICE_TABLE(pci, xxxfb_id_table); diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig index b5adf6abd241..a6dc8b5846fe 100644 --- a/drivers/virtio/Kconfig +++ b/drivers/virtio/Kconfig @@ -6,12 +6,6 @@ config VIRTIO bus, such as CONFIG_VIRTIO_PCI, CONFIG_VIRTIO_MMIO, CONFIG_RPMSG or CONFIG_S390_GUEST. -config ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS - bool - help - This option is selected if the architecture may need to enforce - VIRTIO_F_ACCESS_PLATFORM - config VIRTIO_PCI_LIB tristate help diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index ef04a96942bf..6bace84ae37e 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -5,6 +5,7 @@ #include <linux/module.h> #include <linux/idr.h> #include <linux/of.h> +#include <linux/platform-feature.h> #include <uapi/linux/virtio_ids.h> /* Unique numbering for virtio devices. */ @@ -170,12 +171,10 @@ EXPORT_SYMBOL_GPL(virtio_add_status); static int virtio_features_ok(struct virtio_device *dev) { unsigned int status; - int ret; might_sleep(); - ret = arch_has_restricted_virtio_memory_access(); - if (ret) { + if (platform_has(PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS)) { if (!virtio_has_feature(dev, VIRTIO_F_VERSION_1)) { dev_warn(&dev->dev, "device must provide VIRTIO_F_VERSION_1\n"); diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index f9a36bc7ac27..c9bec3813e94 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -255,7 +255,7 @@ static void vm_set_status(struct virtio_device *vdev, u8 status) /* * Per memory-barriers.txt, wmb() is not needed to guarantee - * that the the cache coherent memory writes have completed + * that the cache coherent memory writes have completed * before writing to the MMIO region. 
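The skeletonfb conversion above shows the generic power-management pattern: the callbacks take a struct device instead of a struct pci_dev, and SIMPLE_DEV_PM_OPS() builds the dev_pm_ops table that is hooked up through .driver.pm. A hedged sketch of the same shape (all my_* names are hypothetical):

#include <linux/fb.h>
#include <linux/pci.h>
#include <linux/pm.h>

static int my_suspend(struct device *dev)
{
	struct fb_info *info = dev_get_drvdata(dev);

	dev_dbg(dev, "suspending %s\n", info->fix.id);
	/* quiesce the hardware via info->par here */
	return 0;
}

static int my_resume(struct device *dev)
{
	struct fb_info *info = dev_get_drvdata(dev);

	dev_dbg(dev, "resuming %s\n", info->fix.id);
	/* re-program the hardware via info->par here */
	return 0;
}

static SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);

static struct pci_driver my_driver = {
	.name      = "mydrv",
	.driver.pm = &my_pm_ops,
	/* .id_table, .probe and .remove elided */
};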
*/ writel(status, vm_dev->base + VIRTIO_MMIO_STATUS); @@ -701,6 +701,7 @@ static int vm_cmdline_set(const char *device, if (!vm_cmdline_parent_registered) { err = device_register(&vm_cmdline_parent); if (err) { + put_device(&vm_cmdline_parent); pr_err("Failed to register parent device!\n"); return err; } diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c index a0fa14f28a7f..b790f30b2b56 100644 --- a/drivers/virtio/virtio_pci_modern_dev.c +++ b/drivers/virtio/virtio_pci_modern_dev.c @@ -469,7 +469,7 @@ void vp_modern_set_status(struct virtio_pci_modern_device *mdev, /* * Per memory-barriers.txt, wmb() is not needed to guarantee - * that the the cache coherent memory writes have completed + * that the cache coherent memory writes have completed * before writing to the MMIO region. */ vp_iowrite8(status, &cfg->device_status); diff --git a/drivers/watchdog/gxp-wdt.c b/drivers/watchdog/gxp-wdt.c index b0b2d7a6fdde..2fd85be88278 100644 --- a/drivers/watchdog/gxp-wdt.c +++ b/drivers/watchdog/gxp-wdt.c @@ -172,3 +172,4 @@ module_platform_driver(gxp_wdt_driver); MODULE_AUTHOR("Nick Hawkins <nick.hawkins@hpe.com>"); MODULE_AUTHOR("Jean-Marie Verdun <verdun@hpe.com>"); MODULE_DESCRIPTION("Driver for GXP watchdog timer"); +MODULE_LICENSE("GPL"); diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index 120d32f164ac..bfd5f4f706bc 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig @@ -335,4 +335,24 @@ config XEN_UNPOPULATED_ALLOC having to balloon out RAM regions in order to obtain physical memory space to create such mappings. +config XEN_GRANT_DMA_IOMMU + bool + select IOMMU_API + +config XEN_GRANT_DMA_OPS + bool + select DMA_OPS + +config XEN_VIRTIO + bool "Xen virtio support" + depends on VIRTIO + select XEN_GRANT_DMA_OPS + select XEN_GRANT_DMA_IOMMU if OF + help + Enable virtio support for running as Xen guest. Depending on the + guest type this will require special support on the backend side + (qemu or kernel, depending on the virtio device types used). + + If in doubt, say n. 
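The virtio_mmio error-path fix above follows a general driver-core rule: once device_register() has been called, the embedded kobject owns the device's lifetime, so a failure must be answered with put_device() (which runs the release path) rather than a direct free. A hedged sketch under that assumption (my_parent is hypothetical and, like vm_cmdline_parent, statically allocated):

#include <linux/device.h>

static struct device my_parent = {
	.init_name = "my-parent",
};

static int my_register_parent(void)
{
	int err = device_register(&my_parent);

	if (err) {
		/* drop the reference device_register() took */
		put_device(&my_parent);
		return err;
	}
	return 0;
}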
+ endmenu diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile index 5aae66e638a7..c0503f1c7d5b 100644 --- a/drivers/xen/Makefile +++ b/drivers/xen/Makefile @@ -39,3 +39,5 @@ xen-gntalloc-y := gntalloc.o xen-privcmd-y := privcmd.o privcmd-buf.o obj-$(CONFIG_XEN_FRONT_PGDIR_SHBUF) += xen-front-pgdir-shbuf.o obj-$(CONFIG_XEN_UNPOPULATED_ALLOC) += unpopulated-alloc.o +obj-$(CONFIG_XEN_GRANT_DMA_OPS) += grant-dma-ops.o +obj-$(CONFIG_XEN_GRANT_DMA_IOMMU) += grant-dma-iommu.o diff --git a/drivers/xen/features.c b/drivers/xen/features.c index 7b591443833c..87f1828d40d5 100644 --- a/drivers/xen/features.c +++ b/drivers/xen/features.c @@ -42,7 +42,7 @@ void xen_setup_features(void) if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0) break; for (j = 0; j < 32; j++) - xen_features[i * 32 + j] = !!(fi.submap & 1<<j); + xen_features[i * 32 + j] = !!(fi.submap & 1U << j); } if (xen_pv_domain()) { diff --git a/drivers/xen/gntdev-common.h b/drivers/xen/gntdev-common.h index 20d7d059dadb..40ef379c28ab 100644 --- a/drivers/xen/gntdev-common.h +++ b/drivers/xen/gntdev-common.h @@ -16,6 +16,7 @@ #include <linux/mmu_notifier.h> #include <linux/types.h> #include <xen/interface/event_channel.h> +#include <xen/grant_table.h> struct gntdev_dmabuf_priv; @@ -56,6 +57,7 @@ struct gntdev_grant_map { struct gnttab_unmap_grant_ref *unmap_ops; struct gnttab_map_grant_ref *kmap_ops; struct gnttab_unmap_grant_ref *kunmap_ops; + bool *being_removed; struct page **pages; unsigned long pages_vm_start; @@ -73,6 +75,11 @@ struct gntdev_grant_map { /* Needed to avoid allocation in gnttab_dma_free_pages(). */ xen_pfn_t *frames; #endif + + /* Number of live grants */ + atomic_t live_grants; + /* Needed to avoid allocation in __unmap_grant_pages */ + struct gntab_unmap_queue_data unmap_data; }; struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count, diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 59ffea800079..4b56c39f766d 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -35,6 +35,7 @@ #include <linux/slab.h> #include <linux/highmem.h> #include <linux/refcount.h> +#include <linux/workqueue.h> #include <xen/xen.h> #include <xen/grant_table.h> @@ -60,10 +61,11 @@ module_param(limit, uint, 0644); MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by one mapping request"); +/* True in PV mode, false otherwise */ static int use_ptemod; -static int unmap_grant_pages(struct gntdev_grant_map *map, - int offset, int pages); +static void unmap_grant_pages(struct gntdev_grant_map *map, + int offset, int pages); static struct miscdevice gntdev_miscdev; @@ -120,6 +122,7 @@ static void gntdev_free_map(struct gntdev_grant_map *map) kvfree(map->unmap_ops); kvfree(map->kmap_ops); kvfree(map->kunmap_ops); + kvfree(map->being_removed); kfree(map); } @@ -140,10 +143,13 @@ struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count, add->unmap_ops = kvmalloc_array(count, sizeof(add->unmap_ops[0]), GFP_KERNEL); add->pages = kvcalloc(count, sizeof(add->pages[0]), GFP_KERNEL); + add->being_removed = + kvcalloc(count, sizeof(add->being_removed[0]), GFP_KERNEL); if (NULL == add->grants || NULL == add->map_ops || NULL == add->unmap_ops || - NULL == add->pages) + NULL == add->pages || + NULL == add->being_removed) goto err; if (use_ptemod) { add->kmap_ops = kvmalloc_array(count, sizeof(add->kmap_ops[0]), @@ -250,9 +256,36 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map) if (!refcount_dec_and_test(&map->users)) return; - if 
(map->pages && !use_ptemod) + if (map->pages && !use_ptemod) { + /* + * Increment the reference count. This ensures that the + * subsequent call to unmap_grant_pages() will not wind up + * re-entering itself. It *can* wind up calling + * gntdev_put_map() recursively, but such calls will be with a + * reference count greater than 1, so they will return before + * this code is reached. The recursion depth is thus limited to + * 1. Do NOT use refcount_inc() here, as it will detect that + * the reference count is zero and WARN(). + */ + refcount_set(&map->users, 1); + + /* + * Unmap the grants. This may or may not be asynchronous, so it + * is possible that the reference count is 1 on return, but it + * could also be greater than 1. + */ unmap_grant_pages(map, 0, map->count); + /* Check if the memory now needs to be freed */ + if (!refcount_dec_and_test(&map->users)) + return; + + /* + * All pages have been returned to the hypervisor, so free the + * map. + */ + } + if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) { notify_remote_via_evtchn(map->notify.event); evtchn_put(map->notify.event); @@ -283,6 +316,7 @@ static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data) int gntdev_map_grant_pages(struct gntdev_grant_map *map) { + size_t alloced = 0; int i, err = 0; if (!use_ptemod) { @@ -331,97 +365,116 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map) map->count); for (i = 0; i < map->count; i++) { - if (map->map_ops[i].status == GNTST_okay) + if (map->map_ops[i].status == GNTST_okay) { map->unmap_ops[i].handle = map->map_ops[i].handle; - else if (!err) + if (!use_ptemod) + alloced++; + } else if (!err) err = -EINVAL; if (map->flags & GNTMAP_device_map) map->unmap_ops[i].dev_bus_addr = map->map_ops[i].dev_bus_addr; if (use_ptemod) { - if (map->kmap_ops[i].status == GNTST_okay) + if (map->kmap_ops[i].status == GNTST_okay) { + if (map->map_ops[i].status == GNTST_okay) + alloced++; map->kunmap_ops[i].handle = map->kmap_ops[i].handle; - else if (!err) + } else if (!err) err = -EINVAL; } } + atomic_add(alloced, &map->live_grants); return err; } -static int __unmap_grant_pages(struct gntdev_grant_map *map, int offset, - int pages) +static void __unmap_grant_pages_done(int result, + struct gntab_unmap_queue_data *data) { - int i, err = 0; - struct gntab_unmap_queue_data unmap_data; - - if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) { - int pgno = (map->notify.addr >> PAGE_SHIFT); - if (pgno >= offset && pgno < offset + pages) { - /* No need for kmap, pages are in lowmem */ - uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno])); - tmp[map->notify.addr & (PAGE_SIZE-1)] = 0; - map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE; - } - } - - unmap_data.unmap_ops = map->unmap_ops + offset; - unmap_data.kunmap_ops = use_ptemod ? 
map->kunmap_ops + offset : NULL; - unmap_data.pages = map->pages + offset; - unmap_data.count = pages; - - err = gnttab_unmap_refs_sync(&unmap_data); - if (err) - return err; + unsigned int i; + struct gntdev_grant_map *map = data->data; + unsigned int offset = data->unmap_ops - map->unmap_ops; - for (i = 0; i < pages; i++) { - if (map->unmap_ops[offset+i].status) - err = -EINVAL; + for (i = 0; i < data->count; i++) { + WARN_ON(map->unmap_ops[offset+i].status); pr_debug("unmap handle=%d st=%d\n", map->unmap_ops[offset+i].handle, map->unmap_ops[offset+i].status); map->unmap_ops[offset+i].handle = INVALID_GRANT_HANDLE; if (use_ptemod) { - if (map->kunmap_ops[offset+i].status) - err = -EINVAL; + WARN_ON(map->kunmap_ops[offset+i].status); pr_debug("kunmap handle=%u st=%d\n", map->kunmap_ops[offset+i].handle, map->kunmap_ops[offset+i].status); map->kunmap_ops[offset+i].handle = INVALID_GRANT_HANDLE; } } - return err; + /* + * Decrease the live-grant counter. This must happen after the loop to + * prevent premature reuse of the grants by gnttab_mmap(). + */ + atomic_sub(data->count, &map->live_grants); + + /* Release reference taken by __unmap_grant_pages */ + gntdev_put_map(NULL, map); +} + +static void __unmap_grant_pages(struct gntdev_grant_map *map, int offset, + int pages) +{ + if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) { + int pgno = (map->notify.addr >> PAGE_SHIFT); + + if (pgno >= offset && pgno < offset + pages) { + /* No need for kmap, pages are in lowmem */ + uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno])); + + tmp[map->notify.addr & (PAGE_SIZE-1)] = 0; + map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE; + } + } + + map->unmap_data.unmap_ops = map->unmap_ops + offset; + map->unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL; + map->unmap_data.pages = map->pages + offset; + map->unmap_data.count = pages; + map->unmap_data.done = __unmap_grant_pages_done; + map->unmap_data.data = map; + refcount_inc(&map->users); /* to keep map alive during async call below */ + + gnttab_unmap_refs_async(&map->unmap_data); } -static int unmap_grant_pages(struct gntdev_grant_map *map, int offset, - int pages) +static void unmap_grant_pages(struct gntdev_grant_map *map, int offset, + int pages) { - int range, err = 0; + int range; + + if (atomic_read(&map->live_grants) == 0) + return; /* Nothing to do */ pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages); /* It is possible the requested range will have a "hole" where we * already unmapped some of the grants. Only unmap valid ranges. 
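gntdev_put_map() above relies on a small resurrection trick: once refcount_dec_and_test() has dropped the count to zero, it is reset to one with refcount_set() (refcount_inc() would WARN on a zero count) so the teardown path may safely re-enter the put. A stripped-down sketch of the pattern, with hypothetical my_* names:

#include <linux/refcount.h>
#include <linux/slab.h>

struct my_object {
	refcount_t users;
};

static void my_teardown(struct my_object *obj);	/* may call my_put() */

static void my_put(struct my_object *obj)
{
	if (!refcount_dec_and_test(&obj->users))
		return;

	/* resurrect the count so nested puts return early */
	refcount_set(&obj->users, 1);
	my_teardown(obj);

	if (!refcount_dec_and_test(&obj->users))
		return;	/* an asynchronous user still holds a reference */

	kfree(obj);
}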
*/ - while (pages && !err) { - while (pages && - map->unmap_ops[offset].handle == INVALID_GRANT_HANDLE) { + while (pages) { + while (pages && map->being_removed[offset]) { offset++; pages--; } range = 0; while (range < pages) { - if (map->unmap_ops[offset + range].handle == - INVALID_GRANT_HANDLE) + if (map->being_removed[offset + range]) break; + map->being_removed[offset + range] = true; range++; } - err = __unmap_grant_pages(map, offset, range); + if (range) + __unmap_grant_pages(map, offset, range); offset += range; pages -= range; } - - return err; } /* ------------------------------------------------------------------ */ @@ -473,7 +526,6 @@ static bool gntdev_invalidate(struct mmu_interval_notifier *mn, struct gntdev_grant_map *map = container_of(mn, struct gntdev_grant_map, notifier); unsigned long mstart, mend; - int err; if (!mmu_notifier_range_blockable(range)) return false; @@ -494,10 +546,9 @@ static bool gntdev_invalidate(struct mmu_interval_notifier *mn, map->index, map->count, map->vma->vm_start, map->vma->vm_end, range->start, range->end, mstart, mend); - err = unmap_grant_pages(map, + unmap_grant_pages(map, (mstart - map->vma->vm_start) >> PAGE_SHIFT, (mend - mstart) >> PAGE_SHIFT); - WARN_ON(err); return true; } @@ -985,6 +1036,10 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) goto unlock_out; if (use_ptemod && map->vma) goto unlock_out; + if (atomic_read(&map->live_grants)) { + err = -EAGAIN; + goto unlock_out; + } refcount_inc(&map->users); vma->vm_ops = &gntdev_vmops; diff --git a/drivers/xen/grant-dma-iommu.c b/drivers/xen/grant-dma-iommu.c new file mode 100644 index 000000000000..16b8bc0c0b33 --- /dev/null +++ b/drivers/xen/grant-dma-iommu.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Stub IOMMU driver which does nothing. + * The main purpose of it being present is to reuse generic IOMMU device tree + * bindings by Xen grant DMA-mapping layer. + * + * Copyright (C) 2022 EPAM Systems Inc. 
+ */ + +#include <linux/iommu.h> +#include <linux/of.h> +#include <linux/platform_device.h> + +struct grant_dma_iommu_device { + struct device *dev; + struct iommu_device iommu; +}; + +/* Nothing is really needed here */ +static const struct iommu_ops grant_dma_iommu_ops; + +static const struct of_device_id grant_dma_iommu_of_match[] = { + { .compatible = "xen,grant-dma" }, + { }, +}; + +static int grant_dma_iommu_probe(struct platform_device *pdev) +{ + struct grant_dma_iommu_device *mmu; + int ret; + + mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL); + if (!mmu) + return -ENOMEM; + + mmu->dev = &pdev->dev; + + ret = iommu_device_register(&mmu->iommu, &grant_dma_iommu_ops, &pdev->dev); + if (ret) + return ret; + + platform_set_drvdata(pdev, mmu); + + return 0; +} + +static int grant_dma_iommu_remove(struct platform_device *pdev) +{ + struct grant_dma_iommu_device *mmu = platform_get_drvdata(pdev); + + platform_set_drvdata(pdev, NULL); + iommu_device_unregister(&mmu->iommu); + + return 0; +} + +static struct platform_driver grant_dma_iommu_driver = { + .driver = { + .name = "grant-dma-iommu", + .of_match_table = grant_dma_iommu_of_match, + }, + .probe = grant_dma_iommu_probe, + .remove = grant_dma_iommu_remove, +}; + +static int __init grant_dma_iommu_init(void) +{ + struct device_node *iommu_np; + + iommu_np = of_find_matching_node(NULL, grant_dma_iommu_of_match); + if (!iommu_np) + return 0; + + of_node_put(iommu_np); + + return platform_driver_register(&grant_dma_iommu_driver); +} +subsys_initcall(grant_dma_iommu_init); diff --git a/drivers/xen/grant-dma-ops.c b/drivers/xen/grant-dma-ops.c new file mode 100644 index 000000000000..fc0142484001 --- /dev/null +++ b/drivers/xen/grant-dma-ops.c @@ -0,0 +1,346 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Xen grant DMA-mapping layer - contains special DMA-mapping routines + * for providing grant references as DMA addresses to be used by frontends + * (e.g. virtio) in Xen guests + * + * Copyright (c) 2021, Juergen Gross <jgross@suse.com> + */ + +#include <linux/module.h> +#include <linux/dma-map-ops.h> +#include <linux/of.h> +#include <linux/pfn.h> +#include <linux/xarray.h> +#include <xen/xen.h> +#include <xen/xen-ops.h> +#include <xen/grant_table.h> + +struct xen_grant_dma_data { + /* The ID of backend domain */ + domid_t backend_domid; + /* Is device behaving sane? */ + bool broken; +}; + +static DEFINE_XARRAY(xen_grant_dma_devices); + +#define XEN_GRANT_DMA_ADDR_OFF (1ULL << 63) + +static inline dma_addr_t grant_to_dma(grant_ref_t grant) +{ + return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << PAGE_SHIFT); +} + +static inline grant_ref_t dma_to_grant(dma_addr_t dma) +{ + return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> PAGE_SHIFT); +} + +static struct xen_grant_dma_data *find_xen_grant_dma_data(struct device *dev) +{ + struct xen_grant_dma_data *data; + + xa_lock(&xen_grant_dma_devices); + data = xa_load(&xen_grant_dma_devices, (unsigned long)dev); + xa_unlock(&xen_grant_dma_devices); + + return data; +} + +/* + * DMA ops for Xen frontends (e.g. virtio). + * + * Used to act as a kind of software IOMMU for Xen guests by using grants as + * DMA addresses. + * Such a DMA address is formed by using the grant reference as a frame + * number and setting the highest address bit (this bit is for the backend + * to be able to distinguish it from e.g. a mmio address). 
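The encoding spelled out in the comment above is compact enough to show end to end: bit 63 tags the value as a grant-based DMA address, and the grant reference sits above PAGE_SHIFT, leaving the in-page offset intact. An illustrative round trip (values invented; GRANT_ADDR_BIT mirrors XEN_GRANT_DMA_ADDR_OFF):

#include <linux/mm.h>
#include <xen/grant_table.h>

#define GRANT_ADDR_BIT	(1ULL << 63)

static void grant_dma_roundtrip(void)
{
	grant_ref_t grant = 42;
	dma_addr_t dma = GRANT_ADDR_BIT | ((dma_addr_t)grant << PAGE_SHIFT);
	grant_ref_t back = (dma & ~GRANT_ADDR_BIT) >> PAGE_SHIFT;

	/* with 4 KiB pages: dma == 0x800000000002a000 and back == 42 */
	pr_info("dma=%pad grant=%u\n", &dma, back);
}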
+ */ +static void *xen_grant_dma_alloc(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, + unsigned long attrs) +{ + struct xen_grant_dma_data *data; + unsigned int i, n_pages = PFN_UP(size); + unsigned long pfn; + grant_ref_t grant; + void *ret; + + data = find_xen_grant_dma_data(dev); + if (!data) + return NULL; + + if (unlikely(data->broken)) + return NULL; + + ret = alloc_pages_exact(n_pages * PAGE_SIZE, gfp); + if (!ret) + return NULL; + + pfn = virt_to_pfn(ret); + + if (gnttab_alloc_grant_reference_seq(n_pages, &grant)) { + free_pages_exact(ret, n_pages * PAGE_SIZE); + return NULL; + } + + for (i = 0; i < n_pages; i++) { + gnttab_grant_foreign_access_ref(grant + i, data->backend_domid, + pfn_to_gfn(pfn + i), 0); + } + + *dma_handle = grant_to_dma(grant); + + return ret; +} + +static void xen_grant_dma_free(struct device *dev, size_t size, void *vaddr, + dma_addr_t dma_handle, unsigned long attrs) +{ + struct xen_grant_dma_data *data; + unsigned int i, n_pages = PFN_UP(size); + grant_ref_t grant; + + data = find_xen_grant_dma_data(dev); + if (!data) + return; + + if (unlikely(data->broken)) + return; + + grant = dma_to_grant(dma_handle); + + for (i = 0; i < n_pages; i++) { + if (unlikely(!gnttab_end_foreign_access_ref(grant + i))) { + dev_alert(dev, "Grant still in use by backend domain, disabled for further use\n"); + data->broken = true; + return; + } + } + + gnttab_free_grant_reference_seq(grant, n_pages); + + free_pages_exact(vaddr, n_pages * PAGE_SIZE); +} + +static struct page *xen_grant_dma_alloc_pages(struct device *dev, size_t size, + dma_addr_t *dma_handle, + enum dma_data_direction dir, + gfp_t gfp) +{ + void *vaddr; + + vaddr = xen_grant_dma_alloc(dev, size, dma_handle, gfp, 0); + if (!vaddr) + return NULL; + + return virt_to_page(vaddr); +} + +static void xen_grant_dma_free_pages(struct device *dev, size_t size, + struct page *vaddr, dma_addr_t dma_handle, + enum dma_data_direction dir) +{ + xen_grant_dma_free(dev, size, page_to_virt(vaddr), dma_handle, 0); +} + +static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, + unsigned long attrs) +{ + struct xen_grant_dma_data *data; + unsigned int i, n_pages = PFN_UP(size); + grant_ref_t grant; + dma_addr_t dma_handle; + + if (WARN_ON(dir == DMA_NONE)) + return DMA_MAPPING_ERROR; + + data = find_xen_grant_dma_data(dev); + if (!data) + return DMA_MAPPING_ERROR; + + if (unlikely(data->broken)) + return DMA_MAPPING_ERROR; + + if (gnttab_alloc_grant_reference_seq(n_pages, &grant)) + return DMA_MAPPING_ERROR; + + for (i = 0; i < n_pages; i++) { + gnttab_grant_foreign_access_ref(grant + i, data->backend_domid, + xen_page_to_gfn(page) + i, dir == DMA_TO_DEVICE); + } + + dma_handle = grant_to_dma(grant) + offset; + + return dma_handle; +} + +static void xen_grant_dma_unmap_page(struct device *dev, dma_addr_t dma_handle, + size_t size, enum dma_data_direction dir, + unsigned long attrs) +{ + struct xen_grant_dma_data *data; + unsigned int i, n_pages = PFN_UP(size); + grant_ref_t grant; + + if (WARN_ON(dir == DMA_NONE)) + return; + + data = find_xen_grant_dma_data(dev); + if (!data) + return; + + if (unlikely(data->broken)) + return; + + grant = dma_to_grant(dma_handle); + + for (i = 0; i < n_pages; i++) { + if (unlikely(!gnttab_end_foreign_access_ref(grant + i))) { + dev_alert(dev, "Grant still in use by backend domain, disabled for further use\n"); + data->broken = true; + return; + } + } + + 
gnttab_free_grant_reference_seq(grant, n_pages); +} + +static void xen_grant_dma_unmap_sg(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, + unsigned long attrs) +{ + struct scatterlist *s; + unsigned int i; + + if (WARN_ON(dir == DMA_NONE)) + return; + + for_each_sg(sg, s, nents, i) + xen_grant_dma_unmap_page(dev, s->dma_address, sg_dma_len(s), dir, + attrs); +} + +static int xen_grant_dma_map_sg(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, + unsigned long attrs) +{ + struct scatterlist *s; + unsigned int i; + + if (WARN_ON(dir == DMA_NONE)) + return -EINVAL; + + for_each_sg(sg, s, nents, i) { + s->dma_address = xen_grant_dma_map_page(dev, sg_page(s), s->offset, + s->length, dir, attrs); + if (s->dma_address == DMA_MAPPING_ERROR) + goto out; + + sg_dma_len(s) = s->length; + } + + return nents; + +out: + xen_grant_dma_unmap_sg(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC); + sg_dma_len(sg) = 0; + + return -EIO; +} + +static int xen_grant_dma_supported(struct device *dev, u64 mask) +{ + return mask == DMA_BIT_MASK(64); +} + +static const struct dma_map_ops xen_grant_dma_ops = { + .alloc = xen_grant_dma_alloc, + .free = xen_grant_dma_free, + .alloc_pages = xen_grant_dma_alloc_pages, + .free_pages = xen_grant_dma_free_pages, + .mmap = dma_common_mmap, + .get_sgtable = dma_common_get_sgtable, + .map_page = xen_grant_dma_map_page, + .unmap_page = xen_grant_dma_unmap_page, + .map_sg = xen_grant_dma_map_sg, + .unmap_sg = xen_grant_dma_unmap_sg, + .dma_supported = xen_grant_dma_supported, +}; + +bool xen_is_grant_dma_device(struct device *dev) +{ + struct device_node *iommu_np; + bool has_iommu; + + /* XXX Handle only DT devices for now */ + if (!dev->of_node) + return false; + + iommu_np = of_parse_phandle(dev->of_node, "iommus", 0); + has_iommu = iommu_np && of_device_is_compatible(iommu_np, "xen,grant-dma"); + of_node_put(iommu_np); + + return has_iommu; +} + +void xen_grant_setup_dma_ops(struct device *dev) +{ + struct xen_grant_dma_data *data; + struct of_phandle_args iommu_spec; + + data = find_xen_grant_dma_data(dev); + if (data) { + dev_err(dev, "Xen grant DMA data is already created\n"); + return; + } + + /* XXX ACPI device unsupported for now */ + if (!dev->of_node) + goto err; + + if (of_parse_phandle_with_args(dev->of_node, "iommus", "#iommu-cells", + 0, &iommu_spec)) { + dev_err(dev, "Cannot parse iommus property\n"); + goto err; + } + + if (!of_device_is_compatible(iommu_spec.np, "xen,grant-dma") || + iommu_spec.args_count != 1) { + dev_err(dev, "Incompatible IOMMU node\n"); + of_node_put(iommu_spec.np); + goto err; + } + + of_node_put(iommu_spec.np); + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + goto err; + + /* + * The endpoint ID here means the ID of the domain where the corresponding + * backend is running + */ + data->backend_domid = iommu_spec.args[0]; + + if (xa_err(xa_store(&xen_grant_dma_devices, (unsigned long)dev, data, + GFP_KERNEL))) { + dev_err(dev, "Cannot store Xen grant DMA data\n"); + goto err; + } + + dev->dma_ops = &xen_grant_dma_ops; + + return; + +err: + dev_err(dev, "Cannot set up Xen grant DMA ops, retain platform DMA ops\n"); +} + +MODULE_DESCRIPTION("Xen grant DMA-mapping layer"); +MODULE_AUTHOR("Juergen Gross <jgross@suse.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 7a18292540bc..738029de3c67 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -33,6 +33,7 @@ #define 
pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt +#include <linux/bitmap.h> #include <linux/memblock.h> #include <linux/sched.h> #include <linux/mm.h> @@ -70,9 +71,32 @@ static grant_ref_t **gnttab_list; static unsigned int nr_grant_frames; + +/* + * Handling of free grants: + * + * Free grants are in a simple list anchored in gnttab_free_head. They are + * linked by grant ref, the last element contains GNTTAB_LIST_END. The number + * of free entries is stored in gnttab_free_count. + * Additionally there is a bitmap of free entries anchored in + * gnttab_free_bitmap. This is being used for simplifying allocation of + * multiple consecutive grants, which is needed e.g. for support of virtio. + * gnttab_last_free is used to add free entries of new frames at the end of + * the free list. + * gnttab_free_tail_ptr specifies the variable which references the start + * of consecutive free grants ending with gnttab_last_free. This pointer is + * updated in a rather defensive way, in order to avoid performance hits in + * hot paths. + * All those variables are protected by gnttab_list_lock. + */ static int gnttab_free_count; -static grant_ref_t gnttab_free_head; +static unsigned int gnttab_size; +static grant_ref_t gnttab_free_head = GNTTAB_LIST_END; +static grant_ref_t gnttab_last_free = GNTTAB_LIST_END; +static grant_ref_t *gnttab_free_tail_ptr; +static unsigned long *gnttab_free_bitmap; static DEFINE_SPINLOCK(gnttab_list_lock); + struct grant_frames xen_auto_xlat_grant_frames; static unsigned int xen_gnttab_version; module_param_named(version, xen_gnttab_version, uint, 0); @@ -168,16 +192,116 @@ static int get_free_entries(unsigned count) ref = head = gnttab_free_head; gnttab_free_count -= count; - while (count-- > 1) - head = gnttab_entry(head); + while (count--) { + bitmap_clear(gnttab_free_bitmap, head, 1); + if (gnttab_free_tail_ptr == __gnttab_entry(head)) + gnttab_free_tail_ptr = &gnttab_free_head; + if (count) + head = gnttab_entry(head); + } gnttab_free_head = gnttab_entry(head); gnttab_entry(head) = GNTTAB_LIST_END; + if (!gnttab_free_count) { + gnttab_last_free = GNTTAB_LIST_END; + gnttab_free_tail_ptr = NULL; + } + spin_unlock_irqrestore(&gnttab_list_lock, flags); return ref; } +static int get_seq_entry_count(void) +{ + if (gnttab_last_free == GNTTAB_LIST_END || !gnttab_free_tail_ptr || + *gnttab_free_tail_ptr == GNTTAB_LIST_END) + return 0; + + return gnttab_last_free - *gnttab_free_tail_ptr + 1; +} + +/* Rebuilds the free grant list and tries to find count consecutive entries. */ +static int get_free_seq(unsigned int count) +{ + int ret = -ENOSPC; + unsigned int from, to; + grant_ref_t *last; + + gnttab_free_tail_ptr = &gnttab_free_head; + last = &gnttab_free_head; + + for (from = find_first_bit(gnttab_free_bitmap, gnttab_size); + from < gnttab_size; + from = find_next_bit(gnttab_free_bitmap, gnttab_size, to + 1)) { + to = find_next_zero_bit(gnttab_free_bitmap, gnttab_size, + from + 1); + if (ret < 0 && to - from >= count) { + ret = from; + bitmap_clear(gnttab_free_bitmap, ret, count); + from += count; + gnttab_free_count -= count; + if (from == to) + continue; + } + + /* + * Recreate the free list in order to have it properly sorted. + * This is needed to make sure that the free tail has the maximum + * possible size. 
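get_free_seq() above scans the free bitmap for a long-enough run of set bits. Here is the scan skeleton on its own, using the real find_first_bit()/find_next_zero_bit() helpers (the wrapper function itself is illustrative, not code from this patch):

#include <linux/bitmap.h>
#include <linux/errno.h>

static int find_free_run(unsigned long *bitmap, unsigned int size,
			 unsigned int count)
{
	unsigned int from, to;

	for (from = find_first_bit(bitmap, size); from < size;
	     from = find_next_bit(bitmap, size, to + 1)) {
		to = find_next_zero_bit(bitmap, size, from + 1);
		if (to - from >= count) {
			bitmap_clear(bitmap, from, count);
			return from;	/* first index of the run */
		}
	}
	return -ENOSPC;
}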
+ */ + while (from < to) { + *last = from; + last = __gnttab_entry(from); + gnttab_last_free = from; + from++; + } + if (to < gnttab_size) + gnttab_free_tail_ptr = __gnttab_entry(to - 1); + } + + *last = GNTTAB_LIST_END; + if (gnttab_last_free != gnttab_size - 1) + gnttab_free_tail_ptr = NULL; + + return ret; +} + +static int get_free_entries_seq(unsigned int count) +{ + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&gnttab_list_lock, flags); + + if (gnttab_free_count < count) { + ret = gnttab_expand(count - gnttab_free_count); + if (ret < 0) + goto out; + } + + if (get_seq_entry_count() < count) { + ret = get_free_seq(count); + if (ret >= 0) + goto out; + ret = gnttab_expand(count - get_seq_entry_count()); + if (ret < 0) + goto out; + } + + ret = *gnttab_free_tail_ptr; + *gnttab_free_tail_ptr = gnttab_entry(ret + count - 1); + gnttab_free_count -= count; + if (!gnttab_free_count) + gnttab_free_tail_ptr = NULL; + bitmap_clear(gnttab_free_bitmap, ret, count); + + out: + spin_unlock_irqrestore(&gnttab_list_lock, flags); + + return ret; +} + static void do_free_callbacks(void) { struct gnttab_free_callback *callback, *next; @@ -204,21 +328,51 @@ static inline void check_free_callbacks(void) do_free_callbacks(); } -static void put_free_entry(grant_ref_t ref) +static void put_free_entry_locked(grant_ref_t ref) { - unsigned long flags; - if (unlikely(ref < GNTTAB_NR_RESERVED_ENTRIES)) return; - spin_lock_irqsave(&gnttab_list_lock, flags); gnttab_entry(ref) = gnttab_free_head; gnttab_free_head = ref; + if (!gnttab_free_count) + gnttab_last_free = ref; + if (gnttab_free_tail_ptr == &gnttab_free_head) + gnttab_free_tail_ptr = __gnttab_entry(ref); gnttab_free_count++; + bitmap_set(gnttab_free_bitmap, ref, 1); +} + +static void put_free_entry(grant_ref_t ref) +{ + unsigned long flags; + + spin_lock_irqsave(&gnttab_list_lock, flags); + put_free_entry_locked(ref); check_free_callbacks(); spin_unlock_irqrestore(&gnttab_list_lock, flags); } +static void gnttab_set_free(unsigned int start, unsigned int n) +{ + unsigned int i; + + for (i = start; i < start + n - 1; i++) + gnttab_entry(i) = i + 1; + + gnttab_entry(i) = GNTTAB_LIST_END; + if (!gnttab_free_count) { + gnttab_free_head = start; + gnttab_free_tail_ptr = &gnttab_free_head; + } else { + gnttab_entry(gnttab_last_free) = start; + } + gnttab_free_count += n; + gnttab_last_free = i; + + bitmap_set(gnttab_free_bitmap, start, n); +} + /* * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2. 
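gnttab_set_free() above threads free entries through the grant table itself: each free slot stores the index of the next free slot, with a sentinel terminating the chain. A miniature of that representation (illustrative only; the real code also maintains a tail pointer and appends new frames at the end rather than splicing at the head):

#define MY_LIST_END	0xffffffffU

static unsigned int next_free[64];	/* next_free[i]: index of next free slot */
static unsigned int free_head = MY_LIST_END;

static void my_set_free(unsigned int start, unsigned int n)
{
	unsigned int i;

	for (i = start; i < start + n - 1; i++)
		next_free[i] = i + 1;

	next_free[i] = free_head;	/* chain in front of the old list */
	free_head = start;
}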
* Introducing a valid entry into the grant table: @@ -450,23 +604,31 @@ void gnttab_free_grant_references(grant_ref_t head) { grant_ref_t ref; unsigned long flags; - int count = 1; - if (head == GNTTAB_LIST_END) - return; + spin_lock_irqsave(&gnttab_list_lock, flags); - ref = head; - while (gnttab_entry(ref) != GNTTAB_LIST_END) { - ref = gnttab_entry(ref); - count++; + while (head != GNTTAB_LIST_END) { + ref = gnttab_entry(head); + put_free_entry_locked(head); + head = ref; } - gnttab_entry(ref) = gnttab_free_head; - gnttab_free_head = head; - gnttab_free_count += count; check_free_callbacks(); spin_unlock_irqrestore(&gnttab_list_lock, flags); } EXPORT_SYMBOL_GPL(gnttab_free_grant_references); +void gnttab_free_grant_reference_seq(grant_ref_t head, unsigned int count) +{ + unsigned long flags; + unsigned int i; + + spin_lock_irqsave(&gnttab_list_lock, flags); + for (i = count; i > 0; i--) + put_free_entry_locked(head + i - 1); + check_free_callbacks(); + spin_unlock_irqrestore(&gnttab_list_lock, flags); +} +EXPORT_SYMBOL_GPL(gnttab_free_grant_reference_seq); + int gnttab_alloc_grant_references(u16 count, grant_ref_t *head) { int h = get_free_entries(count); @@ -480,6 +642,24 @@ int gnttab_alloc_grant_references(u16 count, grant_ref_t *head) } EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references); +int gnttab_alloc_grant_reference_seq(unsigned int count, grant_ref_t *first) +{ + int h; + + if (count == 1) + h = get_free_entries(1); + else + h = get_free_entries_seq(count); + + if (h < 0) + return -ENOSPC; + + *first = h; + + return 0; +} +EXPORT_SYMBOL_GPL(gnttab_alloc_grant_reference_seq); + int gnttab_empty_grant_references(const grant_ref_t *private_head) { return (*private_head == GNTTAB_LIST_END); @@ -572,16 +752,13 @@ static int grow_gnttab_list(unsigned int more_frames) goto grow_nomem; } + gnttab_set_free(gnttab_size, extra_entries); - for (i = grefs_per_frame * nr_grant_frames; - i < grefs_per_frame * new_nr_grant_frames - 1; i++) - gnttab_entry(i) = i + 1; - - gnttab_entry(i) = gnttab_free_head; - gnttab_free_head = grefs_per_frame * nr_grant_frames; - gnttab_free_count += extra_entries; + if (!gnttab_free_tail_ptr) + gnttab_free_tail_ptr = __gnttab_entry(gnttab_size); nr_grant_frames = new_nr_grant_frames; + gnttab_size += extra_entries; check_free_callbacks(); @@ -1424,20 +1601,20 @@ static int gnttab_expand(unsigned int req_entries) int gnttab_init(void) { int i; - unsigned long max_nr_grant_frames; + unsigned long max_nr_grant_frames, max_nr_grefs; unsigned int max_nr_glist_frames, nr_glist_frames; - unsigned int nr_init_grefs; int ret; gnttab_request_version(); max_nr_grant_frames = gnttab_max_grant_frames(); + max_nr_grefs = max_nr_grant_frames * + gnttab_interface->grefs_per_grant_frame; nr_grant_frames = 1; /* Determine the maximum number of frames required for the * grant reference free list on the current hypervisor. 
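The gnttab_alloc_grant_reference_seq()/gnttab_free_grant_reference_seq() pair exported above gives callers such as the grant DMA layer a fully consecutive run of references. A hedged usage sketch (grant_pages() and its error handling are simplified assumptions, not code from this patch):

#include <linux/errno.h>
#include <xen/grant_table.h>
#include <xen/page.h>

static int grant_pages(domid_t backend_domid, unsigned long pfn,
		       unsigned int n)
{
	grant_ref_t first;
	unsigned int i;

	if (gnttab_alloc_grant_reference_seq(n, &first))
		return -ENOSPC;

	for (i = 0; i < n; i++)
		gnttab_grant_foreign_access_ref(first + i, backend_domid,
						pfn_to_gfn(pfn + i), 0);

	/* ... hand first..first+n-1 to the backend as one range ... */

	gnttab_free_grant_reference_seq(first, n);
	return 0;
}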
*/ - max_nr_glist_frames = (max_nr_grant_frames * - gnttab_interface->grefs_per_grant_frame / RPP); + max_nr_glist_frames = max_nr_grefs / RPP; gnttab_list = kmalloc_array(max_nr_glist_frames, sizeof(grant_ref_t *), @@ -1454,6 +1631,12 @@ int gnttab_init(void) } } + gnttab_free_bitmap = bitmap_zalloc(max_nr_grefs, GFP_KERNEL); + if (!gnttab_free_bitmap) { + ret = -ENOMEM; + goto ini_nomem; + } + ret = arch_gnttab_init(max_nr_grant_frames, nr_status_frames(max_nr_grant_frames)); if (ret < 0) @@ -1464,15 +1647,10 @@ int gnttab_init(void) goto ini_nomem; } - nr_init_grefs = nr_grant_frames * - gnttab_interface->grefs_per_grant_frame; - - for (i = GNTTAB_NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++) - gnttab_entry(i) = i + 1; + gnttab_size = nr_grant_frames * gnttab_interface->grefs_per_grant_frame; - gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END; - gnttab_free_count = nr_init_grefs - GNTTAB_NR_RESERVED_ENTRIES; - gnttab_free_head = GNTTAB_NR_RESERVED_ENTRIES; + gnttab_set_free(GNTTAB_NR_RESERVED_ENTRIES, + gnttab_size - GNTTAB_NR_RESERVED_ENTRIES); printk("Grant table initialized\n"); return 0; @@ -1481,6 +1659,7 @@ int gnttab_init(void) for (i--; i >= 0; i--) free_page((unsigned long)gnttab_list[i]); kfree(gnttab_list); + bitmap_free(gnttab_free_bitmap); return ret; } EXPORT_SYMBOL_GPL(gnttab_init); diff --git a/drivers/xen/xlate_mmu.c b/drivers/xen/xlate_mmu.c index 34742c6e189e..f17c4c03db30 100644 --- a/drivers/xen/xlate_mmu.c +++ b/drivers/xen/xlate_mmu.c @@ -261,7 +261,6 @@ int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt, return 0; } -EXPORT_SYMBOL_GPL(xen_xlate_map_ballooned_pages); struct remap_pfn { struct mm_struct *mm; diff --git a/fs/9p/cache.c b/fs/9p/cache.c index 1c8dc696d516..cebba4eaa0b5 100644 --- a/fs/9p/cache.c +++ b/fs/9p/cache.c @@ -62,12 +62,12 @@ void v9fs_cache_inode_get_cookie(struct inode *inode) version = cpu_to_le32(v9inode->qid.version); path = cpu_to_le64(v9inode->qid.path); v9ses = v9fs_inode2v9ses(inode); - v9inode->netfs_ctx.cache = + v9inode->netfs.cache = fscache_acquire_cookie(v9fs_session_cache(v9ses), 0, &path, sizeof(path), &version, sizeof(version), - i_size_read(&v9inode->vfs_inode)); + i_size_read(&v9inode->netfs.inode)); p9_debug(P9_DEBUG_FSC, "inode %p get cookie %p\n", inode, v9fs_inode_cookie(v9inode)); diff --git a/fs/9p/fid.c b/fs/9p/fid.c index 79df61fe0e59..baf2b152229e 100644 --- a/fs/9p/fid.c +++ b/fs/9p/fid.c @@ -152,7 +152,7 @@ static struct p9_fid *v9fs_fid_lookup_with_uid(struct dentry *dentry, const unsigned char **wnames, *uname; int i, n, l, clone, access; struct v9fs_session_info *v9ses; - struct p9_fid *fid, *old_fid = NULL; + struct p9_fid *fid, *old_fid; v9ses = v9fs_dentry2v9ses(dentry); access = v9ses->flags & V9FS_ACCESS_MASK; @@ -194,13 +194,12 @@ static struct p9_fid *v9fs_fid_lookup_with_uid(struct dentry *dentry, if (IS_ERR(fid)) return fid; + refcount_inc(&fid->count); v9fs_fid_add(dentry->d_sb->s_root, fid); } /* If we are root ourself just return that */ - if (dentry->d_sb->s_root == dentry) { - refcount_inc(&fid->count); + if (dentry->d_sb->s_root == dentry) return fid; - } /* * Do a multipath walk with attached root. 
* When walking parent we need to make sure we @@ -212,6 +211,7 @@ static struct p9_fid *v9fs_fid_lookup_with_uid(struct dentry *dentry, fid = ERR_PTR(n); goto err_out; } + old_fid = fid; clone = 1; i = 0; while (i < n) { @@ -221,19 +221,15 @@ static struct p9_fid *v9fs_fid_lookup_with_uid(struct dentry *dentry, * walk to ensure none of the patch component change */ fid = p9_client_walk(fid, l, &wnames[i], clone); + /* non-cloning walk will return the same fid */ + if (fid != old_fid) { + p9_client_clunk(old_fid); + old_fid = fid; + } if (IS_ERR(fid)) { - if (old_fid) { - /* - * If we fail, clunk fid which are mapping - * to path component and not the last component - * of the path. - */ - p9_client_clunk(old_fid); - } kfree(wnames); goto err_out; } - old_fid = fid; i += l; clone = 0; } diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c index e28ddf763b3b..0129de2ea31a 100644 --- a/fs/9p/v9fs.c +++ b/fs/9p/v9fs.c @@ -625,7 +625,7 @@ static void v9fs_inode_init_once(void *foo) struct v9fs_inode *v9inode = (struct v9fs_inode *)foo; memset(&v9inode->qid, 0, sizeof(v9inode->qid)); - inode_init_once(&v9inode->vfs_inode); + inode_init_once(&v9inode->netfs.inode); } /** diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h index ec0e8df3b2eb..6acabc2e7dc9 100644 --- a/fs/9p/v9fs.h +++ b/fs/9p/v9fs.h @@ -109,11 +109,7 @@ struct v9fs_session_info { #define V9FS_INO_INVALID_ATTR 0x01 struct v9fs_inode { - struct { - /* These must be contiguous */ - struct inode vfs_inode; /* the VFS's inode record */ - struct netfs_i_context netfs_ctx; /* Netfslib context */ - }; + struct netfs_inode netfs; /* Netfslib context and vfs inode */ struct p9_qid qid; unsigned int cache_validity; struct p9_fid *writeback_fid; @@ -122,13 +118,13 @@ struct v9fs_inode { static inline struct v9fs_inode *V9FS_I(const struct inode *inode) { - return container_of(inode, struct v9fs_inode, vfs_inode); + return container_of(inode, struct v9fs_inode, netfs.inode); } static inline struct fscache_cookie *v9fs_inode_cookie(struct v9fs_inode *v9inode) { #ifdef CONFIG_9P_FSCACHE - return netfs_i_cookie(&v9inode->vfs_inode); + return netfs_i_cookie(&v9inode->netfs); #else return NULL; #endif diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c index 8ce82ff1e40a..d0833fa69faf 100644 --- a/fs/9p/vfs_addr.c +++ b/fs/9p/vfs_addr.c @@ -58,21 +58,33 @@ static void v9fs_issue_read(struct netfs_io_subrequest *subreq) */ static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file) { + struct inode *inode = file_inode(file); + struct v9fs_inode *v9inode = V9FS_I(inode); struct p9_fid *fid = file->private_data; + BUG_ON(!fid); + + /* we might need to read from a fid that was opened write-only + * for read-modify-write of page cache, use the writeback fid + * for that */ + if (rreq->origin == NETFS_READ_FOR_WRITE && + (fid->mode & O_ACCMODE) == O_WRONLY) { + fid = v9inode->writeback_fid; + BUG_ON(!fid); + } + refcount_inc(&fid->count); rreq->netfs_priv = fid; return 0; } /** - * v9fs_req_cleanup - Cleanup request initialized by v9fs_init_request - * @mapping: unused mapping of request to cleanup - * @priv: private data to cleanup, a fid, guaranted non-null. 
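The v9fs.h change above is the heart of the netfs_inode conversion running through this series: the filesystem's inode wrapper now embeds struct netfs_inode (which itself contains the VFS inode) as its first member, and container_of() walks back from the inode to the private structure. A minimal sketch of the layout, with a hypothetical my_inode:

#include <linux/netfs.h>

struct my_inode {
	struct netfs_inode netfs;	/* must be first; holds the VFS inode */
	unsigned long my_flags;
};

static inline struct my_inode *MY_I(const struct inode *inode)
{
	return container_of(inode, struct my_inode, netfs.inode);
}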
+ * v9fs_free_request - Cleanup request initialized by v9fs_init_rreq + * @rreq: The I/O request to clean up */ -static void v9fs_req_cleanup(struct address_space *mapping, void *priv) +static void v9fs_free_request(struct netfs_io_request *rreq) { - struct p9_fid *fid = priv; + struct p9_fid *fid = rreq->netfs_priv; p9_client_clunk(fid); } @@ -94,9 +106,9 @@ static int v9fs_begin_cache_operation(struct netfs_io_request *rreq) const struct netfs_request_ops v9fs_req_ops = { .init_request = v9fs_init_request, + .free_request = v9fs_free_request, .begin_cache_operation = v9fs_begin_cache_operation, .issue_read = v9fs_issue_read, - .cleanup = v9fs_req_cleanup, }; /** @@ -140,7 +152,7 @@ static void v9fs_write_to_cache_done(void *priv, ssize_t transferred_or_error, transferred_or_error != -ENOBUFS) { version = cpu_to_le32(v9inode->qid.version); fscache_invalidate(v9fs_inode_cookie(v9inode), &version, - i_size_read(&v9inode->vfs_inode), 0); + i_size_read(&v9inode->netfs.inode), 0); } } @@ -274,7 +286,7 @@ static int v9fs_write_begin(struct file *filp, struct address_space *mapping, * file. We need to do this before we get a lock on the page in case * there's more than one writer competing for the same cache block. */ - retval = netfs_write_begin(filp, mapping, pos, len, &folio, fsdata); + retval = netfs_write_begin(&v9inode->netfs, filp, mapping, pos, len, &folio, fsdata); if (retval < 0) return retval; diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index 55367ecb9442..3d8297714772 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -234,7 +234,7 @@ struct inode *v9fs_alloc_inode(struct super_block *sb) v9inode->writeback_fid = NULL; v9inode->cache_validity = 0; mutex_init(&v9inode->v_mutex); - return &v9inode->vfs_inode; + return &v9inode->netfs.inode; } /** @@ -252,7 +252,8 @@ void v9fs_free_inode(struct inode *inode) */ static void v9fs_set_netfs_context(struct inode *inode) { - netfs_i_context_init(inode, &v9fs_req_ops); + struct v9fs_inode *v9inode = V9FS_I(inode); + netfs_inode_init(&v9inode->netfs, &v9fs_req_ops); } int v9fs_init_inode(struct v9fs_session_info *v9ses, @@ -1250,15 +1251,15 @@ static const char *v9fs_vfs_get_link(struct dentry *dentry, return ERR_PTR(-ECHILD); v9ses = v9fs_dentry2v9ses(dentry); - fid = v9fs_fid_lookup(dentry); + if (!v9fs_proto_dotu(v9ses)) + return ERR_PTR(-EBADF); + p9_debug(P9_DEBUG_VFS, "%pd\n", dentry); + fid = v9fs_fid_lookup(dentry); if (IS_ERR(fid)) return ERR_CAST(fid); - if (!v9fs_proto_dotu(v9ses)) - return ERR_PTR(-EBADF); - st = p9_client_stat(fid); p9_client_clunk(fid); if (IS_ERR(st)) diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c index d17502a738a9..b6eb1160296c 100644 --- a/fs/9p/vfs_inode_dotl.c +++ b/fs/9p/vfs_inode_dotl.c @@ -274,6 +274,7 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry, if (IS_ERR(ofid)) { err = PTR_ERR(ofid); p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err); + p9_client_clunk(dfid); goto out; } @@ -285,6 +286,7 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry, if (err) { p9_debug(P9_DEBUG_VFS, "Failed to get acl values in creat %d\n", err); + p9_client_clunk(dfid); goto error; } err = p9_client_create_dotl(ofid, name, v9fs_open_to_dotl_flags(flags), @@ -292,6 +294,7 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry, if (err < 0) { p9_debug(P9_DEBUG_VFS, "p9_client_open_dotl failed in creat %d\n", err); + p9_client_clunk(dfid); goto error; } v9fs_invalidate_inode_attr(dir); diff --git a/fs/afs/callback.c b/fs/afs/callback.c 
index 1b4d5809808d..a484fa642808 100644 --- a/fs/afs/callback.c +++ b/fs/afs/callback.c @@ -30,7 +30,7 @@ void afs_invalidate_mmap_work(struct work_struct *work) { struct afs_vnode *vnode = container_of(work, struct afs_vnode, cb_work); - unmap_mapping_pages(vnode->vfs_inode.i_mapping, 0, 0, false); + unmap_mapping_pages(vnode->netfs.inode.i_mapping, 0, 0, false); } void afs_server_init_callback_work(struct work_struct *work) diff --git a/fs/afs/dir.c b/fs/afs/dir.c index 79f6b74336d2..56ae5cd5184f 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c @@ -109,7 +109,7 @@ struct afs_lookup_cookie { */ static void afs_dir_read_cleanup(struct afs_read *req) { - struct address_space *mapping = req->vnode->vfs_inode.i_mapping; + struct address_space *mapping = req->vnode->netfs.inode.i_mapping; struct folio *folio; pgoff_t last = req->nr_pages - 1; @@ -153,7 +153,7 @@ static bool afs_dir_check_folio(struct afs_vnode *dvnode, struct folio *folio, block = kmap_local_folio(folio, offset); if (block->hdr.magic != AFS_DIR_MAGIC) { printk("kAFS: %s(%lx): [%llx] bad magic %zx/%zx is %04hx\n", - __func__, dvnode->vfs_inode.i_ino, + __func__, dvnode->netfs.inode.i_ino, pos, offset, size, ntohs(block->hdr.magic)); trace_afs_dir_check_failed(dvnode, pos + offset, i_size); kunmap_local(block); @@ -183,7 +183,7 @@ error: static void afs_dir_dump(struct afs_vnode *dvnode, struct afs_read *req) { union afs_xdr_dir_block *block; - struct address_space *mapping = dvnode->vfs_inode.i_mapping; + struct address_space *mapping = dvnode->netfs.inode.i_mapping; struct folio *folio; pgoff_t last = req->nr_pages - 1; size_t offset, size; @@ -217,7 +217,7 @@ static void afs_dir_dump(struct afs_vnode *dvnode, struct afs_read *req) */ static int afs_dir_check(struct afs_vnode *dvnode, struct afs_read *req) { - struct address_space *mapping = dvnode->vfs_inode.i_mapping; + struct address_space *mapping = dvnode->netfs.inode.i_mapping; struct folio *folio; pgoff_t last = req->nr_pages - 1; int ret = 0; @@ -269,7 +269,7 @@ static int afs_dir_open(struct inode *inode, struct file *file) static struct afs_read *afs_read_dir(struct afs_vnode *dvnode, struct key *key) __acquires(&dvnode->validate_lock) { - struct address_space *mapping = dvnode->vfs_inode.i_mapping; + struct address_space *mapping = dvnode->netfs.inode.i_mapping; struct afs_read *req; loff_t i_size; int nr_pages, i; @@ -287,7 +287,7 @@ static struct afs_read *afs_read_dir(struct afs_vnode *dvnode, struct key *key) req->cleanup = afs_dir_read_cleanup; expand: - i_size = i_size_read(&dvnode->vfs_inode); + i_size = i_size_read(&dvnode->netfs.inode); if (i_size < 2048) { ret = afs_bad(dvnode, afs_file_error_dir_small); goto error; @@ -305,7 +305,7 @@ expand: req->actual_len = i_size; /* May change */ req->len = nr_pages * PAGE_SIZE; /* We can ask for more than there is */ req->data_version = dvnode->status.data_version; /* May change */ - iov_iter_xarray(&req->def_iter, READ, &dvnode->vfs_inode.i_mapping->i_pages, + iov_iter_xarray(&req->def_iter, READ, &dvnode->netfs.inode.i_mapping->i_pages, 0, i_size); req->iter = &req->def_iter; @@ -897,7 +897,7 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry, out_op: if (op->error == 0) { - inode = &op->file[1].vnode->vfs_inode; + inode = &op->file[1].vnode->netfs.inode; op->file[1].vnode = NULL; } @@ -1139,7 +1139,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags) afs_stat_v(dir, n_reval); /* search the directory for this vnode */ - ret = afs_do_lookup_one(&dir->vfs_inode, dentry, 
&fid, key, &dir_version); + ret = afs_do_lookup_one(&dir->netfs.inode, dentry, &fid, key, &dir_version); switch (ret) { case 0: /* the filename maps to something */ @@ -1170,7 +1170,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags) _debug("%pd: file deleted (uq %u -> %u I:%u)", dentry, fid.unique, vnode->fid.unique, - vnode->vfs_inode.i_generation); + vnode->netfs.inode.i_generation); goto not_found; } goto out_valid; @@ -1368,7 +1368,7 @@ static void afs_dir_remove_subdir(struct dentry *dentry) if (d_really_is_positive(dentry)) { struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry)); - clear_nlink(&vnode->vfs_inode); + clear_nlink(&vnode->netfs.inode); set_bit(AFS_VNODE_DELETED, &vnode->flags); clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags); clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags); @@ -1487,8 +1487,8 @@ static void afs_dir_remove_link(struct afs_operation *op) /* Already done */ } else if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) { write_seqlock(&vnode->cb_lock); - drop_nlink(&vnode->vfs_inode); - if (vnode->vfs_inode.i_nlink == 0) { + drop_nlink(&vnode->netfs.inode); + if (vnode->netfs.inode.i_nlink == 0) { set_bit(AFS_VNODE_DELETED, &vnode->flags); __afs_break_callback(vnode, afs_cb_break_for_unlink); } @@ -1504,7 +1504,7 @@ static void afs_dir_remove_link(struct afs_operation *op) op->error = ret; } - _debug("nlink %d [val %d]", vnode->vfs_inode.i_nlink, op->error); + _debug("nlink %d [val %d]", vnode->netfs.inode.i_nlink, op->error); } static void afs_unlink_success(struct afs_operation *op) @@ -1680,8 +1680,8 @@ static void afs_link_success(struct afs_operation *op) afs_update_dentry_version(op, dvp, op->dentry); if (op->dentry_2->d_parent == op->dentry->d_parent) afs_update_dentry_version(op, dvp, op->dentry_2); - ihold(&vp->vnode->vfs_inode); - d_instantiate(op->dentry, &vp->vnode->vfs_inode); + ihold(&vp->vnode->netfs.inode); + d_instantiate(op->dentry, &vp->vnode->netfs.inode); } static void afs_link_put(struct afs_operation *op) diff --git a/fs/afs/dir_edit.c b/fs/afs/dir_edit.c index d98e109ecee9..0ab7752d1b75 100644 --- a/fs/afs/dir_edit.c +++ b/fs/afs/dir_edit.c @@ -109,7 +109,7 @@ static void afs_clear_contig_bits(union afs_xdr_dir_block *block, */ static struct folio *afs_dir_get_folio(struct afs_vnode *vnode, pgoff_t index) { - struct address_space *mapping = vnode->vfs_inode.i_mapping; + struct address_space *mapping = vnode->netfs.inode.i_mapping; struct folio *folio; folio = __filemap_get_folio(mapping, index, @@ -216,7 +216,7 @@ void afs_edit_dir_add(struct afs_vnode *vnode, _enter(",,{%d,%s},", name->len, name->name); - i_size = i_size_read(&vnode->vfs_inode); + i_size = i_size_read(&vnode->netfs.inode); if (i_size > AFS_DIR_BLOCK_SIZE * AFS_DIR_MAX_BLOCKS || (i_size & (AFS_DIR_BLOCK_SIZE - 1))) { clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags); @@ -336,7 +336,7 @@ found_space: if (b < AFS_DIR_BLOCKS_WITH_CTR) meta->meta.alloc_ctrs[b] -= need_slots; - inode_inc_iversion_raw(&vnode->vfs_inode); + inode_inc_iversion_raw(&vnode->netfs.inode); afs_stat_v(vnode, n_dir_cr); _debug("Insert %s in %u[%u]", name->name, b, slot); @@ -383,7 +383,7 @@ void afs_edit_dir_remove(struct afs_vnode *vnode, _enter(",,{%d,%s},", name->len, name->name); - i_size = i_size_read(&vnode->vfs_inode); + i_size = i_size_read(&vnode->netfs.inode); if (i_size < AFS_DIR_BLOCK_SIZE || i_size > AFS_DIR_BLOCK_SIZE * AFS_DIR_MAX_BLOCKS || (i_size & (AFS_DIR_BLOCK_SIZE - 1))) { @@ -463,7 +463,7 @@ found_dirent: if (b < AFS_DIR_BLOCKS_WITH_CTR) meta->meta.alloc_ctrs[b] 
+= need_slots; - inode_set_iversion_raw(&vnode->vfs_inode, vnode->status.data_version); + inode_set_iversion_raw(&vnode->netfs.inode, vnode->status.data_version); afs_stat_v(vnode, n_dir_rm); _debug("Remove %s from %u[%u]", name->name, b, slot); diff --git a/fs/afs/dir_silly.c b/fs/afs/dir_silly.c index 45cfd50a9521..bb5807e87fa4 100644 --- a/fs/afs/dir_silly.c +++ b/fs/afs/dir_silly.c @@ -131,7 +131,7 @@ int afs_sillyrename(struct afs_vnode *dvnode, struct afs_vnode *vnode, goto out; } while (!d_is_negative(sdentry)); - ihold(&vnode->vfs_inode); + ihold(&vnode->netfs.inode); ret = afs_do_silly_rename(dvnode, vnode, dentry, sdentry, key); switch (ret) { @@ -148,7 +148,7 @@ int afs_sillyrename(struct afs_vnode *dvnode, struct afs_vnode *vnode, d_drop(sdentry); } - iput(&vnode->vfs_inode); + iput(&vnode->netfs.inode); dput(sdentry); out: _leave(" = %d", ret); diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c index f120bcb8bf73..d7d9402ff718 100644 --- a/fs/afs/dynroot.c +++ b/fs/afs/dynroot.c @@ -76,7 +76,7 @@ struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root) /* there shouldn't be an existing inode */ BUG_ON(!(inode->i_state & I_NEW)); - netfs_i_context_init(inode, NULL); + netfs_inode_init(&vnode->netfs, NULL); inode->i_size = 0; inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO; if (root) { diff --git a/fs/afs/file.c b/fs/afs/file.c index a8e8832179e4..42118a4f3383 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -194,7 +194,7 @@ int afs_release(struct inode *inode, struct file *file) afs_put_wb_key(af->wb); if ((file->f_mode & FMODE_WRITE)) { - i_size = i_size_read(&vnode->vfs_inode); + i_size = i_size_read(&vnode->netfs.inode); afs_set_cache_aux(vnode, &aux); fscache_unuse_cookie(afs_vnode_cache(vnode), &aux, &i_size); } else { @@ -325,7 +325,7 @@ static void afs_issue_read(struct netfs_io_subrequest *subreq) fsreq->iter = &fsreq->def_iter; iov_iter_xarray(&fsreq->def_iter, READ, - &fsreq->vnode->vfs_inode.i_mapping->i_pages, + &fsreq->vnode->netfs.inode.i_mapping->i_pages, fsreq->pos, fsreq->len); afs_fetch_data(fsreq->vnode, fsreq); @@ -382,17 +382,17 @@ static int afs_check_write_begin(struct file *file, loff_t pos, unsigned len, return test_bit(AFS_VNODE_DELETED, &vnode->flags) ? 
-ESTALE : 0; } -static void afs_priv_cleanup(struct address_space *mapping, void *netfs_priv) +static void afs_free_request(struct netfs_io_request *rreq) { - key_put(netfs_priv); + key_put(rreq->netfs_priv); } const struct netfs_request_ops afs_req_ops = { .init_request = afs_init_request, + .free_request = afs_free_request, .begin_cache_operation = afs_begin_cache_operation, .check_write_begin = afs_check_write_begin, .issue_read = afs_issue_read, - .cleanup = afs_priv_cleanup, }; int afs_write_inode(struct inode *inode, struct writeback_control *wbc) diff --git a/fs/afs/fs_operation.c b/fs/afs/fs_operation.c index d222dfbe976b..7a3803ce3a22 100644 --- a/fs/afs/fs_operation.c +++ b/fs/afs/fs_operation.c @@ -232,14 +232,14 @@ int afs_put_operation(struct afs_operation *op) if (op->file[1].modification && op->file[1].vnode != op->file[0].vnode) clear_bit(AFS_VNODE_MODIFYING, &op->file[1].vnode->flags); if (op->file[0].put_vnode) - iput(&op->file[0].vnode->vfs_inode); + iput(&op->file[0].vnode->netfs.inode); if (op->file[1].put_vnode) - iput(&op->file[1].vnode->vfs_inode); + iput(&op->file[1].vnode->netfs.inode); if (op->more_files) { for (i = 0; i < op->nr_files - 2; i++) if (op->more_files[i].put_vnode) - iput(&op->more_files[i].vnode->vfs_inode); + iput(&op->more_files[i].vnode->netfs.inode); kfree(op->more_files); } diff --git a/fs/afs/inode.c b/fs/afs/inode.c index 30b066299d39..64dab70d4a4f 100644 --- a/fs/afs/inode.c +++ b/fs/afs/inode.c @@ -58,7 +58,7 @@ static noinline void dump_vnode(struct afs_vnode *vnode, struct afs_vnode *paren */ static void afs_set_netfs_context(struct afs_vnode *vnode) { - netfs_i_context_init(&vnode->vfs_inode, &afs_req_ops); + netfs_inode_init(&vnode->netfs, &afs_req_ops); } /* @@ -96,7 +96,7 @@ static int afs_inode_init_from_status(struct afs_operation *op, inode->i_flags |= S_NOATIME; inode->i_uid = make_kuid(&init_user_ns, status->owner); inode->i_gid = make_kgid(&init_user_ns, status->group); - set_nlink(&vnode->vfs_inode, status->nlink); + set_nlink(&vnode->netfs.inode, status->nlink); switch (status->type) { case AFS_FTYPE_FILE: @@ -139,7 +139,7 @@ static int afs_inode_init_from_status(struct afs_operation *op, afs_set_netfs_context(vnode); vnode->invalid_before = status->data_version; - inode_set_iversion_raw(&vnode->vfs_inode, status->data_version); + inode_set_iversion_raw(&vnode->netfs.inode, status->data_version); if (!vp->scb.have_cb) { /* it's a symlink we just created (the fileserver @@ -163,7 +163,7 @@ static void afs_apply_status(struct afs_operation *op, { struct afs_file_status *status = &vp->scb.status; struct afs_vnode *vnode = vp->vnode; - struct inode *inode = &vnode->vfs_inode; + struct inode *inode = &vnode->netfs.inode; struct timespec64 t; umode_t mode; bool data_changed = false; @@ -246,7 +246,7 @@ static void afs_apply_status(struct afs_operation *op, * idea of what the size should be that's not the same as * what's on the server. 
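The afs hunk above mirrors the 9p one earlier: the netfs ->cleanup(mapping, priv) hook becomes ->free_request(rreq), which receives the whole request and digs its private data out of it. A hedged sketch of an ops table written against the new shape (my_* names are illustrative):

#include <linux/key.h>
#include <linux/netfs.h>

static void my_free_request(struct netfs_io_request *rreq)
{
	key_put(rreq->netfs_priv);	/* drop whatever init_request stashed */
}

static const struct netfs_request_ops my_req_ops = {
	.free_request	= my_free_request,
	/* .init_request, .issue_read, ... elided */
};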
*/ - vnode->netfs_ctx.remote_i_size = status->size; + vnode->netfs.remote_i_size = status->size; if (change_size) { afs_set_i_size(vnode, status->size); inode->i_ctime = t; @@ -289,7 +289,7 @@ void afs_vnode_commit_status(struct afs_operation *op, struct afs_vnode_param *v */ if (vp->scb.status.abort_code == VNOVNODE) { set_bit(AFS_VNODE_DELETED, &vnode->flags); - clear_nlink(&vnode->vfs_inode); + clear_nlink(&vnode->netfs.inode); __afs_break_callback(vnode, afs_cb_break_for_deleted); op->flags &= ~AFS_OPERATION_DIR_CONFLICT; } @@ -306,8 +306,8 @@ void afs_vnode_commit_status(struct afs_operation *op, struct afs_vnode_param *v if (vp->scb.have_cb) afs_apply_callback(op, vp); } else if (vp->op_unlinked && !(op->flags & AFS_OPERATION_DIR_CONFLICT)) { - drop_nlink(&vnode->vfs_inode); - if (vnode->vfs_inode.i_nlink == 0) { + drop_nlink(&vnode->netfs.inode); + if (vnode->netfs.inode.i_nlink == 0) { set_bit(AFS_VNODE_DELETED, &vnode->flags); __afs_break_callback(vnode, afs_cb_break_for_deleted); } @@ -326,7 +326,7 @@ static void afs_fetch_status_success(struct afs_operation *op) struct afs_vnode *vnode = vp->vnode; int ret; - if (vnode->vfs_inode.i_state & I_NEW) { + if (vnode->netfs.inode.i_state & I_NEW) { ret = afs_inode_init_from_status(op, vp, vnode); op->error = ret; if (ret == 0) @@ -430,7 +430,7 @@ static void afs_get_inode_cache(struct afs_vnode *vnode) struct afs_vnode_cache_aux aux; if (vnode->status.type != AFS_FTYPE_FILE) { - vnode->netfs_ctx.cache = NULL; + vnode->netfs.cache = NULL; return; } @@ -457,7 +457,7 @@ static void afs_get_inode_cache(struct afs_vnode *vnode) struct inode *afs_iget(struct afs_operation *op, struct afs_vnode_param *vp) { struct afs_vnode_param *dvp = &op->file[0]; - struct super_block *sb = dvp->vnode->vfs_inode.i_sb; + struct super_block *sb = dvp->vnode->netfs.inode.i_sb; struct afs_vnode *vnode; struct inode *inode; int ret; @@ -582,10 +582,10 @@ static void afs_zap_data(struct afs_vnode *vnode) /* nuke all the non-dirty pages that aren't locked, mapped or being * written back in a regular file and completely discard the pages in a * directory or symlink */ - if (S_ISREG(vnode->vfs_inode.i_mode)) - invalidate_remote_inode(&vnode->vfs_inode); + if (S_ISREG(vnode->netfs.inode.i_mode)) + invalidate_remote_inode(&vnode->netfs.inode); else - invalidate_inode_pages2(vnode->vfs_inode.i_mapping); + invalidate_inode_pages2(vnode->netfs.inode.i_mapping); } /* @@ -683,8 +683,8 @@ int afs_validate(struct afs_vnode *vnode, struct key *key) key_serial(key)); if (unlikely(test_bit(AFS_VNODE_DELETED, &vnode->flags))) { - if (vnode->vfs_inode.i_nlink) - clear_nlink(&vnode->vfs_inode); + if (vnode->netfs.inode.i_nlink) + clear_nlink(&vnode->netfs.inode); goto valid; } @@ -745,7 +745,8 @@ int afs_getattr(struct user_namespace *mnt_userns, const struct path *path, _enter("{ ino=%lu v=%u }", inode->i_ino, inode->i_generation); - if (!(query_flags & AT_STATX_DONT_SYNC) && + if (vnode->volume && + !(query_flags & AT_STATX_DONT_SYNC) && !test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) { key = afs_request_key(vnode->volume->cell); if (IS_ERR(key)) @@ -826,7 +827,7 @@ void afs_evict_inode(struct inode *inode) static void afs_setattr_success(struct afs_operation *op) { struct afs_vnode_param *vp = &op->file[0]; - struct inode *inode = &vp->vnode->vfs_inode; + struct inode *inode = &vp->vnode->netfs.inode; loff_t old_i_size = i_size_read(inode); op->setattr.old_i_size = old_i_size; @@ -843,7 +844,7 @@ static void afs_setattr_success(struct afs_operation *op) static void 
afs_setattr_edit_file(struct afs_operation *op) { struct afs_vnode_param *vp = &op->file[0]; - struct inode *inode = &vp->vnode->vfs_inode; + struct inode *inode = &vp->vnode->netfs.inode; if (op->setattr.attr->ia_valid & ATTR_SIZE) { loff_t size = op->setattr.attr->ia_size; @@ -875,7 +876,7 @@ int afs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry, ATTR_MTIME | ATTR_MTIME_SET | ATTR_TIMES_SET | ATTR_TOUCH; struct afs_operation *op; struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry)); - struct inode *inode = &vnode->vfs_inode; + struct inode *inode = &vnode->netfs.inode; loff_t i_size; int ret; diff --git a/fs/afs/internal.h b/fs/afs/internal.h index a30995901266..a6f25d9e75b5 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -619,12 +619,7 @@ enum afs_lock_state { * leak from one inode to another. */ struct afs_vnode { - struct { - /* These must be contiguous */ - struct inode vfs_inode; /* the VFS's inode record */ - struct netfs_i_context netfs_ctx; /* Netfslib context */ - }; - + struct netfs_inode netfs; /* Netfslib context and vfs inode */ struct afs_volume *volume; /* volume on which vnode resides */ struct afs_fid fid; /* the file identifier for this inode */ struct afs_file_status status; /* AFS status info for this file */ @@ -675,7 +670,7 @@ struct afs_vnode { static inline struct fscache_cookie *afs_vnode_cache(struct afs_vnode *vnode) { #ifdef CONFIG_AFS_FSCACHE - return netfs_i_cookie(&vnode->vfs_inode); + return netfs_i_cookie(&vnode->netfs); #else return NULL; #endif @@ -685,7 +680,7 @@ static inline void afs_vnode_set_cache(struct afs_vnode *vnode, struct fscache_cookie *cookie) { #ifdef CONFIG_AFS_FSCACHE - vnode->netfs_ctx.cache = cookie; + vnode->netfs.cache = cookie; #endif } @@ -892,7 +887,7 @@ static inline void afs_invalidate_cache(struct afs_vnode *vnode, unsigned int fl afs_set_cache_aux(vnode, &aux); fscache_invalidate(afs_vnode_cache(vnode), &aux, - i_size_read(&vnode->vfs_inode), flags); + i_size_read(&vnode->netfs.inode), flags); } /* @@ -1217,7 +1212,7 @@ static inline struct afs_net *afs_i2net(struct inode *inode) static inline struct afs_net *afs_v2net(struct afs_vnode *vnode) { - return afs_i2net(&vnode->vfs_inode); + return afs_i2net(&vnode->netfs.inode); } static inline struct afs_net *afs_sock2net(struct sock *sk) @@ -1593,12 +1588,12 @@ extern void yfs_fs_store_opaque_acl2(struct afs_operation *); */ static inline struct afs_vnode *AFS_FS_I(struct inode *inode) { - return container_of(inode, struct afs_vnode, vfs_inode); + return container_of(inode, struct afs_vnode, netfs.inode); } static inline struct inode *AFS_VNODE_TO_I(struct afs_vnode *vnode) { - return &vnode->vfs_inode; + return &vnode->netfs.inode; } /* @@ -1621,8 +1616,8 @@ static inline void afs_update_dentry_version(struct afs_operation *op, */ static inline void afs_set_i_size(struct afs_vnode *vnode, u64 size) { - i_size_write(&vnode->vfs_inode, size); - vnode->vfs_inode.i_blocks = ((size + 1023) >> 10) << 1; + i_size_write(&vnode->netfs.inode, size); + vnode->netfs.inode.i_blocks = ((size + 1023) >> 10) << 1; } /* diff --git a/fs/afs/super.c b/fs/afs/super.c index 1fea195b0b27..95d713074dc8 100644 --- a/fs/afs/super.c +++ b/fs/afs/super.c @@ -659,7 +659,7 @@ static void afs_i_init_once(void *_vnode) struct afs_vnode *vnode = _vnode; memset(vnode, 0, sizeof(*vnode)); - inode_init_once(&vnode->vfs_inode); + inode_init_once(&vnode->netfs.inode); mutex_init(&vnode->io_lock); init_rwsem(&vnode->validate_lock); spin_lock_init(&vnode->wb_lock); @@ -700,8 +700,8 @@ 
static struct inode *afs_alloc_inode(struct super_block *sb) init_rwsem(&vnode->rmdir_lock); INIT_WORK(&vnode->cb_work, afs_invalidate_mmap_work); - _leave(" = %p", &vnode->vfs_inode); - return &vnode->vfs_inode; + _leave(" = %p", &vnode->netfs.inode); + return &vnode->netfs.inode; } static void afs_free_inode(struct inode *inode) diff --git a/fs/afs/volume.c b/fs/afs/volume.c index 94a3d247924b..cc665cef0abe 100644 --- a/fs/afs/volume.c +++ b/fs/afs/volume.c @@ -9,8 +9,7 @@ #include <linux/slab.h> #include "internal.h" -unsigned __read_mostly afs_volume_gc_delay = 10; -unsigned __read_mostly afs_volume_record_life = 60 * 60; +static unsigned __read_mostly afs_volume_record_life = 60 * 60; /* * Insert a volume into a cell. If there's an existing volume record, that is diff --git a/fs/afs/write.c b/fs/afs/write.c index 2236b2165e37..2c885b22de34 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -60,7 +60,7 @@ int afs_write_begin(struct file *file, struct address_space *mapping, * file. We need to do this before we get a lock on the page in case * there's more than one writer competing for the same cache block. */ - ret = netfs_write_begin(file, mapping, pos, len, &folio, fsdata); + ret = netfs_write_begin(&vnode->netfs, file, mapping, pos, len, &folio, fsdata); if (ret < 0) return ret; @@ -146,10 +146,10 @@ int afs_write_end(struct file *file, struct address_space *mapping, write_end_pos = pos + copied; - i_size = i_size_read(&vnode->vfs_inode); + i_size = i_size_read(&vnode->netfs.inode); if (write_end_pos > i_size) { write_seqlock(&vnode->cb_lock); - i_size = i_size_read(&vnode->vfs_inode); + i_size = i_size_read(&vnode->netfs.inode); if (write_end_pos > i_size) afs_set_i_size(vnode, write_end_pos); write_sequnlock(&vnode->cb_lock); @@ -257,7 +257,7 @@ static void afs_redirty_pages(struct writeback_control *wbc, */ static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsigned int len) { - struct address_space *mapping = vnode->vfs_inode.i_mapping; + struct address_space *mapping = vnode->netfs.inode.i_mapping; struct folio *folio; pgoff_t end; @@ -354,7 +354,6 @@ static const struct afs_operation_ops afs_store_data_operation = { static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t pos, bool laundering) { - struct netfs_i_context *ictx = &vnode->netfs_ctx; struct afs_operation *op; struct afs_wb_key *wbk = NULL; loff_t size = iov_iter_count(iter); @@ -385,9 +384,9 @@ static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t op->store.write_iter = iter; op->store.pos = pos; op->store.size = size; - op->store.i_size = max(pos + size, ictx->remote_i_size); + op->store.i_size = max(pos + size, vnode->netfs.remote_i_size); op->store.laundering = laundering; - op->mtime = vnode->vfs_inode.i_mtime; + op->mtime = vnode->netfs.inode.i_mtime; op->flags |= AFS_OPERATION_UNINTR; op->ops = &afs_store_data_operation; @@ -554,7 +553,7 @@ static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping, struct iov_iter iter; unsigned long priv; unsigned int offset, to, len, max_len; - loff_t i_size = i_size_read(&vnode->vfs_inode); + loff_t i_size = i_size_read(&vnode->netfs.inode); bool new_content = test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags); bool caching = fscache_cookie_enabled(afs_vnode_cache(vnode)); long count = wbc->nr_to_write; @@ -845,7 +844,7 @@ ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from) _enter("{%llx:%llu},{%zu},", vnode->fid.vid, vnode->fid.vnode, count); - if 
(IS_SWAPFILE(&vnode->vfs_inode)) { + if (IS_SWAPFILE(&vnode->netfs.inode)) { printk(KERN_INFO "AFS: Attempt to write to active swap file!\n"); return -EBUSY; @@ -958,8 +957,8 @@ void afs_prune_wb_keys(struct afs_vnode *vnode) /* Discard unused keys */ spin_lock(&vnode->wb_lock); - if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) && - !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) { + if (!mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_WRITEBACK) && + !mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_DIRTY)) { list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) { if (refcount_read(&wbk->usage) == 1) list_move(&wbk->vnode_link, &graveyard); @@ -1034,6 +1033,6 @@ static void afs_write_to_cache(struct afs_vnode *vnode, bool caching) { fscache_write_to_cache(afs_vnode_cache(vnode), - vnode->vfs_inode.i_mapping, start, len, i_size, + vnode->netfs.inode.i_mapping, start, len, i_size, afs_write_to_cache_done, vnode, caching); } diff --git a/fs/attr.c b/fs/attr.c index 66899b6e9bd8..dbe996b0dedf 100644 --- a/fs/attr.c +++ b/fs/attr.c @@ -61,9 +61,15 @@ static bool chgrp_ok(struct user_namespace *mnt_userns, const struct inode *inode, kgid_t gid) { kgid_t kgid = i_gid_into_mnt(mnt_userns, inode); - if (uid_eq(current_fsuid(), i_uid_into_mnt(mnt_userns, inode)) && - (in_group_p(gid) || gid_eq(gid, inode->i_gid))) - return true; + if (uid_eq(current_fsuid(), i_uid_into_mnt(mnt_userns, inode))) { + kgid_t mapped_gid; + + if (gid_eq(gid, inode->i_gid)) + return true; + mapped_gid = mapped_kgid_fs(mnt_userns, i_user_ns(inode), gid); + if (in_group_p(mapped_gid)) + return true; + } if (capable_wrt_inode_uidgid(mnt_userns, inode, CAP_CHOWN)) return true; if (gid_eq(kgid, INVALID_GID) && @@ -123,12 +129,20 @@ int setattr_prepare(struct user_namespace *mnt_userns, struct dentry *dentry, /* Make sure a caller can chmod. */ if (ia_valid & ATTR_MODE) { + kgid_t mapped_gid; + if (!inode_owner_or_capable(mnt_userns, inode)) return -EPERM; + + if (ia_valid & ATTR_GID) + mapped_gid = mapped_kgid_fs(mnt_userns, + i_user_ns(inode), attr->ia_gid); + else + mapped_gid = i_gid_into_mnt(mnt_userns, inode); + /* Also check the setgid bit! */ - if (!in_group_p((ia_valid & ATTR_GID) ? attr->ia_gid : - i_gid_into_mnt(mnt_userns, inode)) && - !capable_wrt_inode_uidgid(mnt_userns, inode, CAP_FSETID)) + if (!in_group_p(mapped_gid) && + !capable_wrt_inode_uidgid(mnt_userns, inode, CAP_FSETID)) attr->ia_mode &= ~S_ISGID; } diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h index 3ac668ace50a..35e0e860cc0b 100644 --- a/fs/btrfs/block-group.h +++ b/fs/btrfs/block-group.h @@ -104,6 +104,7 @@ struct btrfs_block_group { unsigned int relocating_repair:1; unsigned int chunk_item_inserted:1; unsigned int zone_is_active:1; + unsigned int zoned_data_reloc_ongoing:1; int disk_cache_state; diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 0e49b1a0c071..415bf1823fb3 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1330,6 +1330,8 @@ struct btrfs_replace_extent_info { * existing extent into a file range. */ bool is_new_extent; + /* Indicate if we should update the inode's mtime and ctime. */ + bool update_times; /* Meaningful only if is_new_extent is true. 
*/ int qgroup_reserved; /* diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 89e94ea2fef5..4ba005c41983 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -4632,6 +4632,17 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info) int ret; set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags); + + /* + * We may have the reclaim task running and relocating a data block group, + * in which case it may create delayed iputs. So stop it before we park + * the cleaner kthread, otherwise we can get new delayed iputs after + * parking the cleaner, and that can make the async reclaim task hang + * if it's waiting for delayed iputs to complete, since the cleaner is + * parked and cannot run delayed iputs - this will make us hang when + * trying to stop the async reclaim task. + */ + cancel_work_sync(&fs_info->reclaim_bgs_work); /* * We don't want the cleaner to start new transactions, add more delayed * iputs, etc. while we're closing. We can't use kthread_stop() yet @@ -4672,8 +4683,6 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info) cancel_work_sync(&fs_info->async_data_reclaim_work); cancel_work_sync(&fs_info->preempt_reclaim_work); - cancel_work_sync(&fs_info->reclaim_bgs_work); - /* Cancel or finish ongoing discard work */ btrfs_discard_cleanup(fs_info); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 0867c5cd6e01..4157ecc27d4b 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3832,7 +3832,7 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group, block_group->start == fs_info->data_reloc_bg || fs_info->data_reloc_bg == 0); - if (block_group->ro) { + if (block_group->ro || block_group->zoned_data_reloc_ongoing) { ret = 1; goto out; } @@ -3894,8 +3894,24 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group, out: if (ret && ffe_ctl->for_treelog) fs_info->treelog_bg = 0; - if (ret && ffe_ctl->for_data_reloc) + if (ret && ffe_ctl->for_data_reloc && + fs_info->data_reloc_bg == block_group->start) { + /* + * Do not allow further allocations from this block group. + * Compared to increasing the ->ro, setting the + * ->zoned_data_reloc_ongoing flag still allows nocow + * writers to come in. See btrfs_inc_nocow_writers(). + * + * We need to disable allocations here to avoid allocating a + * regular (non-relocation data) extent. With a mix of relocation + * extents and regular extents, we can dispatch WRITE commands + * (for relocation extents) and ZONE APPEND commands (for + * regular extents) at the same time to the same zone, which + * can easily break the write pointer.
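+ * The flag is cleared again by btrfs_zoned_release_data_reloc_bg() once all relocation extents in the block group have been written back.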
+ */ + block_group->zoned_data_reloc_ongoing = 1; fs_info->data_reloc_bg = 0; + } spin_unlock(&fs_info->relocation_bg_lock); spin_unlock(&fs_info->treelog_bg_lock); spin_unlock(&block_group->lock); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 8f6b544ae616..04e36343da3a 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -5241,13 +5241,14 @@ int extent_writepages(struct address_space *mapping, */ btrfs_zoned_data_reloc_lock(BTRFS_I(inode)); ret = extent_write_cache_pages(mapping, wbc, &epd); - btrfs_zoned_data_reloc_unlock(BTRFS_I(inode)); ASSERT(ret <= 0); if (ret < 0) { + btrfs_zoned_data_reloc_unlock(BTRFS_I(inode)); end_write_bio(&epd, ret); return ret; } flush_write_bio(&epd); + btrfs_zoned_data_reloc_unlock(BTRFS_I(inode)); return ret; } diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 1fd827b99c1b..9dfde1af8a64 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -2323,25 +2323,62 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) */ btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP); - if (ret != BTRFS_NO_LOG_SYNC) { + if (ret == BTRFS_NO_LOG_SYNC) { + ret = btrfs_end_transaction(trans); + goto out; + } + + /* We successfully logged the inode, attempt to sync the log. */ + if (!ret) { + ret = btrfs_sync_log(trans, root, &ctx); if (!ret) { - ret = btrfs_sync_log(trans, root, &ctx); - if (!ret) { - ret = btrfs_end_transaction(trans); - goto out; - } - } - if (!full_sync) { - ret = btrfs_wait_ordered_range(inode, start, len); - if (ret) { - btrfs_end_transaction(trans); - goto out; - } + ret = btrfs_end_transaction(trans); + goto out; } - ret = btrfs_commit_transaction(trans); - } else { + } + + /* + * At this point we need to commit the transaction because + * btrfs_need_log_full_commit() was requested or some other error occurred. + * + * If we didn't do a full sync we have to stop the trans handle, wait on + * the ordered extents, start it again and commit the transaction. If + * we attempt to wait on the ordered extents here we could deadlock with + * something like fallocate() that is holding the extent lock and trying to + * start a transaction, while some other thread is trying to commit the + * transaction and we (fsync) are currently holding the transaction + * open. + */ + if (!full_sync) { ret = btrfs_end_transaction(trans); + if (ret) + goto out; + ret = btrfs_wait_ordered_range(inode, start, len); + if (ret) + goto out; + + /* + * This is safe to use here because we're only interested in + * making sure the transaction that had the ordered extents is + * committed. We aren't waiting on anything past this point, + * we're purely getting the transaction and committing it. + */ + trans = btrfs_attach_transaction_barrier(root); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); + + /* + * We committed the transaction and there's no currently + * running transaction, which means everything we care + * about made it to disk and we are done.
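+ * btrfs_attach_transaction_barrier() signals this case with -ENOENT, so treat it as success.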
+ */ + if (ret == -ENOENT) + ret = 0; + goto out; + } } + + ret = btrfs_commit_transaction(trans); out: ASSERT(list_empty(&ctx.list)); err = file_check_and_advance_wb_err(file); @@ -2719,7 +2756,8 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode, ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv, min_size, false); - BUG_ON(ret); + if (WARN_ON(ret)) + goto out_trans; trans->block_rsv = rsv; cur_offset = start; @@ -2803,6 +2841,25 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode, extent_info->file_offset += replace_len; } + /* + * We are releasing our handle on the transaction, balance the + * dirty pages of the btree inode and flush delayed items, and + * then get a new transaction handle, which may now point to a + * new transaction in case someone else may have committed the + * transaction we used to replace/drop file extent items. So + * bump the inode's iversion and update mtime and ctime except + * if we are called from a dedupe context. This is because a + * power failure/crash may happen after the transaction is + * committed and before we finish replacing/dropping all the + * file extent items we need. + */ + inode_inc_iversion(&inode->vfs_inode); + + if (!extent_info || extent_info->update_times) { + inode->vfs_inode.i_mtime = current_time(&inode->vfs_inode); + inode->vfs_inode.i_ctime = inode->vfs_inode.i_mtime; + } + ret = btrfs_update_inode(trans, root, inode); if (ret) break; @@ -2819,7 +2876,8 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode, ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv, min_size, false); - BUG_ON(ret); /* shouldn't happen */ + if (WARN_ON(ret)) + break; trans->block_rsv = rsv; cur_offset = drop_args.drop_end; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 81737eff92f3..05e0c4a5affd 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3195,6 +3195,8 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) ordered_extent->file_offset, ordered_extent->file_offset + logical_len); + btrfs_zoned_release_data_reloc_bg(fs_info, ordered_extent->disk_bytenr, + ordered_extent->disk_num_bytes); } else { BUG_ON(root == fs_info->tree_root); ret = insert_ordered_extent_file_extent(trans, ordered_extent); @@ -9897,6 +9899,7 @@ static struct btrfs_trans_handle *insert_prealloc_file_extent( extent_info.file_offset = file_offset; extent_info.extent_buf = (char *)&stack_fi; extent_info.is_new_extent = true; + extent_info.update_times = true; extent_info.qgroup_reserved = qgroup_released; extent_info.insertions = 0; diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c index 313d9d685adb..33461b4f9c8b 100644 --- a/fs/btrfs/locking.c +++ b/fs/btrfs/locking.c @@ -45,7 +45,6 @@ void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting ne start_ns = ktime_get_ns(); down_read_nested(&eb->lock, nest); - eb->lock_owner = current->pid; trace_btrfs_tree_read_lock(eb, start_ns); } @@ -62,7 +61,6 @@ void btrfs_tree_read_lock(struct extent_buffer *eb) int btrfs_try_tree_read_lock(struct extent_buffer *eb) { if (down_read_trylock(&eb->lock)) { - eb->lock_owner = current->pid; trace_btrfs_try_tree_read_lock(eb); return 1; } @@ -90,7 +88,6 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb) void btrfs_tree_read_unlock(struct extent_buffer *eb) { trace_btrfs_tree_read_unlock(eb); - eb->lock_owner = 0; up_read(&eb->lock); } diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c index c39f8b3a5a4a..a3549d587464 100644 --- a/fs/btrfs/reflink.c +++ b/fs/btrfs/reflink.c @@ -344,6 
+344,7 @@ static int btrfs_clone(struct inode *src, struct inode *inode, int ret; const u64 len = olen_aligned; u64 last_dest_end = destoff; + u64 prev_extent_end = off; ret = -ENOMEM; buf = kvmalloc(fs_info->nodesize, GFP_KERNEL); @@ -363,7 +364,6 @@ static int btrfs_clone(struct inode *src, struct inode *inode, key.offset = off; while (1) { - u64 next_key_min_offset = key.offset + 1; struct btrfs_file_extent_item *extent; u64 extent_gen; int type; @@ -431,14 +431,21 @@ process_slot: * The first search might have left us at an extent item that * ends before our target range's start, can happen if we have * holes and NO_HOLES feature enabled. + * + * Subsequent searches may leave us on a file range we have + * processed before - this happens due to a race with ordered + * extent completion for a file range that is outside our source + * range, but that range was part of a file extent item that + * also covered a leading part of our source range. */ - if (key.offset + datal <= off) { + if (key.offset + datal <= prev_extent_end) { path->slots[0]++; goto process_slot; } else if (key.offset >= off + len) { break; } - next_key_min_offset = key.offset + datal; + + prev_extent_end = key.offset + datal; size = btrfs_item_size(leaf, slot); read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, slot), size); @@ -489,6 +496,7 @@ process_slot: clone_info.file_offset = new_key.offset; clone_info.extent_buf = buf; clone_info.is_new_extent = false; + clone_info.update_times = !no_time_update; ret = btrfs_replace_file_extents(BTRFS_I(inode), path, drop_start, new_key.offset + datal - 1, &clone_info, &trans); @@ -550,7 +558,7 @@ process_slot: break; btrfs_release_path(path); - key.offset = next_key_min_offset; + key.offset = prev_extent_end; if (fatal_signal_pending(current)) { ret = -EINTR; diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index b1fdc6a26c76..6627dd7875ee 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -763,6 +763,8 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, compress_force = false; no_compress++; } else { + btrfs_err(info, "unrecognized compression value %s", + args[0].from); ret = -EINVAL; goto out; } @@ -821,8 +823,11 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, case Opt_thread_pool: ret = match_int(&args[0], &intarg); if (ret) { + btrfs_err(info, "unrecognized thread_pool value %s", + args[0].from); goto out; } else if (intarg == 0) { + btrfs_err(info, "invalid value 0 for thread_pool"); ret = -EINVAL; goto out; } @@ -883,8 +888,11 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, break; case Opt_ratio: ret = match_int(&args[0], &intarg); - if (ret) + if (ret) { + btrfs_err(info, "unrecognized metadata_ratio value %s", + args[0].from); goto out; + } info->metadata_ratio = intarg; btrfs_info(info, "metadata ratio %u", info->metadata_ratio); @@ -901,6 +909,8 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, btrfs_set_and_info(info, DISCARD_ASYNC, "turning on async discard"); } else { + btrfs_err(info, "unrecognized discard mode value %s", + args[0].from); ret = -EINVAL; goto out; } @@ -933,6 +943,8 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, btrfs_set_and_info(info, FREE_SPACE_TREE, "enabling free space tree"); } else { + btrfs_err(info, "unrecognized space_cache value %s", + args[0].from); ret = -EINVAL; goto out; } @@ -1014,8 +1026,12 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, break; case Opt_check_integrity_print_mask: ret = 
match_int(&args[0], &intarg); - if (ret) + if (ret) { + btrfs_err(info, + "unrecognized check_integrity_print_mask value %s", + args[0].from); goto out; + } info->check_integrity_print_mask = intarg; btrfs_info(info, "check_integrity_print_mask 0x%x", info->check_integrity_print_mask); @@ -1030,13 +1046,15 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, goto out; #endif case Opt_fatal_errors: - if (strcmp(args[0].from, "panic") == 0) + if (strcmp(args[0].from, "panic") == 0) { btrfs_set_opt(info->mount_opt, PANIC_ON_FATAL_ERROR); - else if (strcmp(args[0].from, "bug") == 0) + } else if (strcmp(args[0].from, "bug") == 0) { btrfs_clear_opt(info->mount_opt, PANIC_ON_FATAL_ERROR); - else { + } else { + btrfs_err(info, "unrecognized fatal_errors value %s", + args[0].from); ret = -EINVAL; goto out; } @@ -1044,8 +1062,12 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, case Opt_commit_interval: intarg = 0; ret = match_int(&args[0], &intarg); - if (ret) + if (ret) { + btrfs_err(info, "unrecognized commit_interval value %s", + args[0].from); + ret = -EINVAL; goto out; + } if (intarg == 0) { btrfs_info(info, "using default commit interval %us", @@ -1059,8 +1081,11 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, break; case Opt_rescue: ret = parse_rescue_options(info, args[0].from); - if (ret < 0) + if (ret < 0) { + btrfs_err(info, "unrecognized rescue value %s", + args[0].from); goto out; + } break; #ifdef CONFIG_BTRFS_DEBUG case Opt_fragment_all: @@ -1985,6 +2010,14 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data) if (ret) goto restore; + /* V1 cache is not supported for subpage mount. */ + if (fs_info->sectorsize < PAGE_SIZE && btrfs_test_opt(fs_info, SPACE_CACHE)) { + btrfs_warn(fs_info, + "v1 space cache is not supported for page size %lu with sectorsize %u", + PAGE_SIZE, fs_info->sectorsize); + ret = -EINVAL; + goto restore; + } btrfs_remount_begin(fs_info, old_opts, *flags); btrfs_resize_thread_pool(fs_info, fs_info->thread_pool_size, old_thread_pool_size); diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c index 11237a913bee..79e8c8cd75ed 100644 --- a/fs/btrfs/zoned.c +++ b/fs/btrfs/zoned.c @@ -2139,3 +2139,30 @@ bool btrfs_zoned_should_reclaim(struct btrfs_fs_info *fs_info) factor = div64_u64(used * 100, total); return factor >= fs_info->bg_reclaim_threshold; } + +void btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info *fs_info, u64 logical, + u64 length) +{ + struct btrfs_block_group *block_group; + + if (!btrfs_is_zoned(fs_info)) + return; + + block_group = btrfs_lookup_block_group(fs_info, logical); + /* It should be called on a previous data relocation block group. */ + ASSERT(block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)); + + spin_lock(&block_group->lock); + if (!block_group->zoned_data_reloc_ongoing) + goto out; + + /* All relocation extents are written. */ + if (block_group->start + block_group->alloc_offset == logical + length) { + /* Now, release this block group for further allocations. 
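+ * At this point the finished ordered extent reaches the allocation frontier, so new regular allocations can no longer interleave with the relocation writes.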
*/ + block_group->zoned_data_reloc_ongoing = 0; + } + +out: + spin_unlock(&block_group->lock); + btrfs_put_block_group(block_group); +} diff --git a/fs/btrfs/zoned.h b/fs/btrfs/zoned.h index bb1a189e11f9..6b2eec99162b 100644 --- a/fs/btrfs/zoned.h +++ b/fs/btrfs/zoned.h @@ -77,6 +77,8 @@ void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg, void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg); void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info); bool btrfs_zoned_should_reclaim(struct btrfs_fs_info *fs_info); +void btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info *fs_info, u64 logical, + u64 length); #else /* CONFIG_BLK_DEV_ZONED */ static inline int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos, struct blk_zone *zone) @@ -243,6 +245,9 @@ static inline bool btrfs_zoned_should_reclaim(struct btrfs_fs_info *fs_info) { return false; } + +static inline void btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info *fs_info, + u64 logical, u64 length) { } #endif static inline bool btrfs_dev_is_sequential(struct btrfs_device *device, u64 pos) diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index e5221be6eb55..6dee88815491 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -394,11 +394,10 @@ static int ceph_init_request(struct netfs_io_request *rreq, struct file *file) return 0; } -static void ceph_readahead_cleanup(struct address_space *mapping, void *priv) +static void ceph_netfs_free_request(struct netfs_io_request *rreq) { - struct inode *inode = mapping->host; - struct ceph_inode_info *ci = ceph_inode(inode); - int got = (uintptr_t)priv; + struct ceph_inode_info *ci = ceph_inode(rreq->inode); + int got = (uintptr_t)rreq->netfs_priv; if (got) ceph_put_cap_refs(ci, got); @@ -406,12 +405,12 @@ static void ceph_readahead_cleanup(struct address_space *mapping, void *priv) const struct netfs_request_ops ceph_netfs_ops = { .init_request = ceph_init_request, + .free_request = ceph_netfs_free_request, .begin_cache_operation = ceph_begin_cache_operation, .issue_read = ceph_netfs_issue_read, .expand_readahead = ceph_netfs_expand_readahead, .clamp_length = ceph_netfs_clamp_length, .check_write_begin = ceph_netfs_check_write_begin, - .cleanup = ceph_readahead_cleanup, }; #ifdef CONFIG_CEPH_FSCACHE @@ -1322,10 +1321,11 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping, struct page **pagep, void **fsdata) { struct inode *inode = file_inode(file); + struct ceph_inode_info *ci = ceph_inode(inode); struct folio *folio = NULL; int r; - r = netfs_write_begin(file, inode->i_mapping, pos, len, &folio, NULL); + r = netfs_write_begin(&ci->netfs, file, inode->i_mapping, pos, len, &folio, NULL); if (r == 0) folio_wait_fscache(folio); if (r < 0) { @@ -1798,7 +1798,7 @@ enum { static int __ceph_pool_perm_get(struct ceph_inode_info *ci, s64 pool, struct ceph_string *pool_ns) { - struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode); + struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->netfs.inode); struct ceph_mds_client *mdsc = fsc->mdsc; struct ceph_osd_request *rd_req = NULL, *wr_req = NULL; struct rb_node **p, *parent; @@ -1913,7 +1913,7 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci, 0, false, true); err = ceph_osdc_start_request(&fsc->client->osdc, rd_req, false); - wr_req->r_mtime = ci->vfs_inode.i_mtime; + wr_req->r_mtime = ci->netfs.inode.i_mtime; err2 = ceph_osdc_start_request(&fsc->client->osdc, wr_req, false); if (!err) diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c index ddea99922073..177d8e8d73fe 100644 --- 
a/fs/ceph/cache.c +++ b/fs/ceph/cache.c @@ -29,9 +29,9 @@ void ceph_fscache_register_inode_cookie(struct inode *inode) if (!(inode->i_state & I_NEW)) return; - WARN_ON_ONCE(ci->netfs_ctx.cache); + WARN_ON_ONCE(ci->netfs.cache); - ci->netfs_ctx.cache = + ci->netfs.cache = fscache_acquire_cookie(fsc->fscache, 0, &ci->i_vino, sizeof(ci->i_vino), &ci->i_version, sizeof(ci->i_version), diff --git a/fs/ceph/cache.h b/fs/ceph/cache.h index 7255b790a4c1..dc502daac49a 100644 --- a/fs/ceph/cache.h +++ b/fs/ceph/cache.h @@ -28,7 +28,7 @@ void ceph_fscache_invalidate(struct inode *inode, bool dio_write); static inline struct fscache_cookie *ceph_fscache_cookie(struct ceph_inode_info *ci) { - return netfs_i_cookie(&ci->vfs_inode); + return netfs_i_cookie(&ci->netfs); } static inline void ceph_fscache_resize(struct inode *inode, loff_t to) diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index bf2e94005598..38c930384d41 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -492,7 +492,7 @@ static void __cap_set_timeouts(struct ceph_mds_client *mdsc, struct ceph_mount_options *opt = mdsc->fsc->mount_options; ci->i_hold_caps_max = round_jiffies(jiffies + opt->caps_wanted_delay_max * HZ); - dout("__cap_set_timeouts %p %lu\n", &ci->vfs_inode, + dout("__cap_set_timeouts %p %lu\n", &ci->netfs.inode, ci->i_hold_caps_max - jiffies); } @@ -507,7 +507,7 @@ static void __cap_set_timeouts(struct ceph_mds_client *mdsc, static void __cap_delay_requeue(struct ceph_mds_client *mdsc, struct ceph_inode_info *ci) { - dout("__cap_delay_requeue %p flags 0x%lx at %lu\n", &ci->vfs_inode, + dout("__cap_delay_requeue %p flags 0x%lx at %lu\n", &ci->netfs.inode, ci->i_ceph_flags, ci->i_hold_caps_max); if (!mdsc->stopping) { spin_lock(&mdsc->cap_delay_lock); @@ -531,7 +531,7 @@ no_change: static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc, struct ceph_inode_info *ci) { - dout("__cap_delay_requeue_front %p\n", &ci->vfs_inode); + dout("__cap_delay_requeue_front %p\n", &ci->netfs.inode); spin_lock(&mdsc->cap_delay_lock); ci->i_ceph_flags |= CEPH_I_FLUSH; if (!list_empty(&ci->i_cap_delay_list)) @@ -548,7 +548,7 @@ static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc, static void __cap_delay_cancel(struct ceph_mds_client *mdsc, struct ceph_inode_info *ci) { - dout("__cap_delay_cancel %p\n", &ci->vfs_inode); + dout("__cap_delay_cancel %p\n", &ci->netfs.inode); if (list_empty(&ci->i_cap_delay_list)) return; spin_lock(&mdsc->cap_delay_lock); @@ -568,7 +568,7 @@ static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap, * Each time we receive FILE_CACHE anew, we increment * i_rdcache_gen. 
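* This lets stale page cache contents from an earlier grant be told apart from data cached under the current one.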
*/ - if (S_ISREG(ci->vfs_inode.i_mode) && + if (S_ISREG(ci->netfs.inode.i_mode) && (issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) && (had & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0) { ci->i_rdcache_gen++; @@ -583,14 +583,14 @@ static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap, if ((issued & CEPH_CAP_FILE_SHARED) != (had & CEPH_CAP_FILE_SHARED)) { if (issued & CEPH_CAP_FILE_SHARED) atomic_inc(&ci->i_shared_gen); - if (S_ISDIR(ci->vfs_inode.i_mode)) { - dout(" marking %p NOT complete\n", &ci->vfs_inode); + if (S_ISDIR(ci->netfs.inode.i_mode)) { + dout(" marking %p NOT complete\n", &ci->netfs.inode); __ceph_dir_clear_complete(ci); } } /* Wipe saved layout if we're losing DIR_CREATE caps */ - if (S_ISDIR(ci->vfs_inode.i_mode) && (had & CEPH_CAP_DIR_CREATE) && + if (S_ISDIR(ci->netfs.inode.i_mode) && (had & CEPH_CAP_DIR_CREATE) && !(issued & CEPH_CAP_DIR_CREATE)) { ceph_put_string(rcu_dereference_raw(ci->i_cached_layout.pool_ns)); memset(&ci->i_cached_layout, 0, sizeof(ci->i_cached_layout)); @@ -771,7 +771,7 @@ static int __cap_is_valid(struct ceph_cap *cap) if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) { dout("__cap_is_valid %p cap %p issued %s " - "but STALE (gen %u vs %u)\n", &cap->ci->vfs_inode, + "but STALE (gen %u vs %u)\n", &cap->ci->netfs.inode, cap, ceph_cap_string(cap->issued), cap->cap_gen, gen); return 0; } @@ -797,7 +797,7 @@ int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented) if (!__cap_is_valid(cap)) continue; dout("__ceph_caps_issued %p cap %p issued %s\n", - &ci->vfs_inode, cap, ceph_cap_string(cap->issued)); + &ci->netfs.inode, cap, ceph_cap_string(cap->issued)); have |= cap->issued; if (implemented) *implemented |= cap->implemented; @@ -844,12 +844,12 @@ static void __touch_cap(struct ceph_cap *cap) spin_lock(&s->s_cap_lock); if (!s->s_cap_iterator) { - dout("__touch_cap %p cap %p mds%d\n", &cap->ci->vfs_inode, cap, + dout("__touch_cap %p cap %p mds%d\n", &cap->ci->netfs.inode, cap, s->s_mds); list_move_tail(&cap->session_caps, &s->s_caps); } else { dout("__touch_cap %p cap %p mds%d NOP, iterating over caps\n", - &cap->ci->vfs_inode, cap, s->s_mds); + &cap->ci->netfs.inode, cap, s->s_mds); } spin_unlock(&s->s_cap_lock); } @@ -867,7 +867,7 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch) if ((have & mask) == mask) { dout("__ceph_caps_issued_mask ino 0x%llx snap issued %s" - " (mask %s)\n", ceph_ino(&ci->vfs_inode), + " (mask %s)\n", ceph_ino(&ci->netfs.inode), ceph_cap_string(have), ceph_cap_string(mask)); return 1; @@ -879,7 +879,7 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch) continue; if ((cap->issued & mask) == mask) { dout("__ceph_caps_issued_mask ino 0x%llx cap %p issued %s" - " (mask %s)\n", ceph_ino(&ci->vfs_inode), cap, + " (mask %s)\n", ceph_ino(&ci->netfs.inode), cap, ceph_cap_string(cap->issued), ceph_cap_string(mask)); if (touch) @@ -891,7 +891,7 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch) have |= cap->issued; if ((have & mask) == mask) { dout("__ceph_caps_issued_mask ino 0x%llx combo issued %s" - " (mask %s)\n", ceph_ino(&ci->vfs_inode), + " (mask %s)\n", ceph_ino(&ci->netfs.inode), ceph_cap_string(cap->issued), ceph_cap_string(mask)); if (touch) { @@ -919,7 +919,7 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch) int __ceph_caps_issued_mask_metric(struct ceph_inode_info *ci, int mask, int touch) { - struct ceph_fs_client *fsc = 
ceph_sb_to_client(ci->vfs_inode.i_sb); + struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb); int r; r = __ceph_caps_issued_mask(ci, mask, touch); @@ -950,7 +950,7 @@ int __ceph_caps_revoking_other(struct ceph_inode_info *ci, int ceph_caps_revoking(struct ceph_inode_info *ci, int mask) { - struct inode *inode = &ci->vfs_inode; + struct inode *inode = &ci->netfs.inode; int ret; spin_lock(&ci->i_ceph_lock); @@ -969,8 +969,8 @@ int __ceph_caps_used(struct ceph_inode_info *ci) if (ci->i_rd_ref) used |= CEPH_CAP_FILE_RD; if (ci->i_rdcache_ref || - (S_ISREG(ci->vfs_inode.i_mode) && - ci->vfs_inode.i_data.nrpages)) + (S_ISREG(ci->netfs.inode.i_mode) && + ci->netfs.inode.i_data.nrpages)) used |= CEPH_CAP_FILE_CACHE; if (ci->i_wr_ref) used |= CEPH_CAP_FILE_WR; @@ -993,11 +993,11 @@ int __ceph_caps_file_wanted(struct ceph_inode_info *ci) const int WR_SHIFT = ffs(CEPH_FILE_MODE_WR); const int LAZY_SHIFT = ffs(CEPH_FILE_MODE_LAZY); struct ceph_mount_options *opt = - ceph_inode_to_client(&ci->vfs_inode)->mount_options; + ceph_inode_to_client(&ci->netfs.inode)->mount_options; unsigned long used_cutoff = jiffies - opt->caps_wanted_delay_max * HZ; unsigned long idle_cutoff = jiffies - opt->caps_wanted_delay_min * HZ; - if (S_ISDIR(ci->vfs_inode.i_mode)) { + if (S_ISDIR(ci->netfs.inode.i_mode)) { int want = 0; /* use used_cutoff here, to keep dir's wanted caps longer */ @@ -1050,7 +1050,7 @@ int __ceph_caps_file_wanted(struct ceph_inode_info *ci) int __ceph_caps_wanted(struct ceph_inode_info *ci) { int w = __ceph_caps_file_wanted(ci) | __ceph_caps_used(ci); - if (S_ISDIR(ci->vfs_inode.i_mode)) { + if (S_ISDIR(ci->netfs.inode.i_mode)) { /* we want EXCL if holding caps of dir ops */ if (w & CEPH_CAP_ANY_DIR_OPS) w |= CEPH_CAP_FILE_EXCL; @@ -1116,9 +1116,9 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release) lockdep_assert_held(&ci->i_ceph_lock); - dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode); + dout("__ceph_remove_cap %p from %p\n", cap, &ci->netfs.inode); - mdsc = ceph_inode_to_client(&ci->vfs_inode)->mdsc; + mdsc = ceph_inode_to_client(&ci->netfs.inode)->mdsc; /* remove from inode's cap rbtree, and clear auth cap */ rb_erase(&cap->ci_node, &ci->i_caps); @@ -1169,7 +1169,7 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release) * keep i_snap_realm. 
*/ if (ci->i_wr_ref == 0 && ci->i_snap_realm) - ceph_change_snap_realm(&ci->vfs_inode, NULL); + ceph_change_snap_realm(&ci->netfs.inode, NULL); __cap_delay_cancel(mdsc, ci); } @@ -1188,11 +1188,11 @@ void ceph_remove_cap(struct ceph_cap *cap, bool queue_release) lockdep_assert_held(&ci->i_ceph_lock); - fsc = ceph_inode_to_client(&ci->vfs_inode); + fsc = ceph_inode_to_client(&ci->netfs.inode); WARN_ON_ONCE(ci->i_auth_cap == cap && !list_empty(&ci->i_dirty_item) && !fsc->blocklisted && - !ceph_inode_is_shutdown(&ci->vfs_inode)); + !ceph_inode_is_shutdown(&ci->netfs.inode)); __ceph_remove_cap(cap, queue_release); } @@ -1343,7 +1343,7 @@ static void __prep_cap(struct cap_msg_args *arg, struct ceph_cap *cap, int flushing, u64 flush_tid, u64 oldest_flush_tid) { struct ceph_inode_info *ci = cap->ci; - struct inode *inode = &ci->vfs_inode; + struct inode *inode = &ci->netfs.inode; int held, revoking; lockdep_assert_held(&ci->i_ceph_lock); @@ -1440,7 +1440,7 @@ static void __prep_cap(struct cap_msg_args *arg, struct ceph_cap *cap, static void __send_cap(struct cap_msg_args *arg, struct ceph_inode_info *ci) { struct ceph_msg *msg; - struct inode *inode = &ci->vfs_inode; + struct inode *inode = &ci->netfs.inode; msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, CAP_MSG_SIZE, GFP_NOFS, false); if (!msg) { @@ -1528,7 +1528,7 @@ static void __ceph_flush_snaps(struct ceph_inode_info *ci, __releases(ci->i_ceph_lock) __acquires(ci->i_ceph_lock) { - struct inode *inode = &ci->vfs_inode; + struct inode *inode = &ci->netfs.inode; struct ceph_mds_client *mdsc = session->s_mdsc; struct ceph_cap_snap *capsnap; u64 oldest_flush_tid = 0; @@ -1622,7 +1622,7 @@ static void __ceph_flush_snaps(struct ceph_inode_info *ci, void ceph_flush_snaps(struct ceph_inode_info *ci, struct ceph_mds_session **psession) { - struct inode *inode = &ci->vfs_inode; + struct inode *inode = &ci->netfs.inode; struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; struct ceph_mds_session *session = NULL; int mds; @@ -1682,8 +1682,8 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask, struct ceph_cap_flush **pcf) { struct ceph_mds_client *mdsc = - ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc; - struct inode *inode = &ci->vfs_inode; + ceph_sb_to_client(ci->netfs.inode.i_sb)->mdsc; + struct inode *inode = &ci->netfs.inode; int was = ci->i_dirty_caps; int dirty = 0; @@ -1696,7 +1696,7 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask, return 0; } - dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->vfs_inode, + dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->netfs.inode, ceph_cap_string(mask), ceph_cap_string(was), ceph_cap_string(was | mask)); ci->i_dirty_caps |= mask; @@ -1712,7 +1712,7 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask, ci->i_snap_realm->cached_context); } dout(" inode %p now dirty snapc %p auth cap %p\n", - &ci->vfs_inode, ci->i_head_snapc, ci->i_auth_cap); + &ci->netfs.inode, ci->i_head_snapc, ci->i_auth_cap); BUG_ON(!list_empty(&ci->i_dirty_item)); spin_lock(&mdsc->cap_dirty_lock); list_add(&ci->i_dirty_item, &session->s_cap_dirty); @@ -1875,7 +1875,7 @@ static int try_nonblocking_invalidate(struct inode *inode) bool __ceph_should_report_size(struct ceph_inode_info *ci) { - loff_t size = i_size_read(&ci->vfs_inode); + loff_t size = i_size_read(&ci->netfs.inode); /* mds will adjust max size according to the reported size */ if (ci->i_flushing_caps & CEPH_CAP_FILE_WR) return false; @@ -1900,7 +1900,7 @@ bool __ceph_should_report_size(struct ceph_inode_info *ci) void 
ceph_check_caps(struct ceph_inode_info *ci, int flags, struct ceph_mds_session *session) { - struct inode *inode = &ci->vfs_inode; + struct inode *inode = &ci->netfs.inode; struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb); struct ceph_cap *cap; u64 flush_tid, oldest_flush_tid; @@ -2467,7 +2467,7 @@ static void __kick_flushing_caps(struct ceph_mds_client *mdsc, __releases(ci->i_ceph_lock) __acquires(ci->i_ceph_lock) { - struct inode *inode = &ci->vfs_inode; + struct inode *inode = &ci->netfs.inode; struct ceph_cap *cap; struct ceph_cap_flush *cf; int ret; @@ -2560,7 +2560,7 @@ void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc, cap = ci->i_auth_cap; if (!(cap && cap->session == session)) { pr_err("%p auth cap %p not mds%d ???\n", - &ci->vfs_inode, cap, session->s_mds); + &ci->netfs.inode, cap, session->s_mds); spin_unlock(&ci->i_ceph_lock); continue; } @@ -2610,7 +2610,7 @@ void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc, cap = ci->i_auth_cap; if (!(cap && cap->session == session)) { pr_err("%p auth cap %p not mds%d ???\n", - &ci->vfs_inode, cap, session->s_mds); + &ci->netfs.inode, cap, session->s_mds); spin_unlock(&ci->i_ceph_lock); continue; } @@ -2630,7 +2630,7 @@ void ceph_kick_flushing_inode_caps(struct ceph_mds_session *session, lockdep_assert_held(&ci->i_ceph_lock); - dout("%s %p flushing %s\n", __func__, &ci->vfs_inode, + dout("%s %p flushing %s\n", __func__, &ci->netfs.inode, ceph_cap_string(ci->i_flushing_caps)); if (!list_empty(&ci->i_cap_flush_list)) { @@ -2673,10 +2673,10 @@ void ceph_take_cap_refs(struct ceph_inode_info *ci, int got, } if (got & CEPH_CAP_FILE_BUFFER) { if (ci->i_wb_ref == 0) - ihold(&ci->vfs_inode); + ihold(&ci->netfs.inode); ci->i_wb_ref++; dout("%s %p wb %d -> %d (?)\n", __func__, - &ci->vfs_inode, ci->i_wb_ref-1, ci->i_wb_ref); + &ci->netfs.inode, ci->i_wb_ref-1, ci->i_wb_ref); } } @@ -3004,7 +3004,7 @@ int ceph_get_caps(struct file *filp, int need, int want, loff_t endoff, int *got return ret; } - if (S_ISREG(ci->vfs_inode.i_mode) && + if (S_ISREG(ci->netfs.inode.i_mode) && ci->i_inline_version != CEPH_INLINE_NONE && (_got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) && i_size_read(inode) > 0) { @@ -3094,7 +3094,7 @@ enum put_cap_refs_mode { static void __ceph_put_cap_refs(struct ceph_inode_info *ci, int had, enum put_cap_refs_mode mode) { - struct inode *inode = &ci->vfs_inode; + struct inode *inode = &ci->netfs.inode; int last = 0, put = 0, flushsnaps = 0, wake = 0; bool check_flushsnaps = false; @@ -3202,7 +3202,7 @@ void ceph_put_cap_refs_no_check_caps(struct ceph_inode_info *ci, int had) void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr, struct ceph_snap_context *snapc) { - struct inode *inode = &ci->vfs_inode; + struct inode *inode = &ci->netfs.inode; struct ceph_cap_snap *capsnap = NULL, *iter; int put = 0; bool last = false; @@ -3698,7 +3698,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid, session->s_mds, &list_first_entry(&session->s_cap_flushing, struct ceph_inode_info, - i_flushing_item)->vfs_inode); + i_flushing_item)->netfs.inode); } } mdsc->num_cap_flushing--; @@ -4345,7 +4345,7 @@ unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc) break; list_del_init(&ci->i_cap_delay_list); - inode = igrab(&ci->vfs_inode); + inode = igrab(&ci->netfs.inode); if (inode) { spin_unlock(&mdsc->cap_delay_lock); dout("check_delayed_caps on %p\n", inode); @@ -4373,7 +4373,7 @@ static void flush_dirty_session_caps(struct ceph_mds_session *s) while 
(!list_empty(&s->s_cap_dirty)) { ci = list_first_entry(&s->s_cap_dirty, struct ceph_inode_info, i_dirty_item); - inode = &ci->vfs_inode; + inode = &ci->netfs.inode; ihold(inode); dout("flush_dirty_caps %llx.%llx\n", ceph_vinop(inode)); spin_unlock(&mdsc->cap_dirty_lock); @@ -4407,7 +4407,7 @@ void __ceph_touch_fmode(struct ceph_inode_info *ci, void ceph_get_fmode(struct ceph_inode_info *ci, int fmode, int count) { - struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(ci->vfs_inode.i_sb); + struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(ci->netfs.inode.i_sb); int bits = (fmode << 1) | 1; bool already_opened = false; int i; @@ -4441,7 +4441,7 @@ void ceph_get_fmode(struct ceph_inode_info *ci, int fmode, int count) */ void ceph_put_fmode(struct ceph_inode_info *ci, int fmode, int count) { - struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(ci->vfs_inode.i_sb); + struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(ci->netfs.inode.i_sb); int bits = (fmode << 1) | 1; bool is_closed = true; int i; @@ -4656,7 +4656,7 @@ int ceph_purge_inode_cap(struct inode *inode, struct ceph_cap *cap, bool *invali lockdep_assert_held(&ci->i_ceph_lock); dout("removing cap %p, ci is %p, inode is %p\n", - cap, ci, &ci->vfs_inode); + cap, ci, &ci->netfs.inode); is_auth = (cap == ci->i_auth_cap); __ceph_remove_cap(cap, false); diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 8c8226c0feac..da59e836a06e 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -205,7 +205,7 @@ static int ceph_init_file_info(struct inode *inode, struct file *file, { struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_mount_options *opt = - ceph_inode_to_client(&ci->vfs_inode)->mount_options; + ceph_inode_to_client(&ci->netfs.inode)->mount_options; struct ceph_file_info *fi; int ret; diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index b7e9cac3aeef..56c53ab3618e 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -176,7 +176,7 @@ static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci, rb_insert_color(&frag->node, &ci->i_fragtree); dout("get_or_create_frag added %llx.%llx frag %x\n", - ceph_vinop(&ci->vfs_inode), f); + ceph_vinop(&ci->netfs.inode), f); return frag; } @@ -457,10 +457,10 @@ struct inode *ceph_alloc_inode(struct super_block *sb) if (!ci) return NULL; - dout("alloc_inode %p\n", &ci->vfs_inode); + dout("alloc_inode %p\n", &ci->netfs.inode); /* Set parameters for the netfs library */ - netfs_i_context_init(&ci->vfs_inode, &ceph_netfs_ops); + netfs_inode_init(&ci->netfs, &ceph_netfs_ops); spin_lock_init(&ci->i_ceph_lock); @@ -547,7 +547,7 @@ struct inode *ceph_alloc_inode(struct super_block *sb) INIT_WORK(&ci->i_work, ceph_inode_work); ci->i_work_mask = 0; memset(&ci->i_btime, '\0', sizeof(ci->i_btime)); - return &ci->vfs_inode; + return &ci->netfs.inode; } void ceph_free_inode(struct inode *inode) @@ -1978,7 +1978,7 @@ static void ceph_inode_work(struct work_struct *work) { struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, i_work); - struct inode *inode = &ci->vfs_inode; + struct inode *inode = &ci->netfs.inode; if (test_and_clear_bit(CEPH_I_WORK_WRITEBACK, &ci->i_work_mask)) { dout("writeback %p\n", inode); diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index f5d110d90b77..33f517d549ce 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -1564,7 +1564,7 @@ int ceph_iterate_session_caps(struct ceph_mds_session *session, p = session->s_caps.next; while (p != &session->s_caps) { cap = list_entry(p, struct ceph_cap, session_caps); - inode = igrab(&cap->ci->vfs_inode); + 
inode = igrab(&cap->ci->netfs.inode); if (!inode) { p = p->next; continue; @@ -1622,7 +1622,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap, int iputs; dout("removing cap %p, ci is %p, inode is %p\n", - cap, ci, &ci->vfs_inode); + cap, ci, &ci->netfs.inode); spin_lock(&ci->i_ceph_lock); iputs = ceph_purge_inode_cap(inode, cap, &invalidate); spin_unlock(&ci->i_ceph_lock); diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index 322ee5add942..864cdaa0d2bd 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c @@ -521,7 +521,7 @@ static bool has_new_snaps(struct ceph_snap_context *o, static void ceph_queue_cap_snap(struct ceph_inode_info *ci, struct ceph_cap_snap **pcapsnap) { - struct inode *inode = &ci->vfs_inode; + struct inode *inode = &ci->netfs.inode; struct ceph_snap_context *old_snapc, *new_snapc; struct ceph_cap_snap *capsnap = *pcapsnap; struct ceph_buffer *old_blob = NULL; @@ -652,7 +652,7 @@ update_snapc: int __ceph_finish_cap_snap(struct ceph_inode_info *ci, struct ceph_cap_snap *capsnap) { - struct inode *inode = &ci->vfs_inode; + struct inode *inode = &ci->netfs.inode; struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb); BUG_ON(capsnap->writing); @@ -712,7 +712,7 @@ static void queue_realm_cap_snaps(struct ceph_snap_realm *realm) spin_lock(&realm->inodes_with_caps_lock); list_for_each_entry(ci, &realm->inodes_with_caps, i_snap_realm_item) { - struct inode *inode = igrab(&ci->vfs_inode); + struct inode *inode = igrab(&ci->netfs.inode); if (!inode) continue; spin_unlock(&realm->inodes_with_caps_lock); @@ -904,7 +904,7 @@ static void flush_snaps(struct ceph_mds_client *mdsc) while (!list_empty(&mdsc->snap_flush_list)) { ci = list_first_entry(&mdsc->snap_flush_list, struct ceph_inode_info, i_snap_flush_item); - inode = &ci->vfs_inode; + inode = &ci->netfs.inode; ihold(inode); spin_unlock(&mdsc->snap_flush_lock); ceph_flush_snaps(ci, &session); diff --git a/fs/ceph/super.c b/fs/ceph/super.c index b73b4f75462c..40140805bdcf 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -876,7 +876,7 @@ mempool_t *ceph_wb_pagevec_pool; static void ceph_inode_init_once(void *foo) { struct ceph_inode_info *ci = foo; - inode_init_once(&ci->vfs_inode); + inode_init_once(&ci->netfs.inode); } static int __init init_caches(void) diff --git a/fs/ceph/super.h b/fs/ceph/super.h index dd7dac0f984a..f59dac66955b 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -316,11 +316,7 @@ struct ceph_inode_xattrs_info { * Ceph inode. 
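* Embeds the netfs library context, which in turn wraps the VFS inode.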
*/ struct ceph_inode_info { - struct { - /* These must be contiguous */ - struct inode vfs_inode; - struct netfs_i_context netfs_ctx; /* Netfslib context */ - }; + struct netfs_inode netfs; /* Netfslib context and vfs inode */ struct ceph_vino i_vino; /* ceph ino + snap */ spinlock_t i_ceph_lock; @@ -436,7 +432,7 @@ struct ceph_inode_info { static inline struct ceph_inode_info * ceph_inode(const struct inode *inode) { - return container_of(inode, struct ceph_inode_info, vfs_inode); + return container_of(inode, struct ceph_inode_info, netfs.inode); } static inline struct ceph_fs_client * @@ -1316,7 +1312,7 @@ static inline void __ceph_update_quota(struct ceph_inode_info *ci, has_quota = __ceph_has_quota(ci, QUOTA_GET_ANY); if (had_quota != has_quota) - ceph_adjust_quota_realms_count(&ci->vfs_inode, has_quota); + ceph_adjust_quota_realms_count(&ci->netfs.inode, has_quota); } extern void ceph_handle_quota(struct ceph_mds_client *mdsc, diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c index 8c2dc2c762a4..f141f5246163 100644 --- a/fs/ceph/xattr.c +++ b/fs/ceph/xattr.c @@ -57,7 +57,7 @@ static bool ceph_vxattrcb_layout_exists(struct ceph_inode_info *ci) static ssize_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val, size_t size) { - struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb); + struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb); struct ceph_osd_client *osdc = &fsc->client->osdc; struct ceph_string *pool_ns; s64 pool = ci->i_layout.pool_id; @@ -69,7 +69,7 @@ static ssize_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val, pool_ns = ceph_try_get_string(ci->i_layout.pool_ns); - dout("ceph_vxattrcb_layout %p\n", &ci->vfs_inode); + dout("ceph_vxattrcb_layout %p\n", &ci->netfs.inode); down_read(&osdc->lock); pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool); if (pool_name) { @@ -161,7 +161,7 @@ static ssize_t ceph_vxattrcb_layout_pool(struct ceph_inode_info *ci, char *val, size_t size) { ssize_t ret; - struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb); + struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb); struct ceph_osd_client *osdc = &fsc->client->osdc; s64 pool = ci->i_layout.pool_id; const char *pool_name; @@ -313,7 +313,7 @@ static ssize_t ceph_vxattrcb_snap_btime(struct ceph_inode_info *ci, char *val, static ssize_t ceph_vxattrcb_cluster_fsid(struct ceph_inode_info *ci, char *val, size_t size) { - struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb); + struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb); return ceph_fmt_xattr(val, size, "%pU", &fsc->client->fsid); } @@ -321,7 +321,7 @@ static ssize_t ceph_vxattrcb_cluster_fsid(struct ceph_inode_info *ci, static ssize_t ceph_vxattrcb_client_id(struct ceph_inode_info *ci, char *val, size_t size) { - struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb); + struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb); return ceph_fmt_xattr(val, size, "client%lld", ceph_client_gid(fsc->client)); @@ -629,7 +629,7 @@ static int __set_xattr(struct ceph_inode_info *ci, } dout("__set_xattr_val added %llx.%llx xattr %p %.*s=%.*s\n", - ceph_vinop(&ci->vfs_inode), xattr, name_len, name, val_len, val); + ceph_vinop(&ci->netfs.inode), xattr, name_len, name, val_len, val); return 0; } @@ -871,7 +871,7 @@ struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci) struct ceph_buffer *old_blob = NULL; void *dest; - dout("__build_xattrs_blob %p\n", &ci->vfs_inode); + dout("__build_xattrs_blob %p\n", 
&ci->netfs.inode); if (ci->i_xattrs.dirty) { int need = __get_required_blob_size(ci, 0, 0); diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c index 1dd995efd5b8..2cfbac8bb965 100644 --- a/fs/cifs/cifs_debug.c +++ b/fs/cifs/cifs_debug.c @@ -162,6 +162,8 @@ cifs_dump_iface(struct seq_file *m, struct cifs_server_iface *iface) seq_printf(m, "\t\tIPv4: %pI4\n", &ipv4->sin_addr); else if (iface->sockaddr.ss_family == AF_INET6) seq_printf(m, "\t\tIPv6: %pI6\n", &ipv6->sin6_addr); + if (!iface->is_active) + seq_puts(m, "\t\t[for-cleanup]\n"); } static int cifs_debug_files_proc_show(struct seq_file *m, void *v) @@ -221,6 +223,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v) struct TCP_Server_Info *server; struct cifs_ses *ses; struct cifs_tcon *tcon; + struct cifs_server_iface *iface; int c, i, j; seq_puts(m, @@ -456,11 +459,10 @@ skip_rdma: if (ses->iface_count) seq_printf(m, "\n\n\tServer interfaces: %zu", ses->iface_count); - for (j = 0; j < ses->iface_count; j++) { - struct cifs_server_iface *iface; - - iface = &ses->iface_list[j]; - seq_printf(m, "\n\t%d)", j+1); + j = 0; + list_for_each_entry(iface, &ses->iface_list, + iface_head) { + seq_printf(m, "\n\t%d)", ++j); cifs_dump_iface(m, iface); if (is_ses_using_iface(ses, iface)) seq_puts(m, "\t\t[CONNECTED]\n"); diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 12c872800326..8f2e003e0590 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -377,7 +377,7 @@ cifs_alloc_inode(struct super_block *sb) cifs_inode->flags = 0; spin_lock_init(&cifs_inode->writers_lock); cifs_inode->writers = 0; - cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */ + cifs_inode->netfs.inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */ cifs_inode->server_eof = 0; cifs_inode->uniqueid = 0; cifs_inode->createtime = 0; @@ -389,12 +389,12 @@ cifs_alloc_inode(struct super_block *sb) * Can not set i_flags here - they get immediately overwritten to zero * by the VFS. 
*/ - /* cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME; */ + /* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */ INIT_LIST_HEAD(&cifs_inode->openFileList); INIT_LIST_HEAD(&cifs_inode->llist); INIT_LIST_HEAD(&cifs_inode->deferred_closes); spin_lock_init(&cifs_inode->deferred_lock); - return &cifs_inode->vfs_inode; + return &cifs_inode->netfs.inode; } static void @@ -1086,7 +1086,7 @@ struct file_system_type cifs_fs_type = { }; MODULE_ALIAS_FS("cifs"); -static struct file_system_type smb3_fs_type = { +struct file_system_type smb3_fs_type = { .owner = THIS_MODULE, .name = "smb3", .init_fs_context = smb3_init_fs_context, @@ -1418,7 +1418,7 @@ cifs_init_once(void *inode) { struct cifsInodeInfo *cifsi = inode; - inode_init_once(&cifsi->vfs_inode); + inode_init_once(&cifsi->netfs.inode); init_rwsem(&cifsi->lock_sem); } diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index dd7e070ca243..b17be47a8e59 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -38,7 +38,7 @@ static inline unsigned long cifs_get_time(struct dentry *dentry) return (unsigned long) dentry->d_fsdata; } -extern struct file_system_type cifs_fs_type; +extern struct file_system_type cifs_fs_type, smb3_fs_type; extern const struct address_space_operations cifs_addr_ops; extern const struct address_space_operations cifs_addr_ops_smallbuf; diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index f873379066c7..a643c84ff1e9 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -80,6 +80,9 @@ #define SMB_DNS_RESOLVE_INTERVAL_MIN 120 #define SMB_DNS_RESOLVE_INTERVAL_DEFAULT 600 +/* smb multichannel query server interfaces interval in seconds */ +#define SMB_INTERFACE_POLL_INTERVAL 600 + /* maximum number of PDUs in one compound */ #define MAX_COMPOUND 5 @@ -933,15 +936,67 @@ static inline void cifs_set_net_ns(struct TCP_Server_Info *srv, struct net *net) #endif struct cifs_server_iface { + struct list_head iface_head; + struct kref refcount; size_t speed; unsigned int rdma_capable : 1; unsigned int rss_capable : 1; + unsigned int is_active : 1; /* unset if non existent */ struct sockaddr_storage sockaddr; }; +/* release iface when last ref is dropped */ +static inline void +release_iface(struct kref *ref) +{ + struct cifs_server_iface *iface = container_of(ref, + struct cifs_server_iface, + refcount); + list_del_init(&iface->iface_head); + kfree(iface); +} + +/* + * compare two interfaces a and b + * return 0 if everything matches. + * return 1 if a has higher link speed, or rdma capable, or rss capable + * return -1 otherwise. 
+ */ +static inline int +iface_cmp(struct cifs_server_iface *a, struct cifs_server_iface *b) +{ + int cmp_ret = 0; + + WARN_ON(!a || !b); + if (a->speed == b->speed) { + if (a->rdma_capable == b->rdma_capable) { + if (a->rss_capable == b->rss_capable) { + cmp_ret = memcmp(&a->sockaddr, &b->sockaddr, + sizeof(a->sockaddr)); + if (!cmp_ret) + return 0; + else if (cmp_ret > 0) + return 1; + else + return -1; + } else if (a->rss_capable > b->rss_capable) + return 1; + else + return -1; + } else if (a->rdma_capable > b->rdma_capable) + return 1; + else + return -1; + } else if (a->speed > b->speed) + return 1; + else + return -1; +} + struct cifs_chan { unsigned int in_reconnect : 1; /* if session setup in progress for this channel */ struct TCP_Server_Info *server; + struct cifs_server_iface *iface; /* interface in use */ __u8 signkey[SMB3_SIGN_KEY_SIZE]; }; @@ -993,7 +1048,7 @@ struct cifs_ses { */ spinlock_t iface_lock; /* ========= begin: protected by iface_lock ======== */ - struct cifs_server_iface *iface_list; + struct list_head iface_list; size_t iface_count; unsigned long iface_last_update; /* jiffies */ /* ========= end: protected by iface_lock ======== */ @@ -1203,6 +1258,7 @@ struct cifs_tcon { #ifdef CONFIG_CIFS_DFS_UPCALL struct list_head ulist; /* cache update list */ #endif + struct delayed_work query_interfaces; /* query interfaces workqueue job */ }; /* @@ -1479,20 +1535,16 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file); #define CIFS_CACHE_RW_FLG (CIFS_CACHE_READ_FLG | CIFS_CACHE_WRITE_FLG) #define CIFS_CACHE_RHW_FLG (CIFS_CACHE_RW_FLG | CIFS_CACHE_HANDLE_FLG) -#define CIFS_CACHE_READ(cinode) ((cinode->oplock & CIFS_CACHE_READ_FLG) || (CIFS_SB(cinode->vfs_inode.i_sb)->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)) +#define CIFS_CACHE_READ(cinode) ((cinode->oplock & CIFS_CACHE_READ_FLG) || (CIFS_SB(cinode->netfs.inode.i_sb)->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)) #define CIFS_CACHE_HANDLE(cinode) (cinode->oplock & CIFS_CACHE_HANDLE_FLG) -#define CIFS_CACHE_WRITE(cinode) ((cinode->oplock & CIFS_CACHE_WRITE_FLG) || (CIFS_SB(cinode->vfs_inode.i_sb)->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)) +#define CIFS_CACHE_WRITE(cinode) ((cinode->oplock & CIFS_CACHE_WRITE_FLG) || (CIFS_SB(cinode->netfs.inode.i_sb)->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)) /* * One of these for each file inode */ struct cifsInodeInfo { - struct { - /* These must be contiguous */ - struct inode vfs_inode; /* the VFS's inode record */ - struct netfs_i_context netfs_ctx; /* Netfslib context */ - }; + struct netfs_inode netfs; /* Netfslib context and vfs inode */ bool can_cache_brlcks; struct list_head llist; /* locks held by this inode */ /* @@ -1531,7 +1583,7 @@ struct cifsInodeInfo { static inline struct cifsInodeInfo * CIFS_I(struct inode *inode) { - return container_of(inode, struct cifsInodeInfo, vfs_inode); + return container_of(inode, struct cifsInodeInfo, netfs.inode); } static inline struct cifs_sb_info * diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 3b7366ec03c7..d59aebefa71c 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -636,6 +636,13 @@ cifs_chan_clear_need_reconnect(struct cifs_ses *ses, bool cifs_chan_needs_reconnect(struct cifs_ses *ses, struct TCP_Server_Info *server); +bool +cifs_chan_is_iface_active(struct cifs_ses *ses, + struct TCP_Server_Info *server); +int +cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server); +int +SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon); void extract_unc_hostname(const char *unc, const char
**h, size_t *len); int copy_path_name(char *dst, const char *src); diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index d46702f5a663..fa29c9aae24b 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -97,6 +97,10 @@ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server) if (!server->hostname) return -EINVAL; + /* if server hostname isn't populated, there's nothing to do here */ + if (server->hostname[0] == '\0') + return 0; + len = strlen(server->hostname) + 3; unc = kmalloc(len, GFP_KERNEL); @@ -141,6 +145,25 @@ requeue_resolve: return rc; } +static void smb2_query_server_interfaces(struct work_struct *work) +{ + int rc; + struct cifs_tcon *tcon = container_of(work, + struct cifs_tcon, + query_interfaces.work); + + /* + * query server network interfaces, in case they change + */ + rc = SMB3_request_interfaces(0, tcon); + if (rc) { + cifs_dbg(FYI, "%s: failed to query server interfaces: %d\n", + __func__, rc); + } + + queue_delayed_work(cifsiod_wq, &tcon->query_interfaces, + (SMB_INTERFACE_POLL_INTERVAL * HZ)); +} static void cifs_resolve_server(struct work_struct *work) { @@ -213,7 +236,7 @@ cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server, bool mark_smb_session) { struct TCP_Server_Info *pserver; - struct cifs_ses *ses; + struct cifs_ses *ses, *nses; struct cifs_tcon *tcon; /* @@ -227,7 +250,20 @@ cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server, spin_lock(&cifs_tcp_ses_lock); - list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) { + list_for_each_entry_safe(ses, nses, &pserver->smb_ses_list, smb_ses_list) { + /* check if iface is still active */ + if (!cifs_chan_is_iface_active(ses, server)) { + /* + * HACK: drop the lock before calling + * cifs_chan_update_iface to avoid deadlock + */ + ses->ses_count++; + spin_unlock(&cifs_tcp_ses_lock); + cifs_chan_update_iface(ses, server); + spin_lock(&cifs_tcp_ses_lock); + ses->ses_count--; + } + spin_lock(&ses->chan_lock); if (!mark_smb_session && cifs_chan_needs_reconnect(ses, server)) goto next_session; @@ -1890,9 +1926,11 @@ void cifs_put_smb_ses(struct cifs_ses *ses) int i; for (i = 1; i < chan_count; i++) { - spin_unlock(&ses->chan_lock); + if (ses->chans[i].iface) { + kref_put(&ses->chans[i].iface->refcount, release_iface); + ses->chans[i].iface = NULL; + } cifs_put_tcp_session(ses->chans[i].server, 0); - spin_lock(&ses->chan_lock); ses->chans[i].server = NULL; } } @@ -2266,6 +2304,9 @@ cifs_put_tcon(struct cifs_tcon *tcon) list_del_init(&tcon->tcon_list); spin_unlock(&cifs_tcp_ses_lock); + /* cancel polling of interfaces */ + cancel_delayed_work_sync(&tcon->query_interfaces); + if (tcon->use_witness) { int rc; @@ -2503,6 +2544,12 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx) tcon->local_lease = ctx->local_lease; INIT_LIST_HEAD(&tcon->pending_opens); + /* schedule query interfaces poll */ + INIT_DELAYED_WORK(&tcon->query_interfaces, + smb2_query_server_interfaces); + queue_delayed_work(cifsiod_wq, &tcon->query_interfaces, + (SMB_INTERFACE_POLL_INTERVAL * HZ)); + spin_lock(&cifs_tcp_ses_lock); list_add(&tcon->tcon_list, &ses->tcon_list); spin_unlock(&cifs_tcp_ses_lock); @@ -3978,10 +4025,16 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses, struct nls_table *nls_info) { int rc = -ENOSYS; + struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr; + struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr; bool is_binding = false; - spin_lock(&cifs_tcp_ses_lock); + if (server->dstaddr.ss_family == 
AF_INET6) + scnprintf(ses->ip_addr, sizeof(ses->ip_addr), "%pI6", &addr6->sin6_addr); + else + scnprintf(ses->ip_addr, sizeof(ses->ip_addr), "%pI4", &addr->sin_addr); + if (ses->ses_status != SES_GOOD && ses->ses_status != SES_NEW && ses->ses_status != SES_NEED_RECON) { diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 1618e0537d58..e64cda7a7610 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -2004,7 +2004,7 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only) { struct cifsFileInfo *open_file = NULL; - struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb); + struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb); /* only filter by fsuid on multiuser mounts */ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)) @@ -2060,7 +2060,7 @@ cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags, return rc; } - cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb); + cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb); /* only filter by fsuid on multiuser mounts */ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)) @@ -4669,14 +4669,14 @@ bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file) /* This inode is open for write at least once */ struct cifs_sb_info *cifs_sb; - cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb); + cifs_sb = CIFS_SB(cifsInode->netfs.inode.i_sb); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) { /* since no page cache to corrupt on directio we can change size safely */ return true; } - if (i_size_read(&cifsInode->vfs_inode) < end_of_file) + if (i_size_read(&cifsInode->netfs.inode) < end_of_file) return true; return false; diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c index a638b29e9062..23ef56f55ce5 100644 --- a/fs/cifs/fscache.c +++ b/fs/cifs/fscache.c @@ -101,13 +101,13 @@ void cifs_fscache_get_inode_cookie(struct inode *inode) struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); - cifs_fscache_fill_coherency(&cifsi->vfs_inode, &cd); + cifs_fscache_fill_coherency(&cifsi->netfs.inode, &cd); - cifsi->netfs_ctx.cache = + cifsi->netfs.cache = fscache_acquire_cookie(tcon->fscache, 0, &cifsi->uniqueid, sizeof(cifsi->uniqueid), &cd, sizeof(cd), - i_size_read(&cifsi->vfs_inode)); + i_size_read(&cifsi->netfs.inode)); } void cifs_fscache_unuse_inode_cookie(struct inode *inode, bool update) @@ -131,7 +131,7 @@ void cifs_fscache_release_inode_cookie(struct inode *inode) if (cookie) { cifs_dbg(FYI, "%s: (0x%p)\n", __func__, cookie); fscache_relinquish_cookie(cookie, false); - cifsi->netfs_ctx.cache = NULL; + cifsi->netfs.cache = NULL; } } diff --git a/fs/cifs/fscache.h b/fs/cifs/fscache.h index 52355c0912ae..aa3b941a5555 100644 --- a/fs/cifs/fscache.h +++ b/fs/cifs/fscache.h @@ -52,16 +52,16 @@ void cifs_fscache_fill_coherency(struct inode *inode, struct cifsInodeInfo *cifsi = CIFS_I(inode); memset(cd, 0, sizeof(*cd)); - cd->last_write_time_sec = cpu_to_le64(cifsi->vfs_inode.i_mtime.tv_sec); - cd->last_write_time_nsec = cpu_to_le32(cifsi->vfs_inode.i_mtime.tv_nsec); - cd->last_change_time_sec = cpu_to_le64(cifsi->vfs_inode.i_ctime.tv_sec); - cd->last_change_time_nsec = cpu_to_le32(cifsi->vfs_inode.i_ctime.tv_nsec); + cd->last_write_time_sec = cpu_to_le64(cifsi->netfs.inode.i_mtime.tv_sec); + cd->last_write_time_nsec = cpu_to_le32(cifsi->netfs.inode.i_mtime.tv_nsec); + cd->last_change_time_sec = cpu_to_le64(cifsi->netfs.inode.i_ctime.tv_sec); + cd->last_change_time_nsec = cpu_to_le32(cifsi->netfs.inode.i_ctime.tv_nsec); } 
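The recurring vfs_inode to netfs.inode churn in the ceph and cifs hunks above all follows from one structural change: the filesystem's per-inode info no longer carries a bare struct inode next to a netfs context, but embeds a single struct netfs_inode whose first member is the VFS inode, so every container_of() now goes through the netfs.inode member. Below is a minimal, self-contained user-space sketch of that pattern; my_inode_info, MY_I and the stub struct definitions are illustrative stand-ins, not the kernel's actual types.

#include <stddef.h>
#include <stdio.h>

/* stand-ins for the kernel types, for illustration only */
struct inode { unsigned long i_ino; };
struct netfs_inode { struct inode inode; void *cache; };

struct my_inode_info {
	struct netfs_inode netfs;	/* netfs context and VFS inode, must come first */
	int private_flags;		/* filesystem-private state */
};

#define my_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* same shape as ceph_inode()/CIFS_I() after the conversion */
static struct my_inode_info *MY_I(struct inode *inode)
{
	return my_container_of(inode, struct my_inode_info, netfs.inode);
}

int main(void)
{
	struct my_inode_info info = { .netfs.inode.i_ino = 42 };
	struct inode *vfs = &info.netfs.inode;

	/* round-trip: VFS inode pointer back to the containing info */
	printf("ino=%lu same=%d\n", vfs->i_ino, MY_I(vfs) == &info);
	return 0;
}

Because the embedding fixes the layout in one place, callers such as igrab(&ci->netfs.inode) and inode_init_once(&ci->netfs.inode) are purely mechanical renames of the old &ci->vfs_inode accesses.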
static inline struct fscache_cookie *cifs_inode_cookie(struct inode *inode) { - return netfs_i_cookie(inode); + return netfs_i_cookie(&CIFS_I(inode)->netfs); } static inline void cifs_invalidate_cache(struct inode *inode, unsigned int flags) diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 2f9e7d2f81b6..81da81e18553 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -115,7 +115,7 @@ cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr) __func__, cifs_i->uniqueid); set_bit(CIFS_INO_INVALID_MAPPING, &cifs_i->flags); /* Invalidate fscache cookie */ - cifs_fscache_fill_coherency(&cifs_i->vfs_inode, &cd); + cifs_fscache_fill_coherency(&cifs_i->netfs.inode, &cd); fscache_invalidate(cifs_inode_cookie(inode), &cd, i_size_read(inode), 0); } @@ -2499,7 +2499,7 @@ int cifs_fiemap(struct inode *inode, struct fiemap_extent_info *fei, u64 start, u64 len) { struct cifsInodeInfo *cifs_i = CIFS_I(inode); - struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_i->vfs_inode.i_sb); + struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_i->netfs.inode.i_sb); struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); struct TCP_Server_Info *server = tcon->ses->server; struct cifsFileInfo *cfile; diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index 35962a1a23b9..0e84e6fcf8ab 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -75,6 +75,7 @@ sesInfoAlloc(void) INIT_LIST_HEAD(&ret_buf->tcon_list); mutex_init(&ret_buf->session_mutex); spin_lock_init(&ret_buf->iface_lock); + INIT_LIST_HEAD(&ret_buf->iface_list); spin_lock_init(&ret_buf->chan_lock); } return ret_buf; @@ -83,6 +84,8 @@ sesInfoAlloc(void) void sesInfoFree(struct cifs_ses *buf_to_free) { + struct cifs_server_iface *iface = NULL, *niface = NULL; + if (buf_to_free == NULL) { cifs_dbg(FYI, "Null buffer passed to sesInfoFree\n"); return; @@ -96,7 +99,11 @@ sesInfoFree(struct cifs_ses *buf_to_free) kfree(buf_to_free->user_name); kfree(buf_to_free->domainName); kfree_sensitive(buf_to_free->auth_key.response); - kfree(buf_to_free->iface_list); + spin_lock(&buf_to_free->iface_lock); + list_for_each_entry_safe(iface, niface, &buf_to_free->iface_list, + iface_head) + kref_put(&iface->refcount, release_iface); + spin_unlock(&buf_to_free->iface_lock); kfree_sensitive(buf_to_free); } @@ -537,11 +544,11 @@ void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock) if (oplock == OPLOCK_EXCLUSIVE) { cinode->oplock = CIFS_CACHE_WRITE_FLG | CIFS_CACHE_READ_FLG; cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n", - &cinode->vfs_inode); + &cinode->netfs.inode); } else if (oplock == OPLOCK_READ) { cinode->oplock = CIFS_CACHE_READ_FLG; cifs_dbg(FYI, "Level II Oplock granted on inode %p\n", - &cinode->vfs_inode); + &cinode->netfs.inode); } else cinode->oplock = 0; } @@ -1211,18 +1218,23 @@ static struct super_block *__cifs_get_super(void (*f)(struct super_block *, void .data = data, .sb = NULL, }; + struct file_system_type **fs_type = (struct file_system_type *[]) { + &cifs_fs_type, &smb3_fs_type, NULL, + }; - iterate_supers_type(&cifs_fs_type, f, &sd); - - if (!sd.sb) - return ERR_PTR(-EINVAL); - /* - * Grab an active reference in order to prevent automounts (DFS links) - * of expiring and then freeing up our cifs superblock pointer while - * we're doing failover. 
- */ - cifs_sb_active(sd.sb); - return sd.sb; + for (; *fs_type; fs_type++) { + iterate_supers_type(*fs_type, f, &sd); + if (sd.sb) { + /* + * Grab an active reference in order to prevent automounts (DFS links) + * of expiring and then freeing up our cifs superblock pointer while + * we're doing failover. + */ + cifs_sb_active(sd.sb); + return sd.sb; + } + } + return ERR_PTR(-EINVAL); } static void __cifs_put_super(struct super_block *sb) diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 3b7915af1f62..b85718f32b53 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -58,7 +58,7 @@ bool is_ses_using_iface(struct cifs_ses *ses, struct cifs_server_iface *iface) spin_lock(&ses->chan_lock); for (i = 0; i < ses->chan_count; i++) { - if (is_server_using_iface(ses->chans[i].server, iface)) { + if (ses->chans[i].iface == iface) { spin_unlock(&ses->chan_lock); return true; } @@ -81,6 +81,9 @@ cifs_ses_get_chan_index(struct cifs_ses *ses, } /* If we didn't find the channel, it is likely a bug */ + if (server) + cifs_dbg(VFS, "unable to get chan index for server: 0x%llx", + server->conn_id); WARN_ON(1); return 0; } @@ -143,16 +146,24 @@ cifs_chan_needs_reconnect(struct cifs_ses *ses, return CIFS_CHAN_NEEDS_RECONNECT(ses, chan_index); } +bool +cifs_chan_is_iface_active(struct cifs_ses *ses, + struct TCP_Server_Info *server) +{ + unsigned int chan_index = cifs_ses_get_chan_index(ses, server); + + return ses->chans[chan_index].iface && + ses->chans[chan_index].iface->is_active; +} + /* returns number of channels added */ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses) { int old_chan_count, new_chan_count; int left; - int i = 0; int rc = 0; int tries = 0; - struct cifs_server_iface *ifaces = NULL; - size_t iface_count; + struct cifs_server_iface *iface = NULL, *niface = NULL; spin_lock(&ses->chan_lock); @@ -182,32 +193,16 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses) spin_unlock(&ses->chan_lock); /* - * Make a copy of the iface list at the time and use that - * instead so as to not hold the iface spinlock for opening - * channels - */ - spin_lock(&ses->iface_lock); - iface_count = ses->iface_count; - if (iface_count <= 0) { - spin_unlock(&ses->iface_lock); - cifs_dbg(VFS, "no iface list available to open channels\n"); - return 0; - } - ifaces = kmemdup(ses->iface_list, iface_count*sizeof(*ifaces), - GFP_ATOMIC); - if (!ifaces) { - spin_unlock(&ses->iface_lock); - return 0; - } - spin_unlock(&ses->iface_lock); - - /* * Keep connecting to same, fastest, iface for all channels as * long as its RSS. Try next fastest one if not RSS or channel * creation fails. 
*/ + spin_lock(&ses->iface_lock); + iface = list_first_entry(&ses->iface_list, struct cifs_server_iface, + iface_head); + spin_unlock(&ses->iface_lock); + while (left > 0) { - struct cifs_server_iface *iface; tries++; if (tries > 3*ses->chan_max) { @@ -216,31 +211,128 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses) break; } - iface = &ifaces[i]; - if (is_ses_using_iface(ses, iface) && !iface->rss_capable) { - i = (i+1) % iface_count; - continue; + spin_lock(&ses->iface_lock); + if (!ses->iface_count) { + spin_unlock(&ses->iface_lock); + break; } - rc = cifs_ses_add_channel(cifs_sb, ses, iface); - if (rc) { - cifs_dbg(FYI, "failed to open extra channel on iface#%d rc=%d\n", - i, rc); - i = (i+1) % iface_count; - continue; + list_for_each_entry_safe_from(iface, niface, &ses->iface_list, + iface_head) { + /* skip ifaces that are unusable */ + if (!iface->is_active || + (is_ses_using_iface(ses, iface) && + !iface->rss_capable)) { + continue; + } + + /* take ref before unlock */ + kref_get(&iface->refcount); + + spin_unlock(&ses->iface_lock); + rc = cifs_ses_add_channel(cifs_sb, ses, iface); + spin_lock(&ses->iface_lock); + + if (rc) { + cifs_dbg(VFS, "failed to open extra channel on iface:%pIS rc=%d\n", + &iface->sockaddr, + rc); + kref_put(&iface->refcount, release_iface); + continue; + } + + cifs_dbg(FYI, "successfully opened new channel on iface:%pIS\n", + &iface->sockaddr); + break; } + spin_unlock(&ses->iface_lock); - cifs_dbg(FYI, "successfully opened new channel on iface#%d\n", - i); left--; new_chan_count++; } - kfree(ifaces); return new_chan_count - old_chan_count; } /* + * update the iface for the channel if necessary. + * will return 0 when iface is updated, 1 if removed, 2 otherwise + * Must be called with chan_lock held. + */ +int +cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server) +{ + unsigned int chan_index; + struct cifs_server_iface *iface = NULL; + struct cifs_server_iface *old_iface = NULL; + int rc = 0; + + spin_lock(&ses->chan_lock); + chan_index = cifs_ses_get_chan_index(ses, server); + if (!chan_index) { + spin_unlock(&ses->chan_lock); + return 0; + } + + if (ses->chans[chan_index].iface) { + old_iface = ses->chans[chan_index].iface; + if (old_iface->is_active) { + spin_unlock(&ses->chan_lock); + return 1; + } + } + spin_unlock(&ses->chan_lock); + + spin_lock(&ses->iface_lock); + /* then look for a new one */ + list_for_each_entry(iface, &ses->iface_list, iface_head) { + if (!iface->is_active || + (is_ses_using_iface(ses, iface) && + !iface->rss_capable)) { + continue; + } + kref_get(&iface->refcount); + } + + if (!list_entry_is_head(iface, &ses->iface_list, iface_head)) { + rc = 1; + iface = NULL; + cifs_dbg(FYI, "unable to find a suitable iface\n"); + } + + /* now drop the ref to the current iface */ + if (old_iface && iface) { + kref_put(&old_iface->refcount, release_iface); + cifs_dbg(FYI, "replacing iface: %pIS with %pIS\n", + &old_iface->sockaddr, + &iface->sockaddr); + } else if (old_iface) { + kref_put(&old_iface->refcount, release_iface); + cifs_dbg(FYI, "releasing ref to iface: %pIS\n", + &old_iface->sockaddr); + } else { + WARN_ON(!iface); + cifs_dbg(FYI, "adding new iface: %pIS\n", &iface->sockaddr); + } + spin_unlock(&ses->iface_lock); + + spin_lock(&ses->chan_lock); + chan_index = cifs_ses_get_chan_index(ses, server); + ses->chans[chan_index].iface = iface; + + /* No iface is found. 
if secondary chan, drop connection */ + if (!iface && CIFS_SERVER_IS_CHAN(server)) + ses->chans[chan_index].server = NULL; + + spin_unlock(&ses->chan_lock); + + if (!iface && CIFS_SERVER_IS_CHAN(server)) + cifs_put_tcp_session(server, false); + + return rc; +} + +/* * If server is a channel of ses, return the corresponding enclosing * cifs_chan otherwise return NULL. */ @@ -301,7 +393,10 @@ cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses, /* Auth */ ctx.domainauto = ses->domainAuto; ctx.domainname = ses->domainName; - ctx.server_hostname = ses->server->hostname; + + /* no hostname for extra channels */ + ctx.server_hostname = ""; + ctx.username = ses->user_name; ctx.password = ses->password; ctx.sectype = ses->sectype; @@ -349,6 +444,7 @@ cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses, spin_unlock(&ses->chan_lock); goto out; } + chan->iface = iface; ses->chan_count++; atomic_set(&ses->chan_seq, 0); diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 98a76fa791c0..8802995b2d3d 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -512,73 +512,41 @@ smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) static int parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf, size_t buf_len, - struct cifs_server_iface **iface_list, - size_t *iface_count) + struct cifs_ses *ses) { struct network_interface_info_ioctl_rsp *p; struct sockaddr_in *addr4; struct sockaddr_in6 *addr6; struct iface_info_ipv4 *p4; struct iface_info_ipv6 *p6; - struct cifs_server_iface *info; + struct cifs_server_iface *info = NULL, *iface = NULL, *niface = NULL; + struct cifs_server_iface tmp_iface; ssize_t bytes_left; size_t next = 0; int nb_iface = 0; - int rc = 0; - - *iface_list = NULL; - *iface_count = 0; - - /* - * Fist pass: count and sanity check - */ + int rc = 0, ret = 0; bytes_left = buf_len; p = buf; - while (bytes_left >= sizeof(*p)) { - nb_iface++; - next = le32_to_cpu(p->Next); - if (!next) { - bytes_left -= sizeof(*p); - break; - } - p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next); - bytes_left -= next; - } - - if (!nb_iface) { - cifs_dbg(VFS, "%s: malformed interface info\n", __func__); - rc = -EINVAL; - goto out; - } - - /* Azure rounds the buffer size up 8, to a 16 byte boundary */ - if ((bytes_left > 8) || p->Next) - cifs_dbg(VFS, "%s: incomplete interface info\n", __func__); - + spin_lock(&ses->iface_lock); /* - * Second pass: extract info to internal structure + * Go through iface_list and do kref_put to remove + * any unused ifaces. ifaces in use will be removed + * when the last user calls a kref_put on it */ - - *iface_list = kcalloc(nb_iface, sizeof(**iface_list), GFP_KERNEL); - if (!*iface_list) { - rc = -ENOMEM; - goto out; + list_for_each_entry_safe(iface, niface, &ses->iface_list, + iface_head) { + iface->is_active = 0; + kref_put(&iface->refcount, release_iface); } + spin_unlock(&ses->iface_lock); - info = *iface_list; - bytes_left = buf_len; - p = buf; while (bytes_left >= sizeof(*p)) { - info->speed = le64_to_cpu(p->LinkSpeed); - info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE) ? 1 : 0; - info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE) ? 
1 : 0; - - cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, *iface_count); - cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed); - cifs_dbg(FYI, "%s: capabilities 0x%08x\n", __func__, - le32_to_cpu(p->Capability)); + memset(&tmp_iface, 0, sizeof(tmp_iface)); + tmp_iface.speed = le64_to_cpu(p->LinkSpeed); + tmp_iface.rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE) ? 1 : 0; + tmp_iface.rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE) ? 1 : 0; switch (p->Family) { /* @@ -587,7 +555,7 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf, * conversion explicit in case either one changes. */ case INTERNETWORK: - addr4 = (struct sockaddr_in *)&info->sockaddr; + addr4 = (struct sockaddr_in *)&tmp_iface.sockaddr; p4 = (struct iface_info_ipv4 *)p->Buffer; addr4->sin_family = AF_INET; memcpy(&addr4->sin_addr, &p4->IPv4Address, 4); @@ -599,7 +567,7 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf, &addr4->sin_addr); break; case INTERNETWORKV6: - addr6 = (struct sockaddr_in6 *)&info->sockaddr; + addr6 = (struct sockaddr_in6 *)&tmp_iface.sockaddr; p6 = (struct iface_info_ipv6 *)p->Buffer; addr6->sin6_family = AF_INET6; memcpy(&addr6->sin6_addr, &p6->IPv6Address, 16); @@ -619,46 +587,96 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf, goto next_iface; } - (*iface_count)++; - info++; + /* + * The iface_list is assumed to be sorted by speed. + * Check if the new interface exists in that list. + * NEVER change iface. it could be in use. + * Add a new one instead + */ + spin_lock(&ses->iface_lock); + iface = niface = NULL; + list_for_each_entry_safe(iface, niface, &ses->iface_list, + iface_head) { + ret = iface_cmp(iface, &tmp_iface); + if (!ret) { + /* just get a ref so that it doesn't get picked/freed */ + iface->is_active = 1; + kref_get(&iface->refcount); + spin_unlock(&ses->iface_lock); + goto next_iface; + } else if (ret < 0) { + /* all remaining ifaces are slower */ + kref_get(&iface->refcount); + break; + } + } + spin_unlock(&ses->iface_lock); + + /* no match. 
insert the entry in the list */ + info = kmalloc(sizeof(struct cifs_server_iface), + GFP_KERNEL); + if (!info) { + rc = -ENOMEM; + goto out; + } + memcpy(info, &tmp_iface, sizeof(tmp_iface)); + + /* add this new entry to the list */ + kref_init(&info->refcount); + info->is_active = 1; + + cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, ses->iface_count); + cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed); + cifs_dbg(FYI, "%s: capabilities 0x%08x\n", __func__, + le32_to_cpu(p->Capability)); + + spin_lock(&ses->iface_lock); + if (!list_entry_is_head(iface, &ses->iface_list, iface_head)) { + list_add_tail(&info->iface_head, &iface->iface_head); + kref_put(&iface->refcount, release_iface); + } else + list_add_tail(&info->iface_head, &ses->iface_list); + spin_unlock(&ses->iface_lock); + + ses->iface_count++; + ses->iface_last_update = jiffies; next_iface: + nb_iface++; next = le32_to_cpu(p->Next); - if (!next) + if (!next) { + bytes_left -= sizeof(*p); break; + } p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next); bytes_left -= next; } - if (!*iface_count) { + if (!nb_iface) { + cifs_dbg(VFS, "%s: malformed interface info\n", __func__); rc = -EINVAL; goto out; } -out: - if (rc) { - kfree(*iface_list); - *iface_count = 0; - *iface_list = NULL; - } - return rc; -} + /* Azure rounds the buffer size up 8, to a 16 byte boundary */ + if ((bytes_left > 8) || p->Next) + cifs_dbg(VFS, "%s: incomplete interface info\n", __func__); -static int compare_iface(const void *ia, const void *ib) -{ - const struct cifs_server_iface *a = (struct cifs_server_iface *)ia; - const struct cifs_server_iface *b = (struct cifs_server_iface *)ib; - return a->speed == b->speed ? 0 : (a->speed > b->speed ? -1 : 1); + if (!ses->iface_count) { + rc = -EINVAL; + goto out; + } + +out: + return rc; } -static int +int SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon) { int rc; unsigned int ret_data_len = 0; struct network_interface_info_ioctl_rsp *out_buf = NULL; - struct cifs_server_iface *iface_list; - size_t iface_count; struct cifs_ses *ses = tcon->ses; rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, @@ -674,21 +692,10 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon) goto out; } - rc = parse_server_interfaces(out_buf, ret_data_len, - &iface_list, &iface_count); + rc = parse_server_interfaces(out_buf, ret_data_len, ses); if (rc) goto out; - /* sort interfaces from fastest to slowest */ - sort(iface_list, iface_count, sizeof(*iface_list), compare_iface, NULL); - - spin_lock(&ses->iface_lock); - kfree(ses->iface_list); - ses->iface_list = iface_list; - ses->iface_count = iface_count; - ses->iface_last_update = jiffies; - spin_unlock(&ses->iface_lock); - out: kfree(out_buf); return rc; @@ -4260,15 +4267,15 @@ smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock, if (oplock == SMB2_OPLOCK_LEVEL_BATCH) { cinode->oplock = CIFS_CACHE_RHW_FLG; cifs_dbg(FYI, "Batch Oplock granted on inode %p\n", - &cinode->vfs_inode); + &cinode->netfs.inode); } else if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE) { cinode->oplock = CIFS_CACHE_RW_FLG; cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n", - &cinode->vfs_inode); + &cinode->netfs.inode); } else if (oplock == SMB2_OPLOCK_LEVEL_II) { cinode->oplock = CIFS_CACHE_READ_FLG; cifs_dbg(FYI, "Level II Oplock granted on inode %p\n", - &cinode->vfs_inode); + &cinode->netfs.inode); } else cinode->oplock = 0; } @@ -4307,7 +4314,7 @@ smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock, cinode->oplock = 
new_oplock; cifs_dbg(FYI, "%s Lease granted on inode %p\n", message, - &cinode->vfs_inode); + &cinode->netfs.inode); } static void diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 0e8c85249579..12b4dddaedb0 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -288,6 +288,9 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon, mutex_unlock(&ses->session_mutex); rc = -EHOSTDOWN; goto failed; + } else if (rc) { + mutex_unlock(&ses->session_mutex); + goto out; } } else { mutex_unlock(&ses->session_mutex); @@ -540,6 +543,7 @@ assemble_neg_contexts(struct smb2_negotiate_req *req, struct TCP_Server_Info *server, unsigned int *total_len) { char *pneg_ctxt; + char *hostname = NULL; unsigned int ctxt_len, neg_context_count; if (*total_len > 200) { @@ -567,16 +571,24 @@ assemble_neg_contexts(struct smb2_negotiate_req *req, *total_len += ctxt_len; pneg_ctxt += ctxt_len; - ctxt_len = build_netname_ctxt((struct smb2_netname_neg_context *)pneg_ctxt, - server->hostname); - *total_len += ctxt_len; - pneg_ctxt += ctxt_len; - build_posix_ctxt((struct smb2_posix_neg_context *)pneg_ctxt); *total_len += sizeof(struct smb2_posix_neg_context); pneg_ctxt += sizeof(struct smb2_posix_neg_context); - neg_context_count = 4; + /* + * secondary channels don't have the hostname field populated + * use the hostname field in the primary channel instead + */ + hostname = CIFS_SERVER_IS_CHAN(server) ? + server->primary_server->hostname : server->hostname; + if (hostname && (hostname[0] != 0)) { + ctxt_len = build_netname_ctxt((struct smb2_netname_neg_context *)pneg_ctxt, + hostname); + *total_len += ctxt_len; + pneg_ctxt += ctxt_len; + neg_context_count = 4; + } else /* second channels do not have a hostname */ + neg_context_count = 3; if (server->compress_algorithm) { build_compression_ctxt((struct smb2_compression_capabilities_context *) @@ -5151,6 +5163,8 @@ SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, data = &info; size = sizeof(struct smb2_file_eof_info); + trace_smb3_set_eof(xid, persistent_fid, tcon->tid, tcon->ses->Suid, le64_to_cpu(*eof)); + return send_set_info(xid, tcon, persistent_fid, volatile_fid, pid, FILE_END_OF_FILE_INFORMATION, SMB2_O_INFO_FILE, 0, 1, &data, &size); diff --git a/fs/cifs/trace.h b/fs/cifs/trace.h index 2be5e0c8564d..6b88dc2e364f 100644 --- a/fs/cifs/trace.h +++ b/fs/cifs/trace.h @@ -121,6 +121,44 @@ DEFINE_SMB3_RW_DONE_EVENT(query_dir_done); DEFINE_SMB3_RW_DONE_EVENT(zero_done); DEFINE_SMB3_RW_DONE_EVENT(falloc_done); +/* For logging successful set EOF (truncate) */ +DECLARE_EVENT_CLASS(smb3_eof_class, + TP_PROTO(unsigned int xid, + __u64 fid, + __u32 tid, + __u64 sesid, + __u64 offset), + TP_ARGS(xid, fid, tid, sesid, offset), + TP_STRUCT__entry( + __field(unsigned int, xid) + __field(__u64, fid) + __field(__u32, tid) + __field(__u64, sesid) + __field(__u64, offset) + ), + TP_fast_assign( + __entry->xid = xid; + __entry->fid = fid; + __entry->tid = tid; + __entry->sesid = sesid; + __entry->offset = offset; + ), + TP_printk("xid=%u sid=0x%llx tid=0x%x fid=0x%llx offset=0x%llx", + __entry->xid, __entry->sesid, __entry->tid, __entry->fid, + __entry->offset) +) + +#define DEFINE_SMB3_EOF_EVENT(name) \ +DEFINE_EVENT(smb3_eof_class, smb3_##name, \ + TP_PROTO(unsigned int xid, \ + __u64 fid, \ + __u32 tid, \ + __u64 sesid, \ + __u64 offset), \ + TP_ARGS(xid, fid, tid, sesid, offset)) + +DEFINE_SMB3_EOF_EVENT(set_eof); + /* * For handle based calls other than read and write, and get/set info */ diff --git a/fs/exfat/namei.c 
b/fs/exfat/namei.c index 76acc3721951..c6eaf7e9ea74 100644 --- a/fs/exfat/namei.c +++ b/fs/exfat/namei.c @@ -1198,7 +1198,9 @@ static int __exfat_rename(struct inode *old_parent_inode, return -ENOENT; } - exfat_chain_dup(&olddir, &ei->dir); + exfat_chain_set(&olddir, EXFAT_I(old_parent_inode)->start_clu, + EXFAT_B_TO_CLU_ROUND_UP(i_size_read(old_parent_inode), sbi), + EXFAT_I(old_parent_inode)->flags); dentry = ei->entry; ep = exfat_get_dentry(sb, &olddir, dentry, &old_bh); diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c index 2c2f179b6977..43de293cef56 100644 --- a/fs/ext2/dir.c +++ b/fs/ext2/dir.c @@ -672,17 +672,14 @@ int ext2_empty_dir (struct inode * inode) void *page_addr = NULL; struct page *page = NULL; unsigned long i, npages = dir_pages(inode); - int dir_has_error = 0; for (i = 0; i < npages; i++) { char *kaddr; ext2_dirent * de; - page = ext2_get_page(inode, i, dir_has_error, &page_addr); + page = ext2_get_page(inode, i, 0, &page_addr); - if (IS_ERR(page)) { - dir_has_error = 1; - continue; - } + if (IS_ERR(page)) + goto not_empty; kaddr = page_addr; de = (ext2_dirent *)kaddr; diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 360ce3604a2d..e6b932219803 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -1549,7 +1549,7 @@ static int __ext2_write_inode(struct inode *inode, int do_sync) if (IS_ERR(raw_inode)) return -EIO; - /* For fields not not tracking in the in-memory inode, + /* For fields not tracking in the in-memory inode, * initialise them to zero for new inodes. */ if (ei->i_state & EXT2_STATE_NEW) memset(raw_inode, 0, EXT2_SB(sb)->s_inode_size); diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 3dce7d058985..84c0eb55071d 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -829,7 +829,7 @@ int ext4_get_block_unwritten(struct inode *inode, sector_t iblock, ext4_debug("ext4_get_block_unwritten: inode %lu, create flag %d\n", inode->i_ino, create); return _ext4_get_block(inode, iblock, bh_result, - EXT4_GET_BLOCKS_IO_CREATE_EXT); + EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT); } /* Maximum number of blocks we map for direct IO at once. */ diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 9f12f29bc346..9e06334771a3 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -4104,6 +4104,15 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac, size = size >> bsbits; start = start_off >> bsbits; + /* + * For tiny groups (smaller than 8MB) the chosen allocation + * alignment may be larger than group size. Make sure the + * alignment does not move allocation to a different group which + * makes mballoc fail assertions later. + */ + start = max(start, rounddown(ac->ac_o_ex.fe_logical, + (ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb))); + /* don't cover already allocated blocks in selected range */ if (ar->pleft && start <= ar->lleft) { size -= ar->lleft + 1 - start; @@ -4176,7 +4185,22 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac, } rcu_read_unlock(); - if (start + size <= ac->ac_o_ex.fe_logical && + /* + * In this function "start" and "size" are normalized for better + * alignment and length such that we could preallocate more blocks. + * This normalization is done such that original request of + * ac->ac_o_ex.fe_logical & fe_len should always lie within "start" and + * "size" boundaries. + * (Note fe_len can be relaxed since FS block allocation API does not + * provide guarantee on number of contiguous blocks allocation since that + * depends upon free space left, etc).
+ * In case of inode pa, later we use the allocated blocks + * [pa_start + fe_logical - pa_lstart, fe_len/size] from the preallocated + * range of goal/best blocks [start, size] to put it at the + * ac_o_ex.fe_logical extent of this inode. + * (See ext4_mb_use_inode_pa() for more details) + */ + if (start + size <= ac->ac_o_ex.fe_logical || start > ac->ac_o_ex.fe_logical) { ext4_msg(ac->ac_sb, KERN_ERR, "start %lu, size %lu, fe_logical %lu", diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c index 7a5353a8cfd7..42f590518b4c 100644 --- a/fs/ext4/migrate.c +++ b/fs/ext4/migrate.c @@ -438,7 +438,7 @@ int ext4_ext_migrate(struct inode *inode) /* * Worst case we can touch the allocation bitmaps and a block - * group descriptor block. We do need need to worry about + * group descriptor block. We do need to worry about * credits for modifying the quota inode. */ handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 47d0ca4c795b..db4ba99d1ceb 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -1929,7 +1929,8 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, struct dx_hash_info *hinfo) { unsigned blocksize = dir->i_sb->s_blocksize; - unsigned count, continued; + unsigned continued; + int count; struct buffer_head *bh2; ext4_lblk_t newblock; u32 hash2; diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index 14695e2b5042..97fa7b4c645f 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c @@ -465,7 +465,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io, /* * In the first loop we prepare and mark buffers to submit. We have to * mark all buffers in the page before submitting so that - * end_page_writeback() cannot be called from ext4_bio_end_io() when IO + * end_page_writeback() cannot be called from ext4_end_bio() when IO * on the first buffer finishes and we are still working on submitting * the second buffer. */ diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index 90a941d20dff..8b70a4701293 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c @@ -54,6 +54,16 @@ int ext4_resize_begin(struct super_block *sb) return -EPERM; /* + * If the reserved GDT blocks is non-zero, the resize_inode feature + * should always be set. + */ + if (EXT4_SB(sb)->s_es->s_reserved_gdt_blocks && + !ext4_has_feature_resize_inode(sb)) { + ext4_error(sb, "resize_inode disabled but reserved GDT blocks non-zero"); + return -EFSCORRUPTED; + } + + /* * If we are not using the primary superblock/GDT copy don't resize, * because the user tools have no way of handling this. Probably a * bad time to do it anyways. 
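The mballoc hunks above normalize an allocation request and then clamp the goal start so that an alignment larger than a tiny block group cannot move the goal out of the group holding the original logical block; the subsequent check enforces the invariant that the normalized range still covers ac_o_ex.fe_logical. A small stand-alone sketch of that arithmetic, with hypothetical values; rounddown_ul and max_ul are stand-ins for the kernel's rounddown() and max() helpers, not the real API:

#include <stdio.h>

static unsigned long rounddown_ul(unsigned long x, unsigned long y)
{
	return x - (x % y);	/* round x down to a multiple of y */
}

static unsigned long max_ul(unsigned long a, unsigned long b)
{
	return a > b ? a : b;
}

int main(void)
{
	unsigned long blocks_per_group = 2048;	/* tiny 8MB group with 4k blocks */
	unsigned long fe_logical = 5000;	/* original request, sits in group 2 */
	unsigned long size = 8192;		/* normalized (aligned) length */
	unsigned long start = 0;		/* 8192-alignment pushed start to group 0 */

	/* the added clamp: keep the goal start inside fe_logical's group */
	start = max_ul(start, rounddown_ul(fe_logical, blocks_per_group));

	/* the invariant the rewritten check enforces: start <= fe_logical < start + size */
	printf("start=%lu (group %lu) covers=%d\n", start,
	       start / blocks_per_group,
	       start <= fe_logical && fe_logical < start + size);
	return 0;
}

With these numbers the clamp moves start from 0 to 4096, the first block of the group containing block 5000, which is exactly the case the "tiny groups" comment describes.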
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 450c918d68fc..845f2f8aee5f 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -87,7 +87,7 @@ static struct inode *ext4_get_journal_inode(struct super_block *sb, static int ext4_validate_options(struct fs_context *fc); static int ext4_check_opt_consistency(struct fs_context *fc, struct super_block *sb); -static int ext4_apply_options(struct fs_context *fc, struct super_block *sb); +static void ext4_apply_options(struct fs_context *fc, struct super_block *sb); static int ext4_parse_param(struct fs_context *fc, struct fs_parameter *param); static int ext4_get_tree(struct fs_context *fc); static int ext4_reconfigure(struct fs_context *fc); @@ -1870,31 +1870,12 @@ ext4_sb_read_encoding(const struct ext4_super_block *es) } #endif -static int ext4_set_test_dummy_encryption(struct super_block *sb, char *arg) -{ -#ifdef CONFIG_FS_ENCRYPTION - struct ext4_sb_info *sbi = EXT4_SB(sb); - int err; - - err = fscrypt_set_test_dummy_encryption(sb, arg, - &sbi->s_dummy_enc_policy); - if (err) { - ext4_msg(sb, KERN_WARNING, - "Error while setting test dummy encryption [%d]", err); - return err; - } - ext4_msg(sb, KERN_WARNING, "Test dummy encryption mode enabled"); -#endif - return 0; -} - #define EXT4_SPEC_JQUOTA (1 << 0) #define EXT4_SPEC_JQFMT (1 << 1) #define EXT4_SPEC_DATAJ (1 << 2) #define EXT4_SPEC_SB_BLOCK (1 << 3) #define EXT4_SPEC_JOURNAL_DEV (1 << 4) #define EXT4_SPEC_JOURNAL_IOPRIO (1 << 5) -#define EXT4_SPEC_DUMMY_ENCRYPTION (1 << 6) #define EXT4_SPEC_s_want_extra_isize (1 << 7) #define EXT4_SPEC_s_max_batch_time (1 << 8) #define EXT4_SPEC_s_min_batch_time (1 << 9) @@ -1911,7 +1892,7 @@ static int ext4_set_test_dummy_encryption(struct super_block *sb, char *arg) struct ext4_fs_context { char *s_qf_names[EXT4_MAXQUOTAS]; - char *test_dummy_enc_arg; + struct fscrypt_dummy_policy dummy_enc_policy; int s_jquota_fmt; /* Format of quota to use */ #ifdef CONFIG_EXT4_DEBUG int s_fc_debug_max_replay; @@ -1953,7 +1934,7 @@ static void ext4_fc_free(struct fs_context *fc) for (i = 0; i < EXT4_MAXQUOTAS; i++) kfree(ctx->s_qf_names[i]); - kfree(ctx->test_dummy_enc_arg); + fscrypt_free_dummy_policy(&ctx->dummy_enc_policy); kfree(ctx); } @@ -2029,6 +2010,29 @@ static int unnote_qf_name(struct fs_context *fc, int qtype) } #endif +static int ext4_parse_test_dummy_encryption(const struct fs_parameter *param, + struct ext4_fs_context *ctx) +{ + int err; + + if (!IS_ENABLED(CONFIG_FS_ENCRYPTION)) { + ext4_msg(NULL, KERN_WARNING, + "test_dummy_encryption option not supported"); + return -EINVAL; + } + err = fscrypt_parse_test_dummy_encryption(param, + &ctx->dummy_enc_policy); + if (err == -EINVAL) { + ext4_msg(NULL, KERN_WARNING, + "Value of option \"%s\" is unrecognized", param->key); + } else if (err == -EEXIST) { + ext4_msg(NULL, KERN_WARNING, + "Conflicting test_dummy_encryption options"); + return -EINVAL; + } + return err; +} + #define EXT4_SET_CTX(name) \ static inline void ctx_set_##name(struct ext4_fs_context *ctx, \ unsigned long flag) \ @@ -2291,29 +2295,7 @@ static int ext4_parse_param(struct fs_context *fc, struct fs_parameter *param) ctx->spec |= EXT4_SPEC_JOURNAL_IOPRIO; return 0; case Opt_test_dummy_encryption: -#ifdef CONFIG_FS_ENCRYPTION - if (param->type == fs_value_is_flag) { - ctx->spec |= EXT4_SPEC_DUMMY_ENCRYPTION; - ctx->test_dummy_enc_arg = NULL; - return 0; - } - if (*param->string && - !(!strcmp(param->string, "v1") || - !strcmp(param->string, "v2"))) { - ext4_msg(NULL, KERN_WARNING, - "Value of option \"%s\" is unrecognized", - 
param->key); - return -EINVAL; - } - ctx->spec |= EXT4_SPEC_DUMMY_ENCRYPTION; - ctx->test_dummy_enc_arg = kmemdup_nul(param->string, param->size, - GFP_KERNEL); - return 0; -#else - ext4_msg(NULL, KERN_WARNING, - "test_dummy_encryption option not supported"); - return -EINVAL; -#endif + return ext4_parse_test_dummy_encryption(param, ctx); case Opt_dax: case Opt_dax_type: #ifdef CONFIG_FS_DAX @@ -2504,7 +2486,8 @@ parse_failed: if (s_ctx->spec & EXT4_SPEC_JOURNAL_IOPRIO) m_ctx->journal_ioprio = s_ctx->journal_ioprio; - ret = ext4_apply_options(fc, sb); + ext4_apply_options(fc, sb); + ret = 0; out_free: if (fc) { @@ -2673,11 +2656,11 @@ err_jquota_specified: static int ext4_check_test_dummy_encryption(const struct fs_context *fc, struct super_block *sb) { -#ifdef CONFIG_FS_ENCRYPTION const struct ext4_fs_context *ctx = fc->fs_private; const struct ext4_sb_info *sbi = EXT4_SB(sb); + int err; - if (!(ctx->spec & EXT4_SPEC_DUMMY_ENCRYPTION)) + if (!fscrypt_is_dummy_policy_set(&ctx->dummy_enc_policy)) return 0; if (!ext4_has_feature_encrypt(sb)) { @@ -2691,14 +2674,46 @@ static int ext4_check_test_dummy_encryption(const struct fs_context *fc, * needed to allow it to be set or changed during remount. We do allow * it to be specified during remount, but only if there is no change. */ - if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE && - !sbi->s_dummy_enc_policy.policy) { + if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) { + if (fscrypt_dummy_policies_equal(&sbi->s_dummy_enc_policy, + &ctx->dummy_enc_policy)) + return 0; ext4_msg(NULL, KERN_WARNING, - "Can't set test_dummy_encryption on remount"); + "Can't set or change test_dummy_encryption on remount"); return -EINVAL; } -#endif /* CONFIG_FS_ENCRYPTION */ - return 0; + /* Also make sure s_mount_opts didn't contain a conflicting value. */ + if (fscrypt_is_dummy_policy_set(&sbi->s_dummy_enc_policy)) { + if (fscrypt_dummy_policies_equal(&sbi->s_dummy_enc_policy, + &ctx->dummy_enc_policy)) + return 0; + ext4_msg(NULL, KERN_WARNING, + "Conflicting test_dummy_encryption options"); + return -EINVAL; + } + /* + * fscrypt_add_test_dummy_key() technically changes the super_block, so + * technically it should be delayed until ext4_apply_options() like the + * other changes. But since we never get here for remounts (see above), + * and this is the last chance to report errors, we do it here. 
+ */ + err = fscrypt_add_test_dummy_key(sb, &ctx->dummy_enc_policy); + if (err) + ext4_msg(NULL, KERN_WARNING, + "Error adding test dummy encryption key [%d]", err); + return err; +} + +static void ext4_apply_test_dummy_encryption(struct ext4_fs_context *ctx, + struct super_block *sb) +{ + if (!fscrypt_is_dummy_policy_set(&ctx->dummy_enc_policy) || + /* if already set, it was already verified to be the same */ + fscrypt_is_dummy_policy_set(&EXT4_SB(sb)->s_dummy_enc_policy)) + return; + EXT4_SB(sb)->s_dummy_enc_policy = ctx->dummy_enc_policy; + memset(&ctx->dummy_enc_policy, 0, sizeof(ctx->dummy_enc_policy)); + ext4_msg(sb, KERN_WARNING, "Test dummy encryption mode enabled"); } static int ext4_check_opt_consistency(struct fs_context *fc, @@ -2785,11 +2800,10 @@ fail_dax_change_remount: return ext4_check_quota_consistency(fc, sb); } -static int ext4_apply_options(struct fs_context *fc, struct super_block *sb) +static void ext4_apply_options(struct fs_context *fc, struct super_block *sb) { struct ext4_fs_context *ctx = fc->fs_private; struct ext4_sb_info *sbi = fc->s_fs_info; - int ret = 0; sbi->s_mount_opt &= ~ctx->mask_s_mount_opt; sbi->s_mount_opt |= ctx->vals_s_mount_opt; @@ -2825,11 +2839,7 @@ static int ext4_apply_options(struct fs_context *fc, struct super_block *sb) #endif ext4_apply_quota_options(fc, sb); - - if (ctx->spec & EXT4_SPEC_DUMMY_ENCRYPTION) - ret = ext4_set_test_dummy_encryption(sb, ctx->test_dummy_enc_arg); - - return ret; + ext4_apply_test_dummy_encryption(ctx, sb); } @@ -4552,9 +4562,7 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb) if (err < 0) goto failed_mount; - err = ext4_apply_options(fc, sb); - if (err < 0) - goto failed_mount; + ext4_apply_options(fc, sb); #if IS_ENABLED(CONFIG_UNICODE) if (ext4_has_feature_casefold(sb) && !sb->s_encoding) { @@ -5302,14 +5310,6 @@ no_journal: err = percpu_counter_init(&sbi->s_freeinodes_counter, freei, GFP_KERNEL); } - /* - * Update the checksum after updating free space/inode - * counters. Otherwise the superblock can have an incorrect - * checksum in the buffer cache until it is written out and - * e2fsprogs programs trying to open a file system immediately - * after it is mounted can fail. - */ - ext4_superblock_csum_set(sb); if (!err) err = percpu_counter_init(&sbi->s_dirs_counter, ext4_count_dirs(sb), GFP_KERNEL); @@ -5367,6 +5367,14 @@ no_journal: EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS; ext4_orphan_cleanup(sb, es); EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS; + /* + * Update the checksum after updating free space/inode counters and + * ext4_orphan_cleanup. Otherwise the superblock can have an incorrect + * checksum in the buffer cache until it is written out and + * e2fsprogs programs trying to open a file system immediately + * after it is mounted can fail. + */ + ext4_superblock_csum_set(sb); if (needs_recovery) { ext4_msg(sb, KERN_INFO, "recovery complete"); err = ext4_mark_recovery_complete(sb, es); @@ -5898,7 +5906,6 @@ static void ext4_update_super(struct super_block *sb) static int ext4_commit_super(struct super_block *sb) { struct buffer_head *sbh = EXT4_SB(sb)->s_sbh; - int error = 0; if (!sbh) return -EINVAL; @@ -5907,6 +5914,13 @@ static int ext4_commit_super(struct super_block *sb) ext4_update_super(sb); + lock_buffer(sbh); + /* Buffer got discarded which means block device got invalidated */ + if (!buffer_mapped(sbh)) { + unlock_buffer(sbh); + return -EIO; + } + if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) { /* * Oh, dear. 
A previous attempt to write the @@ -5921,17 +5935,21 @@ static int ext4_commit_super(struct super_block *sb) clear_buffer_write_io_error(sbh); set_buffer_uptodate(sbh); } - BUFFER_TRACE(sbh, "marking dirty"); - mark_buffer_dirty(sbh); - error = __sync_dirty_buffer(sbh, - REQ_SYNC | (test_opt(sb, BARRIER) ? REQ_FUA : 0)); + get_bh(sbh); + /* Clear potential dirty bit if it was journalled update */ + clear_buffer_dirty(sbh); + sbh->b_end_io = end_buffer_write_sync; + submit_bh(REQ_OP_WRITE, + REQ_SYNC | (test_opt(sb, BARRIER) ? REQ_FUA : 0), sbh); + wait_on_buffer(sbh); if (buffer_write_io_error(sbh)) { ext4_msg(sb, KERN_ERR, "I/O error while writing " "superblock"); clear_buffer_write_io_error(sbh); set_buffer_uptodate(sbh); + return -EIO; } - return error; + return 0; } /* diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 042325349098..564e28a1aa94 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -1895,11 +1895,10 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode, unlock_buffer(bs->bh); ea_bdebug(bs->bh, "cloning"); - s->base = kmalloc(bs->bh->b_size, GFP_NOFS); + s->base = kmemdup(BHDR(bs->bh), bs->bh->b_size, GFP_NOFS); error = -ENOMEM; if (s->base == NULL) goto cleanup; - memcpy(s->base, BHDR(bs->bh), bs->bh->b_size); s->first = ENTRY(header(s->base)+1); header(s->base)->h_refcount = cpu_to_le32(1); s->here = ENTRY(s->base + offset); diff --git a/fs/f2fs/iostat.c b/fs/f2fs/iostat.c index be599f31d3c4..d84c5f6cc09d 100644 --- a/fs/f2fs/iostat.c +++ b/fs/f2fs/iostat.c @@ -91,8 +91,9 @@ static inline void __record_iostat_latency(struct f2fs_sb_info *sbi) unsigned int cnt; struct f2fs_iostat_latency iostat_lat[MAX_IO_TYPE][NR_PAGE_TYPE]; struct iostat_lat_info *io_lat = sbi->iostat_io_lat; + unsigned long flags; - spin_lock_bh(&sbi->iostat_lat_lock); + spin_lock_irqsave(&sbi->iostat_lat_lock, flags); for (idx = 0; idx < MAX_IO_TYPE; idx++) { for (io = 0; io < NR_PAGE_TYPE; io++) { cnt = io_lat->bio_cnt[idx][io]; @@ -106,7 +107,7 @@ static inline void __record_iostat_latency(struct f2fs_sb_info *sbi) io_lat->bio_cnt[idx][io] = 0; } } - spin_unlock_bh(&sbi->iostat_lat_lock); + spin_unlock_irqrestore(&sbi->iostat_lat_lock, flags); trace_f2fs_iostat_latency(sbi, iostat_lat); } @@ -115,14 +116,15 @@ static inline void f2fs_record_iostat(struct f2fs_sb_info *sbi) { unsigned long long iostat_diff[NR_IO_TYPE]; int i; + unsigned long flags; if (time_is_after_jiffies(sbi->iostat_next_period)) return; /* Need double check under the lock */ - spin_lock_bh(&sbi->iostat_lock); + spin_lock_irqsave(&sbi->iostat_lock, flags); if (time_is_after_jiffies(sbi->iostat_next_period)) { - spin_unlock_bh(&sbi->iostat_lock); + spin_unlock_irqrestore(&sbi->iostat_lock, flags); return; } sbi->iostat_next_period = jiffies + @@ -133,7 +135,7 @@ static inline void f2fs_record_iostat(struct f2fs_sb_info *sbi) sbi->prev_rw_iostat[i]; sbi->prev_rw_iostat[i] = sbi->rw_iostat[i]; } - spin_unlock_bh(&sbi->iostat_lock); + spin_unlock_irqrestore(&sbi->iostat_lock, flags); trace_f2fs_iostat(sbi, iostat_diff); @@ -145,25 +147,27 @@ void f2fs_reset_iostat(struct f2fs_sb_info *sbi) struct iostat_lat_info *io_lat = sbi->iostat_io_lat; int i; - spin_lock_bh(&sbi->iostat_lock); + spin_lock_irq(&sbi->iostat_lock); for (i = 0; i < NR_IO_TYPE; i++) { sbi->rw_iostat[i] = 0; sbi->prev_rw_iostat[i] = 0; } - spin_unlock_bh(&sbi->iostat_lock); + spin_unlock_irq(&sbi->iostat_lock); - spin_lock_bh(&sbi->iostat_lat_lock); + spin_lock_irq(&sbi->iostat_lat_lock); memset(io_lat, 0, sizeof(struct iostat_lat_info)); - 
spin_unlock_bh(&sbi->iostat_lat_lock); + spin_unlock_irq(&sbi->iostat_lat_lock); } void f2fs_update_iostat(struct f2fs_sb_info *sbi, enum iostat_type type, unsigned long long io_bytes) { + unsigned long flags; + if (!sbi->iostat_enable) return; - spin_lock_bh(&sbi->iostat_lock); + spin_lock_irqsave(&sbi->iostat_lock, flags); sbi->rw_iostat[type] += io_bytes; if (type == APP_BUFFERED_IO || type == APP_DIRECT_IO) @@ -172,7 +176,7 @@ void f2fs_update_iostat(struct f2fs_sb_info *sbi, if (type == APP_BUFFERED_READ_IO || type == APP_DIRECT_READ_IO) sbi->rw_iostat[APP_READ_IO] += io_bytes; - spin_unlock_bh(&sbi->iostat_lock); + spin_unlock_irqrestore(&sbi->iostat_lock, flags); f2fs_record_iostat(sbi); } @@ -185,6 +189,7 @@ static inline void __update_iostat_latency(struct bio_iostat_ctx *iostat_ctx, struct f2fs_sb_info *sbi = iostat_ctx->sbi; struct iostat_lat_info *io_lat = sbi->iostat_io_lat; int idx; + unsigned long flags; if (!sbi->iostat_enable) return; @@ -202,12 +207,12 @@ static inline void __update_iostat_latency(struct bio_iostat_ctx *iostat_ctx, idx = WRITE_ASYNC_IO; } - spin_lock_bh(&sbi->iostat_lat_lock); + spin_lock_irqsave(&sbi->iostat_lat_lock, flags); io_lat->sum_lat[idx][iotype] += ts_diff; io_lat->bio_cnt[idx][iotype]++; if (ts_diff > io_lat->peak_lat[idx][iotype]) io_lat->peak_lat[idx][iotype] = ts_diff; - spin_unlock_bh(&sbi->iostat_lat_lock); + spin_unlock_irqrestore(&sbi->iostat_lat_lock, flags); } void iostat_update_and_unbind_ctx(struct bio *bio, int rw) diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index c549acb52ac4..bf00d5057abb 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -89,8 +89,6 @@ static struct inode *f2fs_new_inode(struct user_namespace *mnt_userns, if (test_opt(sbi, INLINE_XATTR)) set_inode_flag(inode, FI_INLINE_XATTR); - if (test_opt(sbi, INLINE_DATA) && f2fs_may_inline_data(inode)) - set_inode_flag(inode, FI_INLINE_DATA); if (f2fs_may_inline_dentry(inode)) set_inode_flag(inode, FI_INLINE_DENTRY); @@ -107,10 +105,6 @@ static struct inode *f2fs_new_inode(struct user_namespace *mnt_userns, f2fs_init_extent_tree(inode, NULL); - stat_inc_inline_xattr(inode); - stat_inc_inline_inode(inode); - stat_inc_inline_dir(inode); - F2FS_I(inode)->i_flags = f2fs_mask_flags(mode, F2FS_I(dir)->i_flags & F2FS_FL_INHERITED); @@ -127,6 +121,14 @@ static struct inode *f2fs_new_inode(struct user_namespace *mnt_userns, set_compress_context(inode); } + /* Should enable inline_data after compression set */ + if (test_opt(sbi, INLINE_DATA) && f2fs_may_inline_data(inode)) + set_inode_flag(inode, FI_INLINE_DATA); + + stat_inc_inline_xattr(inode); + stat_inc_inline_inode(inode); + stat_inc_inline_dir(inode); + f2fs_set_inode_flags(inode); trace_f2fs_new_inode(inode, 0); @@ -325,6 +327,9 @@ static void set_compress_inode(struct f2fs_sb_info *sbi, struct inode *inode, if (!is_extension_exist(name, ext[i], false)) continue; + /* Do not use inline_data with compression */ + stat_dec_inline_inode(inode); + clear_inode_flag(inode, FI_INLINE_DATA); set_compress_context(inode); return; } diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 836c79a20afc..cf6f7fc83c08 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -1450,7 +1450,9 @@ page_hit: out_err: ClearPageUptodate(page); out_put_err: - f2fs_handle_page_eio(sbi, page->index, NODE); + /* ENOENT comes from read_node_page which is not an error. 
*/ + if (err != -ENOENT) + f2fs_handle_page_eio(sbi, page->index, NODE); f2fs_put_page(page, 1); return ERR_PTR(err); } diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index a21d8f1a56d1..05221366a16d 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -120,6 +120,7 @@ static bool inode_io_list_move_locked(struct inode *inode, struct list_head *head) { assert_spin_locked(&wb->list_lock); + assert_spin_locked(&inode->i_lock); list_move(&inode->i_io_list, head); @@ -1365,9 +1366,9 @@ static int move_expired_inodes(struct list_head *delaying_queue, inode = wb_inode(delaying_queue->prev); if (inode_dirtied_after(inode, dirtied_before)) break; + spin_lock(&inode->i_lock); list_move(&inode->i_io_list, &tmp); moved++; - spin_lock(&inode->i_lock); inode->i_state |= I_SYNC_QUEUED; spin_unlock(&inode->i_lock); if (sb_is_blkdev_sb(inode->i_sb)) @@ -1383,7 +1384,12 @@ static int move_expired_inodes(struct list_head *delaying_queue, goto out; } - /* Move inodes from one superblock together */ + /* + * Although inode's i_io_list is moved from 'tmp' to 'dispatch_queue', + * we don't take inode->i_lock here because it is just a pointless overhead. + * Inode is already marked as I_SYNC_QUEUED so writeback list handling is + * fully under our control. + */ while (!list_empty(&tmp)) { sb = wb_inode(tmp.prev)->i_sb; list_for_each_prev_safe(pos, node, &tmp) { @@ -1826,8 +1832,8 @@ static long writeback_sb_inodes(struct super_block *sb, * We'll have another go at writing back this inode * when we completed a full scan of b_io. */ - spin_unlock(&inode->i_lock); requeue_io(inode, wb); + spin_unlock(&inode->i_lock); trace_writeback_sb_inodes_requeue(inode); continue; } @@ -2358,6 +2364,7 @@ void __mark_inode_dirty(struct inode *inode, int flags) { struct super_block *sb = inode->i_sb; int dirtytime = 0; + struct bdi_writeback *wb = NULL; trace_writeback_mark_inode_dirty(inode, flags); @@ -2410,13 +2417,24 @@ void __mark_inode_dirty(struct inode *inode, int flags) inode->i_state |= flags; /* + * Grab inode's wb early because it requires dropping i_lock and we + * need to make sure following checks happen atomically with dirty + * list handling so that we don't move inodes under flush worker's + * hands. + */ + if (!was_dirty) { + wb = locked_inode_to_wb_and_lock_list(inode); + spin_lock(&inode->i_lock); + } + + /* * If the inode is queued for writeback by flush worker, just * update its dirty state. Once the flush worker is done with * the inode it will place it on the appropriate superblock * list, based upon its state. */ if (inode->i_state & I_SYNC_QUEUED) - goto out_unlock_inode; + goto out_unlock; /* * Only add valid (hashed) inodes to the superblock's @@ -2424,22 +2442,19 @@ void __mark_inode_dirty(struct inode *inode, int flags) */ if (!S_ISBLK(inode->i_mode)) { if (inode_unhashed(inode)) - goto out_unlock_inode; + goto out_unlock; } if (inode->i_state & I_FREEING) - goto out_unlock_inode; + goto out_unlock; /* * If the inode was already on b_dirty/b_io/b_more_io, don't * reposition it (that would break b_dirty time-ordering). 
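/*
 * A short sketch of the lock ordering the fs-writeback hunks above
 * establish (helper name illustrative): inode->i_io_list is now
 * protected by inode->i_lock in addition to wb->list_lock, with
 * wb->list_lock taken first, so __mark_inode_dirty() can no longer
 * move an inode between dirty lists underneath the flush worker.
 */
static void io_list_move(struct bdi_writeback *wb, struct inode *inode,
			 struct list_head *head)
{
	assert_spin_locked(&wb->list_lock);

	spin_lock(&inode->i_lock);
	list_move(&inode->i_io_list, head);
	spin_unlock(&inode->i_lock);
}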
*/ if (!was_dirty) { - struct bdi_writeback *wb; struct list_head *dirty_list; bool wakeup_bdi = false; - wb = locked_inode_to_wb_and_lock_list(inode); - inode->dirtied_when = jiffies; if (dirtytime) inode->dirtied_time_when = jiffies; @@ -2453,6 +2468,7 @@ void __mark_inode_dirty(struct inode *inode, int flags) dirty_list); spin_unlock(&wb->list_lock); + spin_unlock(&inode->i_lock); trace_writeback_dirty_inode_enqueue(inode); /* @@ -2467,6 +2483,9 @@ void __mark_inode_dirty(struct inode *inode, int flags) return; } } +out_unlock: + if (wb) + spin_unlock(&wb->list_lock); out_unlock_inode: spin_unlock(&inode->i_lock); } diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 62408047e8d7..02eb72351b15 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -600,41 +600,79 @@ static void hugetlb_vmtruncate(struct inode *inode, loff_t offset) remove_inode_hugepages(inode, offset, LLONG_MAX); } +static void hugetlbfs_zero_partial_page(struct hstate *h, + struct address_space *mapping, + loff_t start, + loff_t end) +{ + pgoff_t idx = start >> huge_page_shift(h); + struct folio *folio; + + folio = filemap_lock_folio(mapping, idx); + if (!folio) + return; + + start = start & ~huge_page_mask(h); + end = end & ~huge_page_mask(h); + if (!end) + end = huge_page_size(h); + + folio_zero_segment(folio, (size_t)start, (size_t)end); + + folio_unlock(folio); + folio_put(folio); +} + static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) { + struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode); + struct address_space *mapping = inode->i_mapping; struct hstate *h = hstate_inode(inode); loff_t hpage_size = huge_page_size(h); loff_t hole_start, hole_end; /* - * For hole punch round up the beginning offset of the hole and - * round down the end. + * hole_start and hole_end indicate the full pages within the hole. */ hole_start = round_up(offset, hpage_size); hole_end = round_down(offset + len, hpage_size); - if (hole_end > hole_start) { - struct address_space *mapping = inode->i_mapping; - struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode); + inode_lock(inode); - inode_lock(inode); + /* protected by i_rwsem */ + if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) { + inode_unlock(inode); + return -EPERM; + } - /* protected by i_rwsem */ - if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) { - inode_unlock(inode); - return -EPERM; - } + i_mmap_lock_write(mapping); + + /* If range starts before first full page, zero partial page. */ + if (offset < hole_start) + hugetlbfs_zero_partial_page(h, mapping, + offset, min(offset + len, hole_start)); - i_mmap_lock_write(mapping); + /* Unmap users of full pages in the hole. */ + if (hole_end > hole_start) { if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)) hugetlb_vmdelete_list(&mapping->i_mmap, hole_start >> PAGE_SHIFT, hole_end >> PAGE_SHIFT, 0); - i_mmap_unlock_write(mapping); - remove_inode_hugepages(inode, hole_start, hole_end); - inode_unlock(inode); } + /* If range extends beyond last full page, zero partial page. */ + if ((offset + len) > hole_end && (offset + len) > hole_start) + hugetlbfs_zero_partial_page(h, mapping, + hole_end, offset + len); + + i_mmap_unlock_write(mapping); + + /* Remove full pages from the file. 
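/*
 * Userspace view of the hugetlbfs change above (path and sizes are
 * illustrative). Previously a hole smaller than a huge page was
 * rounded away and the partial pages were left untouched; with this
 * change the partial head/tail pages are zeroed and only whole huge
 * pages are removed from the file.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>

int punch_small_hole(void)
{
	int fd = open("/dev/hugepages/demo", O_RDWR | O_CREAT, 0600);

	if (fd < 0)
		return -1;
	/* an 8 KiB hole inside a 2 MiB huge page is now zeroed */
	return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			 4096, 8192);
}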
*/ + if (hole_end > hole_start) + remove_inode_hugepages(inode, hole_start, hole_end); + + inode_unlock(inode); + return 0; } diff --git a/fs/inode.c b/fs/inode.c index 9d9b422504d1..bd4da9c5207e 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -27,7 +27,7 @@ * Inode locking rules: * * inode->i_lock protects: - * inode->i_state, inode->i_hash, __iget() + * inode->i_state, inode->i_hash, __iget(), inode->i_io_list * Inode LRU list locks protect: * inode->i_sb->s_inode_lru, inode->i_lru * inode->i_sb->s_inode_list_lock protects: diff --git a/fs/io_uring.c b/fs/io_uring.c index 3aab4182fd89..5ff2cdb425bc 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -298,8 +298,8 @@ struct io_buffer_list { /* below is for ring provided buffers */ __u16 buf_nr_pages; __u16 nr_entries; - __u32 head; - __u32 mask; + __u16 head; + __u16 mask; }; struct io_buffer { @@ -576,7 +576,6 @@ struct io_close { struct file *file; int fd; u32 file_slot; - u32 flags; }; struct io_timeout_data { @@ -784,12 +783,6 @@ struct io_msg { u32 len; }; -struct io_nop { - struct file *file; - u64 extra1; - u64 extra2; -}; - struct io_async_connect { struct sockaddr_storage address; }; @@ -851,6 +844,7 @@ enum { REQ_F_SINGLE_POLL_BIT, REQ_F_DOUBLE_POLL_BIT, REQ_F_PARTIAL_IO_BIT, + REQ_F_CQE32_INIT_BIT, REQ_F_APOLL_MULTISHOT_BIT, /* keep async read/write and isreg together and in order */ REQ_F_SUPPORT_NOWAIT_BIT, @@ -920,6 +914,8 @@ enum { REQ_F_PARTIAL_IO = BIT(REQ_F_PARTIAL_IO_BIT), /* fast poll multishot mode */ REQ_F_APOLL_MULTISHOT = BIT(REQ_F_APOLL_MULTISHOT_BIT), + /* ->extra1 and ->extra2 are initialised */ + REQ_F_CQE32_INIT = BIT(REQ_F_CQE32_INIT_BIT), }; struct async_poll { @@ -994,7 +990,6 @@ struct io_kiocb { struct io_msg msg; struct io_xattr xattr; struct io_socket sock; - struct io_nop nop; struct io_uring_cmd uring_cmd; }; @@ -1121,7 +1116,6 @@ static const struct io_op_def io_op_defs[] = { [IORING_OP_NOP] = { .audit_skip = 1, .iopoll = 1, - .buffer_select = 1, }, [IORING_OP_READV] = { .needs_file = 1, @@ -1729,9 +1723,16 @@ static void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags) if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING))) return; - /* don't recycle if we already did IO to this buffer */ - if (req->flags & REQ_F_PARTIAL_IO) + /* + * For legacy provided buffer mode, don't recycle if we already did + * IO to this buffer. For ring-mapped provided buffer mode, we should + * increment ring->head to explicitly monopolize the buffer to avoid + * multiple use. + */ + if ((req->flags & REQ_F_BUFFER_SELECTED) && + (req->flags & REQ_F_PARTIAL_IO)) return; + /* * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear * the flag and hence ensure that bl->head doesn't get incremented. 
@@ -1739,8 +1740,13 @@ static void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags) */ if (req->flags & REQ_F_BUFFER_RING) { if (req->buf_list) { - req->buf_index = req->buf_list->bgid; - req->flags &= ~REQ_F_BUFFER_RING; + if (req->flags & REQ_F_PARTIAL_IO) { + req->buf_list->head++; + req->buf_list = NULL; + } else { + req->buf_index = req->buf_list->bgid; + req->flags &= ~REQ_F_BUFFER_RING; + } } return; } @@ -1969,7 +1975,7 @@ static inline void io_req_track_inflight(struct io_kiocb *req) { if (!(req->flags & REQ_F_INFLIGHT)) { req->flags |= REQ_F_INFLIGHT; - atomic_inc(¤t->io_uring->inflight_tracked); + atomic_inc(&req->task->io_uring->inflight_tracked); } } @@ -2441,94 +2447,66 @@ static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data, return true; } -static inline bool __io_fill_cqe(struct io_ring_ctx *ctx, u64 user_data, - s32 res, u32 cflags) +static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx, + struct io_kiocb *req) { struct io_uring_cqe *cqe; - /* - * If we can't get a cq entry, userspace overflowed the - * submission (by quite a lot). Increment the overflow count in - * the ring. - */ - cqe = io_get_cqe(ctx); - if (likely(cqe)) { - WRITE_ONCE(cqe->user_data, user_data); - WRITE_ONCE(cqe->res, res); - WRITE_ONCE(cqe->flags, cflags); - return true; - } - return io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0); -} + if (!(ctx->flags & IORING_SETUP_CQE32)) { + trace_io_uring_complete(req->ctx, req, req->cqe.user_data, + req->cqe.res, req->cqe.flags, 0, 0); -static inline bool __io_fill_cqe_req_filled(struct io_ring_ctx *ctx, - struct io_kiocb *req) -{ - struct io_uring_cqe *cqe; + /* + * If we can't get a cq entry, userspace overflowed the + * submission (by quite a lot). Increment the overflow count in + * the ring. + */ + cqe = io_get_cqe(ctx); + if (likely(cqe)) { + memcpy(cqe, &req->cqe, sizeof(*cqe)); + return true; + } - trace_io_uring_complete(req->ctx, req, req->cqe.user_data, - req->cqe.res, req->cqe.flags, 0, 0); + return io_cqring_event_overflow(ctx, req->cqe.user_data, + req->cqe.res, req->cqe.flags, + 0, 0); + } else { + u64 extra1 = 0, extra2 = 0; - /* - * If we can't get a cq entry, userspace overflowed the - * submission (by quite a lot). Increment the overflow count in - * the ring. - */ - cqe = io_get_cqe(ctx); - if (likely(cqe)) { - memcpy(cqe, &req->cqe, sizeof(*cqe)); - return true; - } - return io_cqring_event_overflow(ctx, req->cqe.user_data, - req->cqe.res, req->cqe.flags, 0, 0); -} + if (req->flags & REQ_F_CQE32_INIT) { + extra1 = req->extra1; + extra2 = req->extra2; + } -static inline bool __io_fill_cqe32_req_filled(struct io_ring_ctx *ctx, - struct io_kiocb *req) -{ - struct io_uring_cqe *cqe; - u64 extra1 = req->extra1; - u64 extra2 = req->extra2; + trace_io_uring_complete(req->ctx, req, req->cqe.user_data, + req->cqe.res, req->cqe.flags, extra1, extra2); - trace_io_uring_complete(req->ctx, req, req->cqe.user_data, - req->cqe.res, req->cqe.flags, extra1, extra2); + /* + * If we can't get a cq entry, userspace overflowed the + * submission (by quite a lot). Increment the overflow count in + * the ring. + */ + cqe = io_get_cqe(ctx); + if (likely(cqe)) { + memcpy(cqe, &req->cqe, sizeof(struct io_uring_cqe)); + WRITE_ONCE(cqe->big_cqe[0], extra1); + WRITE_ONCE(cqe->big_cqe[1], extra2); + return true; + } - /* - * If we can't get a cq entry, userspace overflowed the - * submission (by quite a lot). Increment the overflow count in - * the ring. 
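/*
 * For reference alongside the unified fill helper above: with
 * IORING_SETUP_CQE32 every completion slot is 32 bytes and the extra
 * payload travels in big_cqe[]. The extra1/extra2 values are only
 * meaningful when the request was marked REQ_F_CQE32_INIT; otherwise
 * zeroes are written.
 */
struct io_uring_cqe {
	__u64	user_data;	/* sqe->user_data value passed back */
	__s32	res;		/* result code for this event */
	__u32	flags;
	/* present only on IORING_SETUP_CQE32 rings */
	__u64	big_cqe[];
};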
- */ - cqe = io_get_cqe(ctx); - if (likely(cqe)) { - memcpy(cqe, &req->cqe, sizeof(struct io_uring_cqe)); - cqe->big_cqe[0] = extra1; - cqe->big_cqe[1] = extra2; - return true; + return io_cqring_event_overflow(ctx, req->cqe.user_data, + req->cqe.res, req->cqe.flags, + extra1, extra2); } - - return io_cqring_event_overflow(ctx, req->cqe.user_data, req->cqe.res, - req->cqe.flags, extra1, extra2); } -static inline bool __io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags) -{ - trace_io_uring_complete(req->ctx, req, req->cqe.user_data, res, cflags, 0, 0); - return __io_fill_cqe(req->ctx, req->cqe.user_data, res, cflags); -} - -static inline void __io_fill_cqe32_req(struct io_kiocb *req, s32 res, u32 cflags, - u64 extra1, u64 extra2) +static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, + s32 res, u32 cflags) { - struct io_ring_ctx *ctx = req->ctx; struct io_uring_cqe *cqe; - if (WARN_ON_ONCE(!(ctx->flags & IORING_SETUP_CQE32))) - return; - if (req->flags & REQ_F_CQE_SKIP) - return; - - trace_io_uring_complete(ctx, req, req->cqe.user_data, res, cflags, - extra1, extra2); + ctx->cq_extra++; + trace_io_uring_complete(ctx, NULL, user_data, res, cflags, 0, 0); /* * If we can't get a cq entry, userspace overflowed the @@ -2537,23 +2515,17 @@ static inline void __io_fill_cqe32_req(struct io_kiocb *req, s32 res, u32 cflags */ cqe = io_get_cqe(ctx); if (likely(cqe)) { - WRITE_ONCE(cqe->user_data, req->cqe.user_data); + WRITE_ONCE(cqe->user_data, user_data); WRITE_ONCE(cqe->res, res); WRITE_ONCE(cqe->flags, cflags); - WRITE_ONCE(cqe->big_cqe[0], extra1); - WRITE_ONCE(cqe->big_cqe[1], extra2); - return; - } - io_cqring_event_overflow(ctx, req->cqe.user_data, res, cflags, extra1, extra2); -} - -static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, - s32 res, u32 cflags) -{ - ctx->cq_extra++; - trace_io_uring_complete(ctx, NULL, user_data, res, cflags, 0, 0); - return __io_fill_cqe(ctx, user_data, res, cflags); + if (ctx->flags & IORING_SETUP_CQE32) { + WRITE_ONCE(cqe->big_cqe[0], 0); + WRITE_ONCE(cqe->big_cqe[1], 0); + } + return true; + } + return io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0); } static void __io_req_complete_put(struct io_kiocb *req) @@ -2590,16 +2562,11 @@ static void __io_req_complete_put(struct io_kiocb *req) static void __io_req_complete_post(struct io_kiocb *req, s32 res, u32 cflags) { - if (!(req->flags & REQ_F_CQE_SKIP)) - __io_fill_cqe_req(req, res, cflags); - __io_req_complete_put(req); -} - -static void __io_req_complete_post32(struct io_kiocb *req, s32 res, - u32 cflags, u64 extra1, u64 extra2) -{ - if (!(req->flags & REQ_F_CQE_SKIP)) - __io_fill_cqe32_req(req, res, cflags, extra1, extra2); + if (!(req->flags & REQ_F_CQE_SKIP)) { + req->cqe.res = res; + req->cqe.flags = cflags; + __io_fill_cqe_req(req->ctx, req); + } __io_req_complete_put(req); } @@ -2614,18 +2581,6 @@ static void io_req_complete_post(struct io_kiocb *req, s32 res, u32 cflags) io_cqring_ev_posted(ctx); } -static void io_req_complete_post32(struct io_kiocb *req, s32 res, - u32 cflags, u64 extra1, u64 extra2) -{ - struct io_ring_ctx *ctx = req->ctx; - - spin_lock(&ctx->completion_lock); - __io_req_complete_post32(req, res, cflags, extra1, extra2); - io_commit_cqring(ctx); - spin_unlock(&ctx->completion_lock); - io_cqring_ev_posted(ctx); -} - static inline void io_req_complete_state(struct io_kiocb *req, s32 res, u32 cflags) { @@ -2643,19 +2598,6 @@ static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags, 
io_req_complete_post(req, res, cflags); } -static inline void __io_req_complete32(struct io_kiocb *req, - unsigned int issue_flags, s32 res, - u32 cflags, u64 extra1, u64 extra2) -{ - if (issue_flags & IO_URING_F_COMPLETE_DEFER) { - io_req_complete_state(req, res, cflags); - req->extra1 = extra1; - req->extra2 = extra2; - } else { - io_req_complete_post32(req, res, cflags, extra1, extra2); - } -} - static inline void io_req_complete(struct io_kiocb *req, s32 res) { if (res < 0) @@ -3202,12 +3144,8 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx) struct io_kiocb *req = container_of(node, struct io_kiocb, comp_list); - if (!(req->flags & REQ_F_CQE_SKIP)) { - if (!(ctx->flags & IORING_SETUP_CQE32)) - __io_fill_cqe_req_filled(ctx, req); - else - __io_fill_cqe32_req_filled(ctx, req); - } + if (!(req->flags & REQ_F_CQE_SKIP)) + __io_fill_cqe_req(ctx, req); } io_commit_cqring(ctx); @@ -3326,7 +3264,9 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin) nr_events++; if (unlikely(req->flags & REQ_F_CQE_SKIP)) continue; - __io_fill_cqe_req(req, req->cqe.res, io_put_kbuf(req, 0)); + + req->cqe.flags = io_put_kbuf(req, 0); + __io_fill_cqe_req(req->ctx, req); } if (unlikely(!nr_events)) @@ -3497,7 +3437,7 @@ static bool __io_complete_rw_common(struct io_kiocb *req, long res) if (unlikely(res != req->cqe.res)) { if ((res == -EAGAIN || res == -EOPNOTSUPP) && io_rw_should_reissue(req)) { - req->flags |= REQ_F_REISSUE; + req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO; return true; } req_set_fail(req); @@ -3547,7 +3487,7 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res) kiocb_end_write(req); if (unlikely(res != req->cqe.res)) { if (res == -EAGAIN && io_rw_should_reissue(req)) { - req->flags |= REQ_F_REISSUE; + req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO; return; } req->cqe.res = res; @@ -3677,6 +3617,20 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe) int ret; kiocb->ki_pos = READ_ONCE(sqe->off); + /* used for fixed read/write too - just read unconditionally */ + req->buf_index = READ_ONCE(sqe->buf_index); + + if (req->opcode == IORING_OP_READ_FIXED || + req->opcode == IORING_OP_WRITE_FIXED) { + struct io_ring_ctx *ctx = req->ctx; + u16 index; + + if (unlikely(req->buf_index >= ctx->nr_user_bufs)) + return -EFAULT; + index = array_index_nospec(req->buf_index, ctx->nr_user_bufs); + req->imu = ctx->user_bufs[index]; + io_req_set_rsrc_node(req, ctx, 0); + } ioprio = READ_ONCE(sqe->ioprio); if (ioprio) { @@ -3689,12 +3643,9 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe) kiocb->ki_ioprio = get_current_ioprio(); } - req->imu = NULL; req->rw.addr = READ_ONCE(sqe->addr); req->rw.len = READ_ONCE(sqe->len); req->rw.flags = READ_ONCE(sqe->rw_flags); - /* used for fixed read/write too - just read unconditionally */ - req->buf_index = READ_ONCE(sqe->buf_index); return 0; } @@ -3826,20 +3777,9 @@ static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter, unsigned int issue_flags) { - struct io_mapped_ubuf *imu = req->imu; - u16 index, buf_index = req->buf_index; - - if (likely(!imu)) { - struct io_ring_ctx *ctx = req->ctx; - - if (unlikely(buf_index >= ctx->nr_user_bufs)) - return -EFAULT; - io_req_set_rsrc_node(req, ctx, issue_flags); - index = array_index_nospec(buf_index, ctx->nr_user_bufs); - imu = READ_ONCE(ctx->user_bufs[index]); - req->imu = imu; - } - return __io_import_fixed(req, rw, iter, 
imu); + if (WARN_ON_ONCE(!req->imu)) + return -EFAULT; + return __io_import_fixed(req, rw, iter, req->imu); } static int io_buffer_add_list(struct io_ring_ctx *ctx, @@ -3876,19 +3816,17 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len, { struct io_uring_buf_ring *br = bl->buf_ring; struct io_uring_buf *buf; - __u32 head = bl->head; + __u16 head = bl->head; - if (unlikely(smp_load_acquire(&br->tail) == head)) { - io_ring_submit_unlock(req->ctx, issue_flags); + if (unlikely(smp_load_acquire(&br->tail) == head)) return NULL; - } head &= bl->mask; if (head < IO_BUFFER_LIST_BUF_PER_PAGE) { buf = &br->bufs[head]; } else { int off = head & (IO_BUFFER_LIST_BUF_PER_PAGE - 1); - int index = head / IO_BUFFER_LIST_BUF_PER_PAGE - 1; + int index = head / IO_BUFFER_LIST_BUF_PER_PAGE; buf = page_address(bl->buf_pages[index]); buf += off; } @@ -3898,7 +3836,7 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len, req->buf_list = bl; req->buf_index = buf->bid; - if (issue_flags & IO_URING_F_UNLOCKED) { + if (issue_flags & IO_URING_F_UNLOCKED || !file_can_poll(req->file)) { /* * If we came in unlocked, we have no choice but to consume the * buffer here. This does mean it'll be pinned until the IO @@ -5079,10 +5017,18 @@ void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd, req->uring_cmd.task_work_cb = task_work_cb; req->io_task_work.func = io_uring_cmd_work; - io_req_task_prio_work_add(req); + io_req_task_work_add(req); } EXPORT_SYMBOL_GPL(io_uring_cmd_complete_in_task); +static inline void io_req_set_cqe32_extra(struct io_kiocb *req, + u64 extra1, u64 extra2) +{ + req->extra1 = extra1; + req->extra2 = extra2; + req->flags |= REQ_F_CQE32_INIT; +} + /* * Called by consumers of io_uring_cmd, if they originally returned * -EIOCBQUEUED upon receiving the command. 
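/*
 * Sketch of a driver-side ->uring_cmd() completion using the helper
 * reworked in the hunk that follows (driver names are illustrative):
 * on a CQE32 ring the auxiliary value is stashed with
 * io_req_set_cqe32_extra() and surfaces in cqe->big_cqe[0]; on a
 * regular ring it is dropped.
 */
static void mydrv_cmd_complete(struct io_uring_cmd *ioucmd, int err, u64 aux)
{
	/* err becomes cqe->res, aux becomes big_cqe[0] on CQE32 rings */
	io_uring_cmd_done(ioucmd, err, aux);
}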
@@ -5093,10 +5039,10 @@ void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2) if (ret < 0) req_set_fail(req); + if (req->ctx->flags & IORING_SETUP_CQE32) - __io_req_complete32(req, 0, ret, 0, res2, 0); - else - io_req_complete(req, ret); + io_req_set_cqe32_extra(req, res2, 0); + io_req_complete(req, ret); } EXPORT_SYMBOL_GPL(io_uring_cmd_done); @@ -5258,14 +5204,6 @@ done: static int io_nop_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { - /* - * If the ring is setup with CQE32, relay back addr/addr - */ - if (req->ctx->flags & IORING_SETUP_CQE32) { - req->nop.extra1 = READ_ONCE(sqe->addr); - req->nop.extra2 = READ_ONCE(sqe->addr2); - } - return 0; } @@ -5274,23 +5212,7 @@ static int io_nop_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) */ static int io_nop(struct io_kiocb *req, unsigned int issue_flags) { - unsigned int cflags; - void __user *buf; - - if (req->flags & REQ_F_BUFFER_SELECT) { - size_t len = 1; - - buf = io_buffer_select(req, &len, issue_flags); - if (!buf) - return -ENOBUFS; - } - - cflags = io_put_kbuf(req, issue_flags); - if (!(req->ctx->flags & IORING_SETUP_CQE32)) - __io_req_complete(req, issue_flags, 0, cflags); - else - __io_req_complete32(req, issue_flags, 0, cflags, - req->nop.extra1, req->nop.extra2); + __io_req_complete(req, issue_flags, 0, 0); return 0; } @@ -5988,18 +5910,14 @@ static int io_statx(struct io_kiocb *req, unsigned int issue_flags) static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { - if (sqe->off || sqe->addr || sqe->len || sqe->buf_index) + if (sqe->off || sqe->addr || sqe->len || sqe->rw_flags || sqe->buf_index) return -EINVAL; if (req->flags & REQ_F_FIXED_FILE) return -EBADF; req->close.fd = READ_ONCE(sqe->fd); req->close.file_slot = READ_ONCE(sqe->file_index); - req->close.flags = READ_ONCE(sqe->close_flags); - if (req->close.flags & ~IORING_CLOSE_FD_AND_FILE_SLOT) - return -EINVAL; - if (!(req->close.flags & IORING_CLOSE_FD_AND_FILE_SLOT) && - req->close.file_slot && req->close.fd) + if (req->close.file_slot && req->close.fd) return -EINVAL; return 0; @@ -6015,8 +5933,7 @@ static int io_close(struct io_kiocb *req, unsigned int issue_flags) if (req->close.file_slot) { ret = io_close_fixed(req, issue_flags); - if (ret || !(req->close.flags & IORING_CLOSE_FD_AND_FILE_SLOT)) - goto err; + goto err; } spin_lock(&files->file_lock); @@ -6160,8 +6077,6 @@ static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) if (unlikely(sqe->file_index)) return -EINVAL; - if (unlikely(sqe->addr2 || sqe->file_index)) - return -EINVAL; sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr)); sr->len = READ_ONCE(sqe->len); @@ -6398,8 +6313,6 @@ static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) if (unlikely(sqe->file_index)) return -EINVAL; - if (unlikely(sqe->addr2 || sqe->file_index)) - return -EINVAL; sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr)); sr->len = READ_ONCE(sqe->len); @@ -7037,7 +6950,8 @@ static void io_apoll_task_func(struct io_kiocb *req, bool *locked) io_req_complete_failed(req, ret); } -static void __io_poll_execute(struct io_kiocb *req, int mask, __poll_t events) +static void __io_poll_execute(struct io_kiocb *req, int mask, + __poll_t __maybe_unused events) { req->cqe.res = mask; /* @@ -7046,7 +6960,6 @@ static void __io_poll_execute(struct io_kiocb *req, int mask, __poll_t events) * CPU. We want to avoid pulling in req->apoll->events for that * case. 
*/ - req->apoll_events = events; if (req->opcode == IORING_OP_POLL_ADD) req->io_task_work.func = io_poll_task_func; else @@ -7197,6 +7110,8 @@ static int __io_arm_poll_handler(struct io_kiocb *req, io_init_poll_iocb(poll, mask, io_poll_wake); poll->file = req->file; + req->apoll_events = poll->events; + ipt->pt._key = mask; ipt->req = req; ipt->error = 0; @@ -7227,8 +7142,11 @@ static int __io_arm_poll_handler(struct io_kiocb *req, if (mask) { /* can't multishot if failed, just queue the event we've got */ - if (unlikely(ipt->error || !ipt->nr_entries)) + if (unlikely(ipt->error || !ipt->nr_entries)) { poll->events |= EPOLLONESHOT; + req->apoll_events |= EPOLLONESHOT; + ipt->error = 0; + } __io_poll_execute(req, mask, poll->events); return 0; } @@ -7290,6 +7208,7 @@ static int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags) mask |= EPOLLEXCLUSIVE; if (req->flags & REQ_F_POLLED) { apoll = req->apoll; + kfree(apoll->double_poll); } else if (!(issue_flags & IO_URING_F_UNLOCKED) && !list_empty(&ctx->apoll_cache)) { apoll = list_first_entry(&ctx->apoll_cache, struct async_poll, @@ -7475,7 +7394,7 @@ static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe return -EINVAL; io_req_set_refcount(req); - req->apoll_events = poll->events = io_poll_parse_events(sqe, flags); + poll->events = io_poll_parse_events(sqe, flags); return 0; } @@ -7488,6 +7407,8 @@ static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags) ipt.pt._qproc = io_poll_queue_proc; ret = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events); + if (!ret && ipt.error) + req_set_fail(req); ret = ret ?: ipt.error; if (ret) __io_req_complete(req, issue_flags, ret, 0); @@ -8063,8 +7984,8 @@ static int io_files_update_with_index_alloc(struct io_kiocb *req, if (ret < 0) break; if (copy_to_user(&fds[done], &ret, sizeof(ret))) { - ret = -EFAULT; __io_close_fixed(req, issue_flags, ret); + ret = -EFAULT; break; } } @@ -8773,6 +8694,7 @@ static void io_queue_async(struct io_kiocb *req, int ret) * Queued up for async execution, worker will release * submit reference when the iocb is actually submitted. */ + io_kbuf_recycle(req, 0); io_queue_iowq(req, NULL); break; case IO_APOLL_OK: @@ -9788,11 +9710,19 @@ static void __io_sqe_files_unregister(struct io_ring_ctx *ctx) static int io_sqe_files_unregister(struct io_ring_ctx *ctx) { + unsigned nr = ctx->nr_user_files; int ret; if (!ctx->file_data) return -ENXIO; + + /* + * Quiesce may unlock ->uring_lock, and while it's not held + * prevent new requests using the table. + */ + ctx->nr_user_files = 0; ret = io_rsrc_ref_quiesce(ctx->file_data, ctx); + ctx->nr_user_files = nr; if (!ret) __io_sqe_files_unregister(ctx); return ret; @@ -10690,12 +10620,19 @@ static void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx) static int io_sqe_buffers_unregister(struct io_ring_ctx *ctx) { + unsigned nr = ctx->nr_user_bufs; int ret; if (!ctx->buf_data) return -ENXIO; + /* + * Quiesce may unlock ->uring_lock, and while it's not held + * prevent new requests using the table. 
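/*
 * Shape of the trick the files and buffers unregister hunks here rely
 * on: io_rsrc_ref_quiesce() may drop ->uring_lock, so the table length
 * is temporarily published as zero. Any request submitted in that
 * window sees an empty table and fails its lookup instead of touching
 * entries that are being torn down.
 */
	unsigned nr = ctx->nr_user_files;

	ctx->nr_user_files = 0;		/* concurrent lookups fail fast */
	ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
	ctx->nr_user_files = nr;	/* restore for the real teardown */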
+ */ + ctx->nr_user_bufs = 0; ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx); + ctx->nr_user_bufs = nr; if (!ret) __io_sqe_buffers_unregister(ctx); return ret; @@ -13002,6 +12939,10 @@ static int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg) if (!is_power_of_2(reg.ring_entries)) return -EINVAL; + /* cannot disambiguate full vs empty due to head/tail size */ + if (reg.ring_entries >= 65536) + return -EINVAL; + if (unlikely(reg.bgid < BGID_ARRAY && !ctx->io_bl)) { int ret = io_init_bl_list(ctx); if (ret) diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index e49bb0938376..e9c308ae475f 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -2114,7 +2114,7 @@ out: /** * jbd2_journal_try_to_free_buffers() - try to free page buffers. * @journal: journal for operation - * @page: to try and free + * @folio: Folio to detach data from. * * For all the buffers on this page, * if they are fully written out ordered data, move them onto BUF_CLEAN diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c index 8742d22dfd2b..42f892c5712e 100644 --- a/fs/netfs/buffered_read.c +++ b/fs/netfs/buffered_read.c @@ -155,7 +155,7 @@ static void netfs_rreq_expand(struct netfs_io_request *rreq, void netfs_readahead(struct readahead_control *ractl) { struct netfs_io_request *rreq; - struct netfs_i_context *ctx = netfs_i_context(ractl->mapping->host); + struct netfs_inode *ctx = netfs_inode(ractl->mapping->host); int ret; _enter("%lx,%x", readahead_index(ractl), readahead_count(ractl)); @@ -215,7 +215,7 @@ int netfs_read_folio(struct file *file, struct folio *folio) { struct address_space *mapping = folio_file_mapping(folio); struct netfs_io_request *rreq; - struct netfs_i_context *ctx = netfs_i_context(mapping->host); + struct netfs_inode *ctx = netfs_inode(mapping->host); int ret; _enter("%lx", folio_index(folio)); @@ -297,6 +297,7 @@ zero_out: /** * netfs_write_begin - Helper to prepare for writing + * @ctx: The netfs context * @file: The file to read from * @mapping: The mapping to read from * @pos: File position at which the write will begin @@ -326,12 +327,12 @@ zero_out: * * This is usable whether or not caching is enabled. */ -int netfs_write_begin(struct file *file, struct address_space *mapping, +int netfs_write_begin(struct netfs_inode *ctx, + struct file *file, struct address_space *mapping, loff_t pos, unsigned int len, struct folio **_folio, void **_fsdata) { struct netfs_io_request *rreq; - struct netfs_i_context *ctx = netfs_i_context(file_inode(file )); struct folio *folio; unsigned int fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE; pgoff_t index = pos >> PAGE_SHIFT; diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h index b7b0e3d18d9e..43fac1b14e40 100644 --- a/fs/netfs/internal.h +++ b/fs/netfs/internal.h @@ -91,7 +91,7 @@ static inline void netfs_stat_d(atomic_t *stat) /* * Miscellaneous functions. */ -static inline bool netfs_is_cache_enabled(struct netfs_i_context *ctx) +static inline bool netfs_is_cache_enabled(struct netfs_inode *ctx) { #if IS_ENABLED(CONFIG_FSCACHE) struct fscache_cookie *cookie = ctx->cache; diff --git a/fs/netfs/objects.c b/fs/netfs/objects.c index e86107b30ba4..e17cdf53f6a7 100644 --- a/fs/netfs/objects.c +++ b/fs/netfs/objects.c @@ -18,7 +18,7 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping, { static atomic_t debug_ids; struct inode *inode = file ? 
file_inode(file) : mapping->host; - struct netfs_i_context *ctx = netfs_i_context(inode); + struct netfs_inode *ctx = netfs_inode(inode); struct netfs_io_request *rreq; int ret; @@ -75,10 +75,10 @@ static void netfs_free_request(struct work_struct *work) struct netfs_io_request *rreq = container_of(work, struct netfs_io_request, work); - netfs_clear_subrequests(rreq, false); - if (rreq->netfs_priv) - rreq->netfs_ops->cleanup(rreq->mapping, rreq->netfs_priv); trace_netfs_rreq(rreq, netfs_rreq_trace_free); + netfs_clear_subrequests(rreq, false); + if (rreq->netfs_ops->free_request) + rreq->netfs_ops->free_request(rreq); if (rreq->cache_resources.ops) rreq->cache_resources.ops->end_operation(&rreq->cache_resources); kfree(rreq); diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c index c8520284dda7..c1eda73254e1 100644 --- a/fs/nfs/callback_proc.c +++ b/fs/nfs/callback_proc.c @@ -288,6 +288,7 @@ static u32 initiate_file_draining(struct nfs_client *clp, rv = NFS4_OK; break; case -ENOENT: + set_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags); /* Embrace your forgetfulness! */ rv = NFS4ERR_NOMATCHING_LAYOUT; diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index a8ecdd527662..0c4e8dd6aa96 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -2124,6 +2124,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry, } goto out; } + file->f_mode |= FMODE_CAN_ODIRECT; err = nfs_finish_open(ctx, ctx->dentry, file, open_flags); trace_nfs_atomic_open_exit(dir, ctx, open_flags, err); diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c index 03d3a270eff4..e88f6b18445e 100644 --- a/fs/nfs/nfs4file.c +++ b/fs/nfs/nfs4file.c @@ -93,6 +93,7 @@ nfs4_file_open(struct inode *inode, struct file *filp) nfs_file_set_open_context(filp, ctx); nfs_fscache_open_file(inode, filp); err = 0; + filp->f_mode |= FMODE_CAN_ODIRECT; out_put_ctx: put_nfs_open_context(ctx); diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 68a87be3e6f9..41a9b6b58fb9 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -469,6 +469,7 @@ pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr *lo, pnfs_clear_lseg_state(lseg, lseg_list); pnfs_clear_layoutreturn_info(lo); pnfs_free_returned_lsegs(lo, lseg_list, &range, 0); + set_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags); if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags) && !test_and_set_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) pnfs_clear_layoutreturn_waitbit(lo); @@ -1917,8 +1918,9 @@ static void nfs_layoutget_begin(struct pnfs_layout_hdr *lo) static void nfs_layoutget_end(struct pnfs_layout_hdr *lo) { - if (atomic_dec_and_test(&lo->plh_outstanding)) - wake_up_var(&lo->plh_outstanding); + if (atomic_dec_and_test(&lo->plh_outstanding) && + test_and_clear_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags)) + wake_up_bit(&lo->plh_flags, NFS_LAYOUT_DRAIN); } static bool pnfs_is_first_layoutget(struct pnfs_layout_hdr *lo) @@ -2025,11 +2027,11 @@ lookup_again: * If the layout segment list is empty, but there are outstanding * layoutget calls, then they might be subject to a layoutrecall. 
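/*
 * Shape of the wait/wake pairing the pnfs hunks above move to. An
 * explicit NFS_LAYOUT_DRAIN bit makes "a recall is draining the
 * outstanding layoutgets" a directly testable condition instead of one
 * inferred from plh_outstanding and layout validity; the error
 * handling below is illustrative.
 */
	/* waiter side */
	if (wait_on_bit(&lo->plh_flags, NFS_LAYOUT_DRAIN, TASK_KILLABLE))
		return -ERESTARTSYS;

	/* waker side, when the last outstanding layoutget finishes */
	if (atomic_dec_and_test(&lo->plh_outstanding) &&
	    test_and_clear_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags))
		wake_up_bit(&lo->plh_flags, NFS_LAYOUT_DRAIN);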
*/ - if ((list_empty(&lo->plh_segs) || !pnfs_layout_is_valid(lo)) && + if (test_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags) && atomic_read(&lo->plh_outstanding) != 0) { spin_unlock(&ino->i_lock); - lseg = ERR_PTR(wait_var_event_killable(&lo->plh_outstanding, - !atomic_read(&lo->plh_outstanding))); + lseg = ERR_PTR(wait_on_bit(&lo->plh_flags, NFS_LAYOUT_DRAIN, + TASK_KILLABLE)); if (IS_ERR(lseg)) goto out_put_layout_hdr; pnfs_put_layout_hdr(lo); @@ -2152,6 +2154,12 @@ lookup_again: case -ERECALLCONFLICT: case -EAGAIN: break; + case -ENODATA: + /* The server returned NFS4ERR_LAYOUTUNAVAILABLE */ + pnfs_layout_set_fail_bit( + lo, pnfs_iomode_to_fail_bit(iomode)); + lseg = NULL; + goto out_put_layout_hdr; default: if (!nfs_error_is_fatal(PTR_ERR(lseg))) { pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode)); @@ -2407,7 +2415,8 @@ pnfs_layout_process(struct nfs4_layoutget *lgp) goto out_forget; } - if (!pnfs_layout_is_valid(lo) && !pnfs_is_first_layoutget(lo)) + if (test_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags) && + !pnfs_is_first_layoutget(lo)) goto out_forget; if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) { diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 07f11489e4e9..f331f067691b 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -105,6 +105,7 @@ enum { NFS_LAYOUT_FIRST_LAYOUTGET, /* Serialize first layoutget */ NFS_LAYOUT_INODE_FREEING, /* The inode is being freed */ NFS_LAYOUT_HASHED, /* The layout visible */ + NFS_LAYOUT_DRAIN, }; enum layoutdriver_policy_flags { diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c index f172412447f5..9cb2d590c036 100644 --- a/fs/nfsd/filecache.c +++ b/fs/nfsd/filecache.c @@ -309,11 +309,12 @@ nfsd_file_put(struct nfsd_file *nf) if (test_bit(NFSD_FILE_HASHED, &nf->nf_flags) == 0) { nfsd_file_flush(nf); nfsd_file_put_noref(nf); - } else { + } else if (nf->nf_file) { nfsd_file_put_noref(nf); - if (nf->nf_file) - nfsd_file_schedule_laundrette(); - } + nfsd_file_schedule_laundrette(); + } else + nfsd_file_put_noref(nf); + if (atomic_long_read(&nfsd_filecache_count) >= NFSD_FILE_LRU_LIMIT) nfsd_file_gc(); } diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index a74aef99bd3d..09d1307959d0 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -79,6 +79,7 @@ #include <linux/capability.h> #include <linux/quotaops.h> #include <linux/blkdev.h> +#include <linux/sched/mm.h> #include "../internal.h" /* ugh */ #include <linux/uaccess.h> @@ -425,9 +426,11 @@ EXPORT_SYMBOL(mark_info_dirty); int dquot_acquire(struct dquot *dquot) { int ret = 0, ret2 = 0; + unsigned int memalloc; struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); mutex_lock(&dquot->dq_lock); + memalloc = memalloc_nofs_save(); if (!test_bit(DQ_READ_B, &dquot->dq_flags)) { ret = dqopt->ops[dquot->dq_id.type]->read_dqblk(dquot); if (ret < 0) @@ -458,6 +461,7 @@ int dquot_acquire(struct dquot *dquot) smp_mb__before_atomic(); set_bit(DQ_ACTIVE_B, &dquot->dq_flags); out_iolock: + memalloc_nofs_restore(memalloc); mutex_unlock(&dquot->dq_lock); return ret; } @@ -469,9 +473,11 @@ EXPORT_SYMBOL(dquot_acquire); int dquot_commit(struct dquot *dquot) { int ret = 0; + unsigned int memalloc; struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); mutex_lock(&dquot->dq_lock); + memalloc = memalloc_nofs_save(); if (!clear_dquot_dirty(dquot)) goto out_lock; /* Inactive dquot can be only if there was error during read/init @@ -481,6 +487,7 @@ int dquot_commit(struct dquot *dquot) else ret = -EIO; out_lock: + memalloc_nofs_restore(memalloc); mutex_unlock(&dquot->dq_lock); return ret; } @@ -492,9 +499,11 @@ 
EXPORT_SYMBOL(dquot_commit); int dquot_release(struct dquot *dquot) { int ret = 0, ret2 = 0; + unsigned int memalloc; struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); mutex_lock(&dquot->dq_lock); + memalloc = memalloc_nofs_save(); /* Check whether we are not racing with some other dqget() */ if (dquot_is_busy(dquot)) goto out_dqlock; @@ -510,6 +519,7 @@ int dquot_release(struct dquot *dquot) } clear_bit(DQ_ACTIVE_B, &dquot->dq_flags); out_dqlock: + memalloc_nofs_restore(memalloc); mutex_unlock(&dquot->dq_lock); return ret; } diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c index de7252715b12..81d26abf486f 100644 --- a/fs/tracefs/inode.c +++ b/fs/tracefs/inode.c @@ -553,7 +553,7 @@ struct dentry *tracefs_create_dir(const char *name, struct dentry *parent) * * Only one instances directory is allowed. * - * The instances directory is special as it allows for mkdir and rmdir to + * The instances directory is special as it allows for mkdir and rmdir * to be done by userspace. When a mkdir or rmdir is performed, the inode * locks are released and the methods passed in (@mkdir and @rmdir) are * called without locks and with the name of the directory being created diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c index 836ab1b8ed7b..1824f61621a2 100644 --- a/fs/xfs/libxfs/xfs_attr.c +++ b/fs/xfs/libxfs/xfs_attr.c @@ -997,9 +997,11 @@ xfs_attr_set( /* * We have no control over the attribute names that userspace passes us * to remove, so we have to allow the name lookup prior to attribute - * removal to fail as well. + * removal to fail as well. Preserve the logged flag, since we need + * to pass that through to the logging code. */ - args->op_flags = XFS_DA_OP_OKNOENT; + args->op_flags = XFS_DA_OP_OKNOENT | + (args->op_flags & XFS_DA_OP_LOGGED); if (args->value) { XFS_STATS_INC(mp, xs_attr_set); @@ -1439,12 +1441,11 @@ static int xfs_attr_node_try_addname( struct xfs_attr_intent *attr) { - struct xfs_da_args *args = attr->xattri_da_args; struct xfs_da_state *state = attr->xattri_da_state; struct xfs_da_state_blk *blk; int error; - trace_xfs_attr_node_addname(args); + trace_xfs_attr_node_addname(state->args); blk = &state->path.blk[state->path.active-1]; ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC); diff --git a/fs/xfs/libxfs/xfs_attr.h b/fs/xfs/libxfs/xfs_attr.h index e329da3e7afa..b4a2fc77017e 100644 --- a/fs/xfs/libxfs/xfs_attr.h +++ b/fs/xfs/libxfs/xfs_attr.h @@ -28,16 +28,6 @@ struct xfs_attr_list_context; */ #define ATTR_MAX_VALUELEN (64*1024) /* max length of a value */ -static inline bool xfs_has_larp(struct xfs_mount *mp) -{ -#ifdef DEBUG - /* Logged xattrs require a V5 super for log_incompat */ - return xfs_has_crc(mp) && xfs_globals.larp; -#else - return false; -#endif -} - /* * Kernel-internal version of the attrlist cursor. 
*/ @@ -624,7 +614,7 @@ static inline enum xfs_delattr_state xfs_attr_init_replace_state(struct xfs_da_args *args) { args->op_flags |= XFS_DA_OP_ADDNAME | XFS_DA_OP_REPLACE; - if (xfs_has_larp(args->dp->i_mount)) + if (args->op_flags & XFS_DA_OP_LOGGED) return xfs_attr_init_remove_state(args); return xfs_attr_init_add_state(args); } diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c index 15a990409463..37e7c33f6283 100644 --- a/fs/xfs/libxfs/xfs_attr_leaf.c +++ b/fs/xfs/libxfs/xfs_attr_leaf.c @@ -1530,7 +1530,7 @@ xfs_attr3_leaf_add_work( if (tmp) entry->flags |= XFS_ATTR_LOCAL; if (args->op_flags & XFS_DA_OP_REPLACE) { - if (!xfs_has_larp(mp)) + if (!(args->op_flags & XFS_DA_OP_LOGGED)) entry->flags |= XFS_ATTR_INCOMPLETE; if ((args->blkno2 == args->blkno) && (args->index2 <= args->index)) { diff --git a/fs/xfs/libxfs/xfs_da_btree.h b/fs/xfs/libxfs/xfs_da_btree.h index d33b7686a0b3..ffa3df5b2893 100644 --- a/fs/xfs/libxfs/xfs_da_btree.h +++ b/fs/xfs/libxfs/xfs_da_btree.h @@ -92,6 +92,7 @@ typedef struct xfs_da_args { #define XFS_DA_OP_NOTIME (1u << 5) /* don't update inode timestamps */ #define XFS_DA_OP_REMOVE (1u << 6) /* this is a remove operation */ #define XFS_DA_OP_RECOVERY (1u << 7) /* Log recovery operation */ +#define XFS_DA_OP_LOGGED (1u << 8) /* Use intent items to track op */ #define XFS_DA_OP_FLAGS \ { XFS_DA_OP_JUSTCHECK, "JUSTCHECK" }, \ @@ -101,7 +102,8 @@ typedef struct xfs_da_args { { XFS_DA_OP_CILOOKUP, "CILOOKUP" }, \ { XFS_DA_OP_NOTIME, "NOTIME" }, \ { XFS_DA_OP_REMOVE, "REMOVE" }, \ - { XFS_DA_OP_RECOVERY, "RECOVERY" } + { XFS_DA_OP_RECOVERY, "RECOVERY" }, \ + { XFS_DA_OP_LOGGED, "LOGGED" } /* * Storage for holding state during Btree searches and split/join ops. diff --git a/fs/xfs/xfs_attr_item.c b/fs/xfs/xfs_attr_item.c index 4a28c2d77070..135d44133477 100644 --- a/fs/xfs/xfs_attr_item.c +++ b/fs/xfs/xfs_attr_item.c @@ -413,18 +413,20 @@ xfs_attr_create_intent( struct xfs_mount *mp = tp->t_mountp; struct xfs_attri_log_item *attrip; struct xfs_attr_intent *attr; + struct xfs_da_args *args; ASSERT(count == 1); - if (!xfs_sb_version_haslogxattrs(&mp->m_sb)) - return NULL; - /* * Each attr item only performs one attribute operation at a time, so * this is a list of one */ attr = list_first_entry_or_null(items, struct xfs_attr_intent, xattri_list); + args = attr->xattri_da_args; + + if (!(args->op_flags & XFS_DA_OP_LOGGED)) + return NULL; /* * Create a buffer to store the attribute name and value. This buffer @@ -432,8 +434,6 @@ xfs_attr_create_intent( * and the lower level xattr log items. */ if (!attr->xattri_nameval) { - struct xfs_da_args *args = attr->xattri_da_args; - /* * Transfer our reference to the name/value buffer to the * deferred work state structure. 
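/*
 * Caller-side shape of the XFS change above: whether an attr operation
 * uses logged (LARP) mode is decided once, up front, and carried in
 * args->op_flags, rather than re-sampled from the xfs_globals.larp
 * debug tunable, which could flip in the middle of an operation. Log
 * recovery sets XFS_DA_OP_LOGGED unconditionally, since a recovered
 * intent item is by definition a logged operation.
 */
	if (xfs_attr_want_log_assist(mp)) {
		error = xfs_attr_grab_log_assist(mp);
		if (error)
			return error;
		args->op_flags |= XFS_DA_OP_LOGGED;
		use_logging = true;
	}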
@@ -617,7 +617,10 @@ xfs_attri_item_recover( args->namelen = nv->name.i_len; args->hashval = xfs_da_hashname(args->name, args->namelen); args->attr_filter = attrp->alfi_attr_filter & XFS_ATTRI_FILTER_MASK; - args->op_flags = XFS_DA_OP_RECOVERY | XFS_DA_OP_OKNOENT; + args->op_flags = XFS_DA_OP_RECOVERY | XFS_DA_OP_OKNOENT | + XFS_DA_OP_LOGGED; + + ASSERT(xfs_sb_version_haslogxattrs(&mp->m_sb)); switch (attr->xattri_op_flags) { case XFS_ATTRI_OP_FLAGS_SET: diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c index 5a364a7d58fd..0d67ff8a8961 100644 --- a/fs/xfs/xfs_ioctl.c +++ b/fs/xfs/xfs_ioctl.c @@ -1096,7 +1096,8 @@ xfs_flags2diflags2( { uint64_t di_flags2 = (ip->i_diflags2 & (XFS_DIFLAG2_REFLINK | - XFS_DIFLAG2_BIGTIME)); + XFS_DIFLAG2_BIGTIME | + XFS_DIFLAG2_NREXT64)); if (xflags & FS_XFLAG_DAX) di_flags2 |= XFS_DIFLAG2_DAX; diff --git a/fs/xfs/xfs_xattr.c b/fs/xfs/xfs_xattr.c index 35e13e125ec6..c325a28b89a8 100644 --- a/fs/xfs/xfs_xattr.c +++ b/fs/xfs/xfs_xattr.c @@ -68,6 +68,18 @@ xfs_attr_rele_log_assist( xlog_drop_incompat_feat(mp->m_log); } +static inline bool +xfs_attr_want_log_assist( + struct xfs_mount *mp) +{ +#ifdef DEBUG + /* Logged xattrs require a V5 super for log_incompat */ + return xfs_has_crc(mp) && xfs_globals.larp; +#else + return false; +#endif +} + /* * Set or remove an xattr, having grabbed the appropriate logging resources * prior to calling libxfs. @@ -80,11 +92,14 @@ xfs_attr_change( bool use_logging = false; int error; - if (xfs_has_larp(mp)) { + ASSERT(!(args->op_flags & XFS_DA_OP_LOGGED)); + + if (xfs_attr_want_log_assist(mp)) { error = xfs_attr_grab_log_assist(mp); if (error) return error; + args->op_flags |= XFS_DA_OP_LOGGED; use_logging = true; } diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c index bcb21aea990a..053299758deb 100644 --- a/fs/zonefs/super.c +++ b/fs/zonefs/super.c @@ -110,15 +110,51 @@ static inline void zonefs_i_size_write(struct inode *inode, loff_t isize) } } -static int zonefs_iomap_begin(struct inode *inode, loff_t offset, loff_t length, - unsigned int flags, struct iomap *iomap, - struct iomap *srcmap) +static int zonefs_read_iomap_begin(struct inode *inode, loff_t offset, + loff_t length, unsigned int flags, + struct iomap *iomap, struct iomap *srcmap) { struct zonefs_inode_info *zi = ZONEFS_I(inode); struct super_block *sb = inode->i_sb; loff_t isize; - /* All I/Os should always be within the file maximum size */ + /* + * All blocks are always mapped below EOF. If reading past EOF, + * act as if there is a hole up to the file maximum size. 
+ */ + mutex_lock(&zi->i_truncate_mutex); + iomap->bdev = inode->i_sb->s_bdev; + iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize); + isize = i_size_read(inode); + if (iomap->offset >= isize) { + iomap->type = IOMAP_HOLE; + iomap->addr = IOMAP_NULL_ADDR; + iomap->length = length; + } else { + iomap->type = IOMAP_MAPPED; + iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset; + iomap->length = isize - iomap->offset; + } + mutex_unlock(&zi->i_truncate_mutex); + + trace_zonefs_iomap_begin(inode, iomap); + + return 0; +} + +static const struct iomap_ops zonefs_read_iomap_ops = { + .iomap_begin = zonefs_read_iomap_begin, +}; + +static int zonefs_write_iomap_begin(struct inode *inode, loff_t offset, + loff_t length, unsigned int flags, + struct iomap *iomap, struct iomap *srcmap) +{ + struct zonefs_inode_info *zi = ZONEFS_I(inode); + struct super_block *sb = inode->i_sb; + loff_t isize; + + /* All write I/Os should always be within the file maximum size */ if (WARN_ON_ONCE(offset + length > zi->i_max_size)) return -EIO; @@ -128,7 +164,7 @@ static int zonefs_iomap_begin(struct inode *inode, loff_t offset, loff_t length, * operation. */ if (WARN_ON_ONCE(zi->i_ztype == ZONEFS_ZTYPE_SEQ && - (flags & IOMAP_WRITE) && !(flags & IOMAP_DIRECT))) + !(flags & IOMAP_DIRECT))) return -EIO; /* @@ -137,47 +173,44 @@ static int zonefs_iomap_begin(struct inode *inode, loff_t offset, loff_t length, * write pointer) and unwriten beyond. */ mutex_lock(&zi->i_truncate_mutex); + iomap->bdev = inode->i_sb->s_bdev; + iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize); + iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset; isize = i_size_read(inode); - if (offset >= isize) + if (iomap->offset >= isize) { iomap->type = IOMAP_UNWRITTEN; - else + iomap->length = zi->i_max_size - iomap->offset; + } else { iomap->type = IOMAP_MAPPED; - if (flags & IOMAP_WRITE) - length = zi->i_max_size - offset; - else - length = min(length, isize - offset); + iomap->length = isize - iomap->offset; + } mutex_unlock(&zi->i_truncate_mutex); - iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize); - iomap->length = ALIGN(offset + length, sb->s_blocksize) - iomap->offset; - iomap->bdev = inode->i_sb->s_bdev; - iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset; - trace_zonefs_iomap_begin(inode, iomap); return 0; } -static const struct iomap_ops zonefs_iomap_ops = { - .iomap_begin = zonefs_iomap_begin, +static const struct iomap_ops zonefs_write_iomap_ops = { + .iomap_begin = zonefs_write_iomap_begin, }; static int zonefs_read_folio(struct file *unused, struct folio *folio) { - return iomap_read_folio(folio, &zonefs_iomap_ops); + return iomap_read_folio(folio, &zonefs_read_iomap_ops); } static void zonefs_readahead(struct readahead_control *rac) { - iomap_readahead(rac, &zonefs_iomap_ops); + iomap_readahead(rac, &zonefs_read_iomap_ops); } /* * Map blocks for page writeback. This is used only on conventional zone files, * which implies that the page range can only be within the fixed inode size. 
*/ -static int zonefs_map_blocks(struct iomap_writepage_ctx *wpc, - struct inode *inode, loff_t offset) +static int zonefs_write_map_blocks(struct iomap_writepage_ctx *wpc, + struct inode *inode, loff_t offset) { struct zonefs_inode_info *zi = ZONEFS_I(inode); @@ -191,12 +224,12 @@ static int zonefs_map_blocks(struct iomap_writepage_ctx *wpc, offset < wpc->iomap.offset + wpc->iomap.length) return 0; - return zonefs_iomap_begin(inode, offset, zi->i_max_size - offset, - IOMAP_WRITE, &wpc->iomap, NULL); + return zonefs_write_iomap_begin(inode, offset, zi->i_max_size - offset, + IOMAP_WRITE, &wpc->iomap, NULL); } static const struct iomap_writeback_ops zonefs_writeback_ops = { - .map_blocks = zonefs_map_blocks, + .map_blocks = zonefs_write_map_blocks, }; static int zonefs_writepage(struct page *page, struct writeback_control *wbc) @@ -226,7 +259,8 @@ static int zonefs_swap_activate(struct swap_info_struct *sis, return -EINVAL; } - return iomap_swapfile_activate(sis, swap_file, span, &zonefs_iomap_ops); + return iomap_swapfile_activate(sis, swap_file, span, + &zonefs_read_iomap_ops); } static const struct address_space_operations zonefs_file_aops = { @@ -647,7 +681,7 @@ static vm_fault_t zonefs_filemap_page_mkwrite(struct vm_fault *vmf) /* Serialize against truncates */ filemap_invalidate_lock_shared(inode->i_mapping); - ret = iomap_page_mkwrite(vmf, &zonefs_iomap_ops); + ret = iomap_page_mkwrite(vmf, &zonefs_write_iomap_ops); filemap_invalidate_unlock_shared(inode->i_mapping); sb_end_pagefault(inode->i_sb); @@ -899,7 +933,7 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from) if (append) ret = zonefs_file_dio_append(iocb, from); else - ret = iomap_dio_rw(iocb, from, &zonefs_iomap_ops, + ret = iomap_dio_rw(iocb, from, &zonefs_write_iomap_ops, &zonefs_write_dio_ops, 0, NULL, 0); if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && (ret > 0 || ret == -EIOCBQUEUED)) { @@ -948,7 +982,7 @@ static ssize_t zonefs_file_buffered_write(struct kiocb *iocb, if (ret <= 0) goto inode_unlock; - ret = iomap_file_buffered_write(iocb, from, &zonefs_iomap_ops); + ret = iomap_file_buffered_write(iocb, from, &zonefs_write_iomap_ops); if (ret > 0) iocb->ki_pos += ret; else if (ret == -EIO) @@ -1041,7 +1075,7 @@ static ssize_t zonefs_file_read_iter(struct kiocb *iocb, struct iov_iter *to) goto inode_unlock; } file_accessed(iocb->ki_filp); - ret = iomap_dio_rw(iocb, to, &zonefs_iomap_ops, + ret = iomap_dio_rw(iocb, to, &zonefs_read_iomap_ops, &zonefs_read_dio_ops, 0, NULL, 0); } else { ret = generic_file_read_iter(iocb, to); @@ -1085,7 +1119,8 @@ static int zonefs_seq_file_write_open(struct inode *inode) if (sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) { - if (wro > sbi->s_max_wro_seq_files) { + if (sbi->s_max_wro_seq_files + && wro > sbi->s_max_wro_seq_files) { atomic_dec(&sbi->s_wro_seq_files); ret = -EBUSY; goto unlock; @@ -1760,12 +1795,6 @@ static int zonefs_fill_super(struct super_block *sb, void *data, int silent) atomic_set(&sbi->s_wro_seq_files, 0); sbi->s_max_wro_seq_files = bdev_max_open_zones(sb->s_bdev); - if (!sbi->s_max_wro_seq_files && - sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) { - zonefs_info(sb, "No open zones limit. 
Ignoring explicit_open mount option\n"); - sbi->s_mount_opts &= ~ZONEFS_MNTOPT_EXPLICIT_OPEN; - } - atomic_set(&sbi->s_active_seq_files, 0); sbi->s_max_active_seq_files = bdev_max_active_zones(sb->s_bdev); @@ -1790,6 +1819,14 @@ static int zonefs_fill_super(struct super_block *sb, void *data, int silent) zonefs_info(sb, "Mounting %u zones", blkdev_nr_zones(sb->s_bdev->bd_disk)); + if (!sbi->s_max_wro_seq_files && + !sbi->s_max_active_seq_files && + sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) { + zonefs_info(sb, + "No open and active zone limits. Ignoring explicit_open mount option\n"); + sbi->s_mount_opts &= ~ZONEFS_MNTOPT_EXPLICIT_OPEN; + } + /* Create root directory inode */ ret = -ENOMEM; inode = new_inode(sb); diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild index 302506bbc2a4..8e47d483b524 100644 --- a/include/asm-generic/Kbuild +++ b/include/asm-generic/Kbuild @@ -44,6 +44,7 @@ mandatory-y += msi.h mandatory-y += pci.h mandatory-y += percpu.h mandatory-y += pgalloc.h +mandatory-y += platform-feature.h mandatory-y += preempt.h mandatory-y += rwonce.h mandatory-y += sections.h diff --git a/include/asm-generic/platform-feature.h b/include/asm-generic/platform-feature.h new file mode 100644 index 000000000000..4b0af3d51588 --- /dev/null +++ b/include/asm-generic/platform-feature.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_PLATFORM_FEATURE_H +#define _ASM_GENERIC_PLATFORM_FEATURE_H + +/* Number of arch specific feature flags. */ +#define PLATFORM_ARCH_FEAT_N 0 + +#endif /* _ASM_GENERIC_PLATFORM_FEATURE_H */ diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index 0777725085df..10b1990bc1f6 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -1022,6 +1022,7 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p); for ((__i) = 0; \ (__i) < (__state)->num_private_objs && \ ((obj) = (__state)->private_objs[__i].ptr, \ + (void)(obj) /* Only to avoid unused-but-set-variable warning */, \ (new_obj_state) = (__state)->private_objs[__i].new_state, 1); \ (__i)++) diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h index 441653693970..ca89a48c2460 100644 --- a/include/drm/ttm/ttm_resource.h +++ b/include/drm/ttm/ttm_resource.h @@ -311,12 +311,12 @@ ttm_resource_manager_cleanup(struct ttm_resource_manager *man) } void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk); -void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk, - struct ttm_resource *res); -void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk, - struct ttm_resource *res); void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk); +void ttm_resource_add_bulk_move(struct ttm_resource *res, + struct ttm_buffer_object *bo); +void ttm_resource_del_bulk_move(struct ttm_resource *res, + struct ttm_buffer_object *bo); void ttm_resource_move_to_lru_tail(struct ttm_resource *res); void ttm_resource_init(struct ttm_buffer_object *bo, diff --git a/include/keys/asymmetric-type.h b/include/keys/asymmetric-type.h index 6c5d4963e15b..69a13e1e5b2e 100644 --- a/include/keys/asymmetric-type.h +++ b/include/keys/asymmetric-type.h @@ -84,6 +84,9 @@ extern struct key *find_asymmetric_key(struct key *keyring, const struct asymmetric_key_id *id_2, bool partial); +int x509_load_certificate_list(const u8 cert_list[], const unsigned long list_size, + const struct key *keyring); + /* * The payload is at the discretion of the subtype. 
*/ diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 2bd073fa6bb5..d452071db572 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -119,6 +119,8 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio); extern struct backing_dev_info noop_backing_dev_info; +int bdi_init(struct backing_dev_info *bdi); + /** * writeback_in_progress - determine whether there is writeback in progress * @wb: bdi_writeback of interest diff --git a/include/linux/bio.h b/include/linux/bio.h index 1cf3738ef1ea..992ee987f273 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -403,7 +403,6 @@ enum { extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags); extern void bioset_exit(struct bio_set *); extern int biovec_init_pool(mempool_t *pool, int pool_entries); -extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src); struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs, unsigned int opf, gfp_t gfp_mask, diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 608d577734c2..2f7b43444c5f 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -342,7 +342,6 @@ static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev, */ struct blk_independent_access_range { struct kobject kobj; - struct request_queue *queue; sector_t sector; sector_t nr_sectors; }; @@ -482,7 +481,6 @@ struct request_queue { #endif /* CONFIG_BLK_DEV_ZONED */ int node; - struct mutex debugfs_mutex; #ifdef CONFIG_BLK_DEV_IO_TRACE struct blk_trace __rcu *blk_trace; #endif @@ -526,11 +524,12 @@ struct request_queue { struct bio_set bio_split; struct dentry *debugfs_dir; - -#ifdef CONFIG_BLK_DEBUG_FS struct dentry *sched_debugfs_dir; struct dentry *rqos_debugfs_dir; -#endif + /* + * Serializes all debugfs metadata operations using the above dentries. 
+ */ + struct mutex debugfs_mutex; bool mq_sysfs_init_done; @@ -575,6 +574,7 @@ struct request_queue { #define QUEUE_FLAG_RQ_ALLOC_TIME 27 /* record rq->alloc_time_ns */ #define QUEUE_FLAG_HCTX_ACTIVE 28 /* at least one blk-mq hctx is active */ #define QUEUE_FLAG_NOWAIT 29 /* device supports NOWAIT */ +#define QUEUE_FLAG_SQ_SCHED 30 /* single queue style io dispatch */ #define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ (1 << QUEUE_FLAG_SAME_COMP) | \ @@ -616,6 +616,7 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q); #define blk_queue_pm_only(q) atomic_read(&(q)->pm_only) #define blk_queue_registered(q) test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags) #define blk_queue_nowait(q) test_bit(QUEUE_FLAG_NOWAIT, &(q)->queue_flags) +#define blk_queue_sq_sched(q) test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags) extern void blk_set_pm_only(struct request_queue *q); extern void blk_clear_pm_only(struct request_queue *q); @@ -1006,8 +1007,6 @@ void disk_set_independent_access_ranges(struct gendisk *disk, */ /* Supports zoned block devices sequential write constraint */ #define ELEVATOR_F_ZBD_SEQ_WRITE (1U << 0) -/* Supports scheduling on multiple hardware queues */ -#define ELEVATOR_F_MQ_AWARE (1U << 1) extern void blk_queue_required_elevator_features(struct request_queue *q, unsigned int features); diff --git a/include/linux/console.h b/include/linux/console.h index 143653090c48..8c1686e2c233 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -16,7 +16,6 @@ #include <linux/atomic.h> #include <linux/types.h> -#include <linux/mutex.h> struct vc_data; struct console_font_op; @@ -154,22 +153,6 @@ struct console { uint ospeed; u64 seq; unsigned long dropped; - struct task_struct *thread; - bool blocked; - - /* - * The per-console lock is used by printing kthreads to synchronize - * this console with callers of console_lock(). This is necessary in - * order to allow printing kthreads to run in parallel to each other, - * while each safely accessing the @blocked field and synchronizing - * against direct printing via console_lock/console_unlock. - * - * Note: For synchronizing against direct printing via - * console_trylock/console_unlock, see the static global - * variable @console_kthreads_active. 
- */ - struct mutex lock; - void *data; struct console *next; }; diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 54dc2f9a2d56..2c7477354744 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -65,6 +65,9 @@ extern ssize_t cpu_show_tsx_async_abort(struct device *dev, extern ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf); extern ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_mmio_stale_data(struct device *dev, + struct device_attribute *attr, + char *buf); extern __printf(4, 5) struct device *cpu_device_create(struct device *parent, void *drvdata, diff --git a/include/linux/crc-itu-t.h b/include/linux/crc-itu-t.h index a4367051e192..2f991a427ade 100644 --- a/include/linux/crc-itu-t.h +++ b/include/linux/crc-itu-t.h @@ -4,7 +4,7 @@ * * Implements the standard CRC ITU-T V.41: * Width 16 - * Poly 0x1021 (x^16 + x^12 + x^15 + 1) + * Poly 0x1021 (x^16 + x^12 + x^5 + 1) * Init 0 */ diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index b1e0f1f8ee2e..54c3c6506503 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -167,21 +167,24 @@ struct gpio_irq_chip { */ irq_flow_handler_t parent_handler; - /** - * @parent_handler_data: - * - * If @per_parent_data is false, @parent_handler_data is a single - * pointer used as the data associated with every parent interrupt. - * - * @parent_handler_data_array: - * - * If @per_parent_data is true, @parent_handler_data_array is - * an array of @num_parents pointers, and is used to associate - * different data for each parent. This cannot be NULL if - * @per_parent_data is true. - */ union { + /** + * @parent_handler_data: + * + * If @per_parent_data is false, @parent_handler_data is a + * single pointer used as the data associated with every + * parent interrupt. + */ void *parent_handler_data; + + /** + * @parent_handler_data_array: + * + * If @per_parent_data is true, @parent_handler_data_array is + * an array of @num_parents pointers, and is used to associate + * different data for each parent. This cannot be NULL if + * @per_parent_data is true. 
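+	 *
+	 * Assumed usage sketch (girq and my_per_parent_data are
+	 * placeholders): entry i is handed to the chained handler
+	 * registered for @parents[i]:
+	 *
+	 *	girq->per_parent_data = true;
+	 *	girq->parent_handler_data_array = my_per_parent_data;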
+ */ void **parent_handler_data_array; }; diff --git a/include/linux/libata.h b/include/linux/libata.h index 732de9014626..0f2a59c9c735 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -822,7 +822,6 @@ struct ata_port { struct ata_queued_cmd qcmd[ATA_MAX_QUEUE + 1]; u64 qc_active; int nr_active_links; /* #links with active qcs */ - unsigned int sas_last_tag; /* track next tag hw expects */ struct ata_link link; /* host default link */ struct ata_link *slave_link; /* see ata_slave_link_init() */ diff --git a/include/linux/mm.h b/include/linux/mm.h index bc8f326be0ce..cf3d0d673f6b 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1600,7 +1600,7 @@ static inline bool is_pinnable_page(struct page *page) if (mt == MIGRATE_CMA || mt == MIGRATE_ISOLATE) return false; #endif - return !(is_zone_movable_page(page) || is_zero_pfn(page_to_pfn(page))); + return !is_zone_movable_page(page) || is_zero_pfn(page_to_pfn(page)); } #else static inline bool is_pinnable_page(struct page *page) @@ -3232,6 +3232,7 @@ enum mf_flags { MF_MUST_KILL = 1 << 2, MF_SOFT_OFFLINE = 1 << 3, MF_UNPOISON = 1 << 4, + MF_SW_SIMULATED = 1 << 5, }; extern int memory_failure(unsigned long pfn, int flags); extern void memory_failure_queue(unsigned long pfn, int flags); diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index b34ff2cdbc4f..c29ab4c0cd5c 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -227,6 +227,7 @@ struct page { * struct folio - Represents a contiguous set of bytes. * @flags: Identical to the page flags. * @lru: Least Recently Used list; tracks how recently this folio was used. + * @mlock_count: Number of times this folio has been pinned by mlock(). * @mapping: The file this page belongs to, or refers to the anon_vma for * anonymous memory. * @index: Offset within the file, in units of pages. For anonymous memory, @@ -255,10 +256,14 @@ struct folio { unsigned long flags; union { struct list_head lru; + /* private: avoid cluttering the output */ struct { void *__filler; + /* public: */ unsigned int mlock_count; + /* private: */ }; + /* public: */ }; struct address_space *mapping; pgoff_t index; diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index 1ede4c89805a..42218a1164f6 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -351,7 +351,7 @@ struct spi_nor_flash_parameter; * @bouncebuf_size: size of the bounce buffer * @info: SPI NOR part JEDEC MFR ID and other info * @manufacturer: SPI NOR manufacturer - * @addr_width: number of address bytes + * @addr_nbytes: number of address bytes * @erase_opcode: the opcode for erasing a sector * @read_opcode: the read opcode * @read_dummy: the dummy needed by the read operation @@ -381,7 +381,7 @@ struct spi_nor { size_t bouncebuf_size; const struct flash_info *info; const struct spi_nor_manufacturer *manufacturer; - u8 addr_width; + u8 addr_nbytes; u8 erase_opcode; u8 read_opcode; u8 read_dummy; diff --git a/include/linux/netfs.h b/include/linux/netfs.h index 77fa6a61706a..1773e5df8e65 100644 --- a/include/linux/netfs.h +++ b/include/linux/netfs.h @@ -119,9 +119,10 @@ typedef void (*netfs_io_terminated_t)(void *priv, ssize_t transferred_or_error, bool was_async); /* - * Per-inode description. This must be directly after the inode struct. + * Per-inode context. This wraps the VFS inode. 
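+ *
+ * Assumed usage sketch (myfs names are placeholders): a network filesystem
+ * embeds this in its own inode structure and converts back from the VFS
+ * inode with the container_of()-based helper below:
+ *
+ *	struct myfs_inode {
+ *		struct netfs_inode netfs;
+ *		unsigned long myfs_flags;
+ *	};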
*/ -struct netfs_i_context { +struct netfs_inode { + struct inode inode; /* The VFS inode */ const struct netfs_request_ops *ops; #if IS_ENABLED(CONFIG_FSCACHE) struct fscache_cookie *cache; @@ -205,7 +206,9 @@ struct netfs_io_request { */ struct netfs_request_ops { int (*init_request)(struct netfs_io_request *rreq, struct file *file); + void (*free_request)(struct netfs_io_request *rreq); int (*begin_cache_operation)(struct netfs_io_request *rreq); + void (*expand_readahead)(struct netfs_io_request *rreq); bool (*clamp_length)(struct netfs_io_subrequest *subreq); void (*issue_read)(struct netfs_io_subrequest *subreq); @@ -213,7 +216,6 @@ struct netfs_request_ops { int (*check_write_begin)(struct file *file, loff_t pos, unsigned len, struct folio *folio, void **_fsdata); void (*done)(struct netfs_io_request *rreq); - void (*cleanup)(struct address_space *mapping, void *netfs_priv); }; /* @@ -256,7 +258,7 @@ struct netfs_cache_ops { * boundary as appropriate. */ enum netfs_io_source (*prepare_read)(struct netfs_io_subrequest *subreq, - loff_t i_size); + loff_t i_size); /* Prepare a write operation, working out what part of the write we can * actually do. @@ -276,7 +278,8 @@ struct netfs_cache_ops { struct readahead_control; extern void netfs_readahead(struct readahead_control *); int netfs_read_folio(struct file *, struct folio *); -extern int netfs_write_begin(struct file *, struct address_space *, +extern int netfs_write_begin(struct netfs_inode *, + struct file *, struct address_space *, loff_t, unsigned int, struct folio **, void **); @@ -288,71 +291,56 @@ extern void netfs_put_subrequest(struct netfs_io_subrequest *subreq, extern void netfs_stats_show(struct seq_file *); /** - * netfs_i_context - Get the netfs inode context from the inode + * netfs_inode - Get the netfs inode context from the inode * @inode: The inode to query * * Get the netfs lib inode context from the network filesystem's inode. The * context struct is expected to directly follow on from the VFS inode struct. */ -static inline struct netfs_i_context *netfs_i_context(struct inode *inode) -{ - return (void *)inode + sizeof(*inode); -} - -/** - * netfs_inode - Get the netfs inode from the inode context - * @ctx: The context to query - * - * Get the netfs inode from the netfs library's inode context. The VFS inode - * is expected to directly precede the context struct. - */ -static inline struct inode *netfs_inode(struct netfs_i_context *ctx) +static inline struct netfs_inode *netfs_inode(struct inode *inode) { - return (void *)ctx - sizeof(struct inode); + return container_of(inode, struct netfs_inode, inode); } /** - * netfs_i_context_init - Initialise a netfs lib context - * @inode: The inode with which the context is associated + * netfs_inode_init - Initialise a netfslib inode context + * @ctx: The netfs inode to initialise * @ops: The netfs's operations list * * Initialise the netfs library context struct. This is expected to follow on * directly from the VFS inode struct. 
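+ *
+ * For example (sketch, matching the placeholder struct above), called once
+ * while setting up an inode:
+ *
+ *	netfs_inode_init(&mi->netfs, &myfs_req_ops);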
*/ -static inline void netfs_i_context_init(struct inode *inode, - const struct netfs_request_ops *ops) +static inline void netfs_inode_init(struct netfs_inode *ctx, + const struct netfs_request_ops *ops) { - struct netfs_i_context *ctx = netfs_i_context(inode); - - memset(ctx, 0, sizeof(*ctx)); ctx->ops = ops; - ctx->remote_i_size = i_size_read(inode); + ctx->remote_i_size = i_size_read(&ctx->inode); +#if IS_ENABLED(CONFIG_FSCACHE) + ctx->cache = NULL; +#endif } /** * netfs_resize_file - Note that a file got resized - * @inode: The inode being resized + * @ctx: The netfs inode being resized * @new_i_size: The new file size * * Inform the netfs lib that a file got resized so that it can adjust its state. */ -static inline void netfs_resize_file(struct inode *inode, loff_t new_i_size) +static inline void netfs_resize_file(struct netfs_inode *ctx, loff_t new_i_size) { - struct netfs_i_context *ctx = netfs_i_context(inode); - ctx->remote_i_size = new_i_size; } /** * netfs_i_cookie - Get the cache cookie from the inode - * @inode: The inode to query + * @ctx: The netfs inode to query * * Get the caching cookie (if enabled) from the network filesystem's inode. */ -static inline struct fscache_cookie *netfs_i_cookie(struct inode *inode) +static inline struct fscache_cookie *netfs_i_cookie(struct netfs_inode *ctx) { #if IS_ENABLED(CONFIG_FSCACHE) - struct netfs_i_context *ctx = netfs_i_context(inode); return ctx->cache; #else return NULL; diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 29ec3e3481ff..e3934003f239 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -233,8 +233,8 @@ enum { }; enum { - NVME_CAP_CRMS_CRIMS = 1ULL << 59, - NVME_CAP_CRMS_CRWMS = 1ULL << 60, + NVME_CAP_CRMS_CRWMS = 1ULL << 59, + NVME_CAP_CRMS_CRIMS = 1ULL << 60, }; struct nvme_id_power_state { diff --git a/include/linux/objtool.h b/include/linux/objtool.h index 6491fa8fba6d..15b940ec1eac 100644 --- a/include/linux/objtool.h +++ b/include/linux/objtool.h @@ -143,6 +143,12 @@ struct unwind_hint { .popsection .endm +.macro STACK_FRAME_NON_STANDARD_FP func:req +#ifdef CONFIG_FRAME_POINTER + STACK_FRAME_NON_STANDARD \func +#endif +.endm + .macro ANNOTATE_NOENDBR .Lhere_\@: .pushsection .discard.noendbr diff --git a/include/linux/platform-feature.h b/include/linux/platform-feature.h new file mode 100644 index 000000000000..b2f48be999fa --- /dev/null +++ b/include/linux/platform-feature.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _PLATFORM_FEATURE_H +#define _PLATFORM_FEATURE_H + +#include <linux/bitops.h> +#include <asm/platform-feature.h> + +/* The platform features are starting with the architecture specific ones. */ + +/* Used to enable platform specific DMA handling for virtio devices. 
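+ *
+ * An arch or hypervisor layer sets the bit early and consumers test it;
+ * the Xen hunk later in this patch does exactly this
+ * (handle_restricted_virtio() is a placeholder):
+ *
+ *	platform_set(PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS);
+ *	...
+ *	if (platform_has(PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS))
+ *		handle_restricted_virtio();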
*/ +#define PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS (0 + PLATFORM_ARCH_FEAT_N) + +#define PLATFORM_FEAT_N (1 + PLATFORM_ARCH_FEAT_N) + +void platform_set(unsigned int feature); +void platform_clear(unsigned int feature); +bool platform_has(unsigned int feature); + +#endif /* _PLATFORM_FEATURE_H */ diff --git a/include/linux/printk.h b/include/linux/printk.h index 10ec29bc0135..cf7d666ab1f8 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -169,9 +169,6 @@ extern void __printk_safe_exit(void); #define printk_deferred_enter __printk_safe_enter #define printk_deferred_exit __printk_safe_exit -extern void printk_prefer_direct_enter(void); -extern void printk_prefer_direct_exit(void); - extern bool pr_flush(int timeout_ms, bool reset_on_progress); /* @@ -224,14 +221,6 @@ static inline void printk_deferred_exit(void) { } -static inline void printk_prefer_direct_enter(void) -{ -} - -static inline void printk_prefer_direct_exit(void) -{ -} - static inline bool pr_flush(int timeout_ms, bool reset_on_progress) { return true; diff --git a/include/linux/random.h b/include/linux/random.h index fae0c84027fd..20e389a14e5c 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -13,7 +13,7 @@ struct notifier_block; void add_device_randomness(const void *buf, size_t len); -void add_bootloader_randomness(const void *buf, size_t len); +void __init add_bootloader_randomness(const void *buf, size_t len); void add_input_randomness(unsigned int type, unsigned int code, unsigned int value) __latent_entropy; void add_interrupt_randomness(int irq) __latent_entropy; @@ -74,7 +74,6 @@ static inline unsigned long get_random_canary(void) int __init random_init(const char *command_line); bool rng_is_initialized(void); -bool rng_has_arch_random(void); int wait_for_random_bytes(void); /* Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes). 
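Note on the ratelimit hunk below: RATELIMIT_STATE_INIT_FLAGS lets a
ratelimit_state carry its flags from the initializer instead of having them
patched in at runtime. A minimal sketch of the intended use (my_rs is a
placeholder; RATELIMIT_MSG_ON_RELEASE is the existing flag from
linux/ratelimit.h):

	static struct ratelimit_state my_rs =
		RATELIMIT_STATE_INIT_FLAGS(my_rs, 5 * HZ, 10,
					   RATELIMIT_MSG_ON_RELEASE);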
diff --git a/include/linux/ratelimit_types.h b/include/linux/ratelimit_types.h index c21c7f8103e2..002266693e50 100644 --- a/include/linux/ratelimit_types.h +++ b/include/linux/ratelimit_types.h @@ -23,12 +23,16 @@ struct ratelimit_state { unsigned long flags; }; -#define RATELIMIT_STATE_INIT(name, interval_init, burst_init) { \ - .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ - .interval = interval_init, \ - .burst = burst_init, \ +#define RATELIMIT_STATE_INIT_FLAGS(name, interval_init, burst_init, flags_init) { \ + .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ + .interval = interval_init, \ + .burst = burst_init, \ + .flags = flags_init, \ } +#define RATELIMIT_STATE_INIT(name, interval_init, burst_init) \ + RATELIMIT_STATE_INIT_FLAGS(name, interval_init, burst_init, 0) + #define RATELIMIT_STATE_INIT_DISABLED \ RATELIMIT_STATE_INIT(ratelimit_state, 0, DEFAULT_RATELIMIT_BURST) diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index 1c58646ba381..704111f63993 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -13,8 +13,9 @@ #include <linux/notifier.h> #include <linux/types.h> -#define SCMI_MAX_STR_SIZE 64 -#define SCMI_MAX_NUM_RATES 16 +#define SCMI_MAX_STR_SIZE 64 +#define SCMI_SHORT_NAME_MAX_SIZE 16 +#define SCMI_MAX_NUM_RATES 16 /** * struct scmi_revision_info - version information structure @@ -36,8 +37,8 @@ struct scmi_revision_info { u8 num_protocols; u8 num_agents; u32 impl_ver; - char vendor_id[SCMI_MAX_STR_SIZE]; - char sub_vendor_id[SCMI_MAX_STR_SIZE]; + char vendor_id[SCMI_SHORT_NAME_MAX_SIZE]; + char sub_vendor_id[SCMI_SHORT_NAME_MAX_SIZE]; }; struct scmi_clock_info { diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index cbd5070bc87f..657a0fc68a3f 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -45,6 +45,7 @@ struct uart_ops { void (*unthrottle)(struct uart_port *); void (*send_xchar)(struct uart_port *, char ch); void (*stop_rx)(struct uart_port *); + void (*start_rx)(struct uart_port *); void (*enable_ms)(struct uart_port *); void (*break_ctl)(struct uart_port *, int ctl); int (*startup)(struct uart_port *); diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 4417f667c757..5860f32e3958 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -243,7 +243,7 @@ extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, extern __be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes); extern int xdr_reserve_space_vec(struct xdr_stream *xdr, struct kvec *vec, size_t nbytes); -extern void xdr_commit_encode(struct xdr_stream *xdr); +extern void __xdr_commit_encode(struct xdr_stream *xdr); extern void xdr_truncate_encode(struct xdr_stream *xdr, size_t len); extern int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen); extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, @@ -307,6 +307,20 @@ xdr_reset_scratch_buffer(struct xdr_stream *xdr) } /** + * xdr_commit_encode - Ensure all data is written to xdr->buf + * @xdr: pointer to xdr_stream + * + * Handle encoding across page boundaries by giving the caller a + * temporary location to write to, then later copying the data into + * place. __xdr_commit_encode() does that copying. 
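+ *
+ * The inline wrapper below keeps the common case cheap: xdr->scratch.iov_len
+ * is only non-zero while such a temporary location is in use, so most calls
+ * reduce to a single unlikely() test with no function call.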
+ */ +static inline void xdr_commit_encode(struct xdr_stream *xdr) +{ + if (unlikely(xdr->scratch.iov_len)) + __xdr_commit_encode(xdr); +} + +/** * xdr_stream_remaining - Return the number of bytes remaining in the stream * @xdr: pointer to struct xdr_stream * diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index 4700a88a28f6..7b4a13d3bd91 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -178,7 +178,8 @@ struct vdpa_map_file { * for the device * @vdev: vdpa device * Returns virtqueue algin requirement - * @get_vq_group: Get the group id for a specific virtqueue + * @get_vq_group: Get the group id for a specific + * virtqueue (optional) * @vdev: vdpa device * @idx: virtqueue index * Returns u32: group id for this virtqueue @@ -243,7 +244,7 @@ struct vdpa_map_file { * Returns the iova range supported by * the device. * @set_group_asid: Set address space identifier for a - * virtqueue group + * virtqueue group (optional) * @vdev: vdpa device * @group: virtqueue group * @asid: address space id for this group diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 9a36051ceb76..49c7c32815f1 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -604,13 +604,4 @@ static inline void virtio_cwrite64(struct virtio_device *vdev, _r; \ }) -#ifdef CONFIG_ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS -int arch_has_restricted_virtio_memory_access(void); -#else -static inline int arch_has_restricted_virtio_memory_access(void) -{ - return 0; -} -#endif /* CONFIG_ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS */ - #endif /* _LINUX_VIRTIO_CONFIG_H */ diff --git a/include/linux/visorbus.h b/include/linux/visorbus.h deleted file mode 100644 index 0d8bd6769b13..000000000000 --- a/include/linux/visorbus.h +++ /dev/null @@ -1,344 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * Copyright (C) 2010 - 2013 UNISYS CORPORATION - * All rights reserved. - */ - -/* - * This header file is to be included by other kernel mode components that - * implement a particular kind of visor_device. Each of these other kernel - * mode components is called a visor device driver. Refer to visortemplate - * for a minimal sample visor device driver. - * - * There should be nothing in this file that is private to the visorbus - * bus implementation itself. - */ - -#ifndef __VISORBUS_H__ -#define __VISORBUS_H__ - -#include <linux/device.h> - -#define VISOR_CHANNEL_SIGNATURE ('L' << 24 | 'N' << 16 | 'C' << 8 | 'E') - -/* - * enum channel_serverstate - * @CHANNELSRV_UNINITIALIZED: Channel is in an undefined state. - * @CHANNELSRV_READY: Channel has been initialized by server. - */ -enum channel_serverstate { - CHANNELSRV_UNINITIALIZED = 0, - CHANNELSRV_READY = 1 -}; - -/* - * enum channel_clientstate - * @CHANNELCLI_DETACHED: - * @CHANNELCLI_DISABLED: Client can see channel but is NOT allowed to use it - * unless given TBD* explicit request - * (should actually be < DETACHED). - * @CHANNELCLI_ATTACHING: Legacy EFI client request for EFI server to attach. - * @CHANNELCLI_ATTACHED: Idle, but client may want to use channel any time. - * @CHANNELCLI_BUSY: Client either wants to use or is using channel. - * @CHANNELCLI_OWNED: "No worries" state - client can access channel - * anytime. 
- */ -enum channel_clientstate { - CHANNELCLI_DETACHED = 0, - CHANNELCLI_DISABLED = 1, - CHANNELCLI_ATTACHING = 2, - CHANNELCLI_ATTACHED = 3, - CHANNELCLI_BUSY = 4, - CHANNELCLI_OWNED = 5 -}; - -/* - * Values for VISOR_CHANNEL_PROTOCOL.Features: This define exists so that - * a guest can look at the FeatureFlags in the io channel, and configure the - * driver to use interrupts or not based on this setting. All feature bits for - * all channels should be defined here. The io channel feature bits are defined - * below. - */ -#define VISOR_DRIVER_ENABLES_INTS (0x1ULL << 1) -#define VISOR_CHANNEL_IS_POLLING (0x1ULL << 3) -#define VISOR_IOVM_OK_DRIVER_DISABLING_INTS (0x1ULL << 4) -#define VISOR_DRIVER_DISABLES_INTS (0x1ULL << 5) -#define VISOR_DRIVER_ENHANCED_RCVBUF_CHECKING (0x1ULL << 6) - -/* - * struct channel_header - Common Channel Header - * @signature: Signature. - * @legacy_state: DEPRECATED - being replaced by. - * @header_size: sizeof(struct channel_header). - * @size: Total size of this channel in bytes. - * @features: Flags to modify behavior. - * @chtype: Channel type: data, bus, control, etc.. - * @partition_handle: ID of guest partition. - * @handle: Device number of this channel in client. - * @ch_space_offset: Offset in bytes to channel specific area. - * @version_id: Struct channel_header Version ID. - * @partition_index: Index of guest partition. - * @zone_uuid: Guid of Channel's zone. - * @cli_str_offset: Offset from channel header to null-terminated - * ClientString (0 if ClientString not present). - * @cli_state_boot: CHANNEL_CLIENTSTATE of pre-boot EFI client of this - * channel. - * @cmd_state_cli: CHANNEL_COMMANDSTATE (overloaded in Windows drivers, see - * ServerStateUp, ServerStateDown, etc). - * @cli_state_os: CHANNEL_CLIENTSTATE of Guest OS client of this channel. - * @ch_characteristic: CHANNEL_CHARACTERISTIC_<xxx>. - * @cmd_state_srv: CHANNEL_COMMANDSTATE (overloaded in Windows drivers, see - * ServerStateUp, ServerStateDown, etc). - * @srv_state: CHANNEL_SERVERSTATE. - * @cli_error_boot: Bits to indicate err states for boot clients, so err - * messages can be throttled. - * @cli_error_os: Bits to indicate err states for OS clients, so err - * messages can be throttled. - * @filler: Pad out to 128 byte cacheline. - * @recover_channel: Please add all new single-byte values below here. - */ -struct channel_header { - u64 signature; - u32 legacy_state; - /* SrvState, CliStateBoot, and CliStateOS below */ - u32 header_size; - u64 size; - u64 features; - guid_t chtype; - u64 partition_handle; - u64 handle; - u64 ch_space_offset; - u32 version_id; - u32 partition_index; - guid_t zone_guid; - u32 cli_str_offset; - u32 cli_state_boot; - u32 cmd_state_cli; - u32 cli_state_os; - u32 ch_characteristic; - u32 cmd_state_srv; - u32 srv_state; - u8 cli_error_boot; - u8 cli_error_os; - u8 filler[1]; - u8 recover_channel; -} __packed; - -#define VISOR_CHANNEL_ENABLE_INTS (0x1ULL << 0) - -/* - * struct signal_queue_header - Subheader for the Signal Type variation of the - * Common Channel. - * @version: SIGNAL_QUEUE_HEADER Version ID. - * @chtype: Queue type: storage, network. - * @size: Total size of this queue in bytes. - * @sig_base_offset: Offset to signal queue area. - * @features: Flags to modify behavior. - * @num_sent: Total # of signals placed in this queue. - * @num_overflows: Total # of inserts failed due to full queue. - * @signal_size: Total size of a signal for this queue. - * @max_slots: Max # of slots in queue, 1 slot is always empty. 
- * @max_signals: Max # of signals in queue (MaxSignalSlots-1). - * @head: Queue head signal #. - * @num_received: Total # of signals removed from this queue. - * @tail: Queue tail signal. - * @reserved1: Reserved field. - * @reserved2: Reserved field. - * @client_queue: - * @num_irq_received: Total # of Interrupts received. This is incremented by the - * ISR in the guest windows driver. - * @num_empty: Number of times that visor_signal_remove is called and - * returned Empty Status. - * @errorflags: Error bits set during SignalReinit to denote trouble with - * client's fields. - * @filler: Pad out to 64 byte cacheline. - */ -struct signal_queue_header { - /* 1st cache line */ - u32 version; - u32 chtype; - u64 size; - u64 sig_base_offset; - u64 features; - u64 num_sent; - u64 num_overflows; - u32 signal_size; - u32 max_slots; - u32 max_signals; - u32 head; - /* 2nd cache line */ - u64 num_received; - u32 tail; - u32 reserved1; - u64 reserved2; - u64 client_queue; - u64 num_irq_received; - u64 num_empty; - u32 errorflags; - u8 filler[12]; -} __packed; - -/* VISORCHANNEL Guids */ -/* {414815ed-c58c-11da-95a9-00e08161165f} */ -#define VISOR_VHBA_CHANNEL_GUID \ - GUID_INIT(0x414815ed, 0xc58c, 0x11da, \ - 0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f) -#define VISOR_VHBA_CHANNEL_GUID_STR \ - "414815ed-c58c-11da-95a9-00e08161165f" -struct visorchipset_state { - u32 created:1; - u32 attached:1; - u32 configured:1; - u32 running:1; - /* Remaining bits in this 32-bit word are reserved. */ -}; - -/** - * struct visor_device - A device type for things "plugged" into the visorbus - * bus - * @visorchannel: Points to the channel that the device is - * associated with. - * @channel_type_guid: Identifies the channel type to the bus driver. - * @device: Device struct meant for use by the bus driver - * only. - * @list_all: Used by the bus driver to enumerate devices. - * @timer: Timer fired periodically to do interrupt-type - * activity. - * @being_removed: Indicates that the device is being removed from - * the bus. Private bus driver use only. - * @visordriver_callback_lock: Used by the bus driver to lock when adding and - * removing devices. - * @pausing: Indicates that a change towards a paused state. - * is in progress. Only modified by the bus driver. - * @resuming: Indicates that a change towards a running state - * is in progress. Only modified by the bus driver. - * @chipset_bus_no: Private field used by the bus driver. - * @chipset_dev_no: Private field used the bus driver. - * @state: Used to indicate the current state of the - * device. - * @inst: Unique GUID for this instance of the device. - * @name: Name of the device. - * @pending_msg_hdr: For private use by bus driver to respond to - * hypervisor requests. - * @vbus_hdr_info: A pointer to header info. Private use by bus - * driver. - * @partition_guid: Indicates client partion id. This should be the - * same across all visor_devices in the current - * guest. Private use by bus driver only. - */ -struct visor_device { - struct visorchannel *visorchannel; - guid_t channel_type_guid; - /* These fields are for private use by the bus driver only. 
*/ - struct device device; - struct list_head list_all; - struct timer_list timer; - bool timer_active; - bool being_removed; - struct mutex visordriver_callback_lock; /* synchronize probe/remove */ - bool pausing; - bool resuming; - u32 chipset_bus_no; - u32 chipset_dev_no; - struct visorchipset_state state; - guid_t inst; - u8 *name; - struct controlvm_message_header *pending_msg_hdr; - void *vbus_hdr_info; - guid_t partition_guid; - struct dentry *debugfs_dir; - struct dentry *debugfs_bus_info; -}; - -#define to_visor_device(x) container_of(x, struct visor_device, device) - -typedef void (*visorbus_state_complete_func) (struct visor_device *dev, - int status); - -/* - * This struct describes a specific visor channel, by providing its GUID, name, - * and sizes. - */ -struct visor_channeltype_descriptor { - const guid_t guid; - const char *name; - u64 min_bytes; - u32 version; -}; - -/** - * struct visor_driver - Information provided by each visor driver when it - * registers with the visorbus driver - * @name: Name of the visor driver. - * @owner: The module owner. - * @channel_types: Types of channels handled by this driver, ending with - * a zero GUID. Our specialized BUS.match() method knows - * about this list, and uses it to determine whether this - * driver will in fact handle a new device that it has - * detected. - * @probe: Called when a new device comes online, by our probe() - * function specified by driver.probe() (triggered - * ultimately by some call to driver_register(), - * bus_add_driver(), or driver_attach()). - * @remove: Called when a new device is removed, by our remove() - * function specified by driver.remove() (triggered - * ultimately by some call to device_release_driver()). - * @channel_interrupt: Called periodically, whenever there is a possiblity - * that "something interesting" may have happened to the - * channel. - * @pause: Called to initiate a change of the device's state. If - * the return valu`e is < 0, there was an error and the - * state transition will NOT occur. If the return value - * is >= 0, then the state transition was INITIATED - * successfully, and complete_func() will be called (or - * was just called) with the final status when either the - * state transition fails or completes successfully. - * @resume: Behaves similar to pause. - * @driver: Private reference to the device driver. For use by bus - * driver only. - */ -struct visor_driver { - const char *name; - struct module *owner; - struct visor_channeltype_descriptor *channel_types; - int (*probe)(struct visor_device *dev); - void (*remove)(struct visor_device *dev); - void (*channel_interrupt)(struct visor_device *dev); - int (*pause)(struct visor_device *dev, - visorbus_state_complete_func complete_func); - int (*resume)(struct visor_device *dev, - visorbus_state_complete_func complete_func); - - /* These fields are for private use by the bus driver only. 
*/ - struct device_driver driver; -}; - -#define to_visor_driver(x) (container_of(x, struct visor_driver, driver)) - -int visor_check_channel(struct channel_header *ch, struct device *dev, - const guid_t *expected_uuid, char *chname, - u64 expected_min_bytes, u32 expected_version, - u64 expected_signature); - -int visorbus_register_visor_driver(struct visor_driver *drv); -void visorbus_unregister_visor_driver(struct visor_driver *drv); -int visorbus_read_channel(struct visor_device *dev, - unsigned long offset, void *dest, - unsigned long nbytes); -int visorbus_write_channel(struct visor_device *dev, - unsigned long offset, void *src, - unsigned long nbytes); -int visorbus_enable_channel_interrupts(struct visor_device *dev); -void visorbus_disable_channel_interrupts(struct visor_device *dev); - -int visorchannel_signalremove(struct visorchannel *channel, u32 queue, - void *msg); -int visorchannel_signalinsert(struct visorchannel *channel, u32 queue, - void *msg); -bool visorchannel_signalempty(struct visorchannel *channel, u32 queue); -const guid_t *visorchannel_get_guid(struct visorchannel *channel); - -#define BUS_ROOT_DEVICE UINT_MAX -struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no, - struct visor_device *from); -#endif diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index b159c2789961..096d48aa3437 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -215,6 +215,7 @@ extern struct vm_struct *__get_vm_area_caller(unsigned long size, void free_vm_area(struct vm_struct *area); extern struct vm_struct *remove_vm_area(const void *addr); extern struct vm_struct *find_vm_area(const void *addr); +struct vmap_area *find_vmap_area(unsigned long addr); static inline bool is_vm_area_hugepages(const void *addr) { diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 7fee9b6cfede..62e75dd40d9a 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -406,7 +406,7 @@ alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...); * alloc_ordered_workqueue - allocate an ordered workqueue * @fmt: printf format for the name of the workqueue * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful) - * @args...: args for @fmt + * @args: args for @fmt * * Allocate an ordered workqueue. An ordered workqueue executes at * most one work item at any given time in the queued order. They are @@ -445,7 +445,7 @@ extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, struct delayed_work *dwork, unsigned long delay); extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork); -extern void flush_workqueue(struct workqueue_struct *wq); +extern void __flush_workqueue(struct workqueue_struct *wq); extern void drain_workqueue(struct workqueue_struct *wq); extern int schedule_on_each_cpu(work_func_t func); @@ -563,15 +563,23 @@ static inline bool schedule_work(struct work_struct *work) return queue_work(system_wq, work); } +/* + * Detect attempt to flush system-wide workqueues at compile time when possible. + * + * See https://lkml.kernel.org/r/49925af7-78a8-a3dd-bce6-cfc02e1a9236@I-love.SAKURA.ne.jp + * for reasons and steps for converting system-wide workqueues into local workqueues. + */ +extern void __warn_flushing_systemwide_wq(void) + __compiletime_warning("Please avoid flushing system-wide workqueues."); + /** * flush_scheduled_work - ensure that any scheduled work has run to completion. 
* * Forces execution of the kernel-global workqueue and blocks until its * completion. * - * Think twice before calling this function! It's very easy to get into - * trouble if you don't take great care. Either of the following situations - * will lead to deadlock: + * It's very easy to get into trouble if you don't take great care. + * Either of the following situations will lead to deadlock: * * One of the work items currently on the workqueue needs to acquire * a lock held by your code or its caller. @@ -586,11 +594,51 @@ static inline bool schedule_work(struct work_struct *work) * need to know that a particular work item isn't queued and isn't running. * In such cases you should use cancel_delayed_work_sync() or * cancel_work_sync() instead. + * + * Please stop calling this function! A conversion to stop flushing system-wide + * workqueues is in progress. This function will be removed after all in-tree + * users stopped calling this function. */ -static inline void flush_scheduled_work(void) -{ - flush_workqueue(system_wq); -} +/* + * The background of commit 771c035372a036f8 ("deprecate the + * '__deprecated' attribute warnings entirely and for good") is that, + * since Linus builds all modules between every single pull he does, + * the standard kernel build needs to be _clean_ in order to be able to + * notice when new problems happen. Therefore, don't emit warning while + * there are in-tree users. + */ +#define flush_scheduled_work() \ +({ \ + if (0) \ + __warn_flushing_systemwide_wq(); \ + __flush_workqueue(system_wq); \ +}) + +/* + * Although there is no longer in-tree caller, for now just emit warning + * in order to give out-of-tree callers time to update. + */ +#define flush_workqueue(wq) \ +({ \ + struct workqueue_struct *_wq = (wq); \ + \ + if ((__builtin_constant_p(_wq == system_wq) && \ + _wq == system_wq) || \ + (__builtin_constant_p(_wq == system_highpri_wq) && \ + _wq == system_highpri_wq) || \ + (__builtin_constant_p(_wq == system_long_wq) && \ + _wq == system_long_wq) || \ + (__builtin_constant_p(_wq == system_unbound_wq) && \ + _wq == system_unbound_wq) || \ + (__builtin_constant_p(_wq == system_freezable_wq) && \ + _wq == system_freezable_wq) || \ + (__builtin_constant_p(_wq == system_power_efficient_wq) && \ + _wq == system_power_efficient_wq) || \ + (__builtin_constant_p(_wq == system_freezable_power_efficient_wq) && \ + _wq == system_freezable_power_efficient_wq)) \ + __warn_flushing_systemwide_wq(); \ + __flush_workqueue(_wq); \ +}) /** * schedule_delayed_work_on - queue work in global workqueue on CPU after delay diff --git a/include/linux/xarray.h b/include/linux/xarray.h index 72feab5ea8d4..c29e11b2c073 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -1508,6 +1508,7 @@ void *xas_find_marked(struct xa_state *, unsigned long max, xa_mark_t); void xas_init_marks(const struct xa_state *); bool xas_nomem(struct xa_state *, gfp_t); +void xas_destroy(struct xa_state *); void xas_pause(struct xa_state *); void xas_create_range(struct xa_state *); diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index 021778a7e1af..6484095a8c01 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -612,5 +612,6 @@ int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch, enum tc_setup_type type, void *data, struct flow_block_offload *bo, void (*cleanup)(struct flow_block_cb *block_cb)); +bool flow_indr_dev_exists(void); #endif /* _NET_FLOW_OFFLOAD_H */ diff --git a/include/net/inet_connection_sock.h 
b/include/net/inet_connection_sock.h index 077cd730ce2f..85cd695e7fd1 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -25,7 +25,6 @@ #undef INET_CSK_CLEAR_TIMERS struct inet_bind_bucket; -struct inet_bind2_bucket; struct tcp_congestion_ops; /* @@ -58,7 +57,6 @@ struct inet_connection_sock_af_ops { * * @icsk_accept_queue: FIFO of established children * @icsk_bind_hash: Bind node - * @icsk_bind2_hash: Bind node in the bhash2 table * @icsk_timeout: Timeout * @icsk_retransmit_timer: Resend (no ack) * @icsk_rto: Retransmit timeout @@ -85,7 +83,6 @@ struct inet_connection_sock { struct inet_sock icsk_inet; struct request_sock_queue icsk_accept_queue; struct inet_bind_bucket *icsk_bind_hash; - struct inet_bind2_bucket *icsk_bind2_hash; unsigned long icsk_timeout; struct timer_list icsk_retransmit_timer; struct timer_list icsk_delack_timer; diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index a0887b70967b..ebfa3df6f8dc 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -90,32 +90,11 @@ struct inet_bind_bucket { struct hlist_head owners; }; -struct inet_bind2_bucket { - possible_net_t ib_net; - int l3mdev; - unsigned short port; - union { -#if IS_ENABLED(CONFIG_IPV6) - struct in6_addr v6_rcv_saddr; -#endif - __be32 rcv_saddr; - }; - /* Node in the inet2_bind_hashbucket chain */ - struct hlist_node node; - /* List of sockets hashed to this bucket */ - struct hlist_head owners; -}; - static inline struct net *ib_net(struct inet_bind_bucket *ib) { return read_pnet(&ib->ib_net); } -static inline struct net *ib2_net(struct inet_bind2_bucket *ib) -{ - return read_pnet(&ib->ib_net); -} - #define inet_bind_bucket_for_each(tb, head) \ hlist_for_each_entry(tb, head, node) @@ -124,15 +103,6 @@ struct inet_bind_hashbucket { struct hlist_head chain; }; -/* This is synchronized using the inet_bind_hashbucket's spinlock. - * Instead of having separate spinlocks, the inet_bind2_hashbucket can share - * the inet_bind_hashbucket's given that in every case where the bhash2 table - * is useful, a lookup in the bhash table also occurs. - */ -struct inet_bind2_hashbucket { - struct hlist_head chain; -}; - /* Sockets can be hashed in established or listening table. * We must use different 'nulls' end-of-chain value for all hash buckets : * A socket might transition from ESTABLISH to LISTEN state without @@ -164,12 +134,6 @@ struct inet_hashinfo { */ struct kmem_cache *bind_bucket_cachep; struct inet_bind_hashbucket *bhash; - /* The 2nd binding table hashed by port and address. - * This is used primarily for expediting the resolution of bind - * conflicts. 
- */ - struct kmem_cache *bind2_bucket_cachep; - struct inet_bind2_hashbucket *bhash2; unsigned int bhash_size; /* The 2nd listener table hashed by local port and address */ @@ -229,36 +193,6 @@ inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net, void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb); -static inline bool check_bind_bucket_match(struct inet_bind_bucket *tb, - struct net *net, - const unsigned short port, - int l3mdev) -{ - return net_eq(ib_net(tb), net) && tb->port == port && - tb->l3mdev == l3mdev; -} - -struct inet_bind2_bucket * -inet_bind2_bucket_create(struct kmem_cache *cachep, struct net *net, - struct inet_bind2_hashbucket *head, - const unsigned short port, int l3mdev, - const struct sock *sk); - -void inet_bind2_bucket_destroy(struct kmem_cache *cachep, - struct inet_bind2_bucket *tb); - -struct inet_bind2_bucket * -inet_bind2_bucket_find(struct inet_hashinfo *hinfo, struct net *net, - const unsigned short port, int l3mdev, - struct sock *sk, - struct inet_bind2_hashbucket **head); - -bool check_bind2_bucket_match_nulladdr(struct inet_bind2_bucket *tb, - struct net *net, - const unsigned short port, - int l3mdev, - const struct sock *sk); - static inline u32 inet_bhashfn(const struct net *net, const __u16 lport, const u32 bhash_size) { @@ -266,7 +200,7 @@ static inline u32 inet_bhashfn(const struct net *net, const __u16 lport, } void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb, - struct inet_bind2_bucket *tb2, const unsigned short snum); + const unsigned short snum); /* Caller must disable local BH processing. */ int __inet_inherit_port(const struct sock *sk, struct sock *child); diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index c1b5dcd6597c..daead5fb389a 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -253,6 +253,11 @@ struct inet_sock { #define IP_CMSG_CHECKSUM BIT(7) #define IP_CMSG_RECVFRAGSIZE BIT(8) +static inline bool sk_is_inet(struct sock *sk) +{ + return sk->sk_family == AF_INET || sk->sk_family == AF_INET6; +} + /** * sk_to_full_sk - Access to a full socket * @sk: pointer to a socket diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 5b38bf1a586b..de9dcc5652c4 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -1063,7 +1063,7 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr); int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), - void *from, int length, int transhdrlen, + void *from, size_t length, int transhdrlen, struct ipcm6_cookie *ipc6, struct flowi6 *fl6, struct rt6_info *rt, unsigned int flags); @@ -1079,7 +1079,7 @@ struct sk_buff *__ip6_make_skb(struct sock *sk, struct sk_buff_head *queue, struct sk_buff *ip6_make_skb(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), - void *from, int length, int transhdrlen, + void *from, size_t length, int transhdrlen, struct ipcm6_cookie *ipc6, struct rt6_info *rt, unsigned int flags, struct inet_cork_full *cork); diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 20af9d3557b9..279ae0fff7ad 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -1090,7 +1090,6 @@ struct nft_stats { struct nft_hook { struct list_head list; - bool inactive; struct nf_hook_ops ops; struct rcu_head rcu; }; diff --git a/include/net/netfilter/nf_tables_offload.h b/include/net/netfilter/nf_tables_offload.h 
index 797147843958..3568b6a2f5f0 100644 --- a/include/net/netfilter/nf_tables_offload.h +++ b/include/net/netfilter/nf_tables_offload.h @@ -92,7 +92,7 @@ int nft_flow_rule_offload_commit(struct net *net); NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg) \ memset(&(__reg)->mask, 0xff, (__reg)->len); -int nft_chain_offload_priority(struct nft_base_chain *basechain); +bool nft_chain_offload_support(const struct nft_base_chain *basechain); int nft_offload_init(void); void nft_offload_exit(void); diff --git a/include/net/sock.h b/include/net/sock.h index c585ef6565d9..72ca97ccb460 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -348,7 +348,6 @@ struct sk_filter; * @sk_txtime_report_errors: set report errors mode for SO_TXTIME * @sk_txtime_unused: unused txtime flags * @ns_tracker: tracker for netns reference - * @sk_bind2_node: bind node in the bhash2 table */ struct sock { /* @@ -538,7 +537,6 @@ struct sock { #endif struct rcu_head sk_rcu; netns_tracker ns_tracker; - struct hlist_node sk_bind2_node; }; enum sk_pacing { @@ -819,16 +817,6 @@ static inline void sk_add_bind_node(struct sock *sk, hlist_add_head(&sk->sk_bind_node, list); } -static inline void __sk_del_bind2_node(struct sock *sk) -{ - __hlist_del(&sk->sk_bind2_node); -} - -static inline void sk_add_bind2_node(struct sock *sk, struct hlist_head *list) -{ - hlist_add_head(&sk->sk_bind2_node, list); -} - #define sk_for_each(__sk, list) \ hlist_for_each_entry(__sk, list, sk_node) #define sk_for_each_rcu(__sk, list) \ @@ -846,8 +834,6 @@ static inline void sk_add_bind2_node(struct sock *sk, struct hlist_head *list) hlist_for_each_entry_safe(__sk, tmp, list, sk_node) #define sk_for_each_bound(__sk, list) \ hlist_for_each_entry(__sk, list, sk_bind_node) -#define sk_for_each_bound_bhash2(__sk, list) \ - hlist_for_each_entry(__sk, list, sk_bind2_node) /** * sk_for_each_entry_offset_rcu - iterate over a list at a given struct offset diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h index 66fcc5a1a5b1..aa2f951b07cd 100644 --- a/include/trace/events/io_uring.h +++ b/include/trace/events/io_uring.h @@ -158,6 +158,8 @@ TRACE_EVENT(io_uring_queue_async_work, __field( unsigned int, flags ) __field( struct io_wq_work *, work ) __field( int, rw ) + + __string( op_str, io_uring_get_opcode(opcode) ) ), TP_fast_assign( @@ -168,11 +170,13 @@ TRACE_EVENT(io_uring_queue_async_work, __entry->opcode = opcode; __entry->work = work; __entry->rw = rw; + + __assign_str(op_str, io_uring_get_opcode(opcode)); ), TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, flags 0x%x, %s queue, work %p", __entry->ctx, __entry->req, __entry->user_data, - io_uring_get_opcode(__entry->opcode), + __get_str(op_str), __entry->flags, __entry->rw ? 
"hashed" : "normal", __entry->work) ); @@ -198,6 +202,8 @@ TRACE_EVENT(io_uring_defer, __field( void *, req ) __field( unsigned long long, data ) __field( u8, opcode ) + + __string( op_str, io_uring_get_opcode(opcode) ) ), TP_fast_assign( @@ -205,11 +211,13 @@ TRACE_EVENT(io_uring_defer, __entry->req = req; __entry->data = user_data; __entry->opcode = opcode; + + __assign_str(op_str, io_uring_get_opcode(opcode)); ), TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s", __entry->ctx, __entry->req, __entry->data, - io_uring_get_opcode(__entry->opcode)) + __get_str(op_str)) ); /** @@ -298,6 +306,8 @@ TRACE_EVENT(io_uring_fail_link, __field( unsigned long long, user_data ) __field( u8, opcode ) __field( void *, link ) + + __string( op_str, io_uring_get_opcode(opcode) ) ), TP_fast_assign( @@ -306,11 +316,13 @@ TRACE_EVENT(io_uring_fail_link, __entry->user_data = user_data; __entry->opcode = opcode; __entry->link = link; + + __assign_str(op_str, io_uring_get_opcode(opcode)); ), TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, link %p", __entry->ctx, __entry->req, __entry->user_data, - io_uring_get_opcode(__entry->opcode), __entry->link) + __get_str(op_str), __entry->link) ); /** @@ -390,6 +402,8 @@ TRACE_EVENT(io_uring_submit_sqe, __field( u32, flags ) __field( bool, force_nonblock ) __field( bool, sq_thread ) + + __string( op_str, io_uring_get_opcode(opcode) ) ), TP_fast_assign( @@ -400,11 +414,13 @@ TRACE_EVENT(io_uring_submit_sqe, __entry->flags = flags; __entry->force_nonblock = force_nonblock; __entry->sq_thread = sq_thread; + + __assign_str(op_str, io_uring_get_opcode(opcode)); ), TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, flags 0x%x, " "non block %d, sq_thread %d", __entry->ctx, __entry->req, - __entry->user_data, io_uring_get_opcode(__entry->opcode), + __entry->user_data, __get_str(op_str), __entry->flags, __entry->force_nonblock, __entry->sq_thread) ); @@ -435,6 +451,8 @@ TRACE_EVENT(io_uring_poll_arm, __field( u8, opcode ) __field( int, mask ) __field( int, events ) + + __string( op_str, io_uring_get_opcode(opcode) ) ), TP_fast_assign( @@ -444,11 +462,13 @@ TRACE_EVENT(io_uring_poll_arm, __entry->opcode = opcode; __entry->mask = mask; __entry->events = events; + + __assign_str(op_str, io_uring_get_opcode(opcode)); ), TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, mask 0x%x, events 0x%x", __entry->ctx, __entry->req, __entry->user_data, - io_uring_get_opcode(__entry->opcode), + __get_str(op_str), __entry->mask, __entry->events) ); @@ -474,6 +494,8 @@ TRACE_EVENT(io_uring_task_add, __field( unsigned long long, user_data ) __field( u8, opcode ) __field( int, mask ) + + __string( op_str, io_uring_get_opcode(opcode) ) ), TP_fast_assign( @@ -482,11 +504,13 @@ TRACE_EVENT(io_uring_task_add, __entry->user_data = user_data; __entry->opcode = opcode; __entry->mask = mask; + + __assign_str(op_str, io_uring_get_opcode(opcode)); ), TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, mask %x", __entry->ctx, __entry->req, __entry->user_data, - io_uring_get_opcode(__entry->opcode), + __get_str(op_str), __entry->mask) ); @@ -523,6 +547,8 @@ TRACE_EVENT(io_uring_req_failed, __field( u64, pad1 ) __field( u64, addr3 ) __field( int, error ) + + __string( op_str, io_uring_get_opcode(sqe->opcode) ) ), TP_fast_assign( @@ -542,6 +568,8 @@ TRACE_EVENT(io_uring_req_failed, __entry->pad1 = sqe->__pad2[0]; __entry->addr3 = sqe->addr3; __entry->error = error; + + __assign_str(op_str, io_uring_get_opcode(sqe->opcode)); ), TP_printk("ring %p, req %p, user_data 0x%llx, " 
@@ -550,7 +578,7 @@ TRACE_EVENT(io_uring_req_failed, "personality=%d, file_index=%d, pad=0x%llx, addr3=%llx, " "error=%d", __entry->ctx, __entry->req, __entry->user_data, - io_uring_get_opcode(__entry->opcode), + __get_str(op_str), __entry->flags, __entry->ioprio, (unsigned long long)__entry->off, (unsigned long long) __entry->addr, __entry->len, diff --git a/include/trace/events/libata.h b/include/trace/events/libata.h index d4e631aa976f..6025dd8ba4aa 100644 --- a/include/trace/events/libata.h +++ b/include/trace/events/libata.h @@ -288,6 +288,7 @@ DECLARE_EVENT_CLASS(ata_qc_complete_template, __entry->hob_feature = qc->result_tf.hob_feature; __entry->nsect = qc->result_tf.nsect; __entry->hob_nsect = qc->result_tf.hob_nsect; + __entry->flags = qc->flags; ), TP_printk("ata_port=%u ata_dev=%u tag=%d flags=%s status=%s " \ diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h index 6154a2e72bce..262d52021c23 100644 --- a/include/trace/events/workqueue.h +++ b/include/trace/events/workqueue.h @@ -22,7 +22,7 @@ struct pool_workqueue; */ TRACE_EVENT(workqueue_queue_work, - TP_PROTO(unsigned int req_cpu, struct pool_workqueue *pwq, + TP_PROTO(int req_cpu, struct pool_workqueue *pwq, struct work_struct *work), TP_ARGS(req_cpu, pwq, work), @@ -31,8 +31,8 @@ TRACE_EVENT(workqueue_queue_work, __field( void *, work ) __field( void *, function) __string( workqueue, pwq->wq->name) - __field( unsigned int, req_cpu ) - __field( unsigned int, cpu ) + __field( int, req_cpu ) + __field( int, cpu ) ), TP_fast_assign( @@ -43,7 +43,7 @@ TRACE_EVENT(workqueue_queue_work, __entry->cpu = pwq->pool->cpu; ), - TP_printk("work struct=%p function=%ps workqueue=%s req_cpu=%u cpu=%u", + TP_printk("work struct=%p function=%ps workqueue=%s req_cpu=%d cpu=%d", __entry->work, __entry->function, __get_str(workqueue), __entry->req_cpu, __entry->cpu) ); diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 776e0278f9dd..53e7dae92e42 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -47,7 +47,6 @@ struct io_uring_sqe { __u32 unlink_flags; __u32 hardlink_flags; __u32 xattr_flags; - __u32 close_flags; }; __u64 user_data; /* data to be passed back at completion time */ /* pack this to avoid bogus arm OABI complaints */ @@ -260,11 +259,6 @@ enum io_uring_op { #define IORING_ACCEPT_MULTISHOT (1U << 0) /* - * close flags, store in sqe->close_flags - */ -#define IORING_CLOSE_FD_AND_FILE_SLOT (1U << 0) - -/* * IO completion data structure (Completion Queue Entry) */ struct io_uring_cqe { diff --git a/include/uapi/linux/tls.h b/include/uapi/linux/tls.h index ac39328eabe7..bb8f80812b0b 100644 --- a/include/uapi/linux/tls.h +++ b/include/uapi/linux/tls.h @@ -39,7 +39,7 @@ /* TLS socket options */ #define TLS_TX 1 /* Set transmit parameters */ #define TLS_RX 2 /* Set receive parameters */ -#define TLS_TX_ZEROCOPY_SENDFILE 3 /* transmit zerocopy sendfile */ +#define TLS_TX_ZEROCOPY_RO 3 /* TX zerocopy (only sendfile now) */ /* Supported versions */ #define TLS_VERSION_MINOR(ver) ((ver) & 0xFF) @@ -161,7 +161,7 @@ enum { TLS_INFO_CIPHER, TLS_INFO_TXCONF, TLS_INFO_RXCONF, - TLS_INFO_ZC_SENDFILE, + TLS_INFO_ZC_RO_TX, __TLS_INFO_MAX, }; #define TLS_INFO_MAX (__TLS_INFO_MAX - 1) diff --git a/include/xen/arm/xen-ops.h b/include/xen/arm/xen-ops.h new file mode 100644 index 000000000000..b0766a660338 --- /dev/null +++ b/include/xen/arm/xen-ops.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_ARM_XEN_OPS_H +#define _ASM_ARM_XEN_OPS_H + 
+#include <xen/swiotlb-xen.h> +#include <xen/xen-ops.h> + +static inline void xen_setup_dma_ops(struct device *dev) +{ +#ifdef CONFIG_XEN + if (xen_is_grant_dma_device(dev)) + xen_grant_setup_dma_ops(dev); + else if (xen_swiotlb_detect()) + dev->dma_ops = &xen_swiotlb_dma_ops; +#endif +} + +#endif /* _ASM_ARM_XEN_OPS_H */ diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h index 527c9907f99c..e279be353e3f 100644 --- a/include/xen/grant_table.h +++ b/include/xen/grant_table.h @@ -127,10 +127,14 @@ int gnttab_try_end_foreign_access(grant_ref_t ref); */ int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head); +int gnttab_alloc_grant_reference_seq(unsigned int count, grant_ref_t *first); + void gnttab_free_grant_reference(grant_ref_t ref); void gnttab_free_grant_references(grant_ref_t head); +void gnttab_free_grant_reference_seq(grant_ref_t head, unsigned int count); + int gnttab_empty_grant_references(const grant_ref_t *pprivate_head); int gnttab_claim_grant_reference(grant_ref_t *pprivate_head); diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h index c7c1b46ff4cd..80546960f8b7 100644 --- a/include/xen/xen-ops.h +++ b/include/xen/xen-ops.h @@ -214,4 +214,17 @@ static inline void xen_preemptible_hcall_end(void) { } #endif /* CONFIG_XEN_PV && !CONFIG_PREEMPTION */ +#ifdef CONFIG_XEN_GRANT_DMA_OPS +void xen_grant_setup_dma_ops(struct device *dev); +bool xen_is_grant_dma_device(struct device *dev); +#else +static inline void xen_grant_setup_dma_ops(struct device *dev) +{ +} +static inline bool xen_is_grant_dma_device(struct device *dev) +{ + return false; +} +#endif /* CONFIG_XEN_GRANT_DMA_OPS */ + #endif /* INCLUDE_XEN_OPS_H */ diff --git a/include/xen/xen.h b/include/xen/xen.h index a99bab817523..0780a81e140d 100644 --- a/include/xen/xen.h +++ b/include/xen/xen.h @@ -52,6 +52,14 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1, extern u64 xen_saved_max_mem_size; #endif +#include <linux/platform-feature.h> + +static inline void xen_set_restricted_virtio_memory_access(void) +{ + if (IS_ENABLED(CONFIG_XEN_VIRTIO) && xen_domain()) + platform_set(PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS); +} + #ifdef CONFIG_XEN_UNPOPULATED_ALLOC int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages); void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages); diff --git a/init/Kconfig b/init/Kconfig index c984afc489de..c7900e8975f1 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -885,6 +885,15 @@ config CC_IMPLICIT_FALLTHROUGH default "-Wimplicit-fallthrough=5" if CC_IS_GCC && $(cc-option,-Wimplicit-fallthrough=5) default "-Wimplicit-fallthrough" if CC_IS_CLANG && $(cc-option,-Wunreachable-code-fallthrough) +# Currently, disable gcc-12 array-bounds globally. +# We may want to target only particular configurations some day. 
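+#
+# (Assumption about the rest of this series: the top-level Makefile is
+# expected to consume this symbol along the lines of
+#	KBUILD_CFLAGS-$(CONFIG_CC_NO_ARRAY_BOUNDS) += -Wno-array-bounds
+# so selecting CC_NO_ARRAY_BOUNDS disables the warning globally.)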
+config GCC12_NO_ARRAY_BOUNDS + def_bool y + +config CC_NO_ARRAY_BOUNDS + bool + default y if CC_IS_GCC && GCC_VERSION >= 120000 && GCC_VERSION < 130000 && GCC12_NO_ARRAY_BOUNDS + # # For architectures that know their GCC __int128 support is sound # diff --git a/kernel/Makefile b/kernel/Makefile index 318789c728d3..a7e1f49ab2b3 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -7,7 +7,7 @@ obj-y = fork.o exec_domain.o panic.o \ cpu.o exit.o softirq.o resource.o \ sysctl.o capability.o ptrace.o user.o \ signal.o sys.o umh.o workqueue.o pid.o task_work.o \ - extable.o params.o \ + extable.o params.o platform-feature.o \ kthread.o sys_ni.o nsproxy.o \ notifier.o ksysfs.o cred.o reboot.o \ async.o range.o smpboot.o ucount.o regset.o diff --git a/kernel/auditsc.c b/kernel/auditsc.c index f3a2abd6d1a1..3a8c9d744800 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -1014,10 +1014,10 @@ static void audit_reset_context(struct audit_context *ctx) ctx->target_comm[0] = '\0'; unroll_tree_refs(ctx, NULL, 0); WARN_ON(!list_empty(&ctx->killed_trees)); - ctx->type = 0; audit_free_module(ctx); ctx->fds[0] = -1; audit_proctitle_free(ctx); + ctx->type = 0; /* reset last for audit_free_*() */ } static inline struct audit_context *audit_alloc_context(enum audit_state state) diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 7bccaa4646e5..eb12d4f705cc 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -4815,6 +4815,7 @@ static int btf_check_type_tags(struct btf_verifier_env *env, n = btf_nr_types(btf); for (i = start_id; i < n; i++) { const struct btf_type *t; + int chain_limit = 32; u32 cur_id = i; t = btf_type_by_id(btf, i); @@ -4827,6 +4828,10 @@ static int btf_check_type_tags(struct btf_verifier_env *env, in_tags = btf_type_is_type_tag(t); while (btf_type_is_modifier(t)) { + if (!chain_limit--) { + btf_verifier_log(env, "Max chain length or cycle detected"); + return -ELOOP; + } if (btf_type_is_type_tag(t)) { if (!in_tags) { btf_verifier_log(env, "Type tags don't precede modifiers"); @@ -6054,6 +6059,7 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, struct bpf_reg_state *regs, bool ptr_to_mem_ok) { + enum bpf_prog_type prog_type = resolve_prog_type(env->prog); struct bpf_verifier_log *log = &env->log; u32 i, nargs, ref_id, ref_obj_id = 0; bool is_kfunc = btf_is_kernel(btf); @@ -6171,7 +6177,7 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, return -EINVAL; } /* rest of the arguments can be anything, like normal kfunc */ - } else if (btf_get_prog_ctx_type(log, btf, t, env->prog->type, i)) { + } else if (btf_get_prog_ctx_type(log, btf, t, prog_type, i)) { /* If function expects ctx type in BTF check that caller * is passing PTR_TO_CTX. */ diff --git a/kernel/cfi.c b/kernel/cfi.c index 9594cfd1cf2c..08102d19ec15 100644 --- a/kernel/cfi.c +++ b/kernel/cfi.c @@ -281,6 +281,8 @@ static inline cfi_check_fn find_module_check_fn(unsigned long ptr) static inline cfi_check_fn find_check_fn(unsigned long ptr) { cfi_check_fn fn = NULL; + unsigned long flags; + bool rcu_idle; if (is_kernel_text(ptr)) return __cfi_check; @@ -290,13 +292,21 @@ static inline cfi_check_fn find_check_fn(unsigned long ptr) * the shadow and __module_address use RCU, so we need to wake it * up if necessary. 
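The chain_limit guard added to btf_check_type_tags() is the usual defense when following pointer chains supplied by untrusted BTF: bound the walk so a malformed or cyclic modifier chain cannot spin the verifier forever. The idiom in isolation, as a hypothetical helper:

static const struct btf_type *skip_modifiers_bounded(const struct btf *btf,
						     const struct btf_type *t)
{
	int chain_limit = 32;

	while (btf_type_is_modifier(t)) {
		if (!chain_limit--)
			return NULL;	/* chain too long, or a cycle */
		t = btf_type_by_id(btf, t->type);
	}
	return t;
}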
*/ - RCU_NONIDLE({ - if (IS_ENABLED(CONFIG_CFI_CLANG_SHADOW)) - fn = find_shadow_check_fn(ptr); + rcu_idle = !rcu_is_watching(); + if (rcu_idle) { + local_irq_save(flags); + rcu_irq_enter(); + } + + if (IS_ENABLED(CONFIG_CFI_CLANG_SHADOW)) + fn = find_shadow_check_fn(ptr); + if (!fn) + fn = find_module_check_fn(ptr); - if (!fn) - fn = find_module_check_fn(ptr); - }); + if (rcu_idle) { + rcu_irq_exit(); + local_irq_restore(flags); + } return fn; } diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c index ac740630c79c..2caafd13f8aa 100644 --- a/kernel/dma/debug.c +++ b/kernel/dma/debug.c @@ -564,7 +564,7 @@ static void add_dma_entry(struct dma_debug_entry *entry, unsigned long attrs) rc = active_cacheline_insert(entry); if (rc == -ENOMEM) { - pr_err("cacheline tracking ENOMEM, dma-debug disabled\n"); + pr_err_once("cacheline tracking ENOMEM, dma-debug disabled\n"); global_disable = true; } else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) { err_printk(entry->dev, entry, diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index e978f36e6be8..8d0b68a17042 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -357,7 +357,7 @@ void dma_direct_free(struct device *dev, size_t size, } else { if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED)) arch_dma_clear_uncached(cpu_addr, size); - if (dma_set_encrypted(dev, cpu_addr, 1 << page_order)) + if (dma_set_encrypted(dev, cpu_addr, size)) return; } @@ -392,7 +392,6 @@ void dma_direct_free_pages(struct device *dev, size_t size, struct page *page, dma_addr_t dma_addr, enum dma_data_direction dir) { - unsigned int page_order = get_order(size); void *vaddr = page_address(page); /* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */ @@ -400,7 +399,7 @@ void dma_direct_free_pages(struct device *dev, size_t size, dma_free_from_pool(dev, vaddr, size)) return; - if (dma_set_encrypted(dev, vaddr, 1 << page_order)) + if (dma_set_encrypted(dev, vaddr, size)) return; __dma_direct_free_pages(dev, page, size); } diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index dfa1de89dc94..cb50f8d38360 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -192,7 +192,7 @@ void __init swiotlb_update_mem_attributes(void) } static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start, - unsigned long nslabs, bool late_alloc) + unsigned long nslabs, unsigned int flags, bool late_alloc) { void *vaddr = phys_to_virt(start); unsigned long bytes = nslabs << IO_TLB_SHIFT, i; @@ -203,8 +203,7 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start, mem->index = 0; mem->late_alloc = late_alloc; - if (swiotlb_force_bounce) - mem->force_bounce = true; + mem->force_bounce = swiotlb_force_bounce || (flags & SWIOTLB_FORCE); spin_lock_init(&mem->lock); for (i = 0; i < mem->nslabs; i++) { @@ -275,8 +274,7 @@ retry: panic("%s: Failed to allocate %zu bytes align=0x%lx\n", __func__, alloc_size, PAGE_SIZE); - swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, false); - mem->force_bounce = flags & SWIOTLB_FORCE; + swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, flags, false); if (flags & SWIOTLB_VERBOSE) swiotlb_print_info(); @@ -348,7 +346,7 @@ retry: set_memory_decrypted((unsigned long)vstart, (nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT); - swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, true); + swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, 0, true); swiotlb_print_info(); return 0; @@ -835,8 +833,8 @@ static int rmem_swiotlb_device_init(struct reserved_mem *rmem, 
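The dma_set_encrypted() changes in dma_direct_free() and dma_direct_free_pages() fix a units mismatch: the argument is a size in bytes, while the old code passed a page count. For an 8 KiB buffer the difference is 2 versus 8192:

#include <stdio.h>

int main(void)
{
	unsigned long size = 8192;	/* two 4 KiB pages */
	unsigned int page_order = 1;	/* get_order(8192) == 1 */

	printf("passed before the fix: %lu\n", 1UL << page_order);	/* 2 */
	printf("passed after the fix:  %lu\n", size);			/* 8192 */
	return 0;
}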
set_memory_decrypted((unsigned long)phys_to_virt(rmem->base), rmem->size >> PAGE_SHIFT); - swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, false); - mem->force_bounce = true; + swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, SWIOTLB_FORCE, + false); mem->for_alloc = true; rmem->priv = mem; diff --git a/kernel/entry/kvm.c b/kernel/entry/kvm.c index 9d09f489b60e..2e0f75bcb7fd 100644 --- a/kernel/entry/kvm.c +++ b/kernel/entry/kvm.c @@ -9,12 +9,6 @@ static int xfer_to_guest_mode_work(struct kvm_vcpu *vcpu, unsigned long ti_work) int ret; if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) { - clear_notify_signal(); - if (task_work_pending(current)) - task_work_run(); - } - - if (ti_work & _TIF_SIGPENDING) { kvm_handle_signal_exit(vcpu); return -EINTR; } diff --git a/kernel/hung_task.c b/kernel/hung_task.c index 80bfea5dd5c4..cff3ae8c818f 100644 --- a/kernel/hung_task.c +++ b/kernel/hung_task.c @@ -127,8 +127,6 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout) * complain: */ if (sysctl_hung_task_warnings) { - printk_prefer_direct_enter(); - if (sysctl_hung_task_warnings > 0) sysctl_hung_task_warnings--; pr_err("INFO: task %s:%d blocked for more than %ld seconds.\n", @@ -144,8 +142,6 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout) if (sysctl_hung_task_all_cpu_backtrace) hung_task_show_all_bt = true; - - printk_prefer_direct_exit(); } touch_nmi_watchdog(); @@ -208,17 +204,12 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout) } unlock: rcu_read_unlock(); - if (hung_task_show_lock) { - printk_prefer_direct_enter(); + if (hung_task_show_lock) debug_show_all_locks(); - printk_prefer_direct_exit(); - } if (hung_task_show_all_bt) { hung_task_show_all_bt = false; - printk_prefer_direct_enter(); trigger_all_cpu_backtrace(); - printk_prefer_direct_exit(); } if (hung_task_call_panic) diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index e6b8e564b37f..886789dcee43 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -1006,8 +1006,10 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle, if (desc->irq_data.chip != &no_irq_chip) mask_ack_irq(desc); irq_state_set_disabled(desc); - if (is_chained) + if (is_chained) { desc->action = NULL; + WARN_ON(irq_chip_pm_put(irq_desc_get_irq_data(desc))); + } desc->depth = 1; } desc->handle_irq = handle; @@ -1033,6 +1035,7 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle, irq_settings_set_norequest(desc); irq_settings_set_nothread(desc); desc->action = &chained_action; + WARN_ON(irq_chip_pm_get(irq_desc_get_irq_data(desc))); irq_activate_and_startup(desc, IRQ_RESEND); } } diff --git a/kernel/kthread.c b/kernel/kthread.c index 544fd4097406..3c677918d8f2 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -340,7 +340,7 @@ static int kthread(void *_create) self = to_kthread(current); - /* If user was SIGKILLed, I release the structure. */ + /* Release the structure when caller killed by a fatal signal. */ done = xchg(&create->done, NULL); if (!done) { kfree(create); @@ -398,7 +398,7 @@ static void create_kthread(struct kthread_create_info *create) /* We want our own signal handler (we take no signals by default). */ pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD); if (pid < 0) { - /* If user was SIGKILLed, I release the structure. */ + /* Release the structure when caller killed by a fatal signal. 
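The __irq_do_set_handler() hunks tie a runtime-PM reference to the lifetime of a chained flow handler: irq_chip_pm_get() when it is installed, irq_chip_pm_put() when it is torn down, mirroring what request_irq()/free_irq() already do for normal actions. A demux driver sees none of this; the usual install is unchanged (demo_demux_handler and host_data are hypothetical):

static void demo_demux_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);
	/* ... decode child interrupts, generic_handle_domain_irq() each ... */
	chained_irq_exit(chip, desc);
}

/* installing the handler now implicitly takes the PM reference */
irq_set_chained_handler_and_data(parent_irq, demo_demux_handler, host_data);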
*/ struct completion *done = xchg(&create->done, NULL); if (!done) { @@ -440,9 +440,9 @@ struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data), */ if (unlikely(wait_for_completion_killable(&done))) { /* - * If I was SIGKILLed before kthreadd (or new kernel thread) - * calls complete(), leave the cleanup of this structure to - * that thread. + * If I was killed by a fatal signal before kthreadd (or new + * kernel thread) calls complete(), leave the cleanup of this + * structure to that thread. */ if (xchg(&create->done, NULL)) return ERR_PTR(-EINTR); @@ -876,7 +876,7 @@ fail_task: * * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM) * when the needed structures could not get allocated, and ERR_PTR(-EINTR) - * when the worker was SIGKILLed. + * when the caller was killed by a fatal signal. */ struct kthread_worker * kthread_create_worker(unsigned int flags, const char namefmt[], ...) @@ -925,7 +925,7 @@ EXPORT_SYMBOL(kthread_create_worker); * Return: * The pointer to the allocated worker on success, ERR_PTR(-ENOMEM) * when the needed structures could not get allocated, and ERR_PTR(-EINTR) - * when the worker was SIGKILLed. + * when the caller was killed by a fatal signal. */ struct kthread_worker * kthread_create_worker_on_cpu(int cpu, unsigned int flags, diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 81e87280513e..f06b91ca6482 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -5432,7 +5432,7 @@ static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock) * be guessable and still allows some pin nesting in * our u32 pin_count. */ - cookie.val = 1 + (prandom_u32() >> 16); + cookie.val = 1 + (sched_clock() & 0xffff); hlock->pin_count += cookie.val; return cookie; } diff --git a/kernel/panic.c b/kernel/panic.c index a3c758dba15a..a3308af28a21 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -603,8 +603,6 @@ void __warn(const char *file, int line, void *caller, unsigned taint, { disable_trace_on_warning(); - printk_prefer_direct_enter(); - if (file) pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS\n", raw_smp_processor_id(), current->pid, file, line, @@ -634,8 +632,6 @@ void __warn(const char *file, int line, void *caller, unsigned taint, /* Just a warning, don't kill lockdep. 
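The reworded kthread comments all make the same point: these paths bail out with -EINTR on any fatal signal, not just SIGKILL. The caller pattern itself is unchanged; a sketch, with demo_work and demo_work_fn hypothetical:

struct kthread_worker *worker;

worker = kthread_create_worker(0, "demo-worker");
if (IS_ERR(worker))
	return PTR_ERR(worker);	/* -EINTR if the caller took a fatal signal */

kthread_init_work(&demo_work, demo_work_fn);
kthread_queue_work(worker, &demo_work);
/* ... */
kthread_destroy_worker(worker);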
*/ add_taint(taint, LOCKDEP_STILL_OK); - - printk_prefer_direct_exit(); } #ifndef __WARN_FLAGS diff --git a/kernel/platform-feature.c b/kernel/platform-feature.c new file mode 100644 index 000000000000..cb6a6c3e4fed --- /dev/null +++ b/kernel/platform-feature.c @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/bitops.h> +#include <linux/cache.h> +#include <linux/export.h> +#include <linux/platform-feature.h> + +#define PLATFORM_FEAT_ARRAY_SZ BITS_TO_LONGS(PLATFORM_FEAT_N) +static unsigned long __read_mostly platform_features[PLATFORM_FEAT_ARRAY_SZ]; + +void platform_set(unsigned int feature) +{ + set_bit(feature, platform_features); +} +EXPORT_SYMBOL_GPL(platform_set); + +void platform_clear(unsigned int feature) +{ + clear_bit(feature, platform_features); +} +EXPORT_SYMBOL_GPL(platform_clear); + +bool platform_has(unsigned int feature) +{ + return test_bit(feature, platform_features); +} +EXPORT_SYMBOL_GPL(platform_has); diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 20a66bf9f465..89c71fce225d 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -665,7 +665,7 @@ static void power_down(void) hibernation_platform_enter(); fallthrough; case HIBERNATION_SHUTDOWN: - if (pm_power_off) + if (kernel_can_power_off()) kernel_power_off(); break; } diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index ea3dd55709e7..b49c6ff6dca0 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -224,33 +224,6 @@ int devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write, static int nr_ext_console_drivers; /* - * Used to synchronize printing kthreads against direct printing via - * console_trylock/console_unlock. - * - * Values: - * -1 = console kthreads atomically blocked (via global trylock) - * 0 = no kthread printing, console not locked (via trylock) - * >0 = kthread(s) actively printing - * - * Note: For synchronizing against direct printing via - * console_lock/console_unlock, see the @lock variable in - * struct console. - */ -static atomic_t console_kthreads_active = ATOMIC_INIT(0); - -#define console_kthreads_atomic_tryblock() \ - (atomic_cmpxchg(&console_kthreads_active, 0, -1) == 0) -#define console_kthreads_atomic_unblock() \ - atomic_cmpxchg(&console_kthreads_active, -1, 0) -#define console_kthreads_atomically_blocked() \ - (atomic_read(&console_kthreads_active) == -1) - -#define console_kthread_printing_tryenter() \ - atomic_inc_unless_negative(&console_kthreads_active) -#define console_kthread_printing_exit() \ - atomic_dec(&console_kthreads_active) - -/* * Helper macros to handle lockdep when locking/unlocking console_sem. We use * macros instead of functions so that _RET_IP_ contains useful information. */ @@ -298,49 +271,14 @@ static bool panic_in_progress(void) } /* - * Tracks whether kthread printers are all blocked. A value of true implies - * that the console is locked via console_lock() or the console is suspended. - * Writing to this variable requires holding @console_sem. + * This is used for debugging the mess that is the VT code by + * keeping track if we have the console semaphore held. It's + * definitely not the perfect debug tool (we don't know if _WE_ + * hold it and are racing, but it helps tracking those weird code + * paths in the console code where we end up in places I want + * locked without the console semaphore held). */ -static bool console_kthreads_blocked; - -/* - * Block all kthread printers from a schedulable context. - * - * Requires holding @console_sem. 
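kernel/platform-feature.c is deliberately tiny: a single global bitmap with set/clear/test accessors. The producer side is the xen.h hunk earlier; a consumer check would look like this (the helper on the last line is hypothetical):

/* producer, e.g. early Xen guest setup */
platform_set(PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS);

/* consumer, e.g. virtio core deciding how to treat device DMA */
if (platform_has(PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS))
	restrict_device_memory_access(vdev);	/* hypothetical */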
- */ -static void console_kthreads_block(void) -{ - struct console *con; - - for_each_console(con) { - mutex_lock(&con->lock); - con->blocked = true; - mutex_unlock(&con->lock); - } - - console_kthreads_blocked = true; -} - -/* - * Unblock all kthread printers from a schedulable context. - * - * Requires holding @console_sem. - */ -static void console_kthreads_unblock(void) -{ - struct console *con; - - for_each_console(con) { - mutex_lock(&con->lock); - con->blocked = false; - mutex_unlock(&con->lock); - } - - console_kthreads_blocked = false; -} - -static int console_suspended; +static int console_locked, console_suspended; /* * Array of consoles built from command line options (console=) @@ -423,75 +361,7 @@ static int console_msg_format = MSG_FORMAT_DEFAULT; /* syslog_lock protects syslog_* variables and write access to clear_seq. */ static DEFINE_MUTEX(syslog_lock); -/* - * A flag to signify if printk_activate_kthreads() has already started the - * kthread printers. If true, any later registered consoles must start their - * own kthread directly. The flag is write protected by the console_lock. - */ -static bool printk_kthreads_available; - #ifdef CONFIG_PRINTK -static atomic_t printk_prefer_direct = ATOMIC_INIT(0); - -/** - * printk_prefer_direct_enter - cause printk() calls to attempt direct - * printing to all enabled consoles - * - * Since it is not possible to call into the console printing code from any - * context, there is no guarantee that direct printing will occur. - * - * This globally effects all printk() callers. - * - * Context: Any context. - */ -void printk_prefer_direct_enter(void) -{ - atomic_inc(&printk_prefer_direct); -} - -/** - * printk_prefer_direct_exit - restore printk() behavior - * - * Context: Any context. - */ -void printk_prefer_direct_exit(void) -{ - WARN_ON(atomic_dec_if_positive(&printk_prefer_direct) < 0); -} - -/* - * Calling printk() always wakes kthread printers so that they can - * flush the new message to their respective consoles. Also, if direct - * printing is allowed, printk() tries to flush the messages directly. - * - * Direct printing is allowed in situations when the kthreads - * are not available or the system is in a problematic state. - * - * See the implementation about possible races. - */ -static inline bool allow_direct_printing(void) -{ - /* - * Checking kthread availability is a possible race because the - * kthread printers can become permanently disabled during runtime. - * However, doing that requires holding the console_lock, so any - * pending messages will be direct printed by console_unlock(). - */ - if (!printk_kthreads_available) - return true; - - /* - * Prefer direct printing when the system is in a problematic state. - * The context that sets this state will always see the updated value. - * The other contexts do not care. Anyway, direct printing is just a - * best effort. The direct output is only possible when console_lock - * is not already taken and no kthread printers are actively printing. - */ - return (system_state > SYSTEM_RUNNING || - oops_in_progress || - atomic_read(&printk_prefer_direct)); -} - DECLARE_WAIT_QUEUE_HEAD(log_wait); /* All 3 protected by @syslog_lock. */ /* the next printk record to read by syslog(READ) or /proc/kmsg */ @@ -2382,10 +2252,10 @@ asmlinkage int vprintk_emit(int facility, int level, printed_len = vprintk_store(facility, level, dev_info, fmt, args); /* If called from the scheduler, we can not call up(). 
*/ - if (!in_sched && allow_direct_printing()) { + if (!in_sched) { /* * The caller may be holding system-critical or - * timing-sensitive locks. Disable preemption during direct + * timing-sensitive locks. Disable preemption during * printing of all remaining records to all consoles so that * this context can return as soon as possible. Hopefully * another printk() caller will take over the printing. @@ -2428,8 +2298,6 @@ EXPORT_SYMBOL(_printk); static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress); -static void printk_start_kthread(struct console *con); - #else /* CONFIG_PRINTK */ #define CONSOLE_LOG_MAX 0 @@ -2463,8 +2331,6 @@ static void call_console_driver(struct console *con, const char *text, size_t le } static bool suppress_message_printing(int level) { return false; } static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; } -static void printk_start_kthread(struct console *con) { } -static bool allow_direct_printing(void) { return true; } #endif /* CONFIG_PRINTK */ @@ -2683,14 +2549,6 @@ static int console_cpu_notify(unsigned int cpu) /* If trylock fails, someone else is doing the printing */ if (console_trylock()) console_unlock(); - else { - /* - * If a new CPU comes online, the conditions for - * printer_should_wake() may have changed for some - * kthread printer with !CON_ANYTIME. - */ - wake_up_klogd(); - } } return 0; } @@ -2710,7 +2568,7 @@ void console_lock(void) down_console_sem(); if (console_suspended) return; - console_kthreads_block(); + console_locked = 1; console_may_schedule = 1; } EXPORT_SYMBOL(console_lock); @@ -2731,30 +2589,15 @@ int console_trylock(void) up_console_sem(); return 0; } - if (!console_kthreads_atomic_tryblock()) { - up_console_sem(); - return 0; - } + console_locked = 1; console_may_schedule = 0; return 1; } EXPORT_SYMBOL(console_trylock); -/* - * This is used to help to make sure that certain paths within the VT code are - * running with the console lock held. It is definitely not the perfect debug - * tool (it is not known if the VT code is the task holding the console lock), - * but it helps tracking those weird code paths in the console code such as - * when the console is suspended: where the console is not locked but no - * console printing may occur. - * - * Note: This returns true when the console is suspended but is not locked. - * This is intentional because the VT code must consider that situation - * the same as if the console was locked. - */ int is_console_locked(void) { - return (console_kthreads_blocked || atomic_read(&console_kthreads_active)); + return console_locked; } EXPORT_SYMBOL(is_console_locked); @@ -2777,9 +2620,18 @@ static bool abandon_console_lock_in_panic(void) return atomic_read(&panic_cpu) != raw_smp_processor_id(); } -static inline bool __console_is_usable(short flags) +/* + * Check if the given console is currently capable and allowed to print + * records. + * + * Requires the console_lock. + */ +static inline bool console_is_usable(struct console *con) { - if (!(flags & CON_ENABLED)) + if (!(con->flags & CON_ENABLED)) + return false; + + if (!con->write) return false; /* @@ -2788,43 +2640,15 @@ static inline bool __console_is_usable(short flags) * cope (CON_ANYTIME) don't call them until this CPU is officially up. */ if (!cpu_online(raw_smp_processor_id()) && - !(flags & CON_ANYTIME)) + !(con->flags & CON_ANYTIME)) return false; return true; } -/* - * Check if the given console is currently capable and allowed to print - * records. 
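With the kthread printers reverted, direct printing again leans on one opportunistic idiom, visible in console_cpu_notify() above and in the irq_work handler further down: whoever wins the trylock flushes everything, and a lost trylock is fine because the current lock owner prints all pending records before releasing.

/* flush if possible; otherwise the lock owner will print for us */
if (console_trylock())
	console_unlock();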
- * - * Requires holding the console_lock. - */ -static inline bool console_is_usable(struct console *con) -{ - if (!con->write) - return false; - - return __console_is_usable(con->flags); -} - static void __console_unlock(void) { - /* - * Depending on whether console_lock() or console_trylock() was used, - * appropriately allow the kthread printers to continue. - */ - if (console_kthreads_blocked) - console_kthreads_unblock(); - else - console_kthreads_atomic_unblock(); - - /* - * New records may have arrived while the console was locked. - * Wake the kthread printers to print them. - */ - wake_up_klogd(); - + console_locked = 0; up_console_sem(); } @@ -2842,19 +2666,17 @@ static void __console_unlock(void) * * @handover will be set to true if a printk waiter has taken over the * console_lock, in which case the caller is no longer holding the - * console_lock. Otherwise it is set to false. A NULL pointer may be provided - * to disable allowing the console_lock to be taken over by a printk waiter. + * console_lock. Otherwise it is set to false. * * Returns false if the given console has no next record to print, otherwise * true. * - * Requires the console_lock if @handover is non-NULL. - * Requires con->lock otherwise. + * Requires the console_lock. */ -static bool __console_emit_next_record(struct console *con, char *text, char *ext_text, - char *dropped_text, bool *handover) +static bool console_emit_next_record(struct console *con, char *text, char *ext_text, + char *dropped_text, bool *handover) { - static atomic_t panic_console_dropped = ATOMIC_INIT(0); + static int panic_console_dropped; struct printk_info info; struct printk_record r; unsigned long flags; @@ -2863,8 +2685,7 @@ static bool __console_emit_next_record(struct console *con, char *text, char *ex prb_rec_init_rd(&r, &info, text, CONSOLE_LOG_MAX); - if (handover) - *handover = false; + *handover = false; if (!prb_read_valid(prb, con->seq, &r)) return false; @@ -2872,8 +2693,7 @@ static bool __console_emit_next_record(struct console *con, char *text, char *ex if (con->seq != r.info->seq) { con->dropped += r.info->seq - con->seq; con->seq = r.info->seq; - if (panic_in_progress() && - atomic_fetch_inc_relaxed(&panic_console_dropped) > 10) { + if (panic_in_progress() && panic_console_dropped++ > 10) { suppress_panic_printk = 1; pr_warn_once("Too many dropped messages. Suppress messages on non-panic CPUs to prevent livelock.\n"); } @@ -2895,62 +2715,32 @@ static bool __console_emit_next_record(struct console *con, char *text, char *ex len = record_print_text(&r, console_msg_format & MSG_FORMAT_SYSLOG, printk_time); } - if (handover) { - /* - * While actively printing out messages, if another printk() - * were to occur on another CPU, it may wait for this one to - * finish. This task can not be preempted if there is a - * waiter waiting to take over. - * - * Interrupts are disabled because the hand over to a waiter - * must not be interrupted until the hand over is completed - * (@console_waiter is cleared). - */ - printk_safe_enter_irqsave(flags); - console_lock_spinning_enable(); - - /* don't trace irqsoff print latency */ - stop_critical_timings(); - } + /* + * While actively printing out messages, if another printk() + * were to occur on another CPU, it may wait for this one to + * finish. This task can not be preempted if there is a + * waiter waiting to take over. + * + * Interrupts are disabled because the hand over to a waiter + * must not be interrupted until the hand over is completed + * (@console_waiter is cleared). 
+ */ + printk_safe_enter_irqsave(flags); + console_lock_spinning_enable(); + stop_critical_timings(); /* don't trace print latency */ call_console_driver(con, write_text, len, dropped_text); + start_critical_timings(); con->seq++; - if (handover) { - start_critical_timings(); - *handover = console_lock_spinning_disable_and_check(); - printk_safe_exit_irqrestore(flags); - } + *handover = console_lock_spinning_disable_and_check(); + printk_safe_exit_irqrestore(flags); skip: return true; } /* - * Print a record for a given console, but allow another printk() caller to - * take over the console_lock and continue printing. - * - * Requires the console_lock, but depending on @handover after the call, the - * caller may no longer have the console_lock. - * - * See __console_emit_next_record() for argument and return details. - */ -static bool console_emit_next_record_transferable(struct console *con, char *text, char *ext_text, - char *dropped_text, bool *handover) -{ - /* - * Handovers are only supported if threaded printers are atomically - * blocked. The context taking over the console_lock may be atomic. - */ - if (!console_kthreads_atomically_blocked()) { - *handover = false; - handover = NULL; - } - - return __console_emit_next_record(con, text, ext_text, dropped_text, handover); -} - -/* * Print out all remaining records to all consoles. * * @do_cond_resched is set by the caller. It can be true only in schedulable @@ -2968,8 +2758,8 @@ static bool console_emit_next_record_transferable(struct console *con, char *tex * were flushed to all usable consoles. A returned false informs the caller * that everything was not flushed (either there were no usable consoles or * another context has taken over printing or it is a panic situation and this - * is not the panic CPU or direct printing is not preferred). Regardless the - * reason, the caller should assume it is not useful to immediately try again. + * is not the panic CPU). Regardless the reason, the caller should assume it + * is not useful to immediately try again. * * Requires the console_lock. */ @@ -2986,10 +2776,6 @@ static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handove *handover = false; do { - /* Let the kthread printers do the work if they can. */ - if (!allow_direct_printing()) - return false; - any_progress = false; for_each_console(con) { @@ -3001,11 +2787,13 @@ static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handove if (con->flags & CON_EXTENDED) { /* Extended consoles do not print "dropped messages". 
*/ - progress = console_emit_next_record_transferable(con, &text[0], - &ext_text[0], NULL, handover); + progress = console_emit_next_record(con, &text[0], + &ext_text[0], NULL, + handover); } else { - progress = console_emit_next_record_transferable(con, &text[0], - NULL, &dropped_text[0], handover); + progress = console_emit_next_record(con, &text[0], + NULL, &dropped_text[0], + handover); } if (*handover) return false; @@ -3120,13 +2908,10 @@ void console_unblank(void) if (oops_in_progress) { if (down_trylock_console_sem() != 0) return; - if (!console_kthreads_atomic_tryblock()) { - up_console_sem(); - return; - } } else console_lock(); + console_locked = 1; console_may_schedule = 0; for_each_console(c) if ((c->flags & CON_ENABLED) && c->unblank) @@ -3405,10 +3190,6 @@ void register_console(struct console *newcon) nr_ext_console_drivers++; newcon->dropped = 0; - newcon->thread = NULL; - newcon->blocked = true; - mutex_init(&newcon->lock); - if (newcon->flags & CON_PRINTBUFFER) { /* Get a consistent copy of @syslog_seq. */ mutex_lock(&syslog_lock); @@ -3418,10 +3199,6 @@ void register_console(struct console *newcon) /* Begin with next message. */ newcon->seq = prb_next_seq(prb); } - - if (printk_kthreads_available) - printk_start_kthread(newcon); - console_unlock(); console_sysfs_notify(); @@ -3448,7 +3225,6 @@ EXPORT_SYMBOL(register_console); int unregister_console(struct console *console) { - struct task_struct *thd; struct console *con; int res; @@ -3489,20 +3265,7 @@ int unregister_console(struct console *console) console_drivers->flags |= CON_CONSDEV; console->flags &= ~CON_ENABLED; - - /* - * console->thread can only be cleared under the console lock. But - * stopping the thread must be done without the console lock. The - * task that clears @thread is the task that stops the kthread. - */ - thd = console->thread; - console->thread = NULL; - console_unlock(); - - if (thd) - kthread_stop(thd); - console_sysfs_notify(); if (console->exit) @@ -3598,20 +3361,6 @@ static int __init printk_late_init(void) } late_initcall(printk_late_init); -static int __init printk_activate_kthreads(void) -{ - struct console *con; - - console_lock(); - printk_kthreads_available = true; - for_each_console(con) - printk_start_kthread(con); - console_unlock(); - - return 0; -} -early_initcall(printk_activate_kthreads); - #if defined CONFIG_PRINTK /* If @con is specified, only wait for that console. Otherwise wait for all. */ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) @@ -3686,206 +3435,11 @@ bool pr_flush(int timeout_ms, bool reset_on_progress) } EXPORT_SYMBOL(pr_flush); -static void __printk_fallback_preferred_direct(void) -{ - printk_prefer_direct_enter(); - pr_err("falling back to preferred direct printing\n"); - printk_kthreads_available = false; -} - -/* - * Enter preferred direct printing, but never exit. Mark console threads as - * unavailable. The system is then forever in preferred direct printing and - * any printing threads will exit. - * - * Must *not* be called under console_lock. Use - * __printk_fallback_preferred_direct() if already holding console_lock. - */ -static void printk_fallback_preferred_direct(void) -{ - console_lock(); - __printk_fallback_preferred_direct(); - console_unlock(); -} - -/* - * Print a record for a given console, not allowing another printk() caller - * to take over. This is appropriate for contexts that do not have the - * console_lock. - * - * See __console_emit_next_record() for argument and return details. 
- */ -static bool console_emit_next_record(struct console *con, char *text, char *ext_text, - char *dropped_text) -{ - return __console_emit_next_record(con, text, ext_text, dropped_text, NULL); -} - -static bool printer_should_wake(struct console *con, u64 seq) -{ - short flags; - - if (kthread_should_stop() || !printk_kthreads_available) - return true; - - if (con->blocked || - console_kthreads_atomically_blocked()) { - return false; - } - - /* - * This is an unsafe read from con->flags, but a false positive is - * not a problem. Worst case it would allow the printer to wake up - * although it is disabled. But the printer will notice that when - * attempting to print and instead go back to sleep. - */ - flags = data_race(READ_ONCE(con->flags)); - - if (!__console_is_usable(flags)) - return false; - - return prb_read_valid(prb, seq, NULL); -} - -static int printk_kthread_func(void *data) -{ - struct console *con = data; - char *dropped_text = NULL; - char *ext_text = NULL; - u64 seq = 0; - char *text; - int error; - - text = kmalloc(CONSOLE_LOG_MAX, GFP_KERNEL); - if (!text) { - con_printk(KERN_ERR, con, "failed to allocate text buffer\n"); - printk_fallback_preferred_direct(); - goto out; - } - - if (con->flags & CON_EXTENDED) { - ext_text = kmalloc(CONSOLE_EXT_LOG_MAX, GFP_KERNEL); - if (!ext_text) { - con_printk(KERN_ERR, con, "failed to allocate ext_text buffer\n"); - printk_fallback_preferred_direct(); - goto out; - } - } else { - dropped_text = kmalloc(DROPPED_TEXT_MAX, GFP_KERNEL); - if (!dropped_text) { - con_printk(KERN_ERR, con, "failed to allocate dropped_text buffer\n"); - printk_fallback_preferred_direct(); - goto out; - } - } - - con_printk(KERN_INFO, con, "printing thread started\n"); - - for (;;) { - /* - * Guarantee this task is visible on the waitqueue before - * checking the wake condition. - * - * The full memory barrier within set_current_state() of - * prepare_to_wait_event() pairs with the full memory barrier - * within wq_has_sleeper(). - * - * This pairs with __wake_up_klogd:A. - */ - error = wait_event_interruptible(log_wait, - printer_should_wake(con, seq)); /* LMM(printk_kthread_func:A) */ - - if (kthread_should_stop() || !printk_kthreads_available) - break; - - if (error) - continue; - - error = mutex_lock_interruptible(&con->lock); - if (error) - continue; - - if (con->blocked || - !console_kthread_printing_tryenter()) { - /* Another context has locked the console_lock. */ - mutex_unlock(&con->lock); - continue; - } - - /* - * Although this context has not locked the console_lock, it - * is known that the console_lock is not locked and it is not - * possible for any other context to lock the console_lock. - * Therefore it is safe to read con->flags. - */ - - if (!__console_is_usable(con->flags)) { - console_kthread_printing_exit(); - mutex_unlock(&con->lock); - continue; - } - - /* - * Even though the printk kthread is always preemptible, it is - * still not allowed to call cond_resched() from within - * console drivers. The task may become non-preemptible in the - * console driver call chain. For example, vt_console_print() - * takes a spinlock and then can call into fbcon_redraw(), - * which can conditionally invoke cond_resched(). 
- */ - console_may_schedule = 0; - console_emit_next_record(con, text, ext_text, dropped_text); - - seq = con->seq; - - console_kthread_printing_exit(); - - mutex_unlock(&con->lock); - } - - con_printk(KERN_INFO, con, "printing thread stopped\n"); -out: - kfree(dropped_text); - kfree(ext_text); - kfree(text); - - console_lock(); - /* - * If this kthread is being stopped by another task, con->thread will - * already be NULL. That is fine. The important thing is that it is - * NULL after the kthread exits. - */ - con->thread = NULL; - console_unlock(); - - return 0; -} - -/* Must be called under console_lock. */ -static void printk_start_kthread(struct console *con) -{ - /* - * Do not start a kthread if there is no write() callback. The - * kthreads assume the write() callback exists. - */ - if (!con->write) - return; - - con->thread = kthread_run(printk_kthread_func, con, - "pr/%s%d", con->name, con->index); - if (IS_ERR(con->thread)) { - con->thread = NULL; - con_printk(KERN_ERR, con, "unable to start printing thread\n"); - __printk_fallback_preferred_direct(); - return; - } -} - /* * Delayed printk version, for scheduler-internal messages: */ -#define PRINTK_PENDING_WAKEUP 0x01 -#define PRINTK_PENDING_DIRECT_OUTPUT 0x02 +#define PRINTK_PENDING_WAKEUP 0x01 +#define PRINTK_PENDING_OUTPUT 0x02 static DEFINE_PER_CPU(int, printk_pending); @@ -3893,14 +3447,10 @@ static void wake_up_klogd_work_func(struct irq_work *irq_work) { int pending = this_cpu_xchg(printk_pending, 0); - if (pending & PRINTK_PENDING_DIRECT_OUTPUT) { - printk_prefer_direct_enter(); - + if (pending & PRINTK_PENDING_OUTPUT) { /* If trylock fails, someone else is doing the printing */ if (console_trylock()) console_unlock(); - - printk_prefer_direct_exit(); } if (pending & PRINTK_PENDING_WAKEUP) @@ -3925,11 +3475,10 @@ static void __wake_up_klogd(int val) * prepare_to_wait_event(), which is called after ___wait_event() adds * the waiter but before it has checked the wait condition. * - * This pairs with devkmsg_read:A, syslog_print:A, and - * printk_kthread_func:A. + * This pairs with devkmsg_read:A and syslog_print:A. */ if (wq_has_sleeper(&log_wait) || /* LMM(__wake_up_klogd:A) */ - (val & PRINTK_PENDING_DIRECT_OUTPUT)) { + (val & PRINTK_PENDING_OUTPUT)) { this_cpu_or(printk_pending, val); irq_work_queue(this_cpu_ptr(&wake_up_klogd_work)); } @@ -3947,17 +3496,7 @@ void defer_console_output(void) * New messages may have been added directly to the ringbuffer * using vprintk_store(), so wake any waiters as well. */ - int val = PRINTK_PENDING_WAKEUP; - - /* - * Make sure that some context will print the messages when direct - * printing is allowed. This happens in situations when the kthreads - * may not be as reliable or perhaps unusable. - */ - if (allow_direct_printing()) - val |= PRINTK_PENDING_DIRECT_OUTPUT; - - __wake_up_klogd(val); + __wake_up_klogd(PRINTK_PENDING_WAKEUP | PRINTK_PENDING_OUTPUT); } void printk_trigger_flush(void) diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h index 4995c078cff9..a001e1e7a992 100644 --- a/kernel/rcu/tree_stall.h +++ b/kernel/rcu/tree_stall.h @@ -647,7 +647,6 @@ static void print_cpu_stall(unsigned long gps) * See Documentation/RCU/stallwarn.rst for info on how to debug * RCU CPU stall warnings. 
*/ - printk_prefer_direct_enter(); trace_rcu_stall_warning(rcu_state.name, TPS("SelfDetected")); pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name); raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags); @@ -685,7 +684,6 @@ static void print_cpu_stall(unsigned long gps) */ set_tsk_need_resched(current); set_preempt_need_resched(); - printk_prefer_direct_exit(); } static void check_cpu_stall(struct rcu_data *rdp) diff --git a/kernel/reboot.c b/kernel/reboot.c index a091145ee710..3c35445bf5ad 100644 --- a/kernel/reboot.c +++ b/kernel/reboot.c @@ -315,6 +315,43 @@ static int sys_off_notify(struct notifier_block *nb, return handler->sys_off_cb(&data); } +static struct sys_off_handler platform_sys_off_handler; + +static struct sys_off_handler *alloc_sys_off_handler(int priority) +{ + struct sys_off_handler *handler; + gfp_t flags; + + /* + * Platforms like m68k can't allocate sys_off handler dynamically + * at the early boot time because memory allocator isn't available yet. + */ + if (priority == SYS_OFF_PRIO_PLATFORM) { + handler = &platform_sys_off_handler; + if (handler->cb_data) + return ERR_PTR(-EBUSY); + } else { + if (system_state > SYSTEM_RUNNING) + flags = GFP_ATOMIC; + else + flags = GFP_KERNEL; + + handler = kzalloc(sizeof(*handler), flags); + if (!handler) + return ERR_PTR(-ENOMEM); + } + + return handler; +} + +static void free_sys_off_handler(struct sys_off_handler *handler) +{ + if (handler == &platform_sys_off_handler) + memset(handler, 0, sizeof(*handler)); + else + kfree(handler); +} + /** * register_sys_off_handler - Register sys-off handler * @mode: Sys-off mode @@ -345,9 +382,9 @@ register_sys_off_handler(enum sys_off_mode mode, struct sys_off_handler *handler; int err; - handler = kzalloc(sizeof(*handler), GFP_KERNEL); - if (!handler) - return ERR_PTR(-ENOMEM); + handler = alloc_sys_off_handler(priority); + if (IS_ERR(handler)) + return handler; switch (mode) { case SYS_OFF_MODE_POWER_OFF_PREPARE: @@ -364,7 +401,7 @@ register_sys_off_handler(enum sys_off_mode mode, break; default: - kfree(handler); + free_sys_off_handler(handler); return ERR_PTR(-EINVAL); } @@ -391,7 +428,7 @@ register_sys_off_handler(enum sys_off_mode mode, } if (err) { - kfree(handler); + free_sys_off_handler(handler); return ERR_PTR(err); } @@ -409,7 +446,7 @@ void unregister_sys_off_handler(struct sys_off_handler *handler) { int err; - if (!handler) + if (IS_ERR_OR_NULL(handler)) return; if (handler->blocking) @@ -422,7 +459,7 @@ void unregister_sys_off_handler(struct sys_off_handler *handler) /* sanity check, shall never happen */ WARN_ON(err); - kfree(handler); + free_sys_off_handler(handler); } EXPORT_SYMBOL_GPL(unregister_sys_off_handler); @@ -584,7 +621,23 @@ static void do_kernel_power_off_prepare(void) */ void do_kernel_power_off(void) { + struct sys_off_handler *sys_off = NULL; + + /* + * Register sys-off handlers for legacy PM callback. This allows + * legacy PM callbacks temporary co-exist with the new sys-off API. + * + * TODO: Remove legacy handlers once all legacy PM users will be + * switched to the sys-off based APIs. 
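For context, the driver-facing side of the sys-off API that this legacy bridge feeds into. The callback takes a struct sys_off_data and returns a notifier-style value; the names here are a hypothetical sketch, not a specific driver:

static int demo_power_off(struct sys_off_data *data)
{
	/* ... write the platform's power-off register ... */
	return NOTIFY_DONE;
}

struct sys_off_handler *h;

h = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
			     SYS_OFF_PRIO_DEFAULT,
			     demo_power_off, NULL);
if (IS_ERR(h))
	return PTR_ERR(h);
/* ... */
unregister_sys_off_handler(h);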
+ */ + if (pm_power_off) + sys_off = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, + SYS_OFF_PRIO_DEFAULT, + legacy_pm_power_off, NULL); + atomic_notifier_call_chain(&power_off_handler_list, 0, NULL); + + unregister_sys_off_handler(sys_off); } /** @@ -595,7 +648,8 @@ void do_kernel_power_off(void) */ bool kernel_can_power_off(void) { - return !atomic_notifier_call_chain_is_empty(&power_off_handler_list); + return !atomic_notifier_call_chain_is_empty(&power_off_handler_list) || + pm_power_off; } EXPORT_SYMBOL_GPL(kernel_can_power_off); @@ -630,7 +684,6 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd, void __user *, arg) { struct pid_namespace *pid_ns = task_active_pid_ns(current); - struct sys_off_handler *sys_off = NULL; char buffer[256]; int ret = 0; @@ -655,21 +708,6 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd, if (ret) return ret; - /* - * Register sys-off handlers for legacy PM callback. This allows - * legacy PM callbacks temporary co-exist with the new sys-off API. - * - * TODO: Remove legacy handlers once all legacy PM users will be - * switched to the sys-off based APIs. - */ - if (pm_power_off) { - sys_off = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, - SYS_OFF_PRIO_DEFAULT, - legacy_pm_power_off, NULL); - if (IS_ERR(sys_off)) - return PTR_ERR(sys_off); - } - /* Instead of trying to make the power_off code look like * halt when pm_power_off is not set do it the easy way. */ @@ -727,7 +765,6 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd, break; } mutex_unlock(&system_transition_mutex); - unregister_sys_off_handler(sys_off); return ret; } @@ -782,11 +819,9 @@ static int __orderly_reboot(void) ret = run_cmd(reboot_cmd); if (ret) { - printk_prefer_direct_enter(); pr_warn("Failed to start orderly reboot: forcing the issue\n"); emergency_sync(); kernel_restart(NULL); - printk_prefer_direct_exit(); } return ret; @@ -799,7 +834,6 @@ static int __orderly_poweroff(bool force) ret = run_cmd(poweroff_cmd); if (ret && force) { - printk_prefer_direct_enter(); pr_warn("Failed to start orderly shutdown: forcing the issue\n"); /* @@ -809,7 +843,6 @@ static int __orderly_poweroff(bool force) */ emergency_sync(); kernel_power_off(); - printk_prefer_direct_exit(); } return ret; @@ -867,8 +900,6 @@ EXPORT_SYMBOL_GPL(orderly_reboot); */ static void hw_failure_emergency_poweroff_func(struct work_struct *work) { - printk_prefer_direct_enter(); - /* * We have reached here after the emergency shutdown waiting period has * expired. This means orderly_poweroff has not been able to shut off @@ -885,8 +916,6 @@ static void hw_failure_emergency_poweroff_func(struct work_struct *work) */ pr_emerg("Hardware protection shutdown failed. Trying emergency restart\n"); emergency_restart(); - - printk_prefer_direct_exit(); } static DECLARE_DELAYED_WORK(hw_failure_emergency_poweroff_work, @@ -925,13 +954,11 @@ void hw_protection_shutdown(const char *reason, int ms_until_forced) { static atomic_t allow_proceed = ATOMIC_INIT(1); - printk_prefer_direct_enter(); - pr_emerg("HARDWARE PROTECTION shutdown (%s)\n", reason); /* Shutdown should be initiated only once. 
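hw_protection_shutdown() remains the one-shot entry point for catastrophic hardware faults; the atomic_dec_and_test() guard means only the first caller proceeds, now via a plain return instead of the removed label. Typical driver usage, with the threshold names hypothetical:

if (temp_mC > crit_mC)
	/* orderly poweroff first, forced emergency restart after 5 s */
	hw_protection_shutdown("tdie overheat", 5000);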
*/ if (!atomic_dec_and_test(&allow_proceed)) - goto out; + return; /* * Queue a backup emergency shutdown in the event of @@ -939,8 +966,6 @@ void hw_protection_shutdown(const char *reason, int ms_until_forced) */ hw_failure_emergency_poweroff(ms_until_forced); orderly_poweroff(true); -out: - printk_prefer_direct_exit(); } EXPORT_SYMBOL_GPL(hw_protection_shutdown); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index bfa7452ca92e..da0bf6fe9ecd 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4798,25 +4798,55 @@ static void do_balance_callbacks(struct rq *rq, struct callback_head *head) static void balance_push(struct rq *rq); +/* + * balance_push_callback is a right abuse of the callback interface and plays + * by significantly different rules. + * + * Where the normal balance_callback's purpose is to be ran in the same context + * that queued it (only later, when it's safe to drop rq->lock again), + * balance_push_callback is specifically targeted at __schedule(). + * + * This abuse is tolerated because it places all the unlikely/odd cases behind + * a single test, namely: rq->balance_callback == NULL. + */ struct callback_head balance_push_callback = { .next = NULL, .func = (void (*)(struct callback_head *))balance_push, }; -static inline struct callback_head *splice_balance_callbacks(struct rq *rq) +static inline struct callback_head * +__splice_balance_callbacks(struct rq *rq, bool split) { struct callback_head *head = rq->balance_callback; + if (likely(!head)) + return NULL; + lockdep_assert_rq_held(rq); - if (head) + /* + * Must not take balance_push_callback off the list when + * splice_balance_callbacks() and balance_callbacks() are not + * in the same rq->lock section. + * + * In that case it would be possible for __schedule() to interleave + * and observe the list empty. + */ + if (split && head == &balance_push_callback) + head = NULL; + else rq->balance_callback = NULL; return head; } +static inline struct callback_head *splice_balance_callbacks(struct rq *rq) +{ + return __splice_balance_callbacks(rq, true); +} + static void __balance_callbacks(struct rq *rq) { - do_balance_callbacks(rq, splice_balance_callbacks(rq)); + do_balance_callbacks(rq, __splice_balance_callbacks(rq, false)); } static inline void balance_callbacks(struct rq *rq, struct callback_head *head) diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 01259611beb9..47b89a0fc6e5 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1693,6 +1693,11 @@ queue_balance_callback(struct rq *rq, { lockdep_assert_rq_held(rq); + /* + * Don't (re)queue an already queued item; nor queue anything when + * balance_push() is active, see the comment with + * balance_push_callback. 
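The new split flag exists because callers such as __sched_setscheduler() splice the callback list while holding rq->lock but run it only after dropping the lock; in that split usage balance_push_callback must stay queued so __schedule() never observes an empty list mid-transition. The caller pattern, sketched:

struct callback_head *head;
struct rq_flags rf;
struct rq *rq;

rq = task_rq_lock(p, &rf);
/* ... update scheduling state; this may queue balance callbacks ... */
head = splice_balance_callbacks(rq);	/* split=true: leaves balance_push_callback */
task_rq_unlock(rq, p, &rf);

balance_callbacks(rq, head);		/* run the spliced callbacks unlocked */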
+ */ if (unlikely(head->next || rq->balance_callback == &balance_push_callback)) return; diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 10a32b0f2deb..fe04c6f96ca5 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -770,14 +770,11 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg) **/ void blk_trace_shutdown(struct request_queue *q) { - mutex_lock(&q->debugfs_mutex); if (rcu_dereference_protected(q->blk_trace, lockdep_is_held(&q->debugfs_mutex))) { __blk_trace_startstop(q, 0); __blk_trace_remove(q); } - - mutex_unlock(&q->debugfs_mutex); } #ifdef CONFIG_BLK_CGROUP diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 10b157a6d73e..88589d74a892 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -2263,11 +2263,11 @@ static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32 int err = -ENOMEM; unsigned int i; - syms = kvmalloc(cnt * sizeof(*syms), GFP_KERNEL); + syms = kvmalloc_array(cnt, sizeof(*syms), GFP_KERNEL); if (!syms) goto error; - buf = kvmalloc(cnt * KSYM_NAME_LEN, GFP_KERNEL); + buf = kvmalloc_array(cnt, KSYM_NAME_LEN, GFP_KERNEL); if (!buf) goto error; @@ -2423,7 +2423,7 @@ kprobe_multi_link_handler(struct fprobe *fp, unsigned long entry_ip, kprobe_multi_link_prog_run(link, entry_ip, regs); } -static int symbols_cmp(const void *a, const void *b) +static int symbols_cmp_r(const void *a, const void *b, const void *priv) { const char **str_a = (const char **) a; const char **str_b = (const char **) b; @@ -2431,6 +2431,28 @@ static int symbols_cmp(const void *a, const void *b) return strcmp(*str_a, *str_b); } +struct multi_symbols_sort { + const char **funcs; + u64 *cookies; +}; + +static void symbols_swap_r(void *a, void *b, int size, const void *priv) +{ + const struct multi_symbols_sort *data = priv; + const char **name_a = a, **name_b = b; + + swap(*name_a, *name_b); + + /* If defined, swap also related cookies. 
*/ + if (data->cookies) { + u64 *cookie_a, *cookie_b; + + cookie_a = data->cookies + (name_a - data->funcs); + cookie_b = data->cookies + (name_b - data->funcs); + swap(*cookie_a, *cookie_b); + } +} + int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) { struct bpf_kprobe_multi_link *link = NULL; @@ -2464,42 +2486,50 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr return -EINVAL; size = cnt * sizeof(*addrs); - addrs = kvmalloc(size, GFP_KERNEL); + addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL); if (!addrs) return -ENOMEM; + ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies); + if (ucookies) { + cookies = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL); + if (!cookies) { + err = -ENOMEM; + goto error; + } + if (copy_from_user(cookies, ucookies, size)) { + err = -EFAULT; + goto error; + } + } + if (uaddrs) { if (copy_from_user(addrs, uaddrs, size)) { err = -EFAULT; goto error; } } else { + struct multi_symbols_sort data = { + .cookies = cookies, + }; struct user_syms us; err = copy_user_syms(&us, usyms, cnt); if (err) goto error; - sort(us.syms, cnt, sizeof(*us.syms), symbols_cmp, NULL); + if (cookies) + data.funcs = us.syms; + + sort_r(us.syms, cnt, sizeof(*us.syms), symbols_cmp_r, + symbols_swap_r, &data); + err = ftrace_lookup_symbols(us.syms, cnt, addrs); free_user_syms(&us); if (err) goto error; } - ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies); - if (ucookies) { - cookies = kvmalloc(size, GFP_KERNEL); - if (!cookies) { - err = -ENOMEM; - goto error; - } - if (copy_from_user(cookies, ucookies, size)) { - err = -EFAULT; - goto error; - } - } - link = kzalloc(sizeof(*link), GFP_KERNEL); if (!link) { err = -ENOMEM; diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index e750fe141a60..601ccf1b2f09 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -8029,15 +8029,23 @@ static int kallsyms_callback(void *data, const char *name, struct module *mod, unsigned long addr) { struct kallsyms_data *args = data; + const char **sym; + int idx; - if (!bsearch(&name, args->syms, args->cnt, sizeof(*args->syms), symbols_cmp)) + sym = bsearch(&name, args->syms, args->cnt, sizeof(*args->syms), symbols_cmp); + if (!sym) + return 0; + + idx = sym - args->syms; + if (args->addrs[idx]) return 0; addr = ftrace_location(addr); if (!addr) return 0; - args->addrs[args->found++] = addr; + args->addrs[idx] = addr; + args->found++; return args->found == args->cnt ? 1 : 0; } @@ -8062,6 +8070,7 @@ int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *a struct kallsyms_data args; int err; + memset(addrs, 0, sizeof(*addrs) * cnt); args.addrs = addrs; args.syms = sorted_syms; args.cnt = cnt; diff --git a/kernel/trace/rethook.c b/kernel/trace/rethook.c index b56833700d23..c69d82273ce7 100644 --- a/kernel/trace/rethook.c +++ b/kernel/trace/rethook.c @@ -154,6 +154,15 @@ struct rethook_node *rethook_try_get(struct rethook *rh) if (unlikely(!handler)) return NULL; + /* + * This expects the caller will set up a rethook on a function entry. + * When the function returns, the rethook will eventually be reclaimed + * or released in the rethook_recycle() with call_rcu(). + * This means the caller must be run in the RCU-availabe context. 
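symbols_swap_r() shows what sort_r()'s swap callback is for: keeping a sibling array in step with the one being sorted. The index is derived from the slot's address inside the primary array, not from its contents, so the pairing survives every swap. The idiom in miniature (names hypothetical):

struct pair_sort {
	const char **keys;
	u64 *vals;
};

static int pair_cmp(const void *a, const void *b, const void *priv)
{
	return strcmp(*(const char **)a, *(const char **)b);
}

static void pair_swap(void *a, void *b, int size, const void *priv)
{
	const struct pair_sort *d = priv;
	const char **ka = a, **kb = b;

	swap(d->vals[ka - d->keys], d->vals[kb - d->keys]);
	swap(*ka, *kb);
}

/* sort_r(d.keys, n, sizeof(*d.keys), pair_cmp, pair_swap, &d); */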
+ */ + if (unlikely(!rcu_is_watching())) + return NULL; + fn = freelist_try_get(&rh->pool); if (!fn) return NULL; diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 2c95992e2c71..a8cfac0611bc 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -6424,9 +6424,7 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf) synchronize_rcu(); free_snapshot(tr); } -#endif -#ifdef CONFIG_TRACER_MAX_TRACE if (t->use_max_tr && !had_max_tr) { ret = tracing_alloc_snapshot_instance(tr); if (ret < 0) diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 93507330462c..a245ea673715 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1718,8 +1718,17 @@ static int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs) { struct kretprobe *rp = get_kretprobe(ri); - struct trace_kprobe *tk = container_of(rp, struct trace_kprobe, rp); + struct trace_kprobe *tk; + + /* + * There is a small chance that get_kretprobe(ri) returns NULL when + * the kretprobe is unregister on another CPU between kretprobe's + * trampoline_handler and this function. + */ + if (unlikely(!rp)) + return 0; + tk = container_of(rp, struct trace_kprobe, rp); raw_cpu_inc(*tk->nhit); if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE)) diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 9711589273cd..c3dc4f859a6b 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -546,7 +546,6 @@ static int __trace_uprobe_create(int argc, const char **argv) bool is_return = false; int i, ret; - ret = 0; ref_ctr_offset = 0; switch (argv[0][0]) { diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 20a7a55e62b6..ecb0e8346e65 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -424,8 +424,6 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) /* Start period for the next softlockup warning. */ update_report_ts(); - printk_prefer_direct_enter(); - pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n", smp_processor_id(), duration, current->comm, task_pid_nr(current)); @@ -444,8 +442,6 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK); if (softlockup_panic) panic("softlockup: hung tasks"); - - printk_prefer_direct_exit(); } return HRTIMER_RESTART; diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c index 701f35f0e2d4..247bf0b1582c 100644 --- a/kernel/watchdog_hld.c +++ b/kernel/watchdog_hld.c @@ -135,8 +135,6 @@ static void watchdog_overflow_callback(struct perf_event *event, if (__this_cpu_read(hard_watchdog_warn) == true) return; - printk_prefer_direct_enter(); - pr_emerg("Watchdog detected hard LOCKUP on cpu %d\n", this_cpu); print_modules(); @@ -157,8 +155,6 @@ static void watchdog_overflow_callback(struct perf_event *event, if (hardlockup_panic) nmi_panic(regs, "Hard LOCKUP"); - printk_prefer_direct_exit(); - __this_cpu_write(hard_watchdog_warn, true); return; } diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 4056f2a3f9d5..1ea50f6be843 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2788,13 +2788,13 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq, } /** - * flush_workqueue - ensure that any scheduled work has run to completion. + * __flush_workqueue - ensure that any scheduled work has run to completion. 
* @wq: workqueue to flush * * This function sleeps until all work items which were queued on entry * have finished execution, but it is not livelocked by new incoming ones. */ -void flush_workqueue(struct workqueue_struct *wq) +void __flush_workqueue(struct workqueue_struct *wq) { struct wq_flusher this_flusher = { .list = LIST_HEAD_INIT(this_flusher.list), @@ -2943,7 +2943,7 @@ void flush_workqueue(struct workqueue_struct *wq) out_unlock: mutex_unlock(&wq->mutex); } -EXPORT_SYMBOL(flush_workqueue); +EXPORT_SYMBOL(__flush_workqueue); /** * drain_workqueue - drain a workqueue @@ -2971,7 +2971,7 @@ void drain_workqueue(struct workqueue_struct *wq) wq->flags |= __WQ_DRAINING; mutex_unlock(&wq->mutex); reflush: - flush_workqueue(wq); + __flush_workqueue(wq); mutex_lock(&wq->mutex); @@ -6111,3 +6111,11 @@ void __init workqueue_init(void) wq_online = true; wq_watchdog_init(); } + +/* + * Despite the naming, this is a no-op function which is here only to avoid a + * link error. Since a compile-time warning may fail to catch misuse, we will + * need to emit a run-time warning from __flush_workqueue(). + */ +void __warn_flushing_systemwide_wq(void) { } +EXPORT_SYMBOL(__warn_flushing_systemwide_wq); diff --git a/lib/Kconfig b/lib/Kconfig index 6a843639814f..eaaad4d85bf2 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -120,6 +120,9 @@ config INDIRECT_IOMEM_FALLBACK source "lib/crypto/Kconfig" +config LIB_MEMNEQ + bool + config CRC_CCITT tristate "CRC-CCITT functions" help diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan index c4fe15d38b60..a9f7eb047768 100644 --- a/lib/Kconfig.ubsan +++ b/lib/Kconfig.ubsan @@ -94,7 +94,7 @@ config UBSAN_UNREACHABLE bool "Perform checking for unreachable code" # objtool already handles unreachable checking and gets angry about # seeing UBSan instrumentation located in unreachable places. - depends on !(OBJTOOL && (STACK_VALIDATION || UNWINDER_ORC || X86_SMAP)) + depends on !(OBJTOOL && (STACK_VALIDATION || UNWINDER_ORC || HAVE_UACCESS_VALIDATION)) depends on $(cc-option,-fsanitize=unreachable) help This option enables -fsanitize=unreachable which checks for control diff --git a/lib/Makefile b/lib/Makefile index ea54294d73bf..f99bf61f8bbc 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -251,6 +251,7 @@ obj-$(CONFIG_DIMLIB) += dim/ obj-$(CONFIG_SIGNATURE) += digsig.o lib-$(CONFIG_CLZ_TAB) += clz_tab.o +lib-$(CONFIG_LIB_MEMNEQ) += memneq.o obj-$(CONFIG_GENERIC_STRNCPY_FROM_USER) += strncpy_from_user.o obj-$(CONFIG_GENERIC_STRNLEN_USER) += strnlen_user.o diff --git a/lib/crc-itu-t.c b/lib/crc-itu-t.c index 1974b355c148..1d26a1647da5 100644 --- a/lib/crc-itu-t.c +++ b/lib/crc-itu-t.c @@ -7,7 +7,7 @@ #include <linux/module.h> #include <linux/crc-itu-t.h> -/** CRC table for the CRC ITU-T V.41 0x1021 (x^16 + x^12 + x^15 + 1) */ +/* CRC table for the CRC ITU-T V.41 0x1021 (x^16 + x^12 + x^5 + 1) */ const u16 crc_itu_t_table[256] = { 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig index 9856e291f414..2082af43d51f 100644 --- a/lib/crypto/Kconfig +++ b/lib/crypto/Kconfig @@ -71,6 +71,7 @@ config CRYPTO_LIB_CURVE25519 tristate "Curve25519 scalar multiplication library" depends on CRYPTO_ARCH_HAVE_LIB_CURVE25519 || !CRYPTO_ARCH_HAVE_LIB_CURVE25519 select CRYPTO_LIB_CURVE25519_GENERIC if CRYPTO_ARCH_HAVE_LIB_CURVE25519=n + select LIB_MEMNEQ help Enable the Curve25519 library interface.
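The new LIB_MEMNEQ symbol above moves crypto/memneq.c into lib/ so that library code such as CRYPTO_LIB_CURVE25519 can select a timing-safe comparison without depending on the crypto layer. For illustration, here is a byte-at-a-time constant-time inequality check in the same spirit; the kernel's real implementation is word-at-a-time, and the function name here is just a label:

#include <stddef.h>
#include <stdio.h>

/* Returns 0 iff a and b are equal. Runtime is independent of where the
 * first differing byte sits, unlike memcmp(), which returns early. */
static unsigned long memneq_sketch(const void *a, const void *b, size_t size)
{
    const unsigned char *pa = a, *pb = b;
    unsigned long neq = 0;

    while (size-- > 0)
        neq |= *pa++ ^ *pb++;   /* accumulate differences, never branch */
    return neq;
}

int main(void)
{
    unsigned char x[4] = { 1, 2, 3, 4 }, y[4] = { 1, 2, 9, 4 };

    printf("%d\n", memneq_sketch(x, y, 4) != 0);    /* 1: buffers differ */
    return 0;
}

Branching on the result afterwards is fine; only the scan itself must be data-independent.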
This interface may be fulfilled by either the generic implementation or an arch-specific diff --git a/lib/iov_iter.c b/lib/iov_iter.c index 6dd5330f7a99..0b64695ab632 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -1434,7 +1434,7 @@ static ssize_t iter_xarray_get_pages(struct iov_iter *i, { unsigned nr, offset; pgoff_t index, count; - size_t size = maxsize, actual; + size_t size = maxsize; loff_t pos; if (!size || !maxpages) @@ -1461,13 +1461,7 @@ static ssize_t iter_xarray_get_pages(struct iov_iter *i, if (nr == 0) return 0; - actual = PAGE_SIZE * nr; - actual -= offset; - if (nr == count && size > 0) { - unsigned last_offset = (nr > 1) ? 0 : offset; - actual -= PAGE_SIZE - (last_offset + size); - } - return actual; + return min_t(size_t, nr * PAGE_SIZE - offset, maxsize); } /* must be done on non-empty ITER_IOVEC one */ @@ -1602,7 +1596,7 @@ static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i, struct page **p; unsigned nr, offset; pgoff_t index, count; - size_t size = maxsize, actual; + size_t size = maxsize; loff_t pos; if (!size) @@ -1631,13 +1625,7 @@ static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i, if (nr == 0) return 0; - actual = PAGE_SIZE * nr; - actual -= offset; - if (nr == count && size > 0) { - unsigned last_offset = (nr > 1) ? 0 : offset; - actual -= PAGE_SIZE - (last_offset + size); - } - return actual; + return min_t(size_t, nr * PAGE_SIZE - offset, maxsize); } ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, diff --git a/crypto/memneq.c b/lib/memneq.c index fb11608b1ec1..fb11608b1ec1 100644 --- a/crypto/memneq.c +++ b/lib/memneq.c diff --git a/lib/vsprintf.c b/lib/vsprintf.c index fb77f7bfd126..3c1853a9d1c0 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -769,8 +769,7 @@ static inline int __ptr_to_hashval(const void *ptr, unsigned long *hashval_out) static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn); unsigned long flags; - if (!system_unbound_wq || - (!rng_is_initialized() && !rng_has_arch_random()) || + if (!system_unbound_wq || !rng_is_initialized() || !spin_trylock_irqsave(&filling, flags)) return -EAGAIN; diff --git a/lib/xarray.c b/lib/xarray.c index 54e646e8e6ee..ea9ce1f0b386 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -264,9 +264,10 @@ static void xa_node_free(struct xa_node *node) * xas_destroy() - Free any resources allocated during the XArray operation. * @xas: XArray operation state. * - * This function is now internal-only. + * Most users will not need to call this function; it is called for you + * by xas_nomem(). 
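The iter_xarray_get_pages() cleanup above replaces the branchy "actual" computation with a single expression: the bytes covered are the nr pages grabbed, minus the offset into the first page, clamped to maxsize. A quick user-space check of that arithmetic (page size and values are examples only):

#include <stdio.h>

#define PAGE_SIZE_EX 4096UL

static unsigned long bytes_covered(unsigned long nr, unsigned long offset,
                                   unsigned long maxsize)
{
    unsigned long span = nr * PAGE_SIZE_EX - offset;

    return span < maxsize ? span : maxsize; /* min_t(size_t, ...) */
}

int main(void)
{
    /* 3 pages grabbed, request starts 100 bytes into the first page,
     * caller wanted 10000 bytes: 3*4096 - 100 = 12188, clamped to 10000. */
    printf("%lu\n", bytes_covered(3, 100, 10000));
    /* Caller wanted more than the pages cover: result is 12188. */
    printf("%lu\n", bytes_covered(3, 100, 20000));
    return 0;
}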
*/ -static void xas_destroy(struct xa_state *xas) +void xas_destroy(struct xa_state *xas) { struct xa_node *next, *node = xas->xa_alloc; diff --git a/mm/backing-dev.c b/mm/backing-dev.c index ff60bd7d74e0..95550b8fa7fe 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -231,20 +231,13 @@ static __init int bdi_class_init(void) } postcore_initcall(bdi_class_init); -static int bdi_init(struct backing_dev_info *bdi); - static int __init default_bdi_init(void) { - int err; - bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_SYSFS, 0); if (!bdi_wq) return -ENOMEM; - - err = bdi_init(&noop_backing_dev_info); - - return err; + return 0; } subsys_initcall(default_bdi_init); @@ -781,7 +774,7 @@ static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb) #endif /* CONFIG_CGROUP_WRITEBACK */ -static int bdi_init(struct backing_dev_info *bdi) +int bdi_init(struct backing_dev_info *bdi) { int ret; diff --git a/mm/damon/reclaim.c b/mm/damon/reclaim.c index 8efbfb24f3a1..4b07c29effe9 100644 --- a/mm/damon/reclaim.c +++ b/mm/damon/reclaim.c @@ -374,6 +374,8 @@ static void damon_reclaim_timer_fn(struct work_struct *work) } static DECLARE_DELAYED_WORK(damon_reclaim_timer, damon_reclaim_timer_fn); +static bool damon_reclaim_initialized; + static int enabled_store(const char *val, const struct kernel_param *kp) { @@ -382,6 +384,10 @@ static int enabled_store(const char *val, if (rc < 0) return rc; + /* system_wq might not be initialized yet */ + if (!damon_reclaim_initialized) + return rc; + if (enabled) schedule_delayed_work(&damon_reclaim_timer, 0); @@ -449,6 +455,8 @@ static int __init damon_reclaim_init(void) damon_add_target(ctx, target); schedule_delayed_work(&damon_reclaim_timer, 0); + + damon_reclaim_initialized = true; return 0; } diff --git a/mm/filemap.c b/mm/filemap.c index 9daeaab36081..ffdfbc8b0e3c 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2385,6 +2385,8 @@ static void filemap_get_read_batch(struct address_space *mapping, continue; if (xas.xa_index > max || xa_is_value(folio)) break; + if (xa_is_sibling(folio)) + break; if (!folio_try_get_rcu(folio)) goto retry; @@ -2629,6 +2631,13 @@ err: return err; } +static inline bool pos_same_folio(loff_t pos1, loff_t pos2, struct folio *folio) +{ + unsigned int shift = folio_shift(folio); + + return (pos1 >> shift == pos2 >> shift); +} + /** * filemap_read - Read data from the page cache. * @iocb: The iocb to read. @@ -2700,11 +2709,11 @@ ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter, writably_mapped = mapping_writably_mapped(mapping); /* - * When a sequential read accesses a page several times, only + * When a read accesses the same folio several times, only * mark it as accessed the first time.
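pos_same_folio() above generalizes the old PAGE_SHIFT test to large folios: two file positions count as "the same place" when they fall inside one folio-sized window, so folio_mark_accessed() fires once per folio rather than once per page. A small model of the check, with an assumed folio_shift() of 14 (a 16KiB folio):

#include <stdbool.h>
#include <stdio.h>

/* Mirror of the new helper: compare positions at folio granularity. */
static bool pos_same_folio(long long pos1, long long pos2, unsigned int shift)
{
    return (pos1 >> shift) == (pos2 >> shift);
}

int main(void)
{
    /* 0x3fff and 0x4000 straddle a 16KiB folio boundary, so the new
     * folio would be marked accessed again. */
    printf("%d\n", pos_same_folio(0x3fff, 0x4000, 14)); /* 0 */
    /* Both positions in the same 16KiB folio: no repeat marking. */
    printf("%d\n", pos_same_folio(0x1000, 0x3fff, 14)); /* 1 */
    return 0;
}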
*/ - if (iocb->ki_pos >> PAGE_SHIFT != - ra->prev_pos >> PAGE_SHIFT) + if (!pos_same_folio(iocb->ki_pos, ra->prev_pos - 1, + fbatch.folios[0])) folio_mark_accessed(fbatch.folios[0]); for (i = 0; i < folio_batch_count(&fbatch); i++) { @@ -2991,11 +3000,12 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf) struct address_space *mapping = file->f_mapping; DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff); struct file *fpin = NULL; + unsigned long vm_flags = vmf->vma->vm_flags; unsigned int mmap_miss; #ifdef CONFIG_TRANSPARENT_HUGEPAGE /* Use the readahead code, even if readahead is disabled */ - if (vmf->vma->vm_flags & VM_HUGEPAGE) { + if (vm_flags & VM_HUGEPAGE) { fpin = maybe_unlock_mmap_for_io(vmf, fpin); ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1); ra->size = HPAGE_PMD_NR; @@ -3003,7 +3013,7 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf) * Fetch two PMD folios, so we get the chance to actually * readahead, unless we've been told not to. */ - if (!(vmf->vma->vm_flags & VM_RAND_READ)) + if (!(vm_flags & VM_RAND_READ)) ra->size *= 2; ra->async_size = HPAGE_PMD_NR; page_cache_ra_order(&ractl, ra, HPAGE_PMD_ORDER); @@ -3012,12 +3022,12 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf) #endif /* If we don't want any read-ahead, don't bother */ - if (vmf->vma->vm_flags & VM_RAND_READ) + if (vm_flags & VM_RAND_READ) return fpin; if (!ra->ra_pages) return fpin; - if (vmf->vma->vm_flags & VM_SEQ_READ) { + if (vm_flags & VM_SEQ_READ) { fpin = maybe_unlock_mmap_for_io(vmf, fpin); page_cache_sync_ra(&ractl, ra->ra_pages); return fpin; diff --git a/mm/huge_memory.c b/mm/huge_memory.c index a77c78a2b6b5..834f288b3769 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2377,6 +2377,7 @@ static void __split_huge_page_tail(struct page *head, int tail, page_tail); page_tail->mapping = head->mapping; page_tail->index = head->index + tail; + page_tail->private = 0; /* Page flags must be visible before we make the page non-compound. */ smp_wmb(); @@ -2672,8 +2673,7 @@ out_unlock: if (mapping) i_mmap_unlock_read(mapping); out: - /* Free any memory we didn't use */ - xas_nomem(&xas, 0); + xas_destroy(&xas); count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED); return ret; } diff --git a/mm/hwpoison-inject.c b/mm/hwpoison-inject.c index 5c0cddd81505..65e242b5a432 100644 --- a/mm/hwpoison-inject.c +++ b/mm/hwpoison-inject.c @@ -48,7 +48,7 @@ static int hwpoison_inject(void *data, u64 val) inject: pr_info("Injecting memory failure at pfn %#lx\n", pfn); - err = memory_failure(pfn, 0); + err = memory_failure(pfn, MF_SW_SIMULATED); return (err == -EOPNOTSUPP) ? 0 : err; } diff --git a/mm/kfence/core.c b/mm/kfence/core.c index 4e7cd4c8e687..4b5e5a3d3a63 100644 --- a/mm/kfence/core.c +++ b/mm/kfence/core.c @@ -360,6 +360,9 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g unsigned long flags; struct slab *slab; void *addr; + const bool random_right_allocate = prandom_u32_max(2); + const bool random_fault = CONFIG_KFENCE_STRESS_TEST_FAULTS && + !prandom_u32_max(CONFIG_KFENCE_STRESS_TEST_FAULTS); /* Try to obtain a free object. */ raw_spin_lock_irqsave(&kfence_freelist_lock, flags); @@ -404,7 +407,7 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g * is that the out-of-bounds accesses detected are deterministic for * such allocations. */ - if (prandom_u32_max(2)) { + if (random_right_allocate) { /* Allocate on the "right" side, re-calculate address. 
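The kfence change above hoists both prandom_u32_max() decisions into const booleans computed before raw_spin_lock_irqsave() is taken, presumably so that no RNG code runs inside the raw-spinlock section. The shape of that fix, sketched with pthreads and illustrative names:

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

static void guarded_alloc(void)
{
    /* Decide up front, outside the critical section, mirroring
     * random_right_allocate / random_fault in kfence_guarded_alloc(). */
    const bool right_side = rand() % 2;

    pthread_mutex_lock(&pool_lock);
    /* ... only lock-safe work happens here, consulting right_side ... */
    (void)right_side;
    pthread_mutex_unlock(&pool_lock);
}

int main(void)
{
    srand(0);
    guarded_alloc();
    return 0;
}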
*/ meta->addr += PAGE_SIZE - size; meta->addr = ALIGN_DOWN(meta->addr, cache->align); @@ -444,7 +447,7 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g if (cache->ctor) cache->ctor(addr); - if (CONFIG_KFENCE_STRESS_TEST_FAULTS && !prandom_u32_max(CONFIG_KFENCE_STRESS_TEST_FAULTS)) + if (random_fault) kfence_protect(meta->addr); /* Random "faults" by protecting the object. */ atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]); diff --git a/mm/madvise.c b/mm/madvise.c index d7b4f2602949..0316bbc6441b 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -1112,7 +1112,7 @@ static int madvise_inject_error(int behavior, } else { pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n", pfn, start); - ret = memory_failure(pfn, MF_COUNT_INCREASED); + ret = memory_failure(pfn, MF_COUNT_INCREASED | MF_SW_SIMULATED); if (ret == -EOPNOTSUPP) ret = 0; } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index abec50f31fe6..618c366a2f07 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -4859,7 +4859,7 @@ static int mem_cgroup_slab_show(struct seq_file *m, void *p) { /* * Deprecated. - * Please, take a look at tools/cgroup/slabinfo.py . + * Please, take a look at tools/cgroup/memcg_slabinfo.py . */ return 0; } diff --git a/mm/memory-failure.c b/mm/memory-failure.c index b85661cbdc4a..da39ec8afca8 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -69,6 +69,8 @@ int sysctl_memory_failure_recovery __read_mostly = 1; atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0); +static bool hw_memory_failure __read_mostly = false; + static bool __page_handle_poison(struct page *page) { int ret; @@ -1768,6 +1770,9 @@ int memory_failure(unsigned long pfn, int flags) mutex_lock(&mf_mutex); + if (!(flags & MF_SW_SIMULATED)) + hw_memory_failure = true; + p = pfn_to_online_page(pfn); if (!p) { res = arch_memory_failure(pfn, flags); @@ -2103,6 +2108,13 @@ int unpoison_memory(unsigned long pfn) mutex_lock(&mf_mutex); + if (hw_memory_failure) { + unpoison_pr_info("Unpoison: Disabled after HW memory failure %#lx\n", + pfn, &unpoison_rs); + ret = -EOPNOTSUPP; + goto unlock_mutex; + } + if (!PageHWPoison(p)) { unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n", pfn, &unpoison_rs); diff --git a/mm/migrate.c b/mm/migrate.c index e51588e95f57..6c1ea61f39d8 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1106,6 +1106,7 @@ static int unmap_and_move(new_page_t get_new_page, if (!newpage) return -ENOMEM; + newpage->private = 0; rc = __unmap_and_move(page, newpage, force, mode); if (rc == MIGRATEPAGE_SUCCESS) set_page_owner_migrate_reason(newpage, reason); diff --git a/mm/page_isolation.c b/mm/page_isolation.c index d200d41ad0d3..9d73dc38e3d7 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -286,6 +286,8 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages) * @flags: isolation flags * @gfp_flags: GFP flags used for migrating pages * @isolate_before: isolate the pageblock before the boundary_pfn + * @skip_isolation: the flag to skip the pageblock isolation in the second + * isolate_single_pageblock() * * Free and in-use pages can be as big as MAX_ORDER-1 and contain more than one * pageblock.
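The sticky hw_memory_failure flag above means that once one genuine (non-MF_SW_SIMULATED) error has been handled, unpoison_memory() permanently refuses to run, since software-injected and real poison can no longer be told apart. A compact model of that gating; the flag value and helper names are illustrative, only the control flow mirrors the diff:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define MF_SW_SIMULATED_EX 0x1  /* illustrative flag value */

static bool hw_memory_failure;

static int memory_failure_model(unsigned long pfn, int flags)
{
    if (!(flags & MF_SW_SIMULATED_EX))
        hw_memory_failure = true;   /* sticks for the lifetime of the system */
    printf("poisoned pfn %#lx\n", pfn);
    return 0;
}

static int unpoison_memory_model(unsigned long pfn)
{
    if (hw_memory_failure)
        return -EOPNOTSUPP;         /* a real HW error was seen: refuse */
    printf("unpoisoned pfn %#lx\n", pfn);
    return 0;
}

int main(void)
{
    memory_failure_model(0x100, MF_SW_SIMULATED_EX);
    printf("%d\n", unpoison_memory_model(0x100)); /* 0: still allowed */
    memory_failure_model(0x200, 0);               /* genuine error */
    printf("%d\n", unpoison_memory_model(0x100)); /* negative: disabled */
    return 0;
}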
When not all pageblocks within a page are isolated at the same diff --git a/mm/readahead.c b/mm/readahead.c index 415c39d764ea..fdcd28cbd92d 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -164,12 +164,14 @@ static void read_pages(struct readahead_control *rac) while ((folio = readahead_folio(rac)) != NULL) { unsigned long nr = folio_nr_pages(folio); + folio_get(folio); rac->ra->size -= nr; if (rac->ra->async_size >= nr) { rac->ra->async_size -= nr; filemap_remove_folio(folio); } folio_unlock(folio); + folio_put(folio); } } else { while ((folio = readahead_folio(rac)) != NULL) @@ -508,6 +510,7 @@ void page_cache_ra_order(struct readahead_control *ractl, new_order--; } + filemap_invalidate_lock_shared(mapping); while (index <= limit) { unsigned int order = new_order; @@ -534,6 +537,7 @@ void page_cache_ra_order(struct readahead_control *ractl, } read_pages(ractl); + filemap_invalidate_unlock_shared(mapping); /* * If there were already pages in the page cache, then we may have diff --git a/mm/slub.c b/mm/slub.c index e5535020e0fd..b1281b8654bd 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -726,25 +726,48 @@ static struct track *get_track(struct kmem_cache *s, void *object, return kasan_reset_tag(p + alloc); } -static void noinline set_track(struct kmem_cache *s, void *object, - enum track_item alloc, unsigned long addr) -{ - struct track *p = get_track(s, object, alloc); - #ifdef CONFIG_STACKDEPOT +static noinline depot_stack_handle_t set_track_prepare(void) +{ + depot_stack_handle_t handle; unsigned long entries[TRACK_ADDRS_COUNT]; unsigned int nr_entries; nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3); - p->handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT); + handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT); + + return handle; +} +#else +static inline depot_stack_handle_t set_track_prepare(void) +{ + return 0; +} #endif +static void set_track_update(struct kmem_cache *s, void *object, + enum track_item alloc, unsigned long addr, + depot_stack_handle_t handle) +{ + struct track *p = get_track(s, object, alloc); + +#ifdef CONFIG_STACKDEPOT + p->handle = handle; +#endif p->addr = addr; p->cpu = smp_processor_id(); p->pid = current->pid; p->when = jiffies; } +static __always_inline void set_track(struct kmem_cache *s, void *object, + enum track_item alloc, unsigned long addr) +{ + depot_stack_handle_t handle = set_track_prepare(); + + set_track_update(s, object, alloc, addr, handle); +} + static void init_tracking(struct kmem_cache *s, void *object) { struct track *p; @@ -1373,6 +1396,10 @@ static noinline int free_debug_processing( int cnt = 0; unsigned long flags, flags2; int ret = 0; + depot_stack_handle_t handle = 0; + + if (s->flags & SLAB_STORE_USER) + handle = set_track_prepare(); spin_lock_irqsave(&n->list_lock, flags); slab_lock(slab, &flags2); @@ -1391,7 +1418,7 @@ next_object: } if (s->flags & SLAB_STORE_USER) - set_track(s, object, TRACK_FREE, addr); + set_track_update(s, object, TRACK_FREE, addr, handle); trace(s, slab, object, 0); /* Freepointer not overwritten by init_object(), SLAB_POISON moved it */ init_object(s, object, SLUB_RED_INACTIVE); @@ -2936,6 +2963,7 @@ redo: if (!freelist) { c->slab = NULL; + c->tid = next_tid(c->tid); local_unlock_irqrestore(&s->cpu_slab->lock, flags); stat(s, DEACTIVATE_BYPASS); goto new_slab; @@ -2968,6 +2996,7 @@ deactivate_slab: freelist = c->freelist; c->slab = NULL; c->freelist = NULL; + c->tid = next_tid(c->tid); local_unlock_irqrestore(&s->cpu_slab->lock, flags); deactivate_slab(s, slab, freelist); diff 
--git a/mm/swap.c b/mm/swap.c index f3922a96b2e9..034bb24879a3 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -881,7 +881,7 @@ void lru_cache_disable(void) * lru_disable_count = 0 will have exited the critical * section when synchronize_rcu() returns. */ - synchronize_rcu(); + synchronize_rcu_expedited(); #ifdef CONFIG_SMP __lru_add_drain_all(true); #else diff --git a/mm/usercopy.c b/mm/usercopy.c index baeacc735b83..4e1da708699b 100644 --- a/mm/usercopy.c +++ b/mm/usercopy.c @@ -161,29 +161,27 @@ static inline void check_bogus_address(const unsigned long ptr, unsigned long n, static inline void check_heap_object(const void *ptr, unsigned long n, bool to_user) { + uintptr_t addr = (uintptr_t)ptr; + unsigned long offset; struct folio *folio; if (is_kmap_addr(ptr)) { - unsigned long page_end = (unsigned long)ptr | (PAGE_SIZE - 1); - - if ((unsigned long)ptr + n - 1 > page_end) - usercopy_abort("kmap", NULL, to_user, - offset_in_page(ptr), n); + offset = offset_in_page(ptr); + if (n > PAGE_SIZE - offset) + usercopy_abort("kmap", NULL, to_user, offset, n); return; } if (is_vmalloc_addr(ptr)) { - struct vm_struct *area = find_vm_area(ptr); - unsigned long offset; + struct vmap_area *area = find_vmap_area(addr); - if (!area) { + if (!area) usercopy_abort("vmalloc", "no area", to_user, 0, n); - return; - } - offset = ptr - area->addr; - if (offset + n > get_vm_area_size(area)) + if (n > area->va_end - addr) { + offset = addr - area->va_start; usercopy_abort("vmalloc", NULL, to_user, offset, n); + } return; } @@ -196,8 +194,8 @@ static inline void check_heap_object(const void *ptr, unsigned long n, /* Check slab allocator for flags and size. */ __check_heap_object(ptr, n, folio_slab(folio), to_user); } else if (folio_test_large(folio)) { - unsigned long offset = ptr - folio_address(folio); - if (offset + n > folio_size(folio)) + offset = ptr - folio_address(folio); + if (n > folio_size(folio) - offset) usercopy_abort("page alloc", NULL, to_user, offset, n); } } diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 07db42455dd4..effd1ff6a4b4 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -1798,7 +1798,7 @@ static void free_unmap_vmap_area(struct vmap_area *va) free_vmap_area_noflush(va); } -static struct vmap_area *find_vmap_area(unsigned long addr) +struct vmap_area *find_vmap_area(unsigned long addr) { struct vmap_area *va; diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index 95393bb2760b..4c7030ed8d33 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -1661,9 +1661,12 @@ static int ax25_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; - struct sk_buff *skb; + struct sk_buff *skb, *last; + struct sk_buff_head *sk_queue; int copied; int err = 0; + int off = 0; + long timeo; lock_sock(sk); /* @@ -1675,10 +1678,29 @@ static int ax25_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, goto out; } - /* Now we can treat all alike */ - skb = skb_recv_datagram(sk, flags, &err); - if (skb == NULL) - goto out; + /* We need support for non-blocking reads. */ + sk_queue = &sk->sk_receive_queue; + skb = __skb_try_recv_datagram(sk, sk_queue, flags, &off, &err, &last); + /* If no packet is available, release_sock(sk) and try again. 
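A pattern worth noting in the usercopy rework above: every bounds check is rewritten from the additive form offset + n > limit to the subtractive form n > limit - offset. The subtraction cannot wrap because offset is already known to lie within limit, while the addition can overflow for a huge attacker-supplied n and slip past the check. A quick demonstration:

#include <stdio.h>

int main(void)
{
    unsigned long limit = 4096, offset = 100;
    unsigned long n = (unsigned long)-64;   /* enormous, attacker-ish length */

    /* Additive form: offset + n wraps around to 36, check wrongly passes. */
    printf("additive rejects: %d\n", offset + n > limit);       /* 0 */
    /* Subtractive form: n > 3996 is trivially true, check catches it. */
    printf("subtractive rejects: %d\n", n > limit - offset);    /* 1 */
    return 0;
}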
*/ + if (!skb) { + if (err != -EAGAIN) + goto out; + release_sock(sk); + timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); + while (timeo && !__skb_wait_for_more_packets(sk, sk_queue, &err, + &timeo, last)) { + skb = __skb_try_recv_datagram(sk, sk_queue, flags, &off, + &err, &last); + if (skb) + break; + + if (err != -EAGAIN) + goto done; + } + if (!skb) + goto done; + lock_sock(sk); + } if (!sk_to_ax25(sk)->pidincl) skb_pull(skb, 1); /* Remove PID */ @@ -1725,6 +1747,7 @@ static int ax25_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, out: release_sock(sk); +done: return err; } diff --git a/net/core/dev.c b/net/core/dev.c index 08ce317fcec8..8e6f22961206 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -397,16 +397,18 @@ static void list_netdevice(struct net_device *dev) /* Device list removal * caller must respect a RCU grace period before freeing/reusing dev */ -static void unlist_netdevice(struct net_device *dev) +static void unlist_netdevice(struct net_device *dev, bool lock) { ASSERT_RTNL(); /* Unlink dev from the device chain */ - write_lock(&dev_base_lock); + if (lock) + write_lock(&dev_base_lock); list_del_rcu(&dev->dev_list); netdev_name_node_del(dev->name_node); hlist_del_rcu(&dev->index_hlist); - write_unlock(&dev_base_lock); + if (lock) + write_unlock(&dev_base_lock); dev_base_seq_inc(dev_net(dev)); } @@ -10043,11 +10045,11 @@ int register_netdevice(struct net_device *dev) goto err_uninit; ret = netdev_register_kobject(dev); - if (ret) { - dev->reg_state = NETREG_UNREGISTERED; + write_lock(&dev_base_lock); + dev->reg_state = ret ? NETREG_UNREGISTERED : NETREG_REGISTERED; + write_unlock(&dev_base_lock); + if (ret) goto err_uninit; - } - dev->reg_state = NETREG_REGISTERED; __netdev_update_features(dev); @@ -10329,7 +10331,9 @@ void netdev_run_todo(void) continue; } + write_lock(&dev_base_lock); dev->reg_state = NETREG_UNREGISTERED; + write_unlock(&dev_base_lock); linkwatch_forget_dev(dev); } @@ -10810,9 +10814,10 @@ void unregister_netdevice_many(struct list_head *head) list_for_each_entry(dev, head, unreg_list) { /* And unlink it from device chain. */ - unlist_netdevice(dev); - + write_lock(&dev_base_lock); + unlist_netdevice(dev, false); dev->reg_state = NETREG_UNREGISTERING; + write_unlock(&dev_base_lock); } flush_all_backlogs(); @@ -10959,7 +10964,7 @@ int __dev_change_net_namespace(struct net_device *dev, struct net *net, dev_close(dev); /* And unlink it from device chain */ - unlist_netdevice(dev); + unlist_netdevice(dev, true); synchronize_net(); diff --git a/net/core/filter.c b/net/core/filter.c index 5af58eb48587..5d16d66727fc 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -6516,10 +6516,21 @@ __bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, ifindex, proto, netns_id, flags); if (sk) { - sk = sk_to_full_sk(sk); - if (!sk_fullsock(sk)) { + struct sock *sk2 = sk_to_full_sk(sk); + + /* sk_to_full_sk() may return (sk)->rsk_listener, so make sure the original sk + * sock refcnt is decremented to prevent a request_sock leak. 
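The lock parameter added to unlist_netdevice() above lets unregister_netdevice_many() hold dev_base_lock across both the list removal and the reg_state write, so readers under the lock never see a half-done unregistration. The same conditional-locking idiom in a self-contained pthread sketch (all names illustrative):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t base_lock = PTHREAD_MUTEX_INITIALIZER;
static int reg_state = 1;   /* 1 ~ REGISTERED, 0 ~ UNREGISTERING */

static void unlist_one(bool lock)
{
    if (lock)
        pthread_mutex_lock(&base_lock);
    /* ... unlink the entry from the global list ... */
    if (lock)
        pthread_mutex_unlock(&base_lock);
}

/* Batch caller: hold the lock across unlink *and* the state change so a
 * reader never observes a removed entry still marked registered. */
static void unlist_batch(void)
{
    pthread_mutex_lock(&base_lock);
    unlist_one(false);  /* lock already held */
    reg_state = 0;
    pthread_mutex_unlock(&base_lock);
}

int main(void)
{
    unlist_batch();
    return reg_state;
}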
+ */ + if (!sk_fullsock(sk2)) + sk2 = NULL; + if (sk2 != sk) { sock_gen_put(sk); - return NULL; + /* Ensure there is no need to bump sk2 refcnt */ + if (unlikely(sk2 && !sock_flag(sk2, SOCK_RCU_FREE))) { + WARN_ONCE(1, "Found non-RCU, unreferenced socket!"); + return NULL; + } + sk = sk2; } } @@ -6553,10 +6564,21 @@ bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, flags); if (sk) { - sk = sk_to_full_sk(sk); - if (!sk_fullsock(sk)) { + struct sock *sk2 = sk_to_full_sk(sk); + + /* sk_to_full_sk() may return (sk)->rsk_listener, so make sure the original sk + * sock refcnt is decremented to prevent a request_sock leak. + */ + if (!sk_fullsock(sk2)) + sk2 = NULL; + if (sk2 != sk) { sock_gen_put(sk); - return NULL; + /* Ensure there is no need to bump sk2 refcnt */ + if (unlikely(sk2 && !sock_flag(sk2, SOCK_RCU_FREE))) { + WARN_ONCE(1, "Found non-RCU, unreferenced socket!"); + return NULL; + } + sk = sk2; } } diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c index 73f68d4625f3..929f6379a279 100644 --- a/net/core/flow_offload.c +++ b/net/core/flow_offload.c @@ -595,3 +595,9 @@ int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch, return (bo && list_empty(&bo->cb_list)) ? -EOPNOTSUPP : count; } EXPORT_SYMBOL(flow_indr_dev_setup_offload); + +bool flow_indr_dev_exists(void) +{ + return !list_empty(&flow_block_indr_dev_list); +} +EXPORT_SYMBOL(flow_indr_dev_exists); diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index e319e242dddf..a3642569fe53 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -33,6 +33,7 @@ static const char fmt_dec[] = "%d\n"; static const char fmt_ulong[] = "%lu\n"; static const char fmt_u64[] = "%llu\n"; +/* Caller holds RTNL or dev_base_lock */ static inline int dev_isalive(const struct net_device *dev) { return dev->reg_state <= NETREG_REGISTERED; diff --git a/net/core/skmsg.c b/net/core/skmsg.c index 22b983ade0e7..b0fcd0200e84 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -699,6 +699,11 @@ struct sk_psock *sk_psock_init(struct sock *sk, int node) write_lock_bh(&sk->sk_callback_lock); + if (sk_is_inet(sk) && inet_csk_has_ulp(sk)) { + psock = ERR_PTR(-EINVAL); + goto out; + } + if (sk->sk_user_data) { psock = ERR_PTR(-EBUSY); goto out; diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 2e78458900f2..eb8e128e43e8 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -1120,12 +1120,6 @@ static int __init dccp_init(void) SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, NULL); if (!dccp_hashinfo.bind_bucket_cachep) goto out_free_hashinfo2; - dccp_hashinfo.bind2_bucket_cachep = - kmem_cache_create("dccp_bind2_bucket", - sizeof(struct inet_bind2_bucket), 0, - SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, NULL); - if (!dccp_hashinfo.bind2_bucket_cachep) - goto out_free_bind_bucket_cachep; /* * Size and allocate the main established and bind bucket @@ -1156,7 +1150,7 @@ static int __init dccp_init(void) if (!dccp_hashinfo.ehash) { DCCP_CRIT("Failed to allocate DCCP established hash table"); - goto out_free_bind2_bucket_cachep; + goto out_free_bind_bucket_cachep; } for (i = 0; i <= dccp_hashinfo.ehash_mask; i++) @@ -1182,23 +1176,14 @@ static int __init dccp_init(void) goto out_free_dccp_locks; } - dccp_hashinfo.bhash2 = (struct inet_bind2_hashbucket *) - __get_free_pages(GFP_ATOMIC | __GFP_NOWARN, bhash_order); - - if (!dccp_hashinfo.bhash2) { - DCCP_CRIT("Failed to allocate DCCP bind2 hash table"); - goto out_free_dccp_bhash; - } - for (i = 0; i < dccp_hashinfo.bhash_size; i++) { 
spin_lock_init(&dccp_hashinfo.bhash[i].lock); INIT_HLIST_HEAD(&dccp_hashinfo.bhash[i].chain); - INIT_HLIST_HEAD(&dccp_hashinfo.bhash2[i].chain); } rc = dccp_mib_init(); if (rc) - goto out_free_dccp_bhash2; + goto out_free_dccp_bhash; rc = dccp_ackvec_init(); if (rc) @@ -1222,38 +1207,30 @@ out_ackvec_exit: dccp_ackvec_exit(); out_free_dccp_mib: dccp_mib_exit(); -out_free_dccp_bhash2: - free_pages((unsigned long)dccp_hashinfo.bhash2, bhash_order); out_free_dccp_bhash: free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order); out_free_dccp_locks: inet_ehash_locks_free(&dccp_hashinfo); out_free_dccp_ehash: free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order); -out_free_bind2_bucket_cachep: - kmem_cache_destroy(dccp_hashinfo.bind2_bucket_cachep); out_free_bind_bucket_cachep: kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep); out_free_hashinfo2: inet_hashinfo2_free_mod(&dccp_hashinfo); out_fail: dccp_hashinfo.bhash = NULL; - dccp_hashinfo.bhash2 = NULL; dccp_hashinfo.ehash = NULL; dccp_hashinfo.bind_bucket_cachep = NULL; - dccp_hashinfo.bind2_bucket_cachep = NULL; return rc; } static void __exit dccp_fini(void) { - int bhash_order = get_order(dccp_hashinfo.bhash_size * - sizeof(struct inet_bind_hashbucket)); - ccid_cleanup_builtins(); dccp_mib_exit(); - free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order); - free_pages((unsigned long)dccp_hashinfo.bhash2, bhash_order); + free_pages((unsigned long)dccp_hashinfo.bhash, + get_order(dccp_hashinfo.bhash_size * + sizeof(struct inet_bind_hashbucket))); free_pages((unsigned long)dccp_hashinfo.ehash, get_order((dccp_hashinfo.ehash_mask + 1) * sizeof(struct inet_ehash_bucket))); diff --git a/net/ethtool/eeprom.c b/net/ethtool/eeprom.c index 7e6b37a54add..1c94bb8ea03f 100644 --- a/net/ethtool/eeprom.c +++ b/net/ethtool/eeprom.c @@ -36,7 +36,7 @@ static int fallback_set_params(struct eeprom_req_info *request, if (request->page) offset = request->page * ETH_MODULE_EEPROM_PAGE_LEN + offset; - if (modinfo->type == ETH_MODULE_SFF_8079 && + if (modinfo->type == ETH_MODULE_SFF_8472 && request->i2c_address == 0x51) offset += ETH_MODULE_EEPROM_PAGE_LEN * 2; diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index c0b7e6c21360..53f5f956d948 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -117,32 +117,6 @@ bool inet_rcv_saddr_any(const struct sock *sk) return !sk->sk_rcv_saddr; } -static bool use_bhash2_on_bind(const struct sock *sk) -{ -#if IS_ENABLED(CONFIG_IPV6) - int addr_type; - - if (sk->sk_family == AF_INET6) { - addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr); - return addr_type != IPV6_ADDR_ANY && - addr_type != IPV6_ADDR_MAPPED; - } -#endif - return sk->sk_rcv_saddr != htonl(INADDR_ANY); -} - -static u32 get_bhash2_nulladdr_hash(const struct sock *sk, struct net *net, - int port) -{ -#if IS_ENABLED(CONFIG_IPV6) - struct in6_addr nulladdr = {}; - - if (sk->sk_family == AF_INET6) - return ipv6_portaddr_hash(net, &nulladdr, port); -#endif - return ipv4_portaddr_hash(net, 0, port); -} - void inet_get_local_port_range(struct net *net, int *low, int *high) { unsigned int seq; @@ -156,71 +130,16 @@ void inet_get_local_port_range(struct net *net, int *low, int *high) } EXPORT_SYMBOL(inet_get_local_port_range); -static bool bind_conflict_exist(const struct sock *sk, struct sock *sk2, - kuid_t sk_uid, bool relax, - bool reuseport_cb_ok, bool reuseport_ok) -{ - int bound_dev_if2; - - if (sk == sk2) - return false; - - bound_dev_if2 = READ_ONCE(sk2->sk_bound_dev_if); - - if 
(!sk->sk_bound_dev_if || !bound_dev_if2 || - sk->sk_bound_dev_if == bound_dev_if2) { - if (sk->sk_reuse && sk2->sk_reuse && - sk2->sk_state != TCP_LISTEN) { - if (!relax || (!reuseport_ok && sk->sk_reuseport && - sk2->sk_reuseport && reuseport_cb_ok && - (sk2->sk_state == TCP_TIME_WAIT || - uid_eq(sk_uid, sock_i_uid(sk2))))) - return true; - } else if (!reuseport_ok || !sk->sk_reuseport || - !sk2->sk_reuseport || !reuseport_cb_ok || - (sk2->sk_state != TCP_TIME_WAIT && - !uid_eq(sk_uid, sock_i_uid(sk2)))) { - return true; - } - } - return false; -} - -static bool check_bhash2_conflict(const struct sock *sk, - struct inet_bind2_bucket *tb2, kuid_t sk_uid, - bool relax, bool reuseport_cb_ok, - bool reuseport_ok) -{ - struct sock *sk2; - - sk_for_each_bound_bhash2(sk2, &tb2->owners) { - if (sk->sk_family == AF_INET && ipv6_only_sock(sk2)) - continue; - - if (bind_conflict_exist(sk, sk2, sk_uid, relax, - reuseport_cb_ok, reuseport_ok)) - return true; - } - return false; -} - -/* This should be called only when the corresponding inet_bind_bucket spinlock - * is held - */ -static int inet_csk_bind_conflict(const struct sock *sk, int port, - struct inet_bind_bucket *tb, - struct inet_bind2_bucket *tb2, /* may be null */ +static int inet_csk_bind_conflict(const struct sock *sk, + const struct inet_bind_bucket *tb, bool relax, bool reuseport_ok) { - struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo; - kuid_t uid = sock_i_uid((struct sock *)sk); - struct sock_reuseport *reuseport_cb; - struct inet_bind2_hashbucket *head2; - bool reuseport_cb_ok; struct sock *sk2; - struct net *net; - int l3mdev; - u32 hash; + bool reuseport_cb_ok; + bool reuse = sk->sk_reuse; + bool reuseport = !!sk->sk_reuseport; + struct sock_reuseport *reuseport_cb; + kuid_t uid = sock_i_uid((struct sock *)sk); rcu_read_lock(); reuseport_cb = rcu_dereference(sk->sk_reuseport_cb); @@ -231,42 +150,40 @@ static int inet_csk_bind_conflict(const struct sock *sk, int port, /* * Unlike other sk lookup places we do not check * for sk_net here, since _all_ the socks listed - * in tb->owners and tb2->owners list belong - * to the same net + * in tb->owners list belong to the same net - the + * one this bucket belongs to. */ - if (!use_bhash2_on_bind(sk)) { - sk_for_each_bound(sk2, &tb->owners) - if (bind_conflict_exist(sk, sk2, uid, relax, - reuseport_cb_ok, reuseport_ok) && - inet_rcv_saddr_equal(sk, sk2, true)) - return true; + sk_for_each_bound(sk2, &tb->owners) { + int bound_dev_if2; - return false; + if (sk == sk2) + continue; + bound_dev_if2 = READ_ONCE(sk2->sk_bound_dev_if); + if ((!sk->sk_bound_dev_if || + !bound_dev_if2 || + sk->sk_bound_dev_if == bound_dev_if2)) { + if (reuse && sk2->sk_reuse && + sk2->sk_state != TCP_LISTEN) { + if ((!relax || + (!reuseport_ok && + reuseport && sk2->sk_reuseport && + reuseport_cb_ok && + (sk2->sk_state == TCP_TIME_WAIT || + uid_eq(uid, sock_i_uid(sk2))))) && + inet_rcv_saddr_equal(sk, sk2, true)) + break; + } else if (!reuseport_ok || + !reuseport || !sk2->sk_reuseport || + !reuseport_cb_ok || + (sk2->sk_state != TCP_TIME_WAIT && + !uid_eq(uid, sock_i_uid(sk2)))) { + if (inet_rcv_saddr_equal(sk, sk2, true)) + break; + } + } } - - if (tb2 && check_bhash2_conflict(sk, tb2, uid, relax, reuseport_cb_ok, - reuseport_ok)) - return true; - - net = sock_net(sk); - - /* check there's no conflict with an existing IPV6_ADDR_ANY (if ipv6) or - * INADDR_ANY (if ipv4) socket. 
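The restored inet_csk_bind_conflict() above walks tb->owners with, roughly, three ways for two sockets to coexist on one port: different bound devices, SO_REUSEADDR on both sides with the other socket not listening, or SO_REUSEPORT under the right ownership rules. A deliberately simplified user-space model of that predicate; the TIME_WAIT and reuseport-callback subtleties of the real code are omitted:

#include <stdbool.h>
#include <stdio.h>

struct sk_model {
    int bound_dev_if;       /* 0 = any device */
    bool reuse;             /* SO_REUSEADDR */
    bool reuseport;         /* SO_REUSEPORT */
    bool listening;
    unsigned int uid;
};

/* true => the two sockets may NOT share the same port and address */
static bool bind_conflict(const struct sk_model *sk, const struct sk_model *sk2)
{
    if (sk->bound_dev_if && sk2->bound_dev_if &&
        sk->bound_dev_if != sk2->bound_dev_if)
        return false;   /* different devices never conflict */

    if (sk->reuse && sk2->reuse && !sk2->listening)
        return false;   /* SO_REUSEADDR on both sides */

    if (sk->reuseport && sk2->reuseport && sk->uid == sk2->uid)
        return false;   /* SO_REUSEPORT within one user */

    return true;
}

int main(void)
{
    struct sk_model a        = { 0, true, false, false, 1000 };
    struct sk_model listener = { 0, true, false, true,  1000 };

    printf("%d\n", bind_conflict(&a, &listener)); /* 1: listener blocks it */
    return 0;
}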
- */ - hash = get_bhash2_nulladdr_hash(sk, net, port); - head2 = &hinfo->bhash2[hash & (hinfo->bhash_size - 1)]; - - l3mdev = inet_sk_bound_l3mdev(sk); - inet_bind_bucket_for_each(tb2, &head2->chain) - if (check_bind2_bucket_match_nulladdr(tb2, net, port, l3mdev, sk)) - break; - - if (tb2 && check_bhash2_conflict(sk, tb2, uid, relax, reuseport_cb_ok, - reuseport_ok)) - return true; - - return false; + return sk2 != NULL; } /* @@ -274,20 +191,16 @@ static int inet_csk_bind_conflict(const struct sock *sk, int port, * inet_bind_hashbucket lock held. */ static struct inet_bind_hashbucket * -inet_csk_find_open_port(struct sock *sk, struct inet_bind_bucket **tb_ret, - struct inet_bind2_bucket **tb2_ret, - struct inet_bind2_hashbucket **head2_ret, int *port_ret) +inet_csk_find_open_port(struct sock *sk, struct inet_bind_bucket **tb_ret, int *port_ret) { struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo; - struct inet_bind2_hashbucket *head2; + int port = 0; struct inet_bind_hashbucket *head; struct net *net = sock_net(sk); + bool relax = false; int i, low, high, attempt_half; - struct inet_bind2_bucket *tb2; struct inet_bind_bucket *tb; u32 remaining, offset; - bool relax = false; - int port = 0; int l3mdev; l3mdev = inet_sk_bound_l3mdev(sk); @@ -326,12 +239,10 @@ other_parity_scan: head = &hinfo->bhash[inet_bhashfn(net, port, hinfo->bhash_size)]; spin_lock_bh(&head->lock); - tb2 = inet_bind2_bucket_find(hinfo, net, port, l3mdev, sk, - &head2); inet_bind_bucket_for_each(tb, &head->chain) - if (check_bind_bucket_match(tb, net, port, l3mdev)) { - if (!inet_csk_bind_conflict(sk, port, tb, tb2, - relax, false)) + if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev && + tb->port == port) { + if (!inet_csk_bind_conflict(sk, tb, relax, false)) goto success; goto next_port; } @@ -361,8 +272,6 @@ next_port: success: *port_ret = port; *tb_ret = tb; - *tb2_ret = tb2; - *head2_ret = head2; return head; } @@ -458,81 +367,54 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum) { bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN; struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo; - bool bhash_created = false, bhash2_created = false; - struct inet_bind2_bucket *tb2 = NULL; - struct inet_bind2_hashbucket *head2; - struct inet_bind_bucket *tb = NULL; + int ret = 1, port = snum; struct inet_bind_hashbucket *head; struct net *net = sock_net(sk); - int ret = 1, port = snum; - bool found_port = false; + struct inet_bind_bucket *tb = NULL; int l3mdev; l3mdev = inet_sk_bound_l3mdev(sk); if (!port) { - head = inet_csk_find_open_port(sk, &tb, &tb2, &head2, &port); + head = inet_csk_find_open_port(sk, &tb, &port); if (!head) return ret; - if (tb && tb2) - goto success; - found_port = true; - } else { - head = &hinfo->bhash[inet_bhashfn(net, port, - hinfo->bhash_size)]; - spin_lock_bh(&head->lock); - inet_bind_bucket_for_each(tb, &head->chain) - if (check_bind_bucket_match(tb, net, port, l3mdev)) - break; - - tb2 = inet_bind2_bucket_find(hinfo, net, port, l3mdev, sk, - &head2); - } - - if (!tb) { - tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, net, - head, port, l3mdev); if (!tb) - goto fail_unlock; - bhash_created = true; - } - - if (!tb2) { - tb2 = inet_bind2_bucket_create(hinfo->bind2_bucket_cachep, - net, head2, port, l3mdev, sk); - if (!tb2) - goto fail_unlock; - bhash2_created = true; + goto tb_not_found; + goto success; } - - /* If we had to find an open port, we already checked for conflicts */ - if (!found_port && !hlist_empty(&tb->owners)) { + head = 
&hinfo->bhash[inet_bhashfn(net, port, + hinfo->bhash_size)]; + spin_lock_bh(&head->lock); + inet_bind_bucket_for_each(tb, &head->chain) + if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev && + tb->port == port) + goto tb_found; +tb_not_found: + tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, + net, head, port, l3mdev); + if (!tb) + goto fail_unlock; +tb_found: + if (!hlist_empty(&tb->owners)) { if (sk->sk_reuse == SK_FORCE_REUSE) goto success; if ((tb->fastreuse > 0 && reuse) || sk_reuseport_match(tb, sk)) goto success; - if (inet_csk_bind_conflict(sk, port, tb, tb2, true, true)) + if (inet_csk_bind_conflict(sk, tb, true, true)) goto fail_unlock; } success: inet_csk_update_fastreuse(tb, sk); if (!inet_csk(sk)->icsk_bind_hash) - inet_bind_hash(sk, tb, tb2, port); + inet_bind_hash(sk, tb, port); WARN_ON(inet_csk(sk)->icsk_bind_hash != tb); - WARN_ON(inet_csk(sk)->icsk_bind2_hash != tb2); ret = 0; fail_unlock: - if (ret) { - if (bhash_created) - inet_bind_bucket_destroy(hinfo->bind_bucket_cachep, tb); - if (bhash2_created) - inet_bind2_bucket_destroy(hinfo->bind2_bucket_cachep, - tb2); - } spin_unlock_bh(&head->lock); return ret; } @@ -1079,7 +961,6 @@ struct sock *inet_csk_clone_lock(const struct sock *sk, inet_sk_set_state(newsk, TCP_SYN_RECV); newicsk->icsk_bind_hash = NULL; - newicsk->icsk_bind2_hash = NULL; inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port; inet_sk(newsk)->inet_num = inet_rsk(req)->ir_num; diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index e8de5e699b3f..b9d995b5ce24 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -81,41 +81,6 @@ struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep, return tb; } -struct inet_bind2_bucket *inet_bind2_bucket_create(struct kmem_cache *cachep, - struct net *net, - struct inet_bind2_hashbucket *head, - const unsigned short port, - int l3mdev, - const struct sock *sk) -{ - struct inet_bind2_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC); - - if (tb) { - write_pnet(&tb->ib_net, net); - tb->l3mdev = l3mdev; - tb->port = port; -#if IS_ENABLED(CONFIG_IPV6) - if (sk->sk_family == AF_INET6) - tb->v6_rcv_saddr = sk->sk_v6_rcv_saddr; - else -#endif - tb->rcv_saddr = sk->sk_rcv_saddr; - INIT_HLIST_HEAD(&tb->owners); - hlist_add_head(&tb->node, &head->chain); - } - return tb; -} - -static bool bind2_bucket_addr_match(struct inet_bind2_bucket *tb2, struct sock *sk) -{ -#if IS_ENABLED(CONFIG_IPV6) - if (sk->sk_family == AF_INET6) - return ipv6_addr_equal(&tb2->v6_rcv_saddr, - &sk->sk_v6_rcv_saddr); -#endif - return tb2->rcv_saddr == sk->sk_rcv_saddr; -} - /* * Caller must hold hashbucket lock for this tb with local BH disabled */ @@ -127,25 +92,12 @@ void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket } } -/* Caller must hold the lock for the corresponding hashbucket in the bhash table - * with local BH disabled - */ -void inet_bind2_bucket_destroy(struct kmem_cache *cachep, struct inet_bind2_bucket *tb) -{ - if (hlist_empty(&tb->owners)) { - __hlist_del(&tb->node); - kmem_cache_free(cachep, tb); - } -} - void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb, - struct inet_bind2_bucket *tb2, const unsigned short snum) + const unsigned short snum) { inet_sk(sk)->inet_num = snum; sk_add_bind_node(sk, &tb->owners); inet_csk(sk)->icsk_bind_hash = tb; - sk_add_bind2_node(sk, &tb2->owners); - inet_csk(sk)->icsk_bind2_hash = tb2; } /* @@ -157,7 +109,6 @@ static void __inet_put_port(struct sock *sk) const int bhash = 
inet_bhashfn(sock_net(sk), inet_sk(sk)->inet_num, hashinfo->bhash_size); struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash]; - struct inet_bind2_bucket *tb2; struct inet_bind_bucket *tb; spin_lock(&head->lock); @@ -166,13 +117,6 @@ static void __inet_put_port(struct sock *sk) inet_csk(sk)->icsk_bind_hash = NULL; inet_sk(sk)->inet_num = 0; inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb); - - if (inet_csk(sk)->icsk_bind2_hash) { - tb2 = inet_csk(sk)->icsk_bind2_hash; - __sk_del_bind2_node(sk); - inet_csk(sk)->icsk_bind2_hash = NULL; - inet_bind2_bucket_destroy(hashinfo->bind2_bucket_cachep, tb2); - } spin_unlock(&head->lock); } @@ -189,19 +133,14 @@ int __inet_inherit_port(const struct sock *sk, struct sock *child) struct inet_hashinfo *table = sk->sk_prot->h.hashinfo; unsigned short port = inet_sk(child)->inet_num; const int bhash = inet_bhashfn(sock_net(sk), port, - table->bhash_size); + table->bhash_size); struct inet_bind_hashbucket *head = &table->bhash[bhash]; - struct inet_bind2_hashbucket *head_bhash2; - bool created_inet_bind_bucket = false; - struct net *net = sock_net(sk); - struct inet_bind2_bucket *tb2; struct inet_bind_bucket *tb; int l3mdev; spin_lock(&head->lock); tb = inet_csk(sk)->icsk_bind_hash; - tb2 = inet_csk(sk)->icsk_bind2_hash; - if (unlikely(!tb || !tb2)) { + if (unlikely(!tb)) { spin_unlock(&head->lock); return -ENOENT; } @@ -214,45 +153,25 @@ int __inet_inherit_port(const struct sock *sk, struct sock *child) * as that of the child socket. We have to look up or * create a new bind bucket for the child here. */ inet_bind_bucket_for_each(tb, &head->chain) { - if (check_bind_bucket_match(tb, net, port, l3mdev)) + if (net_eq(ib_net(tb), sock_net(sk)) && + tb->l3mdev == l3mdev && tb->port == port) break; } if (!tb) { tb = inet_bind_bucket_create(table->bind_bucket_cachep, - net, head, port, l3mdev); + sock_net(sk), head, port, + l3mdev); if (!tb) { spin_unlock(&head->lock); return -ENOMEM; } - created_inet_bind_bucket = true; } inet_csk_update_fastreuse(tb, child); - - goto bhash2_find; - } else if (!bind2_bucket_addr_match(tb2, child)) { - l3mdev = inet_sk_bound_l3mdev(sk); - -bhash2_find: - tb2 = inet_bind2_bucket_find(table, net, port, l3mdev, child, - &head_bhash2); - if (!tb2) { - tb2 = inet_bind2_bucket_create(table->bind2_bucket_cachep, - net, head_bhash2, port, - l3mdev, child); - if (!tb2) - goto error; - } } - inet_bind_hash(child, tb, tb2, port); + inet_bind_hash(child, tb, port); spin_unlock(&head->lock); return 0; - -error: - if (created_inet_bind_bucket) - inet_bind_bucket_destroy(table->bind_bucket_cachep, tb); - spin_unlock(&head->lock); - return -ENOMEM; } EXPORT_SYMBOL_GPL(__inet_inherit_port); @@ -756,76 +675,6 @@ void inet_unhash(struct sock *sk) } EXPORT_SYMBOL_GPL(inet_unhash); -static bool check_bind2_bucket_match(struct inet_bind2_bucket *tb, - struct net *net, unsigned short port, - int l3mdev, struct sock *sk) -{ -#if IS_ENABLED(CONFIG_IPV6) - if (sk->sk_family == AF_INET6) - return net_eq(ib2_net(tb), net) && tb->port == port && - tb->l3mdev == l3mdev && - ipv6_addr_equal(&tb->v6_rcv_saddr, &sk->sk_v6_rcv_saddr); - else -#endif - return net_eq(ib2_net(tb), net) && tb->port == port && - tb->l3mdev == l3mdev && tb->rcv_saddr == sk->sk_rcv_saddr; -} - -bool check_bind2_bucket_match_nulladdr(struct inet_bind2_bucket *tb, - struct net *net, const unsigned short port, - int l3mdev, const struct sock *sk) -{ -#if IS_ENABLED(CONFIG_IPV6) - struct in6_addr nulladdr = {}; - - if (sk->sk_family == AF_INET6) - return 
net_eq(ib2_net(tb), net) && tb->port == port && - tb->l3mdev == l3mdev && - ipv6_addr_equal(&tb->v6_rcv_saddr, &nulladdr); - else -#endif - return net_eq(ib2_net(tb), net) && tb->port == port && - tb->l3mdev == l3mdev && tb->rcv_saddr == 0; -} - -static struct inet_bind2_hashbucket * -inet_bhashfn_portaddr(struct inet_hashinfo *hinfo, const struct sock *sk, - const struct net *net, unsigned short port) -{ - u32 hash; - -#if IS_ENABLED(CONFIG_IPV6) - if (sk->sk_family == AF_INET6) - hash = ipv6_portaddr_hash(net, &sk->sk_v6_rcv_saddr, port); - else -#endif - hash = ipv4_portaddr_hash(net, sk->sk_rcv_saddr, port); - return &hinfo->bhash2[hash & (hinfo->bhash_size - 1)]; -} - -/* This should only be called when the spinlock for the socket's corresponding - * bind_hashbucket is held - */ -struct inet_bind2_bucket * -inet_bind2_bucket_find(struct inet_hashinfo *hinfo, struct net *net, - const unsigned short port, int l3mdev, struct sock *sk, - struct inet_bind2_hashbucket **head) -{ - struct inet_bind2_bucket *bhash2 = NULL; - struct inet_bind2_hashbucket *h; - - h = inet_bhashfn_portaddr(hinfo, sk, net, port); - inet_bind_bucket_for_each(bhash2, &h->chain) { - if (check_bind2_bucket_match(bhash2, net, port, l3mdev, sk)) - break; - } - - if (head) - *head = h; - - return bhash2; -} - /* RFC 6056 3.3.4. Algorithm 4: Double-Hash Port Selection Algorithm * Note that we use 32bit integers (vs RFC 'short integers') * because 2^16 is not a multiple of num_ephemeral and this @@ -846,13 +695,10 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, { struct inet_hashinfo *hinfo = death_row->hashinfo; struct inet_timewait_sock *tw = NULL; - struct inet_bind2_hashbucket *head2; struct inet_bind_hashbucket *head; int port = inet_sk(sk)->inet_num; struct net *net = sock_net(sk); - struct inet_bind2_bucket *tb2; struct inet_bind_bucket *tb; - bool tb_created = false; u32 remaining, offset; int ret, i, low, high; int l3mdev; @@ -909,7 +755,8 @@ other_parity_scan: * the established check is already unique enough. */ inet_bind_bucket_for_each(tb, &head->chain) { - if (check_bind_bucket_match(tb, net, port, l3mdev)) { + if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev && + tb->port == port) { if (tb->fastreuse >= 0 || tb->fastreuseport >= 0) goto next_port; @@ -927,7 +774,6 @@ other_parity_scan: spin_unlock_bh(&head->lock); return -ENOMEM; } - tb_created = true; tb->fastreuse = -1; tb->fastreuseport = -1; goto ok; @@ -943,17 +789,6 @@ next_port: return -EADDRNOTAVAIL; ok: - /* Find the corresponding tb2 bucket since we need to - * add the socket to the bhash2 table as well - */ - tb2 = inet_bind2_bucket_find(hinfo, net, port, l3mdev, sk, &head2); - if (!tb2) { - tb2 = inet_bind2_bucket_create(hinfo->bind2_bucket_cachep, net, - head2, port, l3mdev, sk); - if (!tb2) - goto error; - } - /* Here we want to add a little bit of randomness to the next source * port that will be chosen. 
We use a max() with a random here so that * on low contention the randomness is maximal and on high contention @@ -963,7 +798,7 @@ ok: WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + 2); /* Head lock still held and bh's disabled */ - inet_bind_hash(sk, tb, tb2, port); + inet_bind_hash(sk, tb, port); if (sk_unhashed(sk)) { inet_sk(sk)->inet_sport = htons(port); inet_ehash_nolisten(sk, (struct sock *)tw, NULL); @@ -975,12 +810,6 @@ ok: inet_twsk_deschedule_put(tw); local_bh_enable(); return 0; - -error: - if (tb_created) - inet_bind_bucket_destroy(hinfo->bind_bucket_cachep, tb); - spin_unlock_bh(&head->lock); - return -ENOMEM; } /* @@ -1026,10 +855,12 @@ void __init inet_hashinfo2_init(struct inet_hashinfo *h, const char *name, init_hashinfo_lhash2(h); /* this one is used for source ports of outgoing connections */ - table_perturb = kmalloc_array(INET_TABLE_PERTURB_SIZE, - sizeof(*table_perturb), GFP_KERNEL); - if (!table_perturb) - panic("TCP: failed to alloc table_perturb"); + table_perturb = alloc_large_system_hash("Table-perturb", + sizeof(*table_perturb), + INET_TABLE_PERTURB_SIZE, + 0, 0, NULL, NULL, + INET_TABLE_PERTURB_SIZE, + INET_TABLE_PERTURB_SIZE); } int inet_hashinfo2_init_mod(struct inet_hashinfo *h) diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 7e474a85deaf..5c58e21f724e 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -524,7 +524,6 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev) int tunnel_hlen; int version; int nhoff; - int thoff; tun_info = skb_tunnel_info(skb); if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) || @@ -558,10 +557,16 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev) (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff)) truncate = true; - thoff = skb_transport_header(skb) - skb_mac_header(skb); - if (skb->protocol == htons(ETH_P_IPV6) && - (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)) - truncate = true; + if (skb->protocol == htons(ETH_P_IPV6)) { + int thoff; + + if (skb_transport_header_was_set(skb)) + thoff = skb_transport_header(skb) - skb_mac_header(skb); + else + thoff = nhoff + sizeof(struct ipv6hdr); + if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff) + truncate = true; + } if (version == 1) { erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)), @@ -629,21 +634,20 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb, } if (dev->header_ops) { - const int pull_len = tunnel->hlen + sizeof(struct iphdr); - if (skb_cow_head(skb, 0)) goto free_skb; tnl_params = (const struct iphdr *)skb->data; - if (pull_len > skb_transport_offset(skb)) - goto free_skb; - /* Pull skb since ip_tunnel_xmit() needs skb->data pointing * to gre header. */ - skb_pull(skb, pull_len); + skb_pull(skb, tunnel->hlen + sizeof(struct iphdr)); skb_reset_mac_header(skb); + + if (skb->ip_summed == CHECKSUM_PARTIAL && + skb_checksum_start(skb) < skb->data) + goto free_skb; } else { if (skb_cow_head(skb, dev->needed_headroom)) goto free_skb; diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 1a43ca73f94d..3c6101def7d6 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -319,12 +319,16 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk, pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n", sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port)); + if (addr->sin_addr.s_addr == htonl(INADDR_ANY)) + return 0; + tb_id = l3mdev_fib_table_by_index(net, sk->sk_bound_dev_if) ? 
: tb_id; chk_addr_ret = inet_addr_type_table(net, addr->sin_addr.s_addr, tb_id); - if (!inet_addr_valid_or_nonlocal(net, inet_sk(sk), - addr->sin_addr.s_addr, - chk_addr_ret)) + if (chk_addr_ret == RTN_MULTICAST || + chk_addr_ret == RTN_BROADCAST || + (chk_addr_ret != RTN_LOCAL && + !inet_can_nonlocal_bind(net, isk))) return -EADDRNOTAVAIL; #if IS_ENABLED(CONFIG_IPV6) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 9984d23a7f3e..028513d3e2a2 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -4604,12 +4604,6 @@ void __init tcp_init(void) SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT, NULL); - tcp_hashinfo.bind2_bucket_cachep = - kmem_cache_create("tcp_bind2_bucket", - sizeof(struct inet_bind2_bucket), 0, - SLAB_HWCACHE_ALIGN | SLAB_PANIC | - SLAB_ACCOUNT, - NULL); /* Size and allocate the main established and bind bucket * hash tables. @@ -4632,9 +4626,8 @@ void __init tcp_init(void) if (inet_ehash_locks_alloc(&tcp_hashinfo)) panic("TCP: failed to alloc ehash_locks"); tcp_hashinfo.bhash = - alloc_large_system_hash("TCP bind bhash tables", - sizeof(struct inet_bind_hashbucket) + - sizeof(struct inet_bind2_hashbucket), + alloc_large_system_hash("TCP bind", + sizeof(struct inet_bind_hashbucket), tcp_hashinfo.ehash_mask + 1, 17, /* one slot per 128 KB of memory */ 0, @@ -4643,12 +4636,9 @@ void __init tcp_init(void) 0, 64 * 1024); tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size; - tcp_hashinfo.bhash2 = - (struct inet_bind2_hashbucket *)(tcp_hashinfo.bhash + tcp_hashinfo.bhash_size); for (i = 0; i < tcp_hashinfo.bhash_size; i++) { spin_lock_init(&tcp_hashinfo.bhash[i].lock); INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain); - INIT_HLIST_HEAD(&tcp_hashinfo.bhash2[i].chain); } diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c index be3947e70fec..0d3f68bb51c0 100644 --- a/net/ipv4/tcp_bpf.c +++ b/net/ipv4/tcp_bpf.c @@ -611,9 +611,6 @@ int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore) return 0; } - if (inet_csk_has_ulp(sk)) - return -EINVAL; - if (sk->sk_family == AF_INET6) { if (tcp_bpf_assert_proto_ops(psock->sk_proto)) return -EINVAL; diff --git a/net/ipv4/xfrm4_protocol.c b/net/ipv4/xfrm4_protocol.c index 2fe5860c21d6..b146ce88c5d0 100644 --- a/net/ipv4/xfrm4_protocol.c +++ b/net/ipv4/xfrm4_protocol.c @@ -304,4 +304,3 @@ void __init xfrm4_protocol_init(void) { xfrm_input_register_afinfo(&xfrm4_input_afinfo); } -EXPORT_SYMBOL(xfrm4_protocol_init); diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 4e37f7c29900..a9051df0625d 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -939,7 +939,6 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, __be16 proto; __u32 mtu; int nhoff; - int thoff; if (!pskb_inet_may_pull(skb)) goto tx_err; @@ -960,10 +959,16 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff)) truncate = true; - thoff = skb_transport_header(skb) - skb_mac_header(skb); - if (skb->protocol == htons(ETH_P_IPV6) && - (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)) - truncate = true; + if (skb->protocol == htons(ETH_P_IPV6)) { + int thoff; + + if (skb_transport_header_was_set(skb)) + thoff = skb_transport_header(skb) - skb_mac_header(skb); + else + thoff = nhoff + sizeof(struct ipv6hdr); + if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff) + truncate = true; + } if (skb_cow_head(skb, dev->needed_headroom ?: t->hlen)) goto tx_err; diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 4081b12a01ff..77e3f5970ce4 100644 --- 
a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1450,7 +1450,7 @@ static int __ip6_append_data(struct sock *sk, struct page_frag *pfrag, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), - void *from, int length, int transhdrlen, + void *from, size_t length, int transhdrlen, unsigned int flags, struct ipcm6_cookie *ipc6) { struct sk_buff *skb, *skb_prev = NULL; @@ -1798,7 +1798,7 @@ error: int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), - void *from, int length, int transhdrlen, + void *from, size_t length, int transhdrlen, struct ipcm6_cookie *ipc6, struct flowi6 *fl6, struct rt6_info *rt, unsigned int flags) { @@ -1995,7 +1995,7 @@ EXPORT_SYMBOL_GPL(ip6_flush_pending_frames); struct sk_buff *ip6_make_skb(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), - void *from, int length, int transhdrlen, + void *from, size_t length, int transhdrlen, struct ipcm6_cookie *ipc6, struct rt6_info *rt, unsigned int flags, struct inet_cork_full *cork) { diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c index 29bc4e7c3046..6de01185cc68 100644 --- a/net/ipv6/seg6_hmac.c +++ b/net/ipv6/seg6_hmac.c @@ -399,7 +399,6 @@ int __init seg6_hmac_init(void) { return seg6_hmac_init_algo(); } -EXPORT_SYMBOL(seg6_hmac_init); int __net_init seg6_hmac_net_init(struct net *net) { diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c index 9fbe243a0e81..98a34287439c 100644 --- a/net/ipv6/seg6_local.c +++ b/net/ipv6/seg6_local.c @@ -218,6 +218,7 @@ seg6_lookup_any_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr, struct flowi6 fl6; int dev_flags = 0; + memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_iif = skb->dev->ifindex; fl6.daddr = nhaddr ? *nhaddr : hdr->daddr; fl6.saddr = hdr->saddr; diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index c6ff8bf9b55f..9dbd801ddb98 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -504,14 +504,15 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) struct ipcm6_cookie ipc6; int addr_len = msg->msg_namelen; int transhdrlen = 4; /* zero session-id */ - int ulen = len + transhdrlen; + int ulen; int err; /* Rough check on arithmetic overflow, * better check is made in ip6_append_data(). 
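The l2tp_ip6_sendmsg() hunk just below tightens this rough check into the canonical overflow-proof form: test len > INT_MAX - transhdrlen before computing len + transhdrlen, rather than testing len > INT_MAX after the sum has already wrapped. In miniature:

#include <limits.h>
#include <stdio.h>

/* Returns -1 (EMSGSIZE-style) if len + hdrlen would not fit in an int. */
static int total_len(size_t len, int hdrlen, int *out)
{
    if (len > (size_t)(INT_MAX - hdrlen))
        return -1;
    *out = (int)len + hdrlen;
    return 0;
}

int main(void)
{
    int ulen;

    printf("%d\n", total_len((size_t)INT_MAX - 2, 4, &ulen)); /* -1 */
    printf("%d\n", total_len(1000, 4, &ulen));                /* 0, ulen=1004 */
    return 0;
}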
*/ - if (len > INT_MAX) + if (len > INT_MAX - transhdrlen) return -EMSGSIZE; + ulen = len + transhdrlen; /* Mirror BSD error message compatibility */ if (msg->msg_flags & MSG_OOB) diff --git a/net/netfilter/nf_dup_netdev.c b/net/netfilter/nf_dup_netdev.c index 7873bd1389c3..a8e2425e43b0 100644 --- a/net/netfilter/nf_dup_netdev.c +++ b/net/netfilter/nf_dup_netdev.c @@ -13,14 +13,31 @@ #include <net/netfilter/nf_tables_offload.h> #include <net/netfilter/nf_dup_netdev.h> -static void nf_do_netdev_egress(struct sk_buff *skb, struct net_device *dev) +#define NF_RECURSION_LIMIT 2 + +static DEFINE_PER_CPU(u8, nf_dup_skb_recursion); + +static void nf_do_netdev_egress(struct sk_buff *skb, struct net_device *dev, + enum nf_dev_hooks hook) { - if (skb_mac_header_was_set(skb)) + if (__this_cpu_read(nf_dup_skb_recursion) > NF_RECURSION_LIMIT) + goto err; + + if (hook == NF_NETDEV_INGRESS && skb_mac_header_was_set(skb)) { + if (skb_cow_head(skb, skb->mac_len)) + goto err; + skb_push(skb, skb->mac_len); + } skb->dev = dev; skb_clear_tstamp(skb); + __this_cpu_inc(nf_dup_skb_recursion); dev_queue_xmit(skb); + __this_cpu_dec(nf_dup_skb_recursion); + return; +err: + kfree_skb(skb); } void nf_fwd_netdev_egress(const struct nft_pktinfo *pkt, int oif) @@ -33,7 +50,7 @@ void nf_fwd_netdev_egress(const struct nft_pktinfo *pkt, int oif) return; } - nf_do_netdev_egress(pkt->skb, dev); + nf_do_netdev_egress(pkt->skb, dev, nft_hook(pkt)); } EXPORT_SYMBOL_GPL(nf_fwd_netdev_egress); @@ -48,7 +65,7 @@ void nf_dup_netdev_egress(const struct nft_pktinfo *pkt, int oif) skb = skb_clone(pkt->skb, GFP_ATOMIC); if (skb) - nf_do_netdev_egress(skb, dev); + nf_do_netdev_egress(skb, dev, nft_hook(pkt)); } EXPORT_SYMBOL_GPL(nf_dup_netdev_egress); diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 746be13438ef..51144fc66889 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -544,6 +544,7 @@ static int nft_trans_flowtable_add(struct nft_ctx *ctx, int msg_type, if (msg_type == NFT_MSG_NEWFLOWTABLE) nft_activate_next(ctx->net, flowtable); + INIT_LIST_HEAD(&nft_trans_flowtable_hooks(trans)); nft_trans_flowtable(trans) = flowtable; nft_trans_commit_list_add_tail(ctx->net, trans); @@ -1914,7 +1915,6 @@ static struct nft_hook *nft_netdev_hook_alloc(struct net *net, goto err_hook_dev; } hook->ops.dev = dev; - hook->inactive = false; return hook; @@ -2166,7 +2166,7 @@ static int nft_basechain_init(struct nft_base_chain *basechain, u8 family, chain->flags |= NFT_CHAIN_BASE | flags; basechain->policy = NF_ACCEPT; if (chain->flags & NFT_CHAIN_HW_OFFLOAD && - nft_chain_offload_priority(basechain) < 0) + !nft_chain_offload_support(basechain)) return -EOPNOTSUPP; flow_block_init(&basechain->flow_block); @@ -7332,7 +7332,7 @@ static void __nft_unregister_flowtable_net_hooks(struct net *net, nf_unregister_net_hook(net, &hook->ops); if (release_netdev) { list_del(&hook->list); - kfree_rcu(hook); + kfree_rcu(hook, rcu); } } } @@ -7433,11 +7433,15 @@ static int nft_flowtable_update(struct nft_ctx *ctx, const struct nlmsghdr *nlh, if (nla[NFTA_FLOWTABLE_FLAGS]) { flags = ntohl(nla_get_be32(nla[NFTA_FLOWTABLE_FLAGS])); - if (flags & ~NFT_FLOWTABLE_MASK) - return -EOPNOTSUPP; + if (flags & ~NFT_FLOWTABLE_MASK) { + err = -EOPNOTSUPP; + goto err_flowtable_update_hook; + } if ((flowtable->data.flags & NFT_FLOWTABLE_HW_OFFLOAD) ^ - (flags & NFT_FLOWTABLE_HW_OFFLOAD)) - return -EOPNOTSUPP; + (flags & NFT_FLOWTABLE_HW_OFFLOAD)) { + err = -EOPNOTSUPP; + goto err_flowtable_update_hook; + } } else { 
flags = flowtable->data.flags; } @@ -7618,6 +7622,7 @@ static int nft_delflowtable_hook(struct nft_ctx *ctx, { const struct nlattr * const *nla = ctx->nla; struct nft_flowtable_hook flowtable_hook; + LIST_HEAD(flowtable_del_list); struct nft_hook *this, *hook; struct nft_trans *trans; int err; @@ -7633,7 +7638,7 @@ static int nft_delflowtable_hook(struct nft_ctx *ctx, err = -ENOENT; goto err_flowtable_del_hook; } - hook->inactive = true; + list_move(&hook->list, &flowtable_del_list); } trans = nft_trans_alloc(ctx, NFT_MSG_DELFLOWTABLE, @@ -7646,6 +7651,7 @@ static int nft_delflowtable_hook(struct nft_ctx *ctx, nft_trans_flowtable(trans) = flowtable; nft_trans_flowtable_update(trans) = true; INIT_LIST_HEAD(&nft_trans_flowtable_hooks(trans)); + list_splice(&flowtable_del_list, &nft_trans_flowtable_hooks(trans)); nft_flowtable_hook_release(&flowtable_hook); nft_trans_commit_list_add_tail(ctx->net, trans); @@ -7653,13 +7659,7 @@ static int nft_delflowtable_hook(struct nft_ctx *ctx, return 0; err_flowtable_del_hook: - list_for_each_entry(this, &flowtable_hook.list, list) { - hook = nft_hook_list_find(&flowtable->hook_list, this); - if (!hook) - break; - - hook->inactive = false; - } + list_splice(&flowtable_del_list, &flowtable->hook_list); nft_flowtable_hook_release(&flowtable_hook); return err; @@ -8329,6 +8329,9 @@ static void nft_commit_release(struct nft_trans *trans) nf_tables_chain_destroy(&trans->ctx); break; case NFT_MSG_DELRULE: + if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) + nft_flow_rule_destroy(nft_trans_flow_rule(trans)); + nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans)); break; case NFT_MSG_DELSET: @@ -8563,17 +8566,6 @@ void nft_chain_del(struct nft_chain *chain) list_del_rcu(&chain->list); } -static void nft_flowtable_hooks_del(struct nft_flowtable *flowtable, - struct list_head *hook_list) -{ - struct nft_hook *hook, *next; - - list_for_each_entry_safe(hook, next, &flowtable->hook_list, list) { - if (hook->inactive) - list_move(&hook->list, hook_list); - } -} - static void nf_tables_module_autoload_cleanup(struct net *net) { struct nftables_pernet *nft_net = nft_pernet(net); @@ -8828,6 +8820,9 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) nf_tables_rule_notify(&trans->ctx, nft_trans_rule(trans), NFT_MSG_NEWRULE); + if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) + nft_flow_rule_destroy(nft_trans_flow_rule(trans)); + nft_trans_destroy(trans); break; case NFT_MSG_DELRULE: @@ -8918,8 +8913,6 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) break; case NFT_MSG_DELFLOWTABLE: if (nft_trans_flowtable_update(trans)) { - nft_flowtable_hooks_del(nft_trans_flowtable(trans), - &nft_trans_flowtable_hooks(trans)); nf_tables_flowtable_notify(&trans->ctx, nft_trans_flowtable(trans), &nft_trans_flowtable_hooks(trans), @@ -9000,7 +8993,6 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) struct nftables_pernet *nft_net = nft_pernet(net); struct nft_trans *trans, *next; struct nft_trans_elem *te; - struct nft_hook *hook; if (action == NFNL_ABORT_VALIDATE && nf_tables_validate(net) < 0) @@ -9131,8 +9123,8 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) break; case NFT_MSG_DELFLOWTABLE: if (nft_trans_flowtable_update(trans)) { - list_for_each_entry(hook, &nft_trans_flowtable(trans)->hook_list, list) - hook->inactive = false; + list_splice(&nft_trans_flowtable_hooks(trans), + &nft_trans_flowtable(trans)->hook_list); } else { trans->ctx.table->use++; 
nft_clear(trans->ctx.net, nft_trans_flowtable(trans)); diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c index 2d36952b1392..910ef881c3b8 100644 --- a/net/netfilter/nf_tables_offload.c +++ b/net/netfilter/nf_tables_offload.c @@ -208,7 +208,7 @@ static int nft_setup_cb_call(enum tc_setup_type type, void *type_data, return 0; } -int nft_chain_offload_priority(struct nft_base_chain *basechain) +static int nft_chain_offload_priority(const struct nft_base_chain *basechain) { if (basechain->ops.priority <= 0 || basechain->ops.priority > USHRT_MAX) @@ -217,6 +217,27 @@ int nft_chain_offload_priority(struct nft_base_chain *basechain) return 0; } +bool nft_chain_offload_support(const struct nft_base_chain *basechain) +{ + struct net_device *dev; + struct nft_hook *hook; + + if (nft_chain_offload_priority(basechain) < 0) + return false; + + list_for_each_entry(hook, &basechain->hook_list, list) { + if (hook->ops.pf != NFPROTO_NETDEV || + hook->ops.hooknum != NF_NETDEV_INGRESS) + return false; + + dev = hook->ops.dev; + if (!dev->netdev_ops->ndo_setup_tc && !flow_indr_dev_exists()) + return false; + } + + return true; +} + static void nft_flow_cls_offload_setup(struct flow_cls_offload *cls_flow, const struct nft_base_chain *basechain, const struct nft_rule *rule, diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c index af15102bc696..f466af4f8531 100644 --- a/net/netfilter/nfnetlink_cttimeout.c +++ b/net/netfilter/nfnetlink_cttimeout.c @@ -614,7 +614,7 @@ static void __net_exit cttimeout_net_exit(struct net *net) nf_ct_untimeout(net, NULL); - list_for_each_entry_safe(cur, tmp, &pernet->nfct_timeout_freelist, head) { + list_for_each_entry_safe(cur, tmp, &pernet->nfct_timeout_freelist, free_head) { list_del(&cur->free_head); if (refcount_dec_and_test(&cur->refcnt)) diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c index ac4859241e17..55d2d49c3425 100644 --- a/net/netfilter/nft_meta.c +++ b/net/netfilter/nft_meta.c @@ -14,6 +14,7 @@ #include <linux/in.h> #include <linux/ip.h> #include <linux/ipv6.h> +#include <linux/random.h> #include <linux/smp.h> #include <linux/static_key.h> #include <net/dst.h> @@ -32,8 +33,6 @@ #define NFT_META_SECS_PER_DAY 86400 #define NFT_META_DAYS_PER_WEEK 7 -static DEFINE_PER_CPU(struct rnd_state, nft_prandom_state); - static u8 nft_meta_weekday(void) { time64_t secs = ktime_get_real_seconds(); @@ -271,13 +270,6 @@ static bool nft_meta_get_eval_ifname(enum nft_meta_keys key, u32 *dest, return true; } -static noinline u32 nft_prandom_u32(void) -{ - struct rnd_state *state = this_cpu_ptr(&nft_prandom_state); - - return prandom_u32_state(state); -} - #ifdef CONFIG_IP_ROUTE_CLASSID static noinline bool nft_meta_get_eval_rtclassid(const struct sk_buff *skb, u32 *dest) @@ -389,7 +381,7 @@ void nft_meta_get_eval(const struct nft_expr *expr, break; #endif case NFT_META_PRANDOM: - *dest = nft_prandom_u32(); + *dest = get_random_u32(); break; #ifdef CONFIG_XFRM case NFT_META_SECPATH: @@ -518,7 +510,6 @@ int nft_meta_get_init(const struct nft_ctx *ctx, len = IFNAMSIZ; break; case NFT_META_PRANDOM: - prandom_init_once(&nft_prandom_state); len = sizeof(u32); break; #ifdef CONFIG_XFRM diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c index 4394df4bc99b..e5fd6995e4bf 100644 --- a/net/netfilter/nft_nat.c +++ b/net/netfilter/nft_nat.c @@ -335,7 +335,8 @@ static void nft_nat_inet_eval(const struct nft_expr *expr, { const struct nft_nat *priv = nft_expr_priv(expr); - if (priv->family == nft_pf(pkt)) + 
if (priv->family == nft_pf(pkt) || + priv->family == NFPROTO_INET) nft_nat_eval(expr, regs, pkt); } diff --git a/net/netfilter/nft_numgen.c b/net/netfilter/nft_numgen.c index 81b40c663d86..45d3dc9e96f2 100644 --- a/net/netfilter/nft_numgen.c +++ b/net/netfilter/nft_numgen.c @@ -9,12 +9,11 @@ #include <linux/netlink.h> #include <linux/netfilter.h> #include <linux/netfilter/nf_tables.h> +#include <linux/random.h> #include <linux/static_key.h> #include <net/netfilter/nf_tables.h> #include <net/netfilter/nf_tables_core.h> -static DEFINE_PER_CPU(struct rnd_state, nft_numgen_prandom_state); - struct nft_ng_inc { u8 dreg; u32 modulus; @@ -135,12 +134,9 @@ struct nft_ng_random { u32 offset; }; -static u32 nft_ng_random_gen(struct nft_ng_random *priv) +static u32 nft_ng_random_gen(const struct nft_ng_random *priv) { - struct rnd_state *state = this_cpu_ptr(&nft_numgen_prandom_state); - - return reciprocal_scale(prandom_u32_state(state), priv->modulus) + - priv->offset; + return reciprocal_scale(get_random_u32(), priv->modulus) + priv->offset; } static void nft_ng_random_eval(const struct nft_expr *expr, @@ -168,8 +164,6 @@ static int nft_ng_random_init(const struct nft_ctx *ctx, if (priv->offset + priv->modulus - 1 < priv->offset) return -EOVERFLOW; - prandom_init_once(&nft_numgen_prandom_state); - return nft_parse_register_store(ctx, tb[NFTA_NG_DREG], &priv->dreg, NULL, NFT_DATA_VALUE, sizeof(u32)); } diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 1b5d73079dc9..868db4669a29 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -373,6 +373,7 @@ static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh, update_ip_l4_checksum(skb, nh, *addr, new_addr); csum_replace4(&nh->check, *addr, new_addr); skb_clear_hash(skb); + ovs_ct_clear(skb, NULL); *addr = new_addr; } @@ -420,6 +421,7 @@ static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto, update_ipv6_checksum(skb, l4_proto, addr, new_addr); skb_clear_hash(skb); + ovs_ct_clear(skb, NULL); memcpy(addr, new_addr, sizeof(__be32[4])); } @@ -660,6 +662,7 @@ static int set_nsh(struct sk_buff *skb, struct sw_flow_key *flow_key, static void set_tp_port(struct sk_buff *skb, __be16 *port, __be16 new_port, __sum16 *check) { + ovs_ct_clear(skb, NULL); inet_proto_csum_replace2(check, skb, *port, new_port, false); *port = new_port; } @@ -699,6 +702,7 @@ static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key, uh->dest = dst; flow_key->tp.src = src; flow_key->tp.dst = dst; + ovs_ct_clear(skb, NULL); } skb_clear_hash(skb); @@ -761,6 +765,8 @@ static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key, sh->checksum = old_csum ^ old_correct_csum ^ new_csum; skb_clear_hash(skb); + ovs_ct_clear(skb, NULL); + flow_key->tp.src = sh->source; flow_key->tp.dst = sh->dest; diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 4a947c13c813..4e70df91d0f2 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -1342,7 +1342,9 @@ int ovs_ct_clear(struct sk_buff *skb, struct sw_flow_key *key) nf_ct_put(ct); nf_ct_set(skb, NULL, IP_CT_UNTRACKED); - ovs_ct_fill_key(skb, key, false); + + if (key) + ovs_ct_fill_key(skb, key, false); return 0; } diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 372bf54a0ca9..e20d1a973417 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -407,7 +407,7 @@ static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key) if (flags & IP6_FH_F_FRAG) { if (frag_off) { key->ip.frag = 
OVS_FRAG_TYPE_LATER; - key->ip.proto = nexthdr; + key->ip.proto = NEXTHDR_FRAGMENT; return 0; } key->ip.frag = OVS_FRAG_TYPE_FIRST; diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index ed4ccef5d6a8..5449ed114e40 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -1146,9 +1146,9 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb) struct tc_netem_rate rate; struct tc_netem_slot slot; - qopt.latency = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->latency), + qopt.latency = min_t(psched_time_t, PSCHED_NS2TICKS(q->latency), UINT_MAX); - qopt.jitter = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->jitter), + qopt.jitter = min_t(psched_time_t, PSCHED_NS2TICKS(q->jitter), UINT_MAX); qopt.limit = q->limit; qopt.loss = q->loss; diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index e2c6eca0271b..b6781ada3aa8 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -651,6 +651,7 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args, new->cl_discrtry = clnt->cl_discrtry; new->cl_chatty = clnt->cl_chatty; new->cl_principal = clnt->cl_principal; + new->cl_max_connect = clnt->cl_max_connect; return new; out_err: diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index df194cc07035..f87a2d8f23a7 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -919,7 +919,7 @@ void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p, EXPORT_SYMBOL_GPL(xdr_init_encode); /** - * xdr_commit_encode - Ensure all data is written to buffer + * __xdr_commit_encode - Ensure all data is written to buffer * @xdr: pointer to xdr_stream * * We handle encoding across page boundaries by giving the caller a @@ -931,26 +931,29 @@ EXPORT_SYMBOL_GPL(xdr_init_encode); * required at the end of encoding, or any other time when the xdr_buf * data might be read. */ -inline void xdr_commit_encode(struct xdr_stream *xdr) +void __xdr_commit_encode(struct xdr_stream *xdr) { - int shift = xdr->scratch.iov_len; + size_t shift = xdr->scratch.iov_len; void *page; - if (shift == 0) - return; page = page_address(*xdr->page_ptr); memcpy(xdr->scratch.iov_base, page, shift); memmove(page, page + shift, (void *)xdr->p - page); xdr_reset_scratch_buffer(xdr); } -EXPORT_SYMBOL_GPL(xdr_commit_encode); +EXPORT_SYMBOL_GPL(__xdr_commit_encode); -static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr, - size_t nbytes) +/* + * The buffer space to be reserved crosses the boundary between + * xdr->buf->head and xdr->buf->pages, or between two pages + * in xdr->buf->pages. + */ +static noinline __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr, + size_t nbytes) { - __be32 *p; int space_left; int frag1bytes, frag2bytes; + void *p; if (nbytes > PAGE_SIZE) goto out_overflow; /* Bigger buffers require special handling */ @@ -964,6 +967,7 @@ static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr, xdr->buf->page_len += frag1bytes; xdr->page_ptr++; xdr->iov = NULL; + /* * If the last encode didn't end exactly on a page boundary, the * next one will straddle boundaries. 
Encode into the next @@ -972,14 +976,19 @@ static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr, * space at the end of the previous buffer: */ xdr_set_scratch_buffer(xdr, xdr->p, frag1bytes); - p = page_address(*xdr->page_ptr); + /* - * Note this is where the next encode will start after we've - * shifted this one back: + * xdr->p is where the next encode will start after + * xdr_commit_encode() has shifted this one back: */ - xdr->p = (void *)p + frag2bytes; + p = page_address(*xdr->page_ptr); + xdr->p = p + frag2bytes; space_left = xdr->buf->buflen - xdr->buf->len; - xdr->end = (void *)p + min_t(int, space_left, PAGE_SIZE); + if (space_left - nbytes >= PAGE_SIZE) + xdr->end = p + PAGE_SIZE; + else + xdr->end = p + space_left - frag1bytes; + xdr->buf->page_len += frag2bytes; xdr->buf->len += nbytes; return p; diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c index 5f0155fdefc7..11cf7c646644 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_rw.c +++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c @@ -478,10 +478,10 @@ svc_rdma_build_writes(struct svc_rdma_write_info *info, unsigned int write_len; u64 offset; - seg = &info->wi_chunk->ch_segments[info->wi_seg_no]; - if (!seg) + if (info->wi_seg_no >= info->wi_chunk->ch_segcount) goto out_overflow; + seg = &info->wi_chunk->ch_segments[info->wi_seg_no]; write_len = min(remaining, seg->rs_length - info->wi_seg_off); if (!write_len) goto out_overflow; diff --git a/net/tipc/core.c b/net/tipc/core.c index 3f4542e0f065..434e70eabe08 100644 --- a/net/tipc/core.c +++ b/net/tipc/core.c @@ -109,10 +109,9 @@ static void __net_exit tipc_exit_net(struct net *net) struct tipc_net *tn = tipc_net(net); tipc_detach_loopback(net); + tipc_net_stop(net); /* Make sure the tipc_net_finalize_work() finished */ cancel_work_sync(&tn->work); - tipc_net_stop(net); - tipc_bcast_stop(net); tipc_nametbl_stop(net); tipc_sk_rht_destroy(net); diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index b91ddc110786..2ffede463e4a 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -544,7 +544,7 @@ static int do_tls_getsockopt(struct sock *sk, int optname, rc = do_tls_getsockopt_conf(sk, optval, optlen, optname == TLS_TX); break; - case TLS_TX_ZEROCOPY_SENDFILE: + case TLS_TX_ZEROCOPY_RO: rc = do_tls_getsockopt_tx_zc(sk, optval, optlen); break; default: @@ -731,7 +731,7 @@ static int do_tls_setsockopt(struct sock *sk, int optname, sockptr_t optval, optname == TLS_TX); release_sock(sk); break; - case TLS_TX_ZEROCOPY_SENDFILE: + case TLS_TX_ZEROCOPY_RO: lock_sock(sk); rc = do_tls_setsockopt_tx_zc(sk, optval, optlen); release_sock(sk); @@ -921,6 +921,8 @@ static void tls_update(struct sock *sk, struct proto *p, { struct tls_context *ctx; + WARN_ON_ONCE(sk->sk_prot == p); + ctx = tls_get_ctx(sk); if (likely(ctx)) { ctx->sk_write_space = write_space; @@ -970,7 +972,7 @@ static int tls_get_info(const struct sock *sk, struct sk_buff *skb) goto nla_failure; if (ctx->tx_conf == TLS_HW && ctx->zerocopy_sendfile) { - err = nla_put_flag(skb, TLS_INFO_ZC_SENDFILE); + err = nla_put_flag(skb, TLS_INFO_ZC_RO_TX); if (err) goto nla_failure; } @@ -994,7 +996,7 @@ static size_t tls_get_info_size(const struct sock *sk) nla_total_size(sizeof(u16)) + /* TLS_INFO_CIPHER */ nla_total_size(sizeof(u16)) + /* TLS_INFO_RXCONF */ nla_total_size(sizeof(u16)) + /* TLS_INFO_TXCONF */ - nla_total_size(0) + /* TLS_INFO_ZC_SENDFILE */ + nla_total_size(0) + /* TLS_INFO_ZC_RO_TX */ 0; return size; diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 654dcef7cfb3..2206e6f8902d 
100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -490,7 +490,7 @@ static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other) * -ECONNREFUSED. Otherwise, if we haven't queued any skbs * to other and its full, we will hang waiting for POLLOUT. */ - if (unix_recvq_full(other) && !sock_flag(other, SOCK_DEAD)) + if (unix_recvq_full_lockless(other) && !sock_flag(other, SOCK_DEAD)) return 1; if (connected) diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index e0a4526ab66b..09002387987e 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -373,7 +373,8 @@ u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max_entries) goto out; } - nb_pkts = xskq_cons_peek_desc_batch(xs->tx, pool, max_entries); + max_entries = xskq_cons_nb_entries(xs->tx, max_entries); + nb_pkts = xskq_cons_read_desc_batch(xs->tx, pool, max_entries); if (!nb_pkts) { xs->tx->queue_empty_descs++; goto out; @@ -389,7 +390,7 @@ u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max_entries) if (!nb_pkts) goto out; - xskq_cons_release_n(xs->tx, nb_pkts); + xskq_cons_release_n(xs->tx, max_entries); __xskq_cons_release(xs->tx); xs->sk.sk_write_space(&xs->sk); @@ -537,12 +538,6 @@ static int xsk_generic_xmit(struct sock *sk) goto out; } - skb = xsk_build_skb(xs, &desc); - if (IS_ERR(skb)) { - err = PTR_ERR(skb); - goto out; - } - /* This is the backpressure mechanism for the Tx path. * Reserve space in the completion queue and only proceed * if there is space in it. This avoids having to implement @@ -551,11 +546,19 @@ static int xsk_generic_xmit(struct sock *sk) spin_lock_irqsave(&xs->pool->cq_lock, flags); if (xskq_prod_reserve(xs->pool->cq)) { spin_unlock_irqrestore(&xs->pool->cq_lock, flags); - kfree_skb(skb); goto out; } spin_unlock_irqrestore(&xs->pool->cq_lock, flags); + skb = xsk_build_skb(xs, &desc); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + spin_lock_irqsave(&xs->pool->cq_lock, flags); + xskq_prod_cancel(xs->pool->cq); + spin_unlock_irqrestore(&xs->pool->cq_lock, flags); + goto out; + } + err = __dev_direct_xmit(skb, xs->queue_id); if (err == NETDEV_TX_BUSY) { /* Tell user-space to retry the send */ diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h index a794410989cc..fb20bf7207cf 100644 --- a/net/xdp/xsk_queue.h +++ b/net/xdp/xsk_queue.h @@ -282,14 +282,6 @@ static inline bool xskq_cons_peek_desc(struct xsk_queue *q, return xskq_cons_read_desc(q, desc, pool); } -static inline u32 xskq_cons_peek_desc_batch(struct xsk_queue *q, struct xsk_buff_pool *pool, - u32 max) -{ - u32 entries = xskq_cons_nb_entries(q, max); - - return xskq_cons_read_desc_batch(q, pool, entries); -} - /* To improve performance in the xskq_cons_release functions, only update local state here. * Reflect this to global state when we get new entries from the ring in * xskq_cons_get_entries() and whenever Rx or Tx processing are completed in the NAPI loop. 
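The xsk_generic_xmit() change in the hunk above is an ordering fix: the completion-queue slot is now reserved before the skb is built, and a failed build cancels the reservation instead of leaking it. Below is a minimal standalone sketch of that reserve-first pattern; demo_cq, cq_reserve(), cq_cancel() and the build/xmit callbacks are illustrative stand-ins, not kernel API (the kernel uses xskq_prod_reserve() and xskq_prod_cancel() under xs->pool->cq_lock; the locking is omitted here).

#include <errno.h>

/* Toy completion queue: just a reservation counter with a capacity. */
struct demo_cq {
	unsigned int reserved;
	unsigned int capacity;
};

static int cq_reserve(struct demo_cq *cq)
{
	if (cq->reserved == cq->capacity)
		return -ENOSPC;	/* backpressure: tell the caller to retry */
	cq->reserved++;
	return 0;
}

static void cq_cancel(struct demo_cq *cq)
{
	cq->reserved--;		/* return a slot that will not be used */
}

static int demo_xmit(struct demo_cq *cq,
		     void *(*build)(void), int (*xmit)(void *pkt))
{
	void *pkt;
	int err;

	/* Reserve the completion slot first ... */
	err = cq_reserve(cq);
	if (err)
		return err;

	/* ... and only then spend the effort of building the packet. */
	pkt = build();
	if (!pkt) {
		cq_cancel(cq);	/* roll back so the queue stays balanced */
		return -ENOMEM;
	}
	return xmit(pkt);
}

Reserving first guarantees that every successfully built packet already owns its completion slot, and the only step that can fail after the reservation now has an explicit rollback, which is exactly what the added xskq_prod_cancel() call provides.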
diff --git a/samples/fprobe/fprobe_example.c b/samples/fprobe/fprobe_example.c index 24d3cf109140..01ee6c8c8382 100644 --- a/samples/fprobe/fprobe_example.c +++ b/samples/fprobe/fprobe_example.c @@ -21,6 +21,7 @@ #define BACKTRACE_DEPTH 16 #define MAX_SYMBOL_LEN 4096 struct fprobe sample_probe; +static unsigned long nhit; static char symbol[MAX_SYMBOL_LEN] = "kernel_clone"; module_param_string(symbol, symbol, sizeof(symbol), 0644); @@ -28,6 +29,8 @@ static char nosymbol[MAX_SYMBOL_LEN] = ""; module_param_string(nosymbol, nosymbol, sizeof(nosymbol), 0644); static bool stackdump = true; module_param(stackdump, bool, 0644); +static bool use_trace = false; +module_param(use_trace, bool, 0644); static void show_backtrace(void) { @@ -40,7 +43,15 @@ static void show_backtrace(void) static void sample_entry_handler(struct fprobe *fp, unsigned long ip, struct pt_regs *regs) { - pr_info("Enter <%pS> ip = 0x%p\n", (void *)ip, (void *)ip); + if (use_trace) + /* + * This is just an example, no kernel code should call + * trace_printk() except when actively debugging. + */ + trace_printk("Enter <%pS> ip = 0x%p\n", (void *)ip, (void *)ip); + else + pr_info("Enter <%pS> ip = 0x%p\n", (void *)ip, (void *)ip); + nhit++; if (stackdump) show_backtrace(); } @@ -49,8 +60,17 @@ static void sample_exit_handler(struct fprobe *fp, unsigned long ip, struct pt_r { unsigned long rip = instruction_pointer(regs); - pr_info("Return from <%pS> ip = 0x%p to rip = 0x%p (%pS)\n", - (void *)ip, (void *)ip, (void *)rip, (void *)rip); + if (use_trace) + /* + * This is just an example, no kernel code should call + * trace_printk() except when actively debugging. + */ + trace_printk("Return from <%pS> ip = 0x%p to rip = 0x%p (%pS)\n", + (void *)ip, (void *)ip, (void *)rip, (void *)rip); + else + pr_info("Return from <%pS> ip = 0x%p to rip = 0x%p (%pS)\n", + (void *)ip, (void *)ip, (void *)rip, (void *)rip); + nhit++; if (stackdump) show_backtrace(); } @@ -112,7 +132,8 @@ static void __exit fprobe_exit(void) { unregister_fprobe(&sample_probe); - pr_info("fprobe at %s unregistered\n", symbol); + pr_info("fprobe at %s unregistered. %ld times hit, %ld times missed\n", + symbol, nhit, sample_probe.nmissed); } module_init(fprobe_init) diff --git a/scripts/Makefile.build b/scripts/Makefile.build index 1f01ac65c0cd..cac070aee791 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -251,8 +251,8 @@ $(obj)/%.o: $(src)/%.c $(recordmcount_source) FORCE # To make this rule robust against "Argument list too long" error, # ensure to add $(obj)/ prefix by a shell command. -cmd_mod = echo $(call real-search, $*.o, .o, -objs -y -m) | \ - $(AWK) -v RS='( |\n)' '!x[$$0]++ { print("$(obj)/"$$0) }' > $@ +cmd_mod = printf '%s\n' $(call real-search, $*.o, .o, -objs -y -m) | \ + $(AWK) '!x[$$0]++ { print("$(obj)/"$$0) }' > $@ $(obj)/%.mod: FORCE $(call if_changed,mod) diff --git a/scripts/check-local-export b/scripts/check-local-export index da745e2743b7..6ccc2f467416 100755 --- a/scripts/check-local-export +++ b/scripts/check-local-export @@ -8,11 +8,31 @@ set -e +# catch errors from ${NM} +set -o pipefail + +# Run the last element of a pipeline in the current shell. +# Without this, the while-loop would be executed in a subshell, and +# the changes made to 'symbol_types' and 'export_symbols' would be lost. +shopt -s lastpipe + declare -A symbol_types declare -a export_symbols exit_code=0 +# If there is no symbol in the object, ${NM} (both GNU nm and llvm-nm) shows +# 'no symbols' diagnostic (but exits with 0). 
It is harmless and hidden by +# '2>/dev/null'. However, it suppresses real error messages as well. Add a +# hand-crafted error message here. +# +# TODO: +# Use --quiet instead of 2>/dev/null when we upgrade the minimum version of +# binutils to 2.37, llvm to 13.0.0. +# Then, the following line will be really simple: +# ${NM} --quiet ${1} | + +{ ${NM} ${1} 2>/dev/null || { echo "${0}: ${NM} failed" >&2; false; } } | while read value type name do # Skip the line if the number of fields is less than 3. @@ -37,21 +57,7 @@ do if [[ ${name} == __ksymtab_* ]]; then export_symbols+=(${name#__ksymtab_}) fi - - # If there is no symbol in the object, ${NM} (both GNU nm and llvm-nm) - # shows 'no symbols' diagnostic (but exits with 0). It is harmless and - # hidden by '2>/dev/null'. However, it suppresses real error messages - # as well. Add a hand-crafted error message here. - # - # Use --quiet instead of 2>/dev/null when we upgrade the minimum version - # of binutils to 2.37, llvm to 13.0.0. - # - # Then, the following line will be really simple: - # done < <(${NM} --quiet ${1}) -done < <(${NM} ${1} 2>/dev/null || { echo "${0}: ${NM} failed" >&2; false; } ) - -# Catch error in the process substitution -wait $! +done for name in "${export_symbols[@]}" do diff --git a/scripts/faddr2line b/scripts/faddr2line index 0e6268d59883..94ed98dd899f 100755 --- a/scripts/faddr2line +++ b/scripts/faddr2line @@ -95,17 +95,25 @@ __faddr2line() { local print_warnings=$4 local sym_name=${func_addr%+*} - local offset=${func_addr#*+} - offset=${offset%/*} + local func_offset=${func_addr#*+} + func_offset=${func_offset%/*} local user_size= + local file_type + local is_vmlinux=0 [[ $func_addr =~ "/" ]] && user_size=${func_addr#*/} - if [[ -z $sym_name ]] || [[ -z $offset ]] || [[ $sym_name = $func_addr ]]; then + if [[ -z $sym_name ]] || [[ -z $func_offset ]] || [[ $sym_name = $func_addr ]]; then warn "bad func+offset $func_addr" DONE=1 return fi + # vmlinux uses absolute addresses in the section table rather than + # section offsets. + local file_type=$(${READELF} --file-header $objfile | + ${AWK} '$1 == "Type:" { print $2; exit }') + [[ $file_type = "EXEC" ]] && is_vmlinux=1 + # Go through each of the object's symbols which match the func name. # In rare cases there might be duplicates, in which case we print all # matches. @@ -114,9 +122,11 @@ __faddr2line() { local sym_addr=0x${fields[1]} local sym_elf_size=${fields[2]} local sym_sec=${fields[6]} + local sec_size + local sec_name # Get the section size: - local sec_size=$(${READELF} --section-headers --wide $objfile | + sec_size=$(${READELF} --section-headers --wide $objfile | sed 's/\[ /\[/' | ${AWK} -v sec=$sym_sec '$1 == "[" sec "]" { print "0x" $6; exit }') @@ -126,6 +136,17 @@ __faddr2line() { return fi + # Get the section name: + sec_name=$(${READELF} --section-headers --wide $objfile | + sed 's/\[ /\[/' | + ${AWK} -v sec=$sym_sec '$1 == "[" sec "]" { print $2; exit }') + + if [[ -z $sec_name ]]; then + warn "bad section name: section: $sym_sec" + DONE=1 + return + fi + # Calculate the symbol size. 
# # Unfortunately we can't use the ELF size, because kallsyms @@ -174,10 +195,10 @@ __faddr2line() { sym_size=0x$(printf %x $sym_size) - # Calculate the section address from user-supplied offset: - local addr=$(($sym_addr + $offset)) + # Calculate the address from user-supplied offset: + local addr=$(($sym_addr + $func_offset)) if [[ -z $addr ]] || [[ $addr = 0 ]]; then - warn "bad address: $sym_addr + $offset" + warn "bad address: $sym_addr + $func_offset" DONE=1 return fi @@ -191,9 +212,9 @@ __faddr2line() { fi # Make sure the provided offset is within the symbol's range: - if [[ $offset -gt $sym_size ]]; then + if [[ $func_offset -gt $sym_size ]]; then [[ $print_warnings = 1 ]] && - echo "skipping $sym_name address at $addr due to size mismatch ($offset > $sym_size)" + echo "skipping $sym_name address at $addr due to size mismatch ($func_offset > $sym_size)" continue fi @@ -202,11 +223,13 @@ __faddr2line() { [[ $FIRST = 0 ]] && echo FIRST=0 - echo "$sym_name+$offset/$sym_size:" + echo "$sym_name+$func_offset/$sym_size:" # Pass section address to addr2line and strip absolute paths # from the output: - local output=$(${ADDR2LINE} -fpie $objfile $addr | sed "s; $dir_prefix\(\./\)*; ;") + local args="--functions --pretty-print --inlines --exe=$objfile" + [[ $is_vmlinux = 0 ]] && args="$args --section=$sec_name" + local output=$(${ADDR2LINE} $args $addr | sed "s; $dir_prefix\(\./\)*; ;") [[ -z $output ]] && continue # Default output (non --list): diff --git a/scripts/gdb/linux/config.py b/scripts/gdb/linux/config.py index 90e1565b1967..8843ab3cbadd 100644 --- a/scripts/gdb/linux/config.py +++ b/scripts/gdb/linux/config.py @@ -24,9 +24,9 @@ class LxConfigDump(gdb.Command): filename = arg try: - py_config_ptr = gdb.parse_and_eval("kernel_config_data + 8") - py_config_size = gdb.parse_and_eval( - "sizeof(kernel_config_data) - 1 - 8 * 2") + py_config_ptr = gdb.parse_and_eval("&kernel_config_data") + py_config_ptr_end = gdb.parse_and_eval("&kernel_config_data_end") + py_config_size = py_config_ptr_end - py_config_ptr except gdb.error as e: raise gdb.GdbError("Can't find config, enable CONFIG_IKCONFIG?") diff --git a/scripts/gen_autoksyms.sh b/scripts/gen_autoksyms.sh index faacf7062122..653fadbad302 100755 --- a/scripts/gen_autoksyms.sh +++ b/scripts/gen_autoksyms.sh @@ -56,4 +56,7 @@ EOT # point addresses. sed -e 's/^\.//' | sort -u | +# Ignore __this_module. It's not an exported symbol, and will be resolved +# when the final .ko's are linked. +grep -v '^__this_module$' | sed -e 's/\(.*\)/#define __KSYM_\1 1/' >> "$output_file" diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index 29d5a841e215..620dc8c4c814 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c @@ -980,7 +980,7 @@ static const struct sectioncheck sectioncheck[] = { }, /* Do not export init/exit functions or data */ { - .fromsec = { "__ksymtab*", NULL }, + .fromsec = { "___ksymtab*", NULL }, .bad_tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL }, .mismatch = EXPORT_TO_INIT_EXIT, .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL }, diff --git a/scripts/nsdeps b/scripts/nsdeps index 04c4b96e95ec..f1718cc0d700 100644 --- a/scripts/nsdeps +++ b/scripts/nsdeps @@ -34,9 +34,8 @@ generate_deps() { local mod=${1%.ko:} shift local namespaces="$*" - local mod_source_files="`cat $mod.mod | sed -n 1p \ - | sed -e 's/\.o/\.c/g' \ - | sed "s|[^ ]* *|${src_prefix}&|g"`" + local mod_source_files=$(sed "s|^\(.*\)\.o$|${src_prefix}\1.c|" $mod.mod) + for ns in $namespaces; do echo "Adding namespace $ns to module $mod.ko." 
generate_deps_for_ns $ns "$mod_source_files" diff --git a/scripts/sign-file.c b/scripts/sign-file.c index fbd34b8e8f57..7434e9ea926e 100644 --- a/scripts/sign-file.c +++ b/scripts/sign-file.c @@ -30,6 +30,13 @@ #include <openssl/engine.h> /* + * OpenSSL 3.0 deprecates OpenSSL's ENGINE API. + * + * Remove this if/when that API is no longer used + */ +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" + +/* * Use CMS if we have openssl-1.0.0 or newer available - otherwise we have to * assume that it's not available and its header file is missing and that we * should use PKCS#7 instead. Switching to the older PKCS#7 format restricts diff --git a/security/keys/trusted-keys/trusted_tpm2.c b/security/keys/trusted-keys/trusted_tpm2.c index 0165da386289..2b2c8eb258d5 100644 --- a/security/keys/trusted-keys/trusted_tpm2.c +++ b/security/keys/trusted-keys/trusted_tpm2.c @@ -283,8 +283,8 @@ int tpm2_seal_trusted(struct tpm_chip *chip, /* key properties */ flags = 0; flags |= options->policydigest_len ? 0 : TPM2_OA_USER_WITH_AUTH; - flags |= payload->migratable ? (TPM2_OA_FIXED_TPM | - TPM2_OA_FIXED_PARENT) : 0; + flags |= payload->migratable ? 0 : (TPM2_OA_FIXED_TPM | + TPM2_OA_FIXED_PARENT); tpm_buf_append_u32(&buf, flags); /* policy */ diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index beceb89f68d9..1bbd53321d13 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -2600,8 +2600,9 @@ static int selinux_sb_eat_lsm_opts(char *options, void **mnt_opts) } } rc = selinux_add_opt(token, arg, mnt_opts); + kfree(arg); + arg = NULL; if (unlikely(rc)) { - kfree(arg); goto free_opt; } } else { @@ -2792,17 +2793,13 @@ static int selinux_fs_context_parse_param(struct fs_context *fc, struct fs_parameter *param) { struct fs_parse_result result; - int opt, rc; + int opt; opt = fs_parse(fc, selinux_fs_parameters, param, &result); if (opt < 0) return opt; - rc = selinux_add_opt(opt, param->string, &fc->security); - if (!rc) - param->string = NULL; - - return rc; + return selinux_add_opt(opt, param->string, &fc->security); } /* inode security operations */ diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c index 15dc7160ba34..8cfdaee77905 100644 --- a/sound/core/memalloc.c +++ b/sound/core/memalloc.c @@ -431,33 +431,17 @@ static const struct snd_malloc_ops snd_dma_iram_ops = { */ static void *snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size) { - void *p; - - p = dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP); -#ifdef CONFIG_X86 - if (p && dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC) - set_memory_wc((unsigned long)p, PAGE_ALIGN(size) >> PAGE_SHIFT); -#endif - return p; + return dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP); } static void snd_dma_dev_free(struct snd_dma_buffer *dmab) { -#ifdef CONFIG_X86 - if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC) - set_memory_wb((unsigned long)dmab->area, - PAGE_ALIGN(dmab->bytes) >> PAGE_SHIFT); -#endif dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr); } static int snd_dma_dev_mmap(struct snd_dma_buffer *dmab, struct vm_area_struct *area) { -#ifdef CONFIG_X86 - if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC) - area->vm_page_prot = pgprot_writecombine(area->vm_page_prot); -#endif return dma_mmap_coherent(dmab->dev.dev, area, dmab->area, dmab->addr, dmab->bytes); } @@ -471,10 +455,6 @@ static const struct snd_malloc_ops snd_dma_dev_ops = { /* * Write-combined pages */ -#ifdef CONFIG_X86 -/* On x86, share the same ops as the standard dev ops */ -#define
snd_dma_wc_ops snd_dma_dev_ops -#else /* CONFIG_X86 */ static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size) { return dma_alloc_wc(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP); @@ -497,7 +477,6 @@ static const struct snd_malloc_ops snd_dma_wc_ops = { .free = snd_dma_wc_free, .mmap = snd_dma_wc_mmap, }; -#endif /* CONFIG_X86 */ #ifdef CONFIG_SND_DMA_SGBUF static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size); diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c index 3e9e9ac804f6..b7e5032b61c9 100644 --- a/sound/hda/hdac_device.c +++ b/sound/hda/hdac_device.c @@ -660,6 +660,7 @@ static const struct hda_vendor_id hda_vendor_ids[] = { { 0x14f1, "Conexant" }, { 0x17e8, "Chrontel" }, { 0x1854, "LG" }, + { 0x19e5, "Huawei" }, { 0x1aec, "Wolfson Microelectronics" }, { 0x1af4, "QEMU" }, { 0x434d, "C-Media" }, diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c index 3f35972e1cf7..161a9711cd63 100644 --- a/sound/hda/hdac_i915.c +++ b/sound/hda/hdac_i915.c @@ -119,21 +119,18 @@ static int i915_component_master_match(struct device *dev, int subcomponent, /* check whether Intel graphics is present and reachable */ static int i915_gfx_present(struct pci_dev *hdac_pci) { - unsigned int class = PCI_BASE_CLASS_DISPLAY << 16; struct pci_dev *display_dev = NULL; - bool match = false; - do { - display_dev = pci_get_class(class, display_dev); - - if (display_dev && display_dev->vendor == PCI_VENDOR_ID_INTEL && + for_each_pci_dev(display_dev) { + if (display_dev->vendor == PCI_VENDOR_ID_INTEL && + (display_dev->class >> 16) == PCI_BASE_CLASS_DISPLAY && connectivity_check(display_dev, hdac_pci)) { pci_dev_put(display_dev); - match = true; + return true; } - } while (!match && display_dev); + } - return match; + return false; } /** diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c index a8fe01764b25..ec9cbb219bc1 100644 --- a/sound/hda/intel-dsp-config.c +++ b/sound/hda/intel-dsp-config.c @@ -196,6 +196,12 @@ static const struct config_entry config_table[] = { DMI_MATCH(DMI_SYS_VENDOR, "Google"), } }, + { + .ident = "UP-WHL", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "AAEON"), + } + }, {} } }, @@ -358,6 +364,12 @@ static const struct config_entry config_table[] = { DMI_MATCH(DMI_SYS_VENDOR, "Google"), } }, + { + .ident = "UPX-TGL", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "AAEON"), + } + }, {} } }, diff --git a/sound/hda/intel-nhlt.c b/sound/hda/intel-nhlt.c index 4063da378283..9db5ccd9aa2d 100644 --- a/sound/hda/intel-nhlt.c +++ b/sound/hda/intel-nhlt.c @@ -55,8 +55,8 @@ int intel_nhlt_get_dmic_geo(struct device *dev, struct nhlt_acpi_table *nhlt) /* find max number of channels based on format_configuration */ if (fmt_configs->fmt_count) { - dev_dbg(dev, "%s: found %d format definitions\n", - __func__, fmt_configs->fmt_count); + dev_dbg(dev, "found %d format definitions\n", + fmt_configs->fmt_count); for (i = 0; i < fmt_configs->fmt_count; i++) { struct wav_fmt_ext *fmt_ext; @@ -66,9 +66,9 @@ int intel_nhlt_get_dmic_geo(struct device *dev, struct nhlt_acpi_table *nhlt) if (fmt_ext->fmt.channels > max_ch) max_ch = fmt_ext->fmt.channels; } - dev_dbg(dev, "%s: max channels found %d\n", __func__, max_ch); + dev_dbg(dev, "max channels found %d\n", max_ch); } else { - dev_dbg(dev, "%s: No format information found\n", __func__); + dev_dbg(dev, "No format information found\n"); } if (cfg->device_config.config_type != NHLT_CONFIG_TYPE_MIC_ARRAY) { @@ -95,17 +95,16 @@ int intel_nhlt_get_dmic_geo(struct device *dev, struct 
nhlt_acpi_table *nhlt) } if (dmic_geo > 0) { - dev_dbg(dev, "%s: Array with %d dmics\n", __func__, dmic_geo); + dev_dbg(dev, "Array with %d dmics\n", dmic_geo); } if (max_ch > dmic_geo) { - dev_dbg(dev, "%s: max channels %d exceed dmic number %d\n", - __func__, max_ch, dmic_geo); + dev_dbg(dev, "max channels %d exceed dmic number %d\n", + max_ch, dmic_geo); } } } - dev_dbg(dev, "%s: dmic number %d max_ch %d\n", - __func__, dmic_geo, max_ch); + dev_dbg(dev, "dmic number %d max_ch %d\n", dmic_geo, max_ch); return dmic_geo; } diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c index cd1db943b7e0..7c6b1fe8dfcc 100644 --- a/sound/pci/hda/hda_auto_parser.c +++ b/sound/pci/hda/hda_auto_parser.c @@ -819,7 +819,7 @@ static void set_pin_targets(struct hda_codec *codec, snd_hda_set_pin_ctl_cache(codec, cfg->nid, cfg->val); } -static void apply_fixup(struct hda_codec *codec, int id, int action, int depth) +void __snd_hda_apply_fixup(struct hda_codec *codec, int id, int action, int depth) { const char *modelname = codec->fixup_name; @@ -829,7 +829,7 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth) if (++depth > 10) break; if (fix->chained_before) - apply_fixup(codec, fix->chain_id, action, depth + 1); + __snd_hda_apply_fixup(codec, fix->chain_id, action, depth + 1); switch (fix->type) { case HDA_FIXUP_PINS: @@ -870,6 +870,7 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth) id = fix->chain_id; } } +EXPORT_SYMBOL_GPL(__snd_hda_apply_fixup); /** * snd_hda_apply_fixup - Apply the fixup chain with the given action @@ -879,7 +880,7 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth) void snd_hda_apply_fixup(struct hda_codec *codec, int action) { if (codec->fixup_list) - apply_fixup(codec, codec->fixup_id, action, 0); + __snd_hda_apply_fixup(codec, codec->fixup_id, action, 0); } EXPORT_SYMBOL_GPL(snd_hda_apply_fixup); diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 0a83eb6b88b1..a77165bd92a9 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -2525,6 +2525,9 @@ static const struct pci_device_id azx_ids[] = { .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, { PCI_DEVICE(0x8086, 0x51cf), .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, + /* Meteorlake-P */ + { PCI_DEVICE(0x8086, 0x7e28), + .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, /* Broxton-P(Apollolake) */ { PCI_DEVICE(0x8086, 0x5a98), .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_BROXTON }, diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h index aca592651870..682dca2057db 100644 --- a/sound/pci/hda/hda_local.h +++ b/sound/pci/hda/hda_local.h @@ -348,6 +348,7 @@ void snd_hda_apply_verbs(struct hda_codec *codec); void snd_hda_apply_pincfgs(struct hda_codec *codec, const struct hda_pintbl *cfg); void snd_hda_apply_fixup(struct hda_codec *codec, int action); +void __snd_hda_apply_fixup(struct hda_codec *codec, int id, int action, int depth); void snd_hda_pick_fixup(struct hda_codec *codec, const struct hda_model_fixup *models, const struct snd_pci_quirk *quirk, diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index aa360a0af284..3e541a4c0423 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -1052,6 +1052,13 @@ static int patch_conexant_auto(struct hda_codec *codec) snd_hda_pick_fixup(codec, cxt5051_fixup_models, cxt5051_fixups, cxt_fixups); break; + case 0x14f15098: + 
codec->pin_amp_workaround = 1; + spec->gen.mixer_nid = 0x22; + spec->gen.add_stereo_mix_input = HDA_HINT_STEREO_MIX_AUTO; + snd_hda_pick_fixup(codec, cxt5066_fixup_models, + cxt5066_fixups, cxt_fixups); + break; case 0x14f150f2: codec->power_save_node = 1; fallthrough; @@ -1072,11 +1079,11 @@ static int patch_conexant_auto(struct hda_codec *codec) if (err < 0) goto error; - err = snd_hda_gen_parse_auto_config(codec, &spec->gen.autocfg); + err = cx_auto_parse_beep(codec); if (err < 0) goto error; - err = cx_auto_parse_beep(codec); + err = snd_hda_gen_parse_auto_config(codec, &spec->gen.autocfg); if (err < 0) goto error; diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 31fe41795571..6c209cd26c0c 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -4554,6 +4554,7 @@ HDA_CODEC_ENTRY(0x8086281a, "Jasperlake HDMI", patch_i915_icl_hdmi), HDA_CODEC_ENTRY(0x8086281b, "Elkhartlake HDMI", patch_i915_icl_hdmi), HDA_CODEC_ENTRY(0x8086281c, "Alderlake-P HDMI", patch_i915_adlp_hdmi), HDA_CODEC_ENTRY(0x8086281f, "Raptorlake-P HDMI", patch_i915_adlp_hdmi), +HDA_CODEC_ENTRY(0x8086281d, "Meteorlake HDMI", patch_i915_adlp_hdmi), HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi), HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI", patch_i915_byt_hdmi), HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI", patch_i915_byt_hdmi), diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index f3ad454b3fbf..cee69fa7e246 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -443,6 +443,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) case 0x10ec0245: case 0x10ec0255: case 0x10ec0256: + case 0x19e58326: case 0x10ec0257: case 0x10ec0282: case 0x10ec0283: @@ -580,6 +581,7 @@ static void alc_shutup_pins(struct hda_codec *codec) switch (codec->core.vendor_id) { case 0x10ec0236: case 0x10ec0256: + case 0x19e58326: case 0x10ec0283: case 0x10ec0286: case 0x10ec0288: @@ -2632,6 +2634,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK(0x1558, 0x67f1, "Clevo PC70H[PRS]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), + SND_PCI_QUIRK(0x1558, 0x67f5, "Clevo PD70PN[NRT]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK(0x1558, 0x70d1, "Clevo PC70[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK(0x1558, 0x7714, "Clevo X170SM", ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK(0x1558, 0x7715, "Clevo X170KM-G", ALC1220_FIXUP_CLEVO_PB51ED), @@ -3247,6 +3250,7 @@ static void alc_disable_headset_jack_key(struct hda_codec *codec) case 0x10ec0230: case 0x10ec0236: case 0x10ec0256: + case 0x19e58326: alc_write_coef_idx(codec, 0x48, 0x0); alc_update_coef_idx(codec, 0x49, 0x0045, 0x0); break; @@ -3275,6 +3279,7 @@ static void alc_enable_headset_jack_key(struct hda_codec *codec) case 0x10ec0230: case 0x10ec0236: case 0x10ec0256: + case 0x19e58326: alc_write_coef_idx(codec, 0x48, 0xd011); alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045); break; @@ -4910,6 +4915,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec) case 0x10ec0230: case 0x10ec0236: case 0x10ec0256: + case 0x19e58326: alc_process_coef_fw(codec, coef0256); break; case 0x10ec0234: @@ -5025,6 +5031,7 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin, case 0x10ec0230: case 0x10ec0236: case 0x10ec0256: + case 0x19e58326: 
alc_write_coef_idx(codec, 0x45, 0xc489); snd_hda_set_pin_ctl_cache(codec, hp_pin, 0); alc_process_coef_fw(codec, coef0256); @@ -5175,6 +5182,7 @@ static void alc_headset_mode_default(struct hda_codec *codec) case 0x10ec0230: case 0x10ec0236: case 0x10ec0256: + case 0x19e58326: alc_write_coef_idx(codec, 0x1b, 0x0e4b); alc_write_coef_idx(codec, 0x45, 0xc089); msleep(50); @@ -5274,6 +5282,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec) case 0x10ec0230: case 0x10ec0236: case 0x10ec0256: + case 0x19e58326: alc_process_coef_fw(codec, coef0256); break; case 0x10ec0234: @@ -5388,6 +5397,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec) case 0x10ec0230: case 0x10ec0236: case 0x10ec0256: + case 0x19e58326: alc_process_coef_fw(codec, coef0256); break; case 0x10ec0234: @@ -5489,6 +5499,7 @@ static void alc_determine_headset_type(struct hda_codec *codec) case 0x10ec0230: case 0x10ec0236: case 0x10ec0256: + case 0x19e58326: alc_write_coef_idx(codec, 0x1b, 0x0e4b); alc_write_coef_idx(codec, 0x06, 0x6104); alc_write_coefex_idx(codec, 0x57, 0x3, 0x09a3); @@ -5783,6 +5794,7 @@ static void alc255_set_default_jack_type(struct hda_codec *codec) case 0x10ec0230: case 0x10ec0236: case 0x10ec0256: + case 0x19e58326: alc_process_coef_fw(codec, alc256fw); break; } @@ -6385,6 +6397,7 @@ static void alc_combo_jack_hp_jd_restart(struct hda_codec *codec) case 0x10ec0236: case 0x10ec0255: case 0x10ec0256: + case 0x19e58326: alc_update_coef_idx(codec, 0x1b, 0x8000, 1 << 15); /* Reset HP JD */ alc_update_coef_idx(codec, 0x1b, 0x8000, 0 << 15); break; @@ -6992,6 +7005,7 @@ enum { ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS, ALC287_FIXUP_LEGION_15IMHG05_AUTOMUTE, ALC287_FIXUP_YOGA7_14ITL_SPEAKERS, + ALC298_FIXUP_LENOVO_C940_DUET7, ALC287_FIXUP_13S_GEN2_SPEAKERS, ALC256_FIXUP_SET_COEF_DEFAULTS, ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE, @@ -7010,6 +7024,23 @@ enum { ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE, }; +/* A special fixup for Lenovo C940 and Yoga Duet 7; + * both have the very same PCI SSID, and we need to apply different fixups + * depending on the codec ID + */ +static void alc298_fixup_lenovo_c940_duet7(struct hda_codec *codec, + const struct hda_fixup *fix, + int action) +{ + int id; + + if (codec->core.vendor_id == 0x10ec0298) + id = ALC298_FIXUP_LENOVO_SPK_VOLUME; /* C940 */ + else + id = ALC287_FIXUP_YOGA7_14ITL_SPEAKERS; /* Duet 7 */ + __snd_hda_apply_fixup(codec, id, action, 0); +} + static const struct hda_fixup alc269_fixups[] = { [ALC269_FIXUP_GPIO2] = { .type = HDA_FIXUP_FUNC, @@ -8709,6 +8740,10 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC269_FIXUP_HEADSET_MODE, }, + [ALC298_FIXUP_LENOVO_C940_DUET7] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc298_fixup_lenovo_c940_duet7, + }, [ALC287_FIXUP_13S_GEN2_SPEAKERS] = { .type = HDA_FIXUP_VERBS, .v.verbs = (const struct hda_verb[]) { @@ -9010,6 +9045,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x8783, "HP ZBook Fury 15 G7 Mobile Workstation", ALC285_FIXUP_HP_GPIO_AMP_INIT), + SND_PCI_QUIRK(0x103c, 0x8787, "HP OMEN 15", ALC285_FIXUP_HP_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x8788, "HP OMEN 15", ALC285_FIXUP_HP_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x87c8, "HP", ALC287_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x87e5, "HP ProBook 440 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED), @@ -9059,6 +9095,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x89c3, "Zbook Studio G9", 
ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x89c6, "Zbook Fury 17 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x89ca, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), + SND_PCI_QUIRK(0x103c, 0x8a78, "HP Dev One", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300), SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), @@ -9174,6 +9211,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1558, 0x70f3, "Clevo NH77DPQ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x70f4, "Clevo NH77EPY", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x70f6, "Clevo NH77DPQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x7716, "Clevo NS50PU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x8228, "Clevo NR40BU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x8520, "Clevo NH50D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x8521, "Clevo NH77D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), @@ -9258,8 +9296,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x3176, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x17aa, 0x31af, "ThinkCentre Station", ALC623_FIXUP_LENOVO_THINKSTATION_P340), + SND_PCI_QUIRK(0x17aa, 0x3802, "Lenovo Yoga DuetITL 2021", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS), SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS), - SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME), + SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940 / Yoga Duet 7", ALC298_FIXUP_LENOVO_C940_DUET7), SND_PCI_QUIRK(0x17aa, 0x3819, "Lenovo 13s Gen2 ITL", ALC287_FIXUP_13S_GEN2_SPEAKERS), SND_PCI_QUIRK(0x17aa, 0x3820, "Yoga Duet 7 13ITL6", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS), SND_PCI_QUIRK(0x17aa, 0x3824, "Legion Y9000X 2020", ALC285_FIXUP_LEGION_Y9000X_SPEAKERS), @@ -10095,6 +10134,7 @@ static int patch_alc269(struct hda_codec *codec) case 0x10ec0230: case 0x10ec0236: case 0x10ec0256: + case 0x19e58326: spec->codec_variant = ALC269_TYPE_ALC256; spec->shutup = alc256_shutup; spec->init_hook = alc256_init; @@ -10722,6 +10762,7 @@ enum { ALC668_FIXUP_MIC_DET_COEF, ALC897_FIXUP_LENOVO_HEADSET_MIC, ALC897_FIXUP_HEADSET_MIC_PIN, + ALC897_FIXUP_HP_HSMIC_VERB, }; static const struct hda_fixup alc662_fixups[] = { @@ -11141,6 +11182,13 @@ static const struct hda_fixup alc662_fixups[] = { .chained = true, .chain_id = ALC897_FIXUP_LENOVO_HEADSET_MIC }, + [ALC897_FIXUP_HP_HSMIC_VERB] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */ + { } + }, + }, }; static const struct snd_pci_quirk alc662_fixup_tbl[] = { @@ -11166,6 +11214,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800), + SND_PCI_QUIRK(0x103c, 0x8719, "HP", ALC897_FIXUP_HP_HSMIC_VERB), SND_PCI_QUIRK(0x103c, 0x873e, "HP", ALC671_FIXUP_HP_HEADSET_MIC2), SND_PCI_QUIRK(0x103c, 0x885f, "HP 288 Pro G8", ALC671_FIXUP_HP_HEADSET_MIC2), SND_PCI_QUIRK(0x1043, 0x1080, "Asus 
UX501VW", ALC668_FIXUP_HEADSET_MODE), @@ -11545,6 +11594,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = { HDA_CODEC_ENTRY(0x10ec0b00, "ALCS1200A", patch_alc882), HDA_CODEC_ENTRY(0x10ec1168, "ALC1220", patch_alc882), HDA_CODEC_ENTRY(0x10ec1220, "ALC1220", patch_alc882), + HDA_CODEC_ENTRY(0x19e58326, "HW8326", patch_alc269), {} /* terminator */ }; MODULE_DEVICE_TABLE(hdaudio, snd_hda_id_realtek); diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c index a05304f340df..aea7fae2ca4b 100644 --- a/sound/pci/hda/patch_via.c +++ b/sound/pci/hda/patch_via.c @@ -518,11 +518,11 @@ static int via_parse_auto_config(struct hda_codec *codec) if (err < 0) return err; - err = snd_hda_gen_parse_auto_config(codec, &spec->gen.autocfg); + err = auto_parse_beep(codec); if (err < 0) return err; - err = auto_parse_beep(codec); + err = snd_hda_gen_parse_auto_config(codec, &spec->gen.autocfg); if (err < 0) return err; diff --git a/sound/soc/codecs/cs35l36.c b/sound/soc/codecs/cs35l36.c index 920190daa4d1..dfe85dc2cd20 100644 --- a/sound/soc/codecs/cs35l36.c +++ b/sound/soc/codecs/cs35l36.c @@ -444,7 +444,8 @@ static bool cs35l36_volatile_reg(struct device *dev, unsigned int reg) } } -static DECLARE_TLV_DB_SCALE(dig_vol_tlv, -10200, 25, 0); +static const DECLARE_TLV_DB_RANGE(dig_vol_tlv, 0, 912, + TLV_DB_MINMAX_ITEM(-10200, 1200)); static DECLARE_TLV_DB_SCALE(amp_gain_tlv, 0, 1, 1); static const char * const cs35l36_pcm_sftramp_text[] = { diff --git a/sound/soc/codecs/cs42l51.c b/sound/soc/codecs/cs42l51.c index aff618513c75..0e933181b5db 100644 --- a/sound/soc/codecs/cs42l51.c +++ b/sound/soc/codecs/cs42l51.c @@ -143,7 +143,7 @@ static const struct snd_kcontrol_new cs42l51_snd_controls[] = { 0, 0xA0, 96, adc_att_tlv), SOC_DOUBLE_R_SX_TLV("PGA Volume", CS42L51_ALC_PGA_CTL, CS42L51_ALC_PGB_CTL, - 0, 0x1A, 30, pga_tlv), + 0, 0x19, 30, pga_tlv), SOC_SINGLE("Playback Deemphasis Switch", CS42L51_DAC_CTL, 3, 1, 0), SOC_SINGLE("Auto-Mute Switch", CS42L51_DAC_CTL, 2, 1, 0), SOC_SINGLE("Soft Ramp Switch", CS42L51_DAC_CTL, 1, 1, 0), diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c index 9b182b585be4..10e696406a71 100644 --- a/sound/soc/codecs/cs42l52.c +++ b/sound/soc/codecs/cs42l52.c @@ -137,7 +137,9 @@ static DECLARE_TLV_DB_SCALE(mic_tlv, 1600, 100, 0); static DECLARE_TLV_DB_SCALE(pga_tlv, -600, 50, 0); -static DECLARE_TLV_DB_SCALE(mix_tlv, -50, 50, 0); +static DECLARE_TLV_DB_SCALE(pass_tlv, -6000, 50, 0); + +static DECLARE_TLV_DB_SCALE(mix_tlv, -5150, 50, 0); static DECLARE_TLV_DB_SCALE(beep_tlv, -56, 200, 0); @@ -351,7 +353,7 @@ static const struct snd_kcontrol_new cs42l52_snd_controls[] = { CS42L52_SPKB_VOL, 0, 0x40, 0xC0, hl_tlv), SOC_DOUBLE_R_SX_TLV("Bypass Volume", CS42L52_PASSTHRUA_VOL, - CS42L52_PASSTHRUB_VOL, 0, 0x88, 0x90, pga_tlv), + CS42L52_PASSTHRUB_VOL, 0, 0x88, 0x90, pass_tlv), SOC_DOUBLE("Bypass Mute", CS42L52_MISC_CTL, 4, 5, 1, 0), @@ -364,7 +366,7 @@ static const struct snd_kcontrol_new cs42l52_snd_controls[] = { CS42L52_ADCB_VOL, 0, 0xA0, 0x78, ipd_tlv), SOC_DOUBLE_R_SX_TLV("ADC Mixer Volume", CS42L52_ADCA_MIXER_VOL, CS42L52_ADCB_MIXER_VOL, - 0, 0x19, 0x7F, ipd_tlv), + 0, 0x19, 0x7F, mix_tlv), SOC_DOUBLE("ADC Switch", CS42L52_ADC_MISC_CTL, 0, 1, 1, 0), diff --git a/sound/soc/codecs/cs42l56.c b/sound/soc/codecs/cs42l56.c index dc23007336c5..510c94265b1f 100644 --- a/sound/soc/codecs/cs42l56.c +++ b/sound/soc/codecs/cs42l56.c @@ -391,9 +391,9 @@ static const struct snd_kcontrol_new cs42l56_snd_controls[] = { SOC_DOUBLE("ADC Boost Switch", 
CS42L56_GAIN_BIAS_CTL, 3, 2, 1, 1), SOC_DOUBLE_R_SX_TLV("Headphone Volume", CS42L56_HPA_VOLUME, - CS42L56_HPB_VOLUME, 0, 0x84, 0x48, hl_tlv), + CS42L56_HPB_VOLUME, 0, 0x44, 0x48, hl_tlv), SOC_DOUBLE_R_SX_TLV("LineOut Volume", CS42L56_LOA_VOLUME, - CS42L56_LOB_VOLUME, 0, 0x84, 0x48, hl_tlv), + CS42L56_LOB_VOLUME, 0, 0x44, 0x48, hl_tlv), SOC_SINGLE_TLV("Bass Shelving Volume", CS42L56_TONE_CTL, 0, 0x00, 1, tone_tlv), diff --git a/sound/soc/codecs/cs53l30.c b/sound/soc/codecs/cs53l30.c index 703545273900..360ca2ffd506 100644 --- a/sound/soc/codecs/cs53l30.c +++ b/sound/soc/codecs/cs53l30.c @@ -348,22 +348,22 @@ static const struct snd_kcontrol_new cs53l30_snd_controls[] = { SOC_ENUM("ADC2 NG Delay", adc2_ng_delay_enum), SOC_SINGLE_SX_TLV("ADC1A PGA Volume", - CS53L30_ADC1A_AFE_CTL, 0, 0x34, 0x18, pga_tlv), + CS53L30_ADC1A_AFE_CTL, 0, 0x34, 0x24, pga_tlv), SOC_SINGLE_SX_TLV("ADC1B PGA Volume", - CS53L30_ADC1B_AFE_CTL, 0, 0x34, 0x18, pga_tlv), + CS53L30_ADC1B_AFE_CTL, 0, 0x34, 0x24, pga_tlv), SOC_SINGLE_SX_TLV("ADC2A PGA Volume", - CS53L30_ADC2A_AFE_CTL, 0, 0x34, 0x18, pga_tlv), + CS53L30_ADC2A_AFE_CTL, 0, 0x34, 0x24, pga_tlv), SOC_SINGLE_SX_TLV("ADC2B PGA Volume", - CS53L30_ADC2B_AFE_CTL, 0, 0x34, 0x18, pga_tlv), + CS53L30_ADC2B_AFE_CTL, 0, 0x34, 0x24, pga_tlv), SOC_SINGLE_SX_TLV("ADC1A Digital Volume", - CS53L30_ADC1A_DIG_VOL, 0, 0xA0, 0x0C, dig_tlv), + CS53L30_ADC1A_DIG_VOL, 0, 0xA0, 0x6C, dig_tlv), SOC_SINGLE_SX_TLV("ADC1B Digital Volume", - CS53L30_ADC1B_DIG_VOL, 0, 0xA0, 0x0C, dig_tlv), + CS53L30_ADC1B_DIG_VOL, 0, 0xA0, 0x6C, dig_tlv), SOC_SINGLE_SX_TLV("ADC2A Digital Volume", - CS53L30_ADC2A_DIG_VOL, 0, 0xA0, 0x0C, dig_tlv), + CS53L30_ADC2A_DIG_VOL, 0, 0xA0, 0x6C, dig_tlv), SOC_SINGLE_SX_TLV("ADC2B Digital Volume", - CS53L30_ADC2B_DIG_VOL, 0, 0xA0, 0x0C, dig_tlv), + CS53L30_ADC2B_DIG_VOL, 0, 0xA0, 0x6C, dig_tlv), }; static const struct snd_soc_dapm_widget cs53l30_dapm_widgets[] = { diff --git a/sound/soc/codecs/es8328.c b/sound/soc/codecs/es8328.c index 3f00ead97006..dd53dfd87b04 100644 --- a/sound/soc/codecs/es8328.c +++ b/sound/soc/codecs/es8328.c @@ -161,13 +161,16 @@ static int es8328_put_deemph(struct snd_kcontrol *kcontrol, if (deemph > 1) return -EINVAL; + if (es8328->deemph == deemph) + return 0; + ret = es8328_set_deemph(component); if (ret < 0) return ret; es8328->deemph = deemph; - return 0; + return 1; } diff --git a/sound/soc/codecs/nau8822.c b/sound/soc/codecs/nau8822.c index 66bbd8f4f1ad..08f6c56dc387 100644 --- a/sound/soc/codecs/nau8822.c +++ b/sound/soc/codecs/nau8822.c @@ -741,6 +741,8 @@ static int nau8822_set_pll(struct snd_soc_dai *dai, int pll_id, int source, pll_param->mclk_scaler, pll_param->pre_factor); snd_soc_component_update_bits(component, + NAU8822_REG_POWER_MANAGEMENT_1, NAU8822_PLL_EN_MASK, NAU8822_PLL_OFF); + snd_soc_component_update_bits(component, NAU8822_REG_PLL_N, NAU8822_PLLMCLK_DIV2 | NAU8822_PLLN_MASK, (pll_param->pre_factor ? 
NAU8822_PLLMCLK_DIV2 : 0) | pll_param->pll_int); @@ -757,6 +759,8 @@ static int nau8822_set_pll(struct snd_soc_dai *dai, int pll_id, int source, pll_param->mclk_scaler << NAU8822_MCLKSEL_SFT); snd_soc_component_update_bits(component, NAU8822_REG_CLOCKING, NAU8822_CLKM_MASK, NAU8822_CLKM_PLL); + snd_soc_component_update_bits(component, + NAU8822_REG_POWER_MANAGEMENT_1, NAU8822_PLL_EN_MASK, NAU8822_PLL_ON); return 0; } diff --git a/sound/soc/codecs/nau8822.h b/sound/soc/codecs/nau8822.h index 489191ff187e..b45d42c15de6 100644 --- a/sound/soc/codecs/nau8822.h +++ b/sound/soc/codecs/nau8822.h @@ -90,6 +90,9 @@ #define NAU8822_REFIMP_3K 0x3 #define NAU8822_IOBUF_EN (0x1 << 2) #define NAU8822_ABIAS_EN (0x1 << 3) +#define NAU8822_PLL_EN_MASK (0x1 << 5) +#define NAU8822_PLL_ON (0x1 << 5) +#define NAU8822_PLL_OFF (0x0 << 5) /* NAU8822_REG_AUDIO_INTERFACE (0x4) */ #define NAU8822_AIFMT_MASK (0x3 << 3) diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c index 34cd5a2a997c..5cca89364280 100644 --- a/sound/soc/codecs/wm8962.c +++ b/sound/soc/codecs/wm8962.c @@ -3868,6 +3868,7 @@ static int wm8962_runtime_suspend(struct device *dev) #endif static const struct dev_pm_ops wm8962_pm = { + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) SET_RUNTIME_PM_OPS(wm8962_runtime_suspend, wm8962_runtime_resume, NULL) }; diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c index 7973a75cac05..6d7fd88243aa 100644 --- a/sound/soc/codecs/wm_adsp.c +++ b/sound/soc/codecs/wm_adsp.c @@ -333,7 +333,7 @@ int wm_adsp_fw_put(struct snd_kcontrol *kcontrol, struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol); struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; struct wm_adsp *dsp = snd_soc_component_get_drvdata(component); - int ret = 0; + int ret = 1; if (ucontrol->value.enumerated.item[0] == dsp[e->shift_l].fw) return 0; diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c index fa950dde5310..e765da9a19e7 100644 --- a/sound/soc/fsl/fsl_sai.c +++ b/sound/soc/fsl/fsl_sai.c @@ -1293,6 +1293,7 @@ static const struct of_device_id fsl_sai_ids[] = { { .compatible = "fsl,imx8mm-sai", .data = &fsl_sai_imx8mm_data }, { .compatible = "fsl,imx8mp-sai", .data = &fsl_sai_imx8mp_data }, { .compatible = "fsl,imx8ulp-sai", .data = &fsl_sai_imx8ulp_data }, + { .compatible = "fsl,imx8mn-sai", .data = &fsl_sai_imx8mp_data }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, fsl_sai_ids); diff --git a/sound/soc/intel/boards/sof_cirrus_common.c b/sound/soc/intel/boards/sof_cirrus_common.c index e71d74ec1b0b..f4192df962d6 100644 --- a/sound/soc/intel/boards/sof_cirrus_common.c +++ b/sound/soc/intel/boards/sof_cirrus_common.c @@ -54,22 +54,29 @@ static struct snd_soc_dai_link_component cs35l41_components[] = { }, }; +/* + * Mapping between ACPI instance id and speaker position. 
+ * + * Four speakers: + * 0: Tweeter left, 1: Woofer left + * 2: Tweeter right, 3: Woofer right + */ static struct snd_soc_codec_conf cs35l41_codec_conf[] = { { .dlc = COMP_CODEC_CONF(CS35L41_DEV0_NAME), - .name_prefix = "WL", + .name_prefix = "TL", }, { .dlc = COMP_CODEC_CONF(CS35L41_DEV1_NAME), - .name_prefix = "WR", + .name_prefix = "WL", }, { .dlc = COMP_CODEC_CONF(CS35L41_DEV2_NAME), - .name_prefix = "TL", + .name_prefix = "TR", }, { .dlc = COMP_CODEC_CONF(CS35L41_DEV3_NAME), - .name_prefix = "TR", + .name_prefix = "WR", }, }; @@ -101,6 +108,21 @@ static int cs35l41_init(struct snd_soc_pcm_runtime *rtd) return ret; } +/* + * Channel map: + * + * TL/WL: ASPRX1 on slot 0, ASPRX2 on slot 1 (default) + * TR/WR: ASPRX1 on slot 1, ASPRX2 on slot 0 + */ +static const struct { + unsigned int rx[2]; +} cs35l41_channel_map[] = { + {.rx = {0, 1}}, /* TL */ + {.rx = {0, 1}}, /* WL */ + {.rx = {1, 0}}, /* TR */ + {.rx = {1, 0}}, /* WR */ +}; + static int cs35l41_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { @@ -134,6 +156,16 @@ static int cs35l41_hw_params(struct snd_pcm_substream *substream, ret); return ret; } + + /* setup channel map */ + ret = snd_soc_dai_set_channel_map(codec_dai, 0, NULL, + ARRAY_SIZE(cs35l41_channel_map[i].rx), + (unsigned int *)cs35l41_channel_map[i].rx); + if (ret < 0) { + dev_err(codec_dai->dev, "fail to set channel map, ret %d\n", + ret); + return ret; + } } return 0; diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c index f03a7ae49d50..b41ab7a321ae 100644 --- a/sound/soc/qcom/lpass-platform.c +++ b/sound/soc/qcom/lpass-platform.c @@ -898,7 +898,7 @@ static int lpass_platform_cdc_dma_mmap(struct snd_pcm_substream *substream, struct snd_pcm_runtime *runtime = substream->runtime; unsigned long size, offset; - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); size = vma->vm_end - vma->vm_start; offset = vma->vm_pgoff << PAGE_SHIFT; return io_remap_pfn_range(vma, vma->vm_start, diff --git a/sound/soc/sof/sof-audio.c b/sound/soc/sof/sof-audio.c index 8d740635a4bb..28976098a89e 100644 --- a/sound/soc/sof/sof-audio.c +++ b/sound/soc/sof/sof-audio.c @@ -318,7 +318,7 @@ sink_prepare: p->walking = false; if (ret < 0) { /* unprepare the source widget */ - if (!widget_ops[widget->id].ipc_unprepare && swidget->prepared) { + if (widget_ops[widget->id].ipc_unprepare && swidget->prepared) { widget_ops[widget->id].ipc_unprepare(swidget); swidget->prepared = false; } diff --git a/sound/soc/sof/sof-client-ipc-msg-injector.c b/sound/soc/sof/sof-client-ipc-msg-injector.c index 03490a4d4ae7..6bdfa527b7f7 100644 --- a/sound/soc/sof/sof-client-ipc-msg-injector.c +++ b/sound/soc/sof/sof-client-ipc-msg-injector.c @@ -150,7 +150,7 @@ static ssize_t sof_msg_inject_dfs_write(struct file *file, const char __user *bu { struct sof_client_dev *cdev = file->private_data; struct sof_msg_inject_priv *priv = cdev->data; - size_t size; + ssize_t size; int ret; if (*ppos) @@ -158,8 +158,10 @@ static ssize_t sof_msg_inject_dfs_write(struct file *file, const char __user *bu size = simple_write_to_buffer(priv->tx_buffer, priv->max_msg_size, ppos, buffer, count); + if (size < 0) + return size; if (size != count) - return size > 0 ? 
-EFAULT : size; + return -EFAULT; memset(priv->rx_buffer, 0, priv->max_msg_size); @@ -179,7 +181,7 @@ static ssize_t sof_msg_inject_ipc4_dfs_write(struct file *file, struct sof_client_dev *cdev = file->private_data; struct sof_msg_inject_priv *priv = cdev->data; struct sof_ipc4_msg *ipc4_msg = priv->tx_buffer; - size_t size; + ssize_t size; int ret; if (*ppos) @@ -192,18 +194,20 @@ static ssize_t sof_msg_inject_ipc4_dfs_write(struct file *file, size = simple_write_to_buffer(&ipc4_msg->header_u64, sizeof(ipc4_msg->header_u64), ppos, buffer, count); + if (size < 0) + return size; if (size != sizeof(ipc4_msg->header_u64)) - return size > 0 ? -EFAULT : size; + return -EFAULT; count -= size; - if (!count) { - /* Copy the payload */ - size = simple_write_to_buffer(ipc4_msg->data_ptr, - priv->max_msg_size, ppos, buffer, - count); - if (size != count) - return size > 0 ? -EFAULT : size; - } + /* Copy the payload */ + size = simple_write_to_buffer(ipc4_msg->data_ptr, + priv->max_msg_size, ppos, buffer, + count); + if (size < 0) + return size; + if (size != count) + return -EFAULT; ipc4_msg->data_size = count; diff --git a/sound/usb/mixer_us16x08.c b/sound/usb/mixer_us16x08.c index b7b6f3834ed5..6eb7d93b358d 100644 --- a/sound/usb/mixer_us16x08.c +++ b/sound/usb/mixer_us16x08.c @@ -637,10 +637,10 @@ static int snd_get_meter_comp_index(struct snd_us16x08_meter_store *store) } } else { /* skip channels with no compressor active */ - while (!store->comp_store->val[ + while (store->comp_index <= SND_US16X08_MAX_CHANNELS + && !store->comp_store->val[ COMP_STORE_IDX(SND_US16X08_ID_COMP_SWITCH)] - [store->comp_index - 1] - && store->comp_index <= SND_US16X08_MAX_CHANNELS) { + [store->comp_index - 1]) { store->comp_index++; } ret = store->comp_index++; diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index b470404a5376..e692ae04436a 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c @@ -291,6 +291,9 @@ int snd_usb_audioformat_set_sync_ep(struct snd_usb_audio *chip, bool is_playback; int err; + if (fmt->sync_ep) + return 0; /* already set up */ + alts = snd_usb_get_host_interface(chip, fmt->iface, fmt->altsetting); if (!alts) return 0; @@ -304,7 +307,7 @@ int snd_usb_audioformat_set_sync_ep(struct snd_usb_audio *chip, * Generic sync EP handling */ - if (altsd->bNumEndpoints < 2) + if (fmt->ep_idx > 0 || altsd->bNumEndpoints < 2) return 0; is_playback = !(get_endpoint(alts, 0)->bEndpointAddress & USB_DIR_IN); diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index 78eb41b621d6..4f56e1784932 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -2658,7 +2658,12 @@ YAMAHA_DEVICE(0x7010, "UB99"), .nr_rates = 2, .rate_table = (unsigned int[]) { 44100, 48000 - } + }, + .sync_ep = 0x82, + .sync_iface = 0, + .sync_altsetting = 1, + .sync_ep_idx = 1, + .implicit_fb = 1, } }, { diff --git a/sound/x86/intel_hdmi_audio.c b/sound/x86/intel_hdmi_audio.c index 0d828e35b401..ab95fb34a635 100644 --- a/sound/x86/intel_hdmi_audio.c +++ b/sound/x86/intel_hdmi_audio.c @@ -33,6 +33,8 @@ #include <drm/intel_lpe_audio.h> #include "intel_hdmi_audio.h" +#define INTEL_HDMI_AUDIO_SUSPEND_DELAY_MS 5000 + #define for_each_pipe(card_ctx, pipe) \ for ((pipe) = 0; (pipe) < (card_ctx)->num_pipes; (pipe)++) #define for_each_port(card_ctx, port) \ @@ -1066,7 +1068,9 @@ static int had_pcm_open(struct snd_pcm_substream *substream) intelhaddata = snd_pcm_substream_chip(substream); runtime = substream->runtime; - pm_runtime_get_sync(intelhaddata->dev); + retval = pm_runtime_resume_and_get(intelhaddata->dev); + if 
(retval < 0) + return retval; /* set the runtime hw parameter with local snd_pcm_hardware struct */ runtime->hw = had_pcm_hardware; @@ -1534,8 +1538,12 @@ static void had_audio_wq(struct work_struct *work) container_of(work, struct snd_intelhad, hdmi_audio_wq); struct intel_hdmi_lpe_audio_pdata *pdata = ctx->dev->platform_data; struct intel_hdmi_lpe_audio_port_pdata *ppdata = &pdata->port[ctx->port]; + int ret; + + ret = pm_runtime_resume_and_get(ctx->dev); + if (ret < 0) + return; - pm_runtime_get_sync(ctx->dev); mutex_lock(&ctx->mutex); if (ppdata->pipe < 0) { dev_dbg(ctx->dev, "%s: Event: HAD_NOTIFY_HOT_UNPLUG : port = %d\n", @@ -1802,8 +1810,11 @@ static int __hdmi_lpe_audio_probe(struct platform_device *pdev) pdata->notify_audio_lpe = notify_audio_lpe; spin_unlock_irq(&pdata->lpe_audio_slock); + pm_runtime_set_autosuspend_delay(&pdev->dev, INTEL_HDMI_AUDIO_SUSPEND_DELAY_MS); pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_enable(&pdev->dev); pm_runtime_mark_last_busy(&pdev->dev); + pm_runtime_idle(&pdev->dev); dev_dbg(&pdev->dev, "%s: handle pending notification\n", __func__); for_each_port(card_ctx, port) { diff --git a/tools/arch/arm64/include/asm/cputype.h b/tools/arch/arm64/include/asm/cputype.h index e09d6908a21d..8aa0d276a636 100644 --- a/tools/arch/arm64/include/asm/cputype.h +++ b/tools/arch/arm64/include/asm/cputype.h @@ -36,7 +36,7 @@ #define MIDR_VARIANT(midr) \ (((midr) & MIDR_VARIANT_MASK) >> MIDR_VARIANT_SHIFT) #define MIDR_IMPLEMENTOR_SHIFT 24 -#define MIDR_IMPLEMENTOR_MASK (0xff << MIDR_IMPLEMENTOR_SHIFT) +#define MIDR_IMPLEMENTOR_MASK (0xffU << MIDR_IMPLEMENTOR_SHIFT) #define MIDR_IMPLEMENTOR(midr) \ (((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT) @@ -118,6 +118,10 @@ #define APPLE_CPU_PART_M1_ICESTORM 0x022 #define APPLE_CPU_PART_M1_FIRESTORM 0x023 +#define APPLE_CPU_PART_M1_ICESTORM_PRO 0x024 +#define APPLE_CPU_PART_M1_FIRESTORM_PRO 0x025 +#define APPLE_CPU_PART_M1_ICESTORM_MAX 0x028 +#define APPLE_CPU_PART_M1_FIRESTORM_MAX 0x029 #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53) #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57) @@ -164,6 +168,10 @@ #define MIDR_HISI_TSV110 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_TSV110) #define MIDR_APPLE_M1_ICESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM) #define MIDR_APPLE_M1_FIRESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM) +#define MIDR_APPLE_M1_ICESTORM_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM_PRO) +#define MIDR_APPLE_M1_FIRESTORM_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_PRO) +#define MIDR_APPLE_M1_ICESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM_MAX) +#define MIDR_APPLE_M1_FIRESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_MAX) /* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */ #define MIDR_FUJITSU_ERRATUM_010001 MIDR_FUJITSU_A64FX @@ -172,7 +180,7 @@ #ifndef __ASSEMBLY__ -#include "sysreg.h" +#include <asm/sysreg.h> #define read_cpuid(reg) read_sysreg_s(SYS_ ## reg) diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h index 73e643ae94b6..03acc823838a 100644 --- a/tools/arch/x86/include/asm/cpufeatures.h +++ b/tools/arch/x86/include/asm/cpufeatures.h @@ -201,7 +201,7 @@ #define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */ #define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ #define 
X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ -/* FREE! ( 7*32+10) */ +#define X86_FEATURE_XCOMPACTED ( 7*32+10) /* "" Use compacted XSTATE (XSAVES or XSAVEC) */ #define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */ #define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */ #define X86_FEATURE_RETPOLINE_LFENCE ( 7*32+13) /* "" Use LFENCE for Spectre variant 2 */ @@ -211,7 +211,7 @@ #define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */ #define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */ #define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */ -/* FREE! ( 7*32+20) */ +#define X86_FEATURE_PERFMON_V2 ( 7*32+20) /* AMD Performance Monitoring Version 2 */ #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */ #define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */ #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */ @@ -238,6 +238,7 @@ #define X86_FEATURE_VMW_VMMCALL ( 8*32+19) /* "" VMware prefers VMMCALL hypercall instruction */ #define X86_FEATURE_PVUNLOCK ( 8*32+20) /* "" PV unlock function */ #define X86_FEATURE_VCPUPREEMPT ( 8*32+21) /* "" PV vcpu_is_preempted function */ +#define X86_FEATURE_TDX_GUEST ( 8*32+22) /* Intel Trust Domain Extensions Guest */ /* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */ #define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/ @@ -315,6 +316,7 @@ #define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */ #define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. 
*/ #define X86_FEATURE_CPPC (13*32+27) /* Collaborative Processor Performance Control */ +#define X86_FEATURE_BRS (13*32+31) /* Branch Sampling available */ /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */ #define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ @@ -405,6 +407,7 @@ #define X86_FEATURE_SEV (19*32+ 1) /* AMD Secure Encrypted Virtualization */ #define X86_FEATURE_VM_PAGE_FLUSH (19*32+ 2) /* "" VM Page Flush MSR is supported */ #define X86_FEATURE_SEV_ES (19*32+ 3) /* AMD Secure Encrypted Virtualization - Encrypted State */ +#define X86_FEATURE_V_TSC_AUX (19*32+ 9) /* "" Virtual TSC_AUX */ #define X86_FEATURE_SME_COHERENT (19*32+10) /* "" AMD hardware-enforced cache coherency */ /* @@ -443,5 +446,6 @@ #define X86_BUG_TAA X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */ #define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */ #define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */ +#define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */ #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h index 1ae0fab7d902..36369e76cc63 100644 --- a/tools/arch/x86/include/asm/disabled-features.h +++ b/tools/arch/x86/include/asm/disabled-features.h @@ -62,6 +62,12 @@ # define DISABLE_SGX (1 << (X86_FEATURE_SGX & 31)) #endif +#ifdef CONFIG_INTEL_TDX_GUEST +# define DISABLE_TDX_GUEST 0 +#else +# define DISABLE_TDX_GUEST (1 << (X86_FEATURE_TDX_GUEST & 31)) +#endif + /* * Make sure to add features to the correct mask */ @@ -73,7 +79,7 @@ #define DISABLED_MASK5 0 #define DISABLED_MASK6 0 #define DISABLED_MASK7 (DISABLE_PTI) -#define DISABLED_MASK8 0 +#define DISABLED_MASK8 (DISABLE_TDX_GUEST) #define DISABLED_MASK9 (DISABLE_SGX) #define DISABLED_MASK10 0 #define DISABLED_MASK11 0 diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h index 403e83b4adc8..d27e0581b777 100644 --- a/tools/arch/x86/include/asm/msr-index.h +++ b/tools/arch/x86/include/asm/msr-index.h @@ -116,6 +116,30 @@ * Not susceptible to * TSX Async Abort (TAA) vulnerabilities. */ +#define ARCH_CAP_SBDR_SSDP_NO BIT(13) /* + * Not susceptible to SBDR and SSDP + * variants of Processor MMIO stale data + * vulnerabilities. + */ +#define ARCH_CAP_FBSDP_NO BIT(14) /* + * Not susceptible to FBSDP variant of + * Processor MMIO stale data + * vulnerabilities. + */ +#define ARCH_CAP_PSDP_NO BIT(15) /* + * Not susceptible to PSDP variant of + * Processor MMIO stale data + * vulnerabilities. + */ +#define ARCH_CAP_FB_CLEAR BIT(17) /* + * VERW clears CPU fill buffer + * even on MDS_NO CPUs. + */ +#define ARCH_CAP_FB_CLEAR_CTRL BIT(18) /* + * MSR_IA32_MCU_OPT_CTRL[FB_CLEAR_DIS] + * bit available to control VERW + * behavior. 
+ */ #define MSR_IA32_FLUSH_CMD 0x0000010b #define L1D_FLUSH BIT(0) /* @@ -133,6 +157,7 @@ #define MSR_IA32_MCU_OPT_CTRL 0x00000123 #define RNGDS_MITG_DIS BIT(0) /* SRBDS support */ #define RTM_ALLOW BIT(1) /* TSX development mode */ +#define FB_CLEAR_DIS BIT(3) /* CPU Fill buffer clear disable */ #define MSR_IA32_SYSENTER_CS 0x00000174 #define MSR_IA32_SYSENTER_ESP 0x00000175 diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h index bf6e96011dfe..21614807a2cb 100644 --- a/tools/arch/x86/include/uapi/asm/kvm.h +++ b/tools/arch/x86/include/uapi/asm/kvm.h @@ -428,11 +428,12 @@ struct kvm_sync_regs { struct kvm_vcpu_events events; }; -#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0) -#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1) -#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2) -#define KVM_X86_QUIRK_OUT_7E_INC_RIP (1 << 3) -#define KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT (1 << 4) +#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0) +#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1) +#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2) +#define KVM_X86_QUIRK_OUT_7E_INC_RIP (1 << 3) +#define KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT (1 << 4) +#define KVM_X86_QUIRK_FIX_HYPERCALL_INSN (1 << 5) #define KVM_STATE_NESTED_FORMAT_VMX 0 #define KVM_STATE_NESTED_FORMAT_SVM 1 diff --git a/tools/arch/x86/include/uapi/asm/svm.h b/tools/arch/x86/include/uapi/asm/svm.h index efa969325ede..f69c168391aa 100644 --- a/tools/arch/x86/include/uapi/asm/svm.h +++ b/tools/arch/x86/include/uapi/asm/svm.h @@ -108,6 +108,14 @@ #define SVM_VMGEXIT_AP_JUMP_TABLE 0x80000005 #define SVM_VMGEXIT_SET_AP_JUMP_TABLE 0 #define SVM_VMGEXIT_GET_AP_JUMP_TABLE 1 +#define SVM_VMGEXIT_PSC 0x80000010 +#define SVM_VMGEXIT_GUEST_REQUEST 0x80000011 +#define SVM_VMGEXIT_EXT_GUEST_REQUEST 0x80000012 +#define SVM_VMGEXIT_AP_CREATION 0x80000013 +#define SVM_VMGEXIT_AP_CREATE_ON_INIT 0 +#define SVM_VMGEXIT_AP_CREATE 1 +#define SVM_VMGEXIT_AP_DESTROY 2 +#define SVM_VMGEXIT_HV_FEATURES 0x8000fffd #define SVM_VMGEXIT_UNSUPPORTED_EVENT 0x8000ffff /* Exit code reserved for hypervisor/software use */ @@ -218,6 +226,11 @@ { SVM_VMGEXIT_NMI_COMPLETE, "vmgexit_nmi_complete" }, \ { SVM_VMGEXIT_AP_HLT_LOOP, "vmgexit_ap_hlt_loop" }, \ { SVM_VMGEXIT_AP_JUMP_TABLE, "vmgexit_ap_jump_table" }, \ + { SVM_VMGEXIT_PSC, "vmgexit_page_state_change" }, \ + { SVM_VMGEXIT_GUEST_REQUEST, "vmgexit_guest_request" }, \ + { SVM_VMGEXIT_EXT_GUEST_REQUEST, "vmgexit_ext_guest_request" }, \ + { SVM_VMGEXIT_AP_CREATION, "vmgexit_ap_creation" }, \ + { SVM_VMGEXIT_HV_FEATURES, "vmgexit_hypervisor_feature" }, \ { SVM_EXIT_ERR, "invalid_guest_state" } diff --git a/tools/include/linux/objtool.h b/tools/include/linux/objtool.h index 6491fa8fba6d..15b940ec1eac 100644 --- a/tools/include/linux/objtool.h +++ b/tools/include/linux/objtool.h @@ -143,6 +143,12 @@ struct unwind_hint { .popsection .endm +.macro STACK_FRAME_NON_STANDARD_FP func:req +#ifdef CONFIG_FRAME_POINTER + STACK_FRAME_NON_STANDARD \func +#endif +.endm + .macro ANNOTATE_NOENDBR .Lhere_\@: .pushsection .discard.noendbr diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h index 05c3642aaece..a2def7b27009 100644 --- a/tools/include/uapi/drm/i915_drm.h +++ b/tools/include/uapi/drm/i915_drm.h @@ -154,25 +154,77 @@ enum i915_mocs_table_index { I915_MOCS_CACHED, }; -/* +/** + * enum drm_i915_gem_engine_class - uapi engine type enumeration + * * Different engines serve different roles, and there may be more than one - * engine serving each role. 
enum drm_i915_gem_engine_class provides a - * classification of the role of the engine, which may be used when requesting - * operations to be performed on a certain subset of engines, or for providing - * information about that group. + * engine serving each role. This enum provides a classification of the role + * of the engine, which may be used when requesting operations to be performed + * on a certain subset of engines, or for providing information about that + * group. */ enum drm_i915_gem_engine_class { + /** + * @I915_ENGINE_CLASS_RENDER: + * + * Render engines support instructions used for 3D, Compute (GPGPU), + * and programmable media workloads. These instructions fetch data and + * dispatch individual work items to threads that operate in parallel. + * The threads run small programs (called "kernels" or "shaders") on + * the GPU's execution units (EUs). + */ I915_ENGINE_CLASS_RENDER = 0, + + /** + * @I915_ENGINE_CLASS_COPY: + * + * Copy engines (also referred to as "blitters") support instructions + * that move blocks of data from one location in memory to another, + * or that fill a specified location of memory with fixed data. + * Copy engines can perform pre-defined logical or bitwise operations + * on the source, destination, or pattern data. + */ I915_ENGINE_CLASS_COPY = 1, + + /** + * @I915_ENGINE_CLASS_VIDEO: + * + * Video engines (also referred to as "bit stream decode" (BSD) or + * "vdbox") support instructions that perform fixed-function media + * decode and encode. + */ I915_ENGINE_CLASS_VIDEO = 2, + + /** + * @I915_ENGINE_CLASS_VIDEO_ENHANCE: + * + * Video enhancement engines (also referred to as "vebox") support + * instructions related to image enhancement. + */ I915_ENGINE_CLASS_VIDEO_ENHANCE = 3, - /* should be kept compact */ + /** + * @I915_ENGINE_CLASS_COMPUTE: + * + * Compute engines support a subset of the instructions available + * on render engines: compute engines support Compute (GPGPU) and + * programmable media workloads, but do not support the 3D pipeline. + */ + I915_ENGINE_CLASS_COMPUTE = 4, + + /* Values in this enum should be kept compact. */ + /** + * @I915_ENGINE_CLASS_INVALID: + * + * Placeholder value to represent an invalid engine class assignment. + */ I915_ENGINE_CLASS_INVALID = -1 }; -/* +/** + * struct i915_engine_class_instance - Engine class/instance identifier + * * There may be more than one engine fulfilling any role within the system. * Each engine of a class is given a unique instance number and therefore * any engine can be specified by its class:instance tuplet. APIs that allow @@ -180,10 +232,21 @@ enum drm_i915_gem_engine_class { * for this identification. */ struct i915_engine_class_instance { - __u16 engine_class; /* see enum drm_i915_gem_engine_class */ - __u16 engine_instance; + /** + * @engine_class: + * + * Engine class from enum drm_i915_gem_engine_class + */ + __u16 engine_class; #define I915_ENGINE_CLASS_INVALID_NONE -1 #define I915_ENGINE_CLASS_INVALID_VIRTUAL -2 + + /** + * @engine_instance: + * + * Engine instance. + */ + __u16 engine_instance; }; /** @@ -2657,24 +2720,65 @@ enum drm_i915_perf_record_type { DRM_I915_PERF_RECORD_MAX /* non-ABI */ }; -/* +/** + * struct drm_i915_perf_oa_config + * * Structure to upload perf dynamic configuration into the kernel. 
 */ struct drm_i915_perf_oa_config { - /** String formatted like "%08x-%04x-%04x-%04x-%012x" */ + /** + * @uuid: + * + * String formatted like "%\08x-%\04x-%\04x-%\04x-%\012x" + */ char uuid[36]; + /** + * @n_mux_regs: + * + * Number of mux regs in &mux_regs_ptr. + */ __u32 n_mux_regs; + + /** + * @n_boolean_regs: + * + * Number of boolean regs in &boolean_regs_ptr. + */ __u32 n_boolean_regs; + + /** + * @n_flex_regs: + * + * Number of flex regs in &flex_regs_ptr. + */ __u32 n_flex_regs; - /* - * These fields are pointers to tuples of u32 values (register address, - * value). For example the expected length of the buffer pointed by - * mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs). + /** + * @mux_regs_ptr: + * + * Pointer to tuples of u32 values (register address, value) for mux + * registers. Expected length of buffer is (2 * sizeof(u32) * + * &n_mux_regs). */ __u64 mux_regs_ptr; + + /** + * @boolean_regs_ptr: + * + * Pointer to tuples of u32 values (register address, value) for boolean + * registers. Expected length of buffer is (2 * sizeof(u32) * + * &n_boolean_regs). + */ __u64 boolean_regs_ptr; + + /** + * @flex_regs_ptr: + * + * Pointer to tuples of u32 values (register address, value) for flex + * registers. Expected length of buffer is (2 * sizeof(u32) * + * &n_flex_regs). + */ __u64 flex_regs_ptr; }; @@ -2685,12 +2789,24 @@ struct drm_i915_perf_oa_config { * @data_ptr also depends on the specific @query_id. */ struct drm_i915_query_item { - /** @query_id: The id for this query */ + /** + * @query_id: + * + * The id for this query. Currently accepted query IDs are: + * - %DRM_I915_QUERY_TOPOLOGY_INFO (see struct drm_i915_query_topology_info) + * - %DRM_I915_QUERY_ENGINE_INFO (see struct drm_i915_engine_info) + * - %DRM_I915_QUERY_PERF_CONFIG (see struct drm_i915_query_perf_config) + * - %DRM_I915_QUERY_MEMORY_REGIONS (see struct drm_i915_query_memory_regions) + * - %DRM_I915_QUERY_HWCONFIG_BLOB (see `GuC HWCONFIG blob uAPI`) + * - %DRM_I915_QUERY_GEOMETRY_SUBSLICES (see struct drm_i915_query_topology_info) + */ __u64 query_id; -#define DRM_I915_QUERY_TOPOLOGY_INFO 1 -#define DRM_I915_QUERY_ENGINE_INFO 2 -#define DRM_I915_QUERY_PERF_CONFIG 3 -#define DRM_I915_QUERY_MEMORY_REGIONS 4 +#define DRM_I915_QUERY_TOPOLOGY_INFO 1 +#define DRM_I915_QUERY_ENGINE_INFO 2 +#define DRM_I915_QUERY_PERF_CONFIG 3 +#define DRM_I915_QUERY_MEMORY_REGIONS 4 +#define DRM_I915_QUERY_HWCONFIG_BLOB 5 +#define DRM_I915_QUERY_GEOMETRY_SUBSLICES 6 /* Must be kept compact -- no holes and well documented */ /** @@ -2706,14 +2822,17 @@ struct drm_i915_query_item { /** * @flags: * - * When query_id == DRM_I915_QUERY_TOPOLOGY_INFO, must be 0. + * When &query_id == %DRM_I915_QUERY_TOPOLOGY_INFO, must be 0. * - * When query_id == DRM_I915_QUERY_PERF_CONFIG, must be one of the + * When &query_id == %DRM_I915_QUERY_PERF_CONFIG, must be one of the * following: * - * - DRM_I915_QUERY_PERF_CONFIG_LIST - * - DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID - * - DRM_I915_QUERY_PERF_CONFIG_FOR_UUID + * - %DRM_I915_QUERY_PERF_CONFIG_LIST + * - %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID + * - %DRM_I915_QUERY_PERF_CONFIG_FOR_UUID + * + * When &query_id == %DRM_I915_QUERY_GEOMETRY_SUBSLICES, must contain + * a struct i915_engine_class_instance that references a render engine. 
 */ __u32 flags; #define DRM_I915_QUERY_PERF_CONFIG_LIST 1 @@ -2771,66 +2890,112 @@ struct drm_i915_query { __u64 items_ptr; }; -/* - * Data written by the kernel with query DRM_I915_QUERY_TOPOLOGY_INFO : - * - * data: contains the 3 pieces of information : - * - * - the slice mask with one bit per slice telling whether a slice is - * available. The availability of slice X can be queried with the following - * formula : - * - * (data[X / 8] >> (X % 8)) & 1 - * - * - the subslice mask for each slice with one bit per subslice telling - * whether a subslice is available. Gen12 has dual-subslices, which are - * similar to two gen11 subslices. For gen12, this array represents dual- - * subslices. The availability of subslice Y in slice X can be queried - * with the following formula : - * - * (data[subslice_offset + - * X * subslice_stride + - * Y / 8] >> (Y % 8)) & 1 - * - * - the EU mask for each subslice in each slice with one bit per EU telling - * whether an EU is available. The availability of EU Z in subslice Y in - * slice X can be queried with the following formula : +/** + * struct drm_i915_query_topology_info * - * (data[eu_offset + - * (X * max_subslices + Y) * eu_stride + - * Z / 8] >> (Z % 8)) & 1 + * Describes slice/subslice/EU information queried by + * %DRM_I915_QUERY_TOPOLOGY_INFO */ struct drm_i915_query_topology_info { - /* + /** + * @flags: + * * Unused for now. Must be cleared to zero. */ __u16 flags; + /** + * @max_slices: + * + * The number of bits used to express the slice mask. + */ __u16 max_slices; + + /** + * @max_subslices: + * + * The number of bits used to express the subslice mask. + */ __u16 max_subslices; + + /** + * @max_eus_per_subslice: + * + * The number of bits in the EU mask that correspond to a single + * subslice's EUs. + */ __u16 max_eus_per_subslice; - /* + /** + * @subslice_offset: + * * Offset in data[] at which the subslice masks are stored. */ __u16 subslice_offset; - /* + /** + * @subslice_stride: + * * Stride at which each of the subslice masks for each slice are * stored. */ __u16 subslice_stride; - /* + /** + * @eu_offset: + * * Offset in data[] at which the EU masks are stored. */ __u16 eu_offset; - /* + /** + * @eu_stride: + * * Stride at which each of the EU masks for each subslice are stored. */ __u16 eu_stride; + /** + * @data: + * + * Contains 3 pieces of information: + * + * - The slice mask with one bit per slice telling whether a slice is + * available. The availability of slice X can be queried with the + * following formula: + * + * .. code:: c + * + * (data[X / 8] >> (X % 8)) & 1 + * + * Starting with Xe_HP platforms, Intel hardware no longer has + * traditional slices so i915 will always report a single slice + * (hardcoded slicemask = 0x1) which contains all of the platform's + * subslices. I.e., the mask here does not reflect any of the newer + * hardware concepts such as "gslices" or "cslices" since userspace + * is capable of inferring those from the subslice mask. + * + * - The subslice mask for each slice with one bit per subslice telling + * whether a subslice is available. Starting with Gen12 we use the + * term "subslice" to refer to what the hardware documentation + * describes as a "dual-subslice." The availability of subslice Y + * in slice X can be queried with the following formula: + * + * .. code:: c + * + * (data[subslice_offset + X * subslice_stride + Y / 8] >> (Y % 8)) & 1 + * + * - The EU mask for each subslice in each slice, with one bit per EU + * telling whether an EU is available. 
The availability of EU Z in + subslice Y in slice X can be queried with the following formula: + * + * .. code:: c + * + * (data[eu_offset + + * (X * max_subslices + Y) * eu_stride + + * Z / 8 + * ] >> (Z % 8)) & 1 + */ __u8 data[]; }; @@ -2951,52 +3116,68 @@ struct drm_i915_query_engine_info { struct drm_i915_engine_info engines[]; }; -/* - * Data written by the kernel with query DRM_I915_QUERY_PERF_CONFIG. +/** + * struct drm_i915_query_perf_config + * + * Data written by the kernel with query %DRM_I915_QUERY_PERF_CONFIG and + * %DRM_I915_QUERY_GEOMETRY_SUBSLICES. */ struct drm_i915_query_perf_config { union { - /* - * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 sets - * this fields to the number of configurations available. + /** + * @n_configs: + * + * When &drm_i915_query_item.flags == + * %DRM_I915_QUERY_PERF_CONFIG_LIST, i915 sets this field to + * the number of configurations available. */ __u64 n_configs; - /* - * When query_id == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID, - * i915 will use the value in this field as configuration - * identifier to decide what data to write into config_ptr. + /** + * @config: + * + * When &drm_i915_query_item.flags == + * %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID, i915 will use the + * value in this field as configuration identifier to decide + * what data to write into config_ptr. */ __u64 config; - /* - * When query_id == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID, - * i915 will use the value in this field as configuration - * identifier to decide what data to write into config_ptr. + /** + * @uuid: + * + * When &drm_i915_query_item.flags == + * %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID, i915 will use the + * value in this field as configuration identifier to decide + * what data to write into config_ptr. * * String formatted like "%08x-%04x-%04x-%04x-%012x" */ char uuid[36]; }; - /* + /** + * @flags: + * * Unused for now. Must be cleared to zero. */ __u32 flags; - /* - * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 will - * write an array of __u64 of configuration identifiers. + /** + * @data: * - * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA, i915 will - * write a struct drm_i915_perf_oa_config. If the following fields of - * drm_i915_perf_oa_config are set not set to 0, i915 will write into - * the associated pointers the values of submitted when the + * When &drm_i915_query_item.flags == %DRM_I915_QUERY_PERF_CONFIG_LIST, + * i915 will write an array of __u64 of configuration identifiers. + * + * When &drm_i915_query_item.flags == %DRM_I915_QUERY_PERF_CONFIG_DATA, + * i915 will write a struct drm_i915_perf_oa_config. If the following + * fields of struct drm_i915_perf_oa_config are not set to 0, i915 will + * write into the associated pointers the values submitted when the * configuration was created: * - * - n_mux_regs - * - n_boolean_regs - * - n_flex_regs + * - &drm_i915_perf_oa_config.n_mux_regs + * - &drm_i915_perf_oa_config.n_boolean_regs + * - &drm_i915_perf_oa_config.n_flex_regs */ __u8 data[]; }; @@ -3135,6 +3316,16 @@ struct drm_i915_query_memory_regions { }; /** + * DOC: GuC HWCONFIG blob uAPI + * + * The GuC produces a blob with information about the current device. + * i915 reads this blob from GuC and makes it available via this uAPI. + * + * The format and meaning of the blob content are documented in the + * Programmer's Reference Manual. 
+ */ + +/** * struct drm_i915_gem_create_ext - Existing gem_create behaviour, with added * extension support using struct i915_user_extension. * diff --git a/tools/include/uapi/linux/prctl.h b/tools/include/uapi/linux/prctl.h index e998764f0262..a5e06dcbba13 100644 --- a/tools/include/uapi/linux/prctl.h +++ b/tools/include/uapi/linux/prctl.h @@ -272,6 +272,15 @@ struct prctl_mm_map { # define PR_SCHED_CORE_SCOPE_THREAD_GROUP 1 # define PR_SCHED_CORE_SCOPE_PROCESS_GROUP 2 +/* arm64 Scalable Matrix Extension controls */ +/* Flag values must be in sync with SVE versions */ +#define PR_SME_SET_VL 63 /* set task vector length */ +# define PR_SME_SET_VL_ONEXEC (1 << 18) /* defer effect until exec */ +#define PR_SME_GET_VL 64 /* get task vector length */ +/* Bits common to PR_SME_SET_VL and PR_SME_GET_VL */ +# define PR_SME_VL_LEN_MASK 0xffff +# define PR_SME_VL_INHERIT (1 << 17) /* inherit across exec */ + #define PR_SET_VMA 0x53564d41 # define PR_SET_VMA_ANON_NAME 0 diff --git a/tools/include/uapi/linux/vhost.h b/tools/include/uapi/linux/vhost.h index 5d99e7c242a2..cab645d4a645 100644 --- a/tools/include/uapi/linux/vhost.h +++ b/tools/include/uapi/linux/vhost.h @@ -89,11 +89,6 @@ /* Set or get vhost backend capability */ -/* Use message type V2 */ -#define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1 -/* IOTLB can accept batching hints */ -#define VHOST_BACKEND_F_IOTLB_BATCH 0x2 - #define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64) #define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64) @@ -150,11 +145,30 @@ /* Get the valid iova range */ #define VHOST_VDPA_GET_IOVA_RANGE _IOR(VHOST_VIRTIO, 0x78, \ struct vhost_vdpa_iova_range) - /* Get the config size */ #define VHOST_VDPA_GET_CONFIG_SIZE _IOR(VHOST_VIRTIO, 0x79, __u32) /* Get the count of all virtqueues */ #define VHOST_VDPA_GET_VQS_COUNT _IOR(VHOST_VIRTIO, 0x80, __u32) +/* Get the number of virtqueue groups. */ +#define VHOST_VDPA_GET_GROUP_NUM _IOR(VHOST_VIRTIO, 0x81, __u32) + +/* Get the number of address spaces. */ +#define VHOST_VDPA_GET_AS_NUM _IOR(VHOST_VIRTIO, 0x7A, unsigned int) + +/* Get the group for a virtqueue: read index, write group in num. + * The virtqueue index is stored in the index field of + * vhost_vring_state. The group for this specific virtqueue is + * returned via the num field of vhost_vring_state. + */ +#define VHOST_VDPA_GET_VRING_GROUP _IOWR(VHOST_VIRTIO, 0x7B, \ + struct vhost_vring_state) +/* Set the ASID for a virtqueue group. The group index is stored in + * the index field of vhost_vring_state; the ASID associated with this + * group is stored in the num field of vhost_vring_state. + */ +#define VHOST_VDPA_SET_GROUP_ASID _IOW(VHOST_VIRTIO, 0x7C, \ + struct vhost_vring_state) + #endif diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat index 5a5bd74f55bd..9c366b3a676d 100755 --- a/tools/kvm/kvm_stat/kvm_stat +++ b/tools/kvm/kvm_stat/kvm_stat @@ -1646,7 +1646,8 @@ Press any other key to refresh statistics immediately. .format(values)) if len(pids) > 1: sys.exit('Error: Multiple processes found (pids: {}). 
Use "-p"' - ' to specify the desired pid'.format(" ".join(pids))) + ' to specify the desired pid' + .format(" ".join(map(str, pids)))) namespace.pid = pids[0] argparser = argparse.ArgumentParser(description=description_text, diff --git a/tools/lib/perf/evsel.c b/tools/lib/perf/evsel.c index c1d58673f6ef..952f3520d5c2 100644 --- a/tools/lib/perf/evsel.c +++ b/tools/lib/perf/evsel.c @@ -149,23 +149,30 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus, int fd, group_fd, *evsel_fd; evsel_fd = FD(evsel, idx, thread); - if (evsel_fd == NULL) - return -EINVAL; + if (evsel_fd == NULL) { + err = -EINVAL; + goto out; + } err = get_group_fd(evsel, idx, thread, &group_fd); if (err < 0) - return err; + goto out; fd = sys_perf_event_open(&evsel->attr, threads->map[thread].pid, cpu, group_fd, 0); - if (fd < 0) - return -errno; + if (fd < 0) { + err = -errno; + goto out; + } *evsel_fd = fd; } } +out: + if (err) + perf_evsel__close(evsel); return err; } diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c index a75bf11585b5..54d4e508a092 100644 --- a/tools/perf/builtin-inject.c +++ b/tools/perf/builtin-inject.c @@ -891,7 +891,9 @@ static int copy_kcore_dir(struct perf_inject *inject) if (ret < 0) return ret; pr_debug("%s\n", cmd); - return system(cmd); + ret = system(cmd); + free(cmd); + return ret; } static int output_fd(struct perf_inject *inject) @@ -916,7 +918,7 @@ static int __cmd_inject(struct perf_inject *inject) inject->tool.tracing_data = perf_event__repipe_tracing_data; } - output_data_offset = session->header.data_offset; + output_data_offset = perf_session__data_offset(session->evlist); if (inject->build_id_all) { inject->tool.mmap = perf_event__repipe_buildid_mmap; diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 4ce87a8eb7d7..d2ecd4d29624 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -2586,6 +2586,8 @@ int cmd_stat(int argc, const char **argv) if (evlist__initialize_ctlfd(evsel_list, stat_config.ctl_fd, stat_config.ctl_fd_ack)) goto out; + /* Enable ignoring missing threads when -p option is defined. 
*/ + evlist__first(evsel_list)->ignore_missing_thread = target.pid; status = 0; for (run_idx = 0; forever || run_idx < stat_config.run_count; run_idx++) { if (stat_config.run_count != 1 && verbose > 0) diff --git a/tools/perf/tests/bp_account.c b/tools/perf/tests/bp_account.c index d1ebb5561e5b..6f921db33cf9 100644 --- a/tools/perf/tests/bp_account.c +++ b/tools/perf/tests/bp_account.c @@ -151,11 +151,21 @@ static int detect_ioctl(void) static int detect_share(int wp_cnt, int bp_cnt) { struct perf_event_attr attr; - int i, fd[wp_cnt + bp_cnt], ret; + int i, *fd = NULL, ret = -1; + + if (wp_cnt + bp_cnt == 0) + return 0; + + fd = malloc(sizeof(int) * (wp_cnt + bp_cnt)); + if (!fd) + return -1; for (i = 0; i < wp_cnt; i++) { fd[i] = wp_event((void *)&the_var, &attr); - TEST_ASSERT_VAL("failed to create wp\n", fd[i] != -1); + if (fd[i] == -1) { + pr_err("failed to create wp\n"); + goto out; + } } for (; i < (bp_cnt + wp_cnt); i++) { @@ -166,9 +176,11 @@ static int detect_share(int wp_cnt, int bp_cnt) ret = i != (bp_cnt + wp_cnt); +out: while (i--) close(fd[i]); + free(fd); return ret; } diff --git a/tools/perf/tests/expr.c b/tools/perf/tests/expr.c index d54c5371c6a6..5c0032fe93ae 100644 --- a/tools/perf/tests/expr.c +++ b/tools/perf/tests/expr.c @@ -97,6 +97,8 @@ static int test__expr(struct test_suite *t __maybe_unused, int subtest __maybe_u ret |= test(ctx, "2.2 > 2.2", 0); ret |= test(ctx, "2.2 < 1.1", 0); ret |= test(ctx, "1.1 > 2.2", 0); + ret |= test(ctx, "1.1e10 < 1.1e100", 1); + ret |= test(ctx, "1.1e2 > 1.1e-2", 1); if (ret) { expr__ctx_free(ctx); diff --git a/tools/perf/tests/shell/lib/perf_csv_output_lint.py b/tools/perf/tests/shell/lib/perf_csv_output_lint.py deleted file mode 100644 index 714f283cfb1b..000000000000 --- a/tools/perf/tests/shell/lib/perf_csv_output_lint.py +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/python -# SPDX-License-Identifier: GPL-2.0 - -import argparse -import sys - -# Basic sanity check of perf CSV output as specified in the man page. -# Currently just checks the number of fields per line in output. - -ap = argparse.ArgumentParser() -ap.add_argument('--no-args', action='store_true') -ap.add_argument('--interval', action='store_true') -ap.add_argument('--system-wide-no-aggr', action='store_true') -ap.add_argument('--system-wide', action='store_true') -ap.add_argument('--event', action='store_true') -ap.add_argument('--per-core', action='store_true') -ap.add_argument('--per-thread', action='store_true') -ap.add_argument('--per-die', action='store_true') -ap.add_argument('--per-node', action='store_true') -ap.add_argument('--per-socket', action='store_true') -ap.add_argument('--separator', default=',', nargs='?') -args = ap.parse_args() - -Lines = sys.stdin.readlines() - -def check_csv_output(exp): - for line in Lines: - if 'failed' not in line: - count = line.count(args.separator) - if count != exp: - sys.stdout.write(''.join(Lines)) - raise RuntimeError(f'wrong number of fields. 
expected {exp} in {line}') - -try: - if args.no_args or args.system_wide or args.event: - expected_items = 6 - elif args.interval or args.per_thread or args.system_wide_no_aggr: - expected_items = 7 - elif args.per_core or args.per_socket or args.per_node or args.per_die: - expected_items = 8 - else: - ap.print_help() - raise RuntimeError('No checking option specified') - check_csv_output(expected_items) - -except: - sys.stdout.write('Test failed for input: ' + ''.join(Lines)) - raise diff --git a/tools/perf/tests/shell/stat+csv_output.sh b/tools/perf/tests/shell/stat+csv_output.sh index 983220ef3cb4..38c26f3ef4c1 100755 --- a/tools/perf/tests/shell/stat+csv_output.sh +++ b/tools/perf/tests/shell/stat+csv_output.sh @@ -6,20 +6,41 @@ set -e -pythonchecker=$(dirname $0)/lib/perf_csv_output_lint.py -if [ "x$PYTHON" == "x" ] -then - if which python3 > /dev/null - then - PYTHON=python3 - elif which python > /dev/null - then - PYTHON=python - else - echo Skipping test, python not detected please set environment variable PYTHON. - exit 2 - fi -fi +function commachecker() +{ + local -i cnt=0 exp=0 + + case "$1" + in "--no-args") exp=6 + ;; "--system-wide") exp=6 + ;; "--event") exp=6 + ;; "--interval") exp=7 + ;; "--per-thread") exp=7 + ;; "--system-wide-no-aggr") exp=7 + [ $(uname -m) = "s390x" ] && exp=6 + ;; "--per-core") exp=8 + ;; "--per-socket") exp=8 + ;; "--per-node") exp=8 + ;; "--per-die") exp=8 + esac + + while read line + do + # Check for lines beginning with Failed + x=${line:0:6} + [ "$x" = "Failed" ] && continue + + # Count the number of commas + x=$(echo $line | tr -d -c ',') + cnt="${#x}" + # echo $line $cnt + [ "$cnt" -ne "$exp" ] && { + echo "wrong number of fields. expected $exp in $line" 1>&2 + exit 1; + } + done + return 0 +} # Return true if perf_event_paranoid is > $1 and not running as root. 
function ParanoidAndNotRoot() @@ -30,7 +51,7 @@ function ParanoidAndNotRoot() check_no_args() { echo -n "Checking CSV output: no args " - perf stat -x, true 2>&1 | $PYTHON $pythonchecker --no-args + perf stat -x, true 2>&1 | commachecker --no-args echo "[Success]" } @@ -42,7 +63,7 @@ check_system_wide() echo "[Skip] paranoid and not root" return fi - perf stat -x, -a true 2>&1 | $PYTHON $pythonchecker --system-wide + perf stat -x, -a true 2>&1 | commachecker --system-wide echo "[Success]" } @@ -55,14 +76,14 @@ check_system_wide_no_aggr() return fi echo -n "Checking CSV output: system wide no aggregation " - perf stat -x, -A -a --no-merge true 2>&1 | $PYTHON $pythonchecker --system-wide-no-aggr + perf stat -x, -A -a --no-merge true 2>&1 | commachecker --system-wide-no-aggr echo "[Success]" } check_interval() { echo -n "Checking CSV output: interval " - perf stat -x, -I 1000 true 2>&1 | $PYTHON $pythonchecker --interval + perf stat -x, -I 1000 true 2>&1 | commachecker --interval echo "[Success]" } @@ -70,7 +91,7 @@ check_interval() check_event() { echo -n "Checking CSV output: event " - perf stat -x, -e cpu-clock true 2>&1 | $PYTHON $pythonchecker --event + perf stat -x, -e cpu-clock true 2>&1 | commachecker --event echo "[Success]" } @@ -82,7 +103,7 @@ check_per_core() echo "[Skip] paranoid and not root" return fi - perf stat -x, --per-core -a true 2>&1 | $PYTHON $pythonchecker --per-core + perf stat -x, --per-core -a true 2>&1 | commachecker --per-core echo "[Success]" } @@ -94,7 +115,7 @@ check_per_thread() echo "[Skip] paranoid and not root" return fi - perf stat -x, --per-thread -a true 2>&1 | $PYTHON $pythonchecker --per-thread + perf stat -x, --per-thread -a true 2>&1 | commachecker --per-thread echo "[Success]" } @@ -106,7 +127,7 @@ check_per_die() echo "[Skip] paranoid and not root" return fi - perf stat -x, --per-die -a true 2>&1 | $PYTHON $pythonchecker --per-die + perf stat -x, --per-die -a true 2>&1 | commachecker --per-die echo "[Success]" } @@ -118,7 +139,7 @@ check_per_node() echo "[Skip] paranoid and not root" return fi - perf stat -x, --per-node -a true 2>&1 | $PYTHON $pythonchecker --per-node + perf stat -x, --per-node -a true 2>&1 | commachecker --per-node echo "[Success]" } @@ -130,7 +151,7 @@ check_per_socket() echo "[Skip] paranoid and not root" return fi - perf stat -x, --per-socket -a true 2>&1 | $PYTHON $pythonchecker --per-socket + perf stat -x, --per-socket -a true 2>&1 | commachecker --per-socket echo "[Success]" } diff --git a/tools/perf/tests/shell/test_arm_callgraph_fp.sh b/tools/perf/tests/shell/test_arm_callgraph_fp.sh index 6ffbb27afaba..ec108d45d3c6 100755 --- a/tools/perf/tests/shell/test_arm_callgraph_fp.sh +++ b/tools/perf/tests/shell/test_arm_callgraph_fp.sh @@ -43,7 +43,7 @@ CFLAGS="-g -O0 -fno-inline -fno-omit-frame-pointer" cc $CFLAGS $TEST_PROGRAM_SOURCE -o $TEST_PROGRAM || exit 1 # Add a 1 second delay to skip samples that are not in the leaf() function -perf record -o $PERF_DATA --call-graph fp -e cycles//u -D 1000 -- $TEST_PROGRAM 2> /dev/null & +perf record -o $PERF_DATA --call-graph fp -e cycles//u -D 1000 --user-callchains -- $TEST_PROGRAM 2> /dev/null & PID=$! echo " + Recording (PID=$PID)..." diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c index d23a9e322ff5..0b4f61b6cc6b 100644 --- a/tools/perf/tests/topology.c +++ b/tools/perf/tests/topology.c @@ -115,7 +115,7 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map) * physical_package_id will be set to -1. 
Hence skip this * test if physical_package_id returns -1 for cpu from perf_cpu_map. */ - if (strncmp(session->header.env.arch, "powerpc", 7)) { + if (!strncmp(session->header.env.arch, "ppc64le", 7)) { if (cpu__get_socket_id(perf_cpu_map__cpu(map, 0)) == -1) return TEST_SKIP; } diff --git a/tools/perf/trace/beauty/arch_errno_names.sh b/tools/perf/trace/beauty/arch_errno_names.sh index 2c5f72fa8108..37c53bac5f56 100755 --- a/tools/perf/trace/beauty/arch_errno_names.sh +++ b/tools/perf/trace/beauty/arch_errno_names.sh @@ -33,23 +33,13 @@ create_errno_lookup_func() local arch=$(arch_string "$1") local nr name - cat <<EoFuncBegin -static const char *errno_to_name__$arch(int err) -{ - switch (err) { -EoFuncBegin + printf "static const char *errno_to_name__%s(int err)\n{\n\tswitch (err) {\n" $arch while read name nr; do printf '\tcase %d: return "%s";\n' $nr $name done - cat <<EoFuncEnd - default: - return "(unknown)"; - } -} - -EoFuncEnd + printf '\tdefault: return "(unknown)";\n\t}\n}\n' } process_arch() diff --git a/tools/perf/trace/beauty/include/linux/socket.h b/tools/perf/trace/beauty/include/linux/socket.h index 6f85f5d957ef..17311ad9f9af 100644 --- a/tools/perf/trace/beauty/include/linux/socket.h +++ b/tools/perf/trace/beauty/include/linux/socket.h @@ -50,6 +50,9 @@ struct linger { struct msghdr { void *msg_name; /* ptr to socket address structure */ int msg_namelen; /* size of socket address structure */ + + int msg_inq; /* output, data left in socket */ + struct iov_iter msg_iter; /* data */ /* @@ -62,8 +65,9 @@ struct msghdr { void __user *msg_control_user; }; bool msg_control_is_user : 1; - __kernel_size_t msg_controllen; /* ancillary data buffer length */ + bool msg_get_inq : 1;/* return INQ after receive */ unsigned int msg_flags; /* flags on received message */ + __kernel_size_t msg_controllen; /* ancillary data buffer length */ struct kiocb *msg_iocb; /* ptr to iocb for async requests */ }; @@ -434,6 +438,7 @@ extern struct file *do_accept(struct file *file, unsigned file_flags, extern int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr, int __user *upeer_addrlen, int flags); extern int __sys_socket(int family, int type, int protocol); +extern struct file *__sys_socket_file(int family, int type, int protocol); extern int __sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen); extern int __sys_connect_file(struct file *file, struct sockaddr_storage *addr, int addrlen, int file_flags); diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c index 1a80151baed9..d040406f3314 100644 --- a/tools/perf/util/arm-spe.c +++ b/tools/perf/util/arm-spe.c @@ -387,26 +387,16 @@ static int arm_spe__synth_instruction_sample(struct arm_spe_queue *speq, return arm_spe_deliver_synth_event(spe, speq, event, &sample); } -#define SPE_MEM_TYPE (ARM_SPE_L1D_ACCESS | ARM_SPE_L1D_MISS | \ - ARM_SPE_LLC_ACCESS | ARM_SPE_LLC_MISS | \ - ARM_SPE_REMOTE_ACCESS) - -static bool arm_spe__is_memory_event(enum arm_spe_sample_type type) -{ - if (type & SPE_MEM_TYPE) - return true; - - return false; -} - static u64 arm_spe__synth_data_source(const struct arm_spe_record *record) { union perf_mem_data_src data_src = { 0 }; if (record->op == ARM_SPE_LD) data_src.mem_op = PERF_MEM_OP_LOAD; - else + else if (record->op == ARM_SPE_ST) data_src.mem_op = PERF_MEM_OP_STORE; + else + return 0; if (record->type & (ARM_SPE_LLC_ACCESS | ARM_SPE_LLC_MISS)) { data_src.mem_lvl = PERF_MEM_LVL_L3; @@ -510,7 +500,11 @@ static int arm_spe_sample(struct arm_spe_queue *speq) return err; } - if 
(spe->sample_memory && arm_spe__is_memory_event(record->type)) { + /* + * When data_src is zero, it means the record is not a memory operation, + * so skip synthesizing a memory sample in this case. + */ + if (spe->sample_memory && data_src) { err = arm_spe__synth_mem_sample(speq, spe->memory_id, data_src); if (err) return err; diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c index 82f3d46bea70..328668f38c69 100644 --- a/tools/perf/util/build-id.c +++ b/tools/perf/util/build-id.c @@ -872,6 +872,30 @@ out_free: return err; } +static int filename__read_build_id_ns(const char *filename, + struct build_id *bid, + struct nsinfo *nsi) +{ + struct nscookie nsc; + int ret; + + nsinfo__mountns_enter(nsi, &nsc); + ret = filename__read_build_id(filename, bid); + nsinfo__mountns_exit(&nsc); + + return ret; +} + +static bool dso__build_id_mismatch(struct dso *dso, const char *name) +{ + struct build_id bid; + + if (filename__read_build_id_ns(name, &bid, dso->nsinfo) < 0) + return false; + + return !dso__build_id_equal(dso, &bid); +} + static int dso__cache_build_id(struct dso *dso, struct machine *machine, void *priv __maybe_unused) { @@ -886,6 +910,10 @@ static int dso__cache_build_id(struct dso *dso, struct machine *machine, is_kallsyms = true; name = machine->mmap_name; } + + if (!is_kallsyms && dso__build_id_mismatch(dso, name)) + return 0; + return build_id_cache__add_b(&dso->bid, name, dso->nsinfo, is_kallsyms, is_vdso); } diff --git a/tools/perf/util/expr.l b/tools/perf/util/expr.l index 0a13eb20c814..4dc8edbfd9ce 100644 --- a/tools/perf/util/expr.l +++ b/tools/perf/util/expr.l @@ -91,7 +91,7 @@ static int literal(yyscan_t scanner) } %} -number ([0-9]+\.?[0-9]*|[0-9]*\.?[0-9]+) +number ([0-9]+\.?[0-9]*|[0-9]*\.?[0-9]+)(e-?[0-9]+)? sch [-,=] spec \\{sch} diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 53332da100e8..6ad629db63b7 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -3686,6 +3686,20 @@ int perf_session__write_header(struct perf_session *session, return perf_session__do_write_header(session, evlist, fd, at_exit, NULL); } +size_t perf_session__data_offset(const struct evlist *evlist) +{ + struct evsel *evsel; + size_t data_offset; + + data_offset = sizeof(struct perf_file_header); + evlist__for_each_entry(evlist, evsel) { + data_offset += evsel->core.ids * sizeof(u64); + } + data_offset += evlist->core.nr_entries * sizeof(struct perf_file_attr); + + return data_offset; +} + int perf_session__inject_header(struct perf_session *session, struct evlist *evlist, int fd, diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h index 08563c1f1bff..56916dabce7b 100644 --- a/tools/perf/util/header.h +++ b/tools/perf/util/header.h @@ -136,6 +136,8 @@ int perf_session__inject_header(struct perf_session *session, int fd, struct feat_copier *fc); +size_t perf_session__data_offset(const struct evlist *evlist); + void perf_header__set_feat(struct perf_header *header, int feat); void perf_header__clear_feat(struct perf_header *header, int feat); bool perf_header__has_feat(const struct perf_header *header, int feat); diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c index ee8fcfa115e5..8f7baeabc5cf 100644 --- a/tools/perf/util/metricgroup.c +++ b/tools/perf/util/metricgroup.c @@ -1372,6 +1372,7 @@ static int parse_ids(bool metric_no_merge, struct perf_pmu *fake_pmu, *out_evlist = NULL; if (!metric_no_merge || hashmap__size(ids->ids) == 0) { + bool added_event = false; int i; /* * We may fail to share events 
between metrics because a tool @@ -1393,8 +1394,16 @@ static int parse_ids(bool metric_no_merge, struct perf_pmu *fake_pmu, if (!tmp) return -ENOMEM; ids__insert(ids->ids, tmp); + added_event = true; } } + if (!added_event && hashmap__size(ids->ids) == 0) { + char *tmp = strdup("duration_time"); + + if (!tmp) + return -ENOMEM; + ids__insert(ids->ids, tmp); + } } ret = metricgroup__build_event_string(&events, ids, modifier, has_constraint); diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c index 37622699c91a..6e5b8cce47bf 100644 --- a/tools/perf/util/unwind-libunwind-local.c +++ b/tools/perf/util/unwind-libunwind-local.c @@ -174,7 +174,7 @@ static int elf_section_address_and_offset(int fd, const char *name, u64 *address Elf *elf; GElf_Ehdr ehdr; GElf_Shdr shdr; - int ret; + int ret = -1; elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); if (elf == NULL) diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c index 83ef55e3caa4..2974b44f80fa 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c @@ -121,24 +121,24 @@ static void kprobe_multi_link_api_subtest(void) }) GET_ADDR("bpf_fentry_test1", addrs[0]); - GET_ADDR("bpf_fentry_test2", addrs[1]); - GET_ADDR("bpf_fentry_test3", addrs[2]); - GET_ADDR("bpf_fentry_test4", addrs[3]); - GET_ADDR("bpf_fentry_test5", addrs[4]); - GET_ADDR("bpf_fentry_test6", addrs[5]); - GET_ADDR("bpf_fentry_test7", addrs[6]); + GET_ADDR("bpf_fentry_test3", addrs[1]); + GET_ADDR("bpf_fentry_test4", addrs[2]); + GET_ADDR("bpf_fentry_test5", addrs[3]); + GET_ADDR("bpf_fentry_test6", addrs[4]); + GET_ADDR("bpf_fentry_test7", addrs[5]); + GET_ADDR("bpf_fentry_test2", addrs[6]); GET_ADDR("bpf_fentry_test8", addrs[7]); #undef GET_ADDR - cookies[0] = 1; - cookies[1] = 2; - cookies[2] = 3; - cookies[3] = 4; - cookies[4] = 5; - cookies[5] = 6; - cookies[6] = 7; - cookies[7] = 8; + cookies[0] = 1; /* bpf_fentry_test1 */ + cookies[1] = 2; /* bpf_fentry_test3 */ + cookies[2] = 3; /* bpf_fentry_test4 */ + cookies[3] = 4; /* bpf_fentry_test5 */ + cookies[4] = 5; /* bpf_fentry_test6 */ + cookies[5] = 6; /* bpf_fentry_test7 */ + cookies[6] = 7; /* bpf_fentry_test2 */ + cookies[7] = 8; /* bpf_fentry_test8 */ opts.kprobe_multi.addrs = (const unsigned long *) &addrs; opts.kprobe_multi.cnt = ARRAY_SIZE(addrs); @@ -149,14 +149,14 @@ static void kprobe_multi_link_api_subtest(void) if (!ASSERT_GE(link1_fd, 0, "link1_fd")) goto cleanup; - cookies[0] = 8; - cookies[1] = 7; - cookies[2] = 6; - cookies[3] = 5; - cookies[4] = 4; - cookies[5] = 3; - cookies[6] = 2; - cookies[7] = 1; + cookies[0] = 8; /* bpf_fentry_test1 */ + cookies[1] = 7; /* bpf_fentry_test3 */ + cookies[2] = 6; /* bpf_fentry_test4 */ + cookies[3] = 5; /* bpf_fentry_test5 */ + cookies[4] = 4; /* bpf_fentry_test6 */ + cookies[5] = 3; /* bpf_fentry_test7 */ + cookies[6] = 2; /* bpf_fentry_test2 */ + cookies[7] = 1; /* bpf_fentry_test8 */ opts.kprobe_multi.flags = BPF_F_KPROBE_MULTI_RETURN; prog_fd = bpf_program__fd(skel->progs.test_kretprobe); @@ -181,12 +181,12 @@ static void kprobe_multi_attach_api_subtest(void) struct kprobe_multi *skel = NULL; const char *syms[8] = { "bpf_fentry_test1", - "bpf_fentry_test2", "bpf_fentry_test3", "bpf_fentry_test4", "bpf_fentry_test5", "bpf_fentry_test6", "bpf_fentry_test7", + "bpf_fentry_test2", "bpf_fentry_test8", }; __u64 cookies[8]; @@ -198,14 +198,14 @@ static void kprobe_multi_attach_api_subtest(void) 
skel->bss->pid = getpid(); skel->bss->test_cookie = true; - cookies[0] = 1; - cookies[1] = 2; - cookies[2] = 3; - cookies[3] = 4; - cookies[4] = 5; - cookies[5] = 6; - cookies[6] = 7; - cookies[7] = 8; + cookies[0] = 1; /* bpf_fentry_test1 */ + cookies[1] = 2; /* bpf_fentry_test3 */ + cookies[2] = 3; /* bpf_fentry_test4 */ + cookies[3] = 4; /* bpf_fentry_test5 */ + cookies[4] = 5; /* bpf_fentry_test6 */ + cookies[5] = 6; /* bpf_fentry_test7 */ + cookies[6] = 7; /* bpf_fentry_test2 */ + cookies[7] = 8; /* bpf_fentry_test8 */ opts.syms = syms; opts.cnt = ARRAY_SIZE(syms); @@ -216,14 +216,14 @@ static void kprobe_multi_attach_api_subtest(void) if (!ASSERT_OK_PTR(link1, "bpf_program__attach_kprobe_multi_opts")) goto cleanup; - cookies[0] = 8; - cookies[1] = 7; - cookies[2] = 6; - cookies[3] = 5; - cookies[4] = 4; - cookies[5] = 3; - cookies[6] = 2; - cookies[7] = 1; + cookies[0] = 8; /* bpf_fentry_test1 */ + cookies[1] = 7; /* bpf_fentry_test3 */ + cookies[2] = 6; /* bpf_fentry_test4 */ + cookies[3] = 5; /* bpf_fentry_test5 */ + cookies[4] = 4; /* bpf_fentry_test6 */ + cookies[5] = 3; /* bpf_fentry_test7 */ + cookies[6] = 2; /* bpf_fentry_test2 */ + cookies[7] = 1; /* bpf_fentry_test8 */ opts.retprobe = true; diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c index d9aad15e0d24..02bb8cbf9194 100644 --- a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c +++ b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c @@ -395,6 +395,18 @@ static void test_func_map_prog_compatibility(void) "./test_attach_probe.o"); } +static void test_func_replace_global_func(void) +{ + const char *prog_name[] = { + "freplace/test_pkt_access", + }; + + test_fexit_bpf2bpf_common("./freplace_global_func.o", + "./test_pkt_access.o", + ARRAY_SIZE(prog_name), + prog_name, false, NULL); +} + /* NOTE: affect other tests, must run in serial mode */ void serial_test_fexit_bpf2bpf(void) { @@ -416,4 +428,6 @@ void serial_test_fexit_bpf2bpf(void) test_func_replace_multi(); if (test__start_subtest("fmod_ret_freplace")) test_fmod_ret_freplace(); + if (test__start_subtest("func_replace_global_func")) + test_func_replace_global_func(); } diff --git a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c index 586dc52d6fb9..5b93d5d0bd93 100644 --- a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c +++ b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c @@ -364,6 +364,9 @@ static int get_syms(char ***symsp, size_t *cntp) continue; if (!strncmp(name, "rcu_", 4)) continue; + if (!strncmp(name, "__ftrace_invalid_address__", + sizeof("__ftrace_invalid_address__") - 1)) + continue; err = hashmap__add(map, name, NULL); if (err) { free(name); diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c index c4da87ec3ba4..19c70880cfb3 100644 --- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c +++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c @@ -831,6 +831,59 @@ out: bpf_object__close(obj); } +#include "tailcall_bpf2bpf6.skel.h" + +/* Tail call counting works even when there is data on stack which is + * not aligned to 8 bytes. 
+ */ +static void test_tailcall_bpf2bpf_6(void) +{ + struct tailcall_bpf2bpf6 *obj; + int err, map_fd, prog_fd, main_fd, data_fd, i, val; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); + + obj = tailcall_bpf2bpf6__open_and_load(); + if (!ASSERT_OK_PTR(obj, "open and load")) + return; + + main_fd = bpf_program__fd(obj->progs.entry); + if (!ASSERT_GE(main_fd, 0, "entry prog fd")) + goto out; + + map_fd = bpf_map__fd(obj->maps.jmp_table); + if (!ASSERT_GE(map_fd, 0, "jmp_table map fd")) + goto out; + + prog_fd = bpf_program__fd(obj->progs.classifier_0); + if (!ASSERT_GE(prog_fd, 0, "classifier_0 prog fd")) + goto out; + + i = 0; + err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY); + if (!ASSERT_OK(err, "jmp_table map update")) + goto out; + + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "entry prog test run"); + ASSERT_EQ(topts.retval, 0, "tailcall retval"); + + data_fd = bpf_map__fd(obj->maps.bss); + if (!ASSERT_GE(data_fd, 0, "bss map fd")) + goto out; + + i = 0; + err = bpf_map_lookup_elem(data_fd, &i, &val); + ASSERT_OK(err, "bss map lookup"); + ASSERT_EQ(val, 1, "done flag is set"); + +out: + tailcall_bpf2bpf6__destroy(obj); +} + void test_tailcalls(void) { if (test__start_subtest("tailcall_1")) @@ -855,4 +908,6 @@ test_tailcall_bpf2bpf_4(false); if (test__start_subtest("tailcall_bpf2bpf_5")) test_tailcall_bpf2bpf_4(true); + if (test__start_subtest("tailcall_bpf2bpf_6")) + test_tailcall_bpf2bpf_6(); } diff --git a/tools/testing/selftests/bpf/progs/freplace_global_func.c b/tools/testing/selftests/bpf/progs/freplace_global_func.c new file mode 100644 index 000000000000..96cb61a6ce87 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/freplace_global_func.c @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> + +__noinline +int test_ctx_global_func(struct __sk_buff *skb) +{ + volatile int retval = 1; + return retval; +} + +SEC("freplace/test_pkt_access") +int new_test_pkt_access(struct __sk_buff *skb) +{ + return test_ctx_global_func(skb); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/kprobe_multi.c b/tools/testing/selftests/bpf/progs/kprobe_multi.c index 93510f4f0f3a..08f95a8155d1 100644 --- a/tools/testing/selftests/bpf/progs/kprobe_multi.c +++ b/tools/testing/selftests/bpf/progs/kprobe_multi.c @@ -54,21 +54,21 @@ static void kprobe_multi_check(void *ctx, bool is_return) if (is_return) { SET(kretprobe_test1_result, &bpf_fentry_test1, 8); - SET(kretprobe_test2_result, &bpf_fentry_test2, 7); - SET(kretprobe_test3_result, &bpf_fentry_test3, 6); - SET(kretprobe_test4_result, &bpf_fentry_test4, 5); - SET(kretprobe_test5_result, &bpf_fentry_test5, 4); - SET(kretprobe_test6_result, &bpf_fentry_test6, 3); - SET(kretprobe_test7_result, &bpf_fentry_test7, 2); + SET(kretprobe_test2_result, &bpf_fentry_test2, 2); + SET(kretprobe_test3_result, &bpf_fentry_test3, 7); + SET(kretprobe_test4_result, &bpf_fentry_test4, 6); + SET(kretprobe_test5_result, &bpf_fentry_test5, 5); + SET(kretprobe_test6_result, &bpf_fentry_test6, 4); + SET(kretprobe_test7_result, &bpf_fentry_test7, 3); SET(kretprobe_test8_result, &bpf_fentry_test8, 1); } else { SET(kprobe_test1_result, &bpf_fentry_test1, 1); - SET(kprobe_test2_result, &bpf_fentry_test2, 2); - SET(kprobe_test3_result, &bpf_fentry_test3, 3); - SET(kprobe_test4_result, &bpf_fentry_test4, 4); - SET(kprobe_test5_result, &bpf_fentry_test5, 5); -
SET(kprobe_test6_result, &bpf_fentry_test6, 6); - SET(kprobe_test7_result, &bpf_fentry_test7, 7); + SET(kprobe_test2_result, &bpf_fentry_test2, 7); + SET(kprobe_test3_result, &bpf_fentry_test3, 2); + SET(kprobe_test4_result, &bpf_fentry_test4, 3); + SET(kprobe_test5_result, &bpf_fentry_test5, 4); + SET(kprobe_test6_result, &bpf_fentry_test6, 5); + SET(kprobe_test7_result, &bpf_fentry_test7, 6); SET(kprobe_test8_result, &bpf_fentry_test8, 8); } diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf6.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf6.c new file mode 100644 index 000000000000..41ce83da78e8 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf6.c @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> + +#define __unused __attribute__((unused)) + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 1); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(__u32)); +} jmp_table SEC(".maps"); + +int done = 0; + +SEC("tc") +int classifier_0(struct __sk_buff *skb __unused) +{ + done = 1; + return 0; +} + +static __noinline +int subprog_tail(struct __sk_buff *skb) +{ + /* Don't propagate the constant to the caller */ + volatile int ret = 1; + + bpf_tail_call_static(skb, &jmp_table, 0); + return ret; +} + +SEC("tc") +int entry(struct __sk_buff *skb) +{ + /* Have data on stack which size is not a multiple of 8 */ + volatile char arr[1] = {}; + + return subprog_tail(skb); +} + +char __license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/dma/Makefile b/tools/testing/selftests/dma/Makefile index aa8e8b5b3864..cd8c5ece1cba 100644 --- a/tools/testing/selftests/dma/Makefile +++ b/tools/testing/selftests/dma/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 CFLAGS += -I../../../../usr/include/ +CFLAGS += -I../../../../include/ TEST_GEN_PROGS := dma_map_benchmark diff --git a/tools/testing/selftests/dma/dma_map_benchmark.c b/tools/testing/selftests/dma/dma_map_benchmark.c index c3b3c09e995e..5c997f17fcbd 100644 --- a/tools/testing/selftests/dma/dma_map_benchmark.c +++ b/tools/testing/selftests/dma/dma_map_benchmark.c @@ -10,8 +10,8 @@ #include <unistd.h> #include <sys/ioctl.h> #include <sys/mman.h> -#include <linux/map_benchmark.h> #include <linux/types.h> +#include <linux/map_benchmark.h> #define NSEC_PER_MSEC 1000000L diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 81470a99ed1c..22423c871ed6 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -37,11 +37,38 @@ ifeq ($(ARCH),riscv) UNAME_M := riscv endif -LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/rbtree.c lib/sparsebit.c lib/test_util.c lib/guest_modes.c lib/perf_test_util.c -LIBKVM_x86_64 = lib/x86_64/apic.c lib/x86_64/processor.c lib/x86_64/vmx.c lib/x86_64/svm.c lib/x86_64/ucall.c lib/x86_64/handlers.S -LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c lib/aarch64/handlers.S lib/aarch64/spinlock.c lib/aarch64/gic.c lib/aarch64/gic_v3.c lib/aarch64/vgic.c -LIBKVM_s390x = lib/s390x/processor.c lib/s390x/ucall.c lib/s390x/diag318_test_handler.c -LIBKVM_riscv = lib/riscv/processor.c lib/riscv/ucall.c +LIBKVM += lib/assert.c +LIBKVM += lib/elf.c +LIBKVM += lib/guest_modes.c +LIBKVM += lib/io.c +LIBKVM += lib/kvm_util.c +LIBKVM += lib/perf_test_util.c +LIBKVM += lib/rbtree.c +LIBKVM += lib/sparsebit.c +LIBKVM += lib/test_util.c + +LIBKVM_x86_64 += lib/x86_64/apic.c +LIBKVM_x86_64 += 
lib/x86_64/handlers.S +LIBKVM_x86_64 += lib/x86_64/perf_test_util.c +LIBKVM_x86_64 += lib/x86_64/processor.c +LIBKVM_x86_64 += lib/x86_64/svm.c +LIBKVM_x86_64 += lib/x86_64/ucall.c +LIBKVM_x86_64 += lib/x86_64/vmx.c + +LIBKVM_aarch64 += lib/aarch64/gic.c +LIBKVM_aarch64 += lib/aarch64/gic_v3.c +LIBKVM_aarch64 += lib/aarch64/handlers.S +LIBKVM_aarch64 += lib/aarch64/processor.c +LIBKVM_aarch64 += lib/aarch64/spinlock.c +LIBKVM_aarch64 += lib/aarch64/ucall.c +LIBKVM_aarch64 += lib/aarch64/vgic.c + +LIBKVM_s390x += lib/s390x/diag318_test_handler.c +LIBKVM_s390x += lib/s390x/processor.c +LIBKVM_s390x += lib/s390x/ucall.c + +LIBKVM_riscv += lib/riscv/processor.c +LIBKVM_riscv += lib/riscv/ucall.c TEST_GEN_PROGS_x86_64 = x86_64/cpuid_test TEST_GEN_PROGS_x86_64 += x86_64/cr4_cpuid_sync_test @@ -173,12 +200,13 @@ LDFLAGS += -pthread $(no-pie-option) $(pgste-option) # $(TEST_GEN_PROGS) starts with $(OUTPUT)/ include ../lib.mk -STATIC_LIBS := $(OUTPUT)/libkvm.a LIBKVM_C := $(filter %.c,$(LIBKVM)) LIBKVM_S := $(filter %.S,$(LIBKVM)) LIBKVM_C_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM_C)) LIBKVM_S_OBJ := $(patsubst %.S, $(OUTPUT)/%.o, $(LIBKVM_S)) -EXTRA_CLEAN += $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ) $(STATIC_LIBS) cscope.* +LIBKVM_OBJS = $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ) + +EXTRA_CLEAN += $(LIBKVM_OBJS) cscope.* x := $(shell mkdir -p $(sort $(dir $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ)))) $(LIBKVM_C_OBJ): $(OUTPUT)/%.o: %.c @@ -187,13 +215,8 @@ $(LIBKVM_C_OBJ): $(OUTPUT)/%.o: %.c $(LIBKVM_S_OBJ): $(OUTPUT)/%.o: %.S $(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@ -LIBKVM_OBJS = $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ) -$(OUTPUT)/libkvm.a: $(LIBKVM_OBJS) - $(AR) crs $@ $^ - x := $(shell mkdir -p $(sort $(dir $(TEST_GEN_PROGS)))) -all: $(STATIC_LIBS) -$(TEST_GEN_PROGS): $(STATIC_LIBS) +$(TEST_GEN_PROGS): $(LIBKVM_OBJS) cscope: include_paths = $(LINUX_TOOL_INCLUDE) $(LINUX_HDR_PATH) include lib .. cscope: diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c index 7b47ae4f952e..d60a34cdfaee 100644 --- a/tools/testing/selftests/kvm/dirty_log_perf_test.c +++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c @@ -336,8 +336,8 @@ static void run_test(enum vm_guest_mode mode, void *arg) static void help(char *name) { puts(""); - printf("usage: %s [-h] [-i iterations] [-p offset] [-g]" - "[-m mode] [-b vcpu bytes] [-v vcpus] [-o] [-s mem type]" + printf("usage: %s [-h] [-i iterations] [-p offset] [-g] " + "[-m mode] [-n] [-b vcpu bytes] [-v vcpus] [-o] [-s mem type]" "[-x memslots]\n", name); puts(""); printf(" -i: specify iteration counts (default: %"PRIu64")\n", @@ -351,6 +351,7 @@ static void help(char *name) printf(" -p: specify guest physical test memory offset\n" " Warning: a low offset can conflict with the loaded test code.\n"); guest_modes_help(); + printf(" -n: Run the vCPUs in nested mode (L2)\n"); printf(" -b: specify the size of the memory region which should be\n" " dirtied by each vCPU. e.g. 
10M or 3G.\n" " (default: 1G)\n"); @@ -387,7 +388,7 @@ int main(int argc, char *argv[]) guest_modes_append_default(); - while ((opt = getopt(argc, argv, "ghi:p:m:b:f:v:os:x:")) != -1) { + while ((opt = getopt(argc, argv, "ghi:p:m:nb:f:v:os:x:")) != -1) { switch (opt) { case 'g': dirty_log_manual_caps = 0; @@ -401,6 +402,9 @@ int main(int argc, char *argv[]) case 'm': guest_modes_cmdline(optarg); break; + case 'n': + perf_test_args.nested = true; + break; case 'b': guest_percpu_mem_size = parse_size(optarg); break; diff --git a/tools/testing/selftests/kvm/include/perf_test_util.h b/tools/testing/selftests/kvm/include/perf_test_util.h index a86f953d8d36..d822cb670f1c 100644 --- a/tools/testing/selftests/kvm/include/perf_test_util.h +++ b/tools/testing/selftests/kvm/include/perf_test_util.h @@ -30,10 +30,15 @@ struct perf_test_vcpu_args { struct perf_test_args { struct kvm_vm *vm; + /* The starting address and size of the guest test region. */ uint64_t gpa; + uint64_t size; uint64_t guest_page_size; int wr_fract; + /* Run vCPUs in L2 instead of L1, if the architecture supports it. */ + bool nested; + struct perf_test_vcpu_args vcpu_args[KVM_MAX_VCPUS]; }; @@ -49,5 +54,9 @@ void perf_test_set_wr_fract(struct kvm_vm *vm, int wr_fract); void perf_test_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct perf_test_vcpu_args *)); void perf_test_join_vcpu_threads(int vcpus); +void perf_test_guest_code(uint32_t vcpu_id); + +uint64_t perf_test_nested_pages(int nr_vcpus); +void perf_test_setup_nested(struct kvm_vm *vm, int nr_vcpus); #endif /* SELFTEST_KVM_PERF_TEST_UTIL_H */ diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h index d0d51adec76e..6ce185449259 100644 --- a/tools/testing/selftests/kvm/include/x86_64/processor.h +++ b/tools/testing/selftests/kvm/include/x86_64/processor.h @@ -482,13 +482,23 @@ void vcpu_set_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid); struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid); void vm_xsave_req_perm(int bit); -enum x86_page_size { - X86_PAGE_SIZE_4K = 0, - X86_PAGE_SIZE_2M, - X86_PAGE_SIZE_1G, +enum pg_level { + PG_LEVEL_NONE, + PG_LEVEL_4K, + PG_LEVEL_2M, + PG_LEVEL_1G, + PG_LEVEL_512G, + PG_LEVEL_NUM }; -void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, - enum x86_page_size page_size); + +#define PG_LEVEL_SHIFT(_level) ((_level - 1) * 9 + 12) +#define PG_LEVEL_SIZE(_level) (1ull << PG_LEVEL_SHIFT(_level)) + +#define PG_SIZE_4K PG_LEVEL_SIZE(PG_LEVEL_4K) +#define PG_SIZE_2M PG_LEVEL_SIZE(PG_LEVEL_2M) +#define PG_SIZE_1G PG_LEVEL_SIZE(PG_LEVEL_1G) + +void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level); /* * Basic CPU control in CR0 @@ -505,9 +515,6 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, #define X86_CR0_CD (1UL<<30) /* Cache Disable */ #define X86_CR0_PG (1UL<<31) /* Paging */ -/* VMX_EPT_VPID_CAP bits */ -#define VMX_EPT_VPID_CAP_AD_BITS (1ULL << 21) - #define XSTATE_XTILE_CFG_BIT 17 #define XSTATE_XTILE_DATA_BIT 18 diff --git a/tools/testing/selftests/kvm/include/x86_64/vmx.h b/tools/testing/selftests/kvm/include/x86_64/vmx.h index 583ceb0d1457..cc3604f8f1d3 100644 --- a/tools/testing/selftests/kvm/include/x86_64/vmx.h +++ b/tools/testing/selftests/kvm/include/x86_64/vmx.h @@ -96,6 +96,9 @@ #define VMX_MISC_PREEMPTION_TIMER_RATE_MASK 0x0000001f #define VMX_MISC_SAVE_EFER_LMA 0x00000020 +#define VMX_EPT_VPID_CAP_1G_PAGES 0x00020000 +#define VMX_EPT_VPID_CAP_AD_BITS 0x00200000 
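These two VMX_EPT_VPID_CAP_* masks are consumed together with the PG_LEVEL_* helpers that the processor.h hunk above introduces: a caller picks a mapping level, derives the page size from it, and gates huge mappings on bits read from MSR_IA32_VMX_EPT_VPID_CAP. A minimal standalone sketch of that pairing follows; it is illustrative only, not part of the patch — the constants are copied from the hunks above, and the helper name can_map_1g() is ours (the vmx.c hunk further down adds the real ept_1g_pages_supported()).

#include <stdbool.h>
#include <stdint.h>

/* From the processor.h hunk: level 1 = 4K, 2 = 2M, 3 = 1G, 4 = 512G. */
#define PG_LEVEL_SHIFT(_level)	(((_level) - 1) * 9 + 12)
#define PG_LEVEL_SIZE(_level)	(1ull << PG_LEVEL_SHIFT(_level))

/* From the vmx.h hunk above. */
#define VMX_EPT_VPID_CAP_1G_PAGES	0x00020000ull

/* 4K -> 1ull << 12, 2M -> 1ull << 21, 1G -> 1ull << 30. */
_Static_assert(PG_LEVEL_SIZE(3) == (1ull << 30), "PG_LEVEL_1G is 1GiB");

/* Gate 1GiB mappings on the capability MSR, as the selftests do below. */
static bool can_map_1g(uint64_t ept_vpid_cap_msr)
{
	return (ept_vpid_cap_msr & VMX_EPT_VPID_CAP_1G_PAGES) != 0;
}

This mirrors how perf_test_l1_guest_code() later GUEST_ASSERTs ept_1g_pages_supported() before relying on the 1GiB identity map.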
+ #define EXIT_REASON_FAILED_VMENTRY 0x80000000 #define EXIT_REASON_EXCEPTION_NMI 0 #define EXIT_REASON_EXTERNAL_INTERRUPT 1 @@ -606,6 +609,7 @@ bool load_vmcs(struct vmx_pages *vmx); bool nested_vmx_supported(void); void nested_vmx_check_supported(void); +bool ept_1g_pages_supported(void); void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr); @@ -613,6 +617,8 @@ void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr, uint64_t size); void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm, uint32_t memslot); +void nested_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm, + uint64_t addr, uint64_t size); void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm, uint32_t eptp_memslot); void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm); diff --git a/tools/testing/selftests/kvm/lib/aarch64/ucall.c b/tools/testing/selftests/kvm/lib/aarch64/ucall.c index e0b0164e9af8..be1d9728c4ce 100644 --- a/tools/testing/selftests/kvm/lib/aarch64/ucall.c +++ b/tools/testing/selftests/kvm/lib/aarch64/ucall.c @@ -73,20 +73,19 @@ void ucall_uninit(struct kvm_vm *vm) void ucall(uint64_t cmd, int nargs, ...) { - struct ucall uc = { - .cmd = cmd, - }; + struct ucall uc = {}; va_list va; int i; + WRITE_ONCE(uc.cmd, cmd); nargs = nargs <= UCALL_MAX_ARGS ? nargs : UCALL_MAX_ARGS; va_start(va, nargs); for (i = 0; i < nargs; ++i) - uc.args[i] = va_arg(va, uint64_t); + WRITE_ONCE(uc.args[i], va_arg(va, uint64_t)); va_end(va); - *ucall_exit_mmio_addr = (vm_vaddr_t)&uc; + WRITE_ONCE(*ucall_exit_mmio_addr, (vm_vaddr_t)&uc); } uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc) diff --git a/tools/testing/selftests/kvm/lib/perf_test_util.c b/tools/testing/selftests/kvm/lib/perf_test_util.c index 722df3a28791..f989ff91f022 100644 --- a/tools/testing/selftests/kvm/lib/perf_test_util.c +++ b/tools/testing/selftests/kvm/lib/perf_test_util.c @@ -40,7 +40,7 @@ static bool all_vcpu_threads_running; * Continuously write to the first 8 bytes of each page in the * specified region. */ -static void guest_code(uint32_t vcpu_id) +void perf_test_guest_code(uint32_t vcpu_id) { struct perf_test_args *pta = &perf_test_args; struct perf_test_vcpu_args *vcpu_args = &pta->vcpu_args[vcpu_id]; @@ -108,8 +108,9 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus, { struct perf_test_args *pta = &perf_test_args; struct kvm_vm *vm; - uint64_t guest_num_pages; + uint64_t guest_num_pages, slot0_pages = DEFAULT_GUEST_PHY_PAGES; uint64_t backing_src_pagesz = get_backing_src_pagesz(backing_src); + uint64_t region_end_gfn; int i; pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode)); @@ -135,33 +136,53 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus, slots); /* + * If using nested, allocate extra pages for the nested page tables and + * in-memory data structures. + */ + if (pta->nested) + slot0_pages += perf_test_nested_pages(vcpus); + + /* * Pass guest_num_pages to populate the page tables for test memory. * The memory is also added to memslot 0, but that's a benign side * effect as KVM allows aliasing HVAs in memslots. */ - vm = vm_create_with_vcpus(mode, vcpus, DEFAULT_GUEST_PHY_PAGES, - guest_num_pages, 0, guest_code, NULL); + vm = vm_create_with_vcpus(mode, vcpus, slot0_pages, guest_num_pages, 0, + perf_test_guest_code, NULL); pta->vm = vm; + /* Put the test region at the top of guest physical memory. 
*/ + region_end_gfn = vm_get_max_gfn(vm) + 1; + +#ifdef __x86_64__ + /* + * When running vCPUs in L2, restrict the test region to 48 bits to + * avoid needing 5-level page tables to identity map L2. + */ + if (pta->nested) + region_end_gfn = min(region_end_gfn, (1UL << 48) / pta->guest_page_size); +#endif /* * If there should be more memory in the guest test region than there * can be pages in the guest, it will definitely cause problems. */ - TEST_ASSERT(guest_num_pages < vm_get_max_gfn(vm), + TEST_ASSERT(guest_num_pages < region_end_gfn, "Requested more guest memory than address space allows.\n" " guest pages: %" PRIx64 " max gfn: %" PRIx64 " vcpus: %d wss: %" PRIx64 "]\n", - guest_num_pages, vm_get_max_gfn(vm), vcpus, + guest_num_pages, region_end_gfn - 1, vcpus, vcpu_memory_bytes); - pta->gpa = (vm_get_max_gfn(vm) - guest_num_pages) * pta->guest_page_size; + pta->gpa = (region_end_gfn - guest_num_pages) * pta->guest_page_size; pta->gpa = align_down(pta->gpa, backing_src_pagesz); #ifdef __s390x__ /* Align to 1M (segment size) */ pta->gpa = align_down(pta->gpa, 1 << 20); #endif - pr_info("guest physical test memory offset: 0x%lx\n", pta->gpa); + pta->size = guest_num_pages * pta->guest_page_size; + pr_info("guest physical test memory: [0x%lx, 0x%lx)\n", + pta->gpa, pta->gpa + pta->size); /* Add extra memory slots for testing */ for (i = 0; i < slots; i++) { @@ -178,6 +199,11 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus, perf_test_setup_vcpus(vm, vcpus, vcpu_memory_bytes, partition_vcpu_memory_access); + if (pta->nested) { + pr_info("Configuring vCPUs to run in L2 (nested).\n"); + perf_test_setup_nested(vm, vcpus); + } + ucall_init(vm, NULL); /* Export the shared variables to the guest. */ @@ -198,6 +224,17 @@ void perf_test_set_wr_fract(struct kvm_vm *vm, int wr_fract) sync_global_to_guest(vm, perf_test_args); } +uint64_t __weak perf_test_nested_pages(int nr_vcpus) +{ + return 0; +} + +void __weak perf_test_setup_nested(struct kvm_vm *vm, int nr_vcpus) +{ + pr_info("%s() not supported on this architecture, skipping.\n", __func__); + exit(KSFT_SKIP); +} + static void *vcpu_thread_main(void *data) { struct vcpu_thread *vcpu = data; diff --git a/tools/testing/selftests/kvm/lib/x86_64/perf_test_util.c b/tools/testing/selftests/kvm/lib/x86_64/perf_test_util.c new file mode 100644 index 000000000000..e258524435a0 --- /dev/null +++ b/tools/testing/selftests/kvm/lib/x86_64/perf_test_util.c @@ -0,0 +1,112 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * x86_64-specific extensions to perf_test_util.c. + * + * Copyright (C) 2022, Google, Inc. 
+ */ +#include <stdio.h> +#include <stdlib.h> +#include <linux/bitmap.h> +#include <linux/bitops.h> + +#include "test_util.h" +#include "kvm_util.h" +#include "perf_test_util.h" +#include "../kvm_util_internal.h" +#include "processor.h" +#include "vmx.h" + +void perf_test_l2_guest_code(uint64_t vcpu_id) +{ + perf_test_guest_code(vcpu_id); + vmcall(); +} + +extern char perf_test_l2_guest_entry[]; +__asm__( +"perf_test_l2_guest_entry:" +" mov (%rsp), %rdi;" +" call perf_test_l2_guest_code;" +" ud2;" +); + +static void perf_test_l1_guest_code(struct vmx_pages *vmx, uint64_t vcpu_id) +{ +#define L2_GUEST_STACK_SIZE 64 + unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; + unsigned long *rsp; + + GUEST_ASSERT(vmx->vmcs_gpa); + GUEST_ASSERT(prepare_for_vmx_operation(vmx)); + GUEST_ASSERT(load_vmcs(vmx)); + GUEST_ASSERT(ept_1g_pages_supported()); + + rsp = &l2_guest_stack[L2_GUEST_STACK_SIZE - 1]; + *rsp = vcpu_id; + prepare_vmcs(vmx, perf_test_l2_guest_entry, rsp); + + GUEST_ASSERT(!vmlaunch()); + GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL); + GUEST_DONE(); +} + +uint64_t perf_test_nested_pages(int nr_vcpus) +{ + /* + * 513 page tables is enough to identity-map 256 TiB of L2 with 1G + * pages and 4-level paging, plus a few pages per-vCPU for data + * structures such as the VMCS. + */ + return 513 + 10 * nr_vcpus; +} + +void perf_test_setup_ept(struct vmx_pages *vmx, struct kvm_vm *vm) +{ + uint64_t start, end; + + prepare_eptp(vmx, vm, 0); + + /* + * Identity map the first 4G and the test region with 1G pages so that + * KVM can shadow the EPT12 with the maximum huge page size supported + * by the backing source. + */ + nested_identity_map_1g(vmx, vm, 0, 0x100000000ULL); + + start = align_down(perf_test_args.gpa, PG_SIZE_1G); + end = align_up(perf_test_args.gpa + perf_test_args.size, PG_SIZE_1G); + nested_identity_map_1g(vmx, vm, start, end - start); +} + +void perf_test_setup_nested(struct kvm_vm *vm, int nr_vcpus) +{ + struct vmx_pages *vmx, *vmx0 = NULL; + struct kvm_regs regs; + vm_vaddr_t vmx_gva; + int vcpu_id; + + nested_vmx_check_supported(); + + for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) { + vmx = vcpu_alloc_vmx(vm, &vmx_gva); + + if (vcpu_id == 0) { + perf_test_setup_ept(vmx, vm); + vmx0 = vmx; + } else { + /* Share the same EPT table across all vCPUs. */ + vmx->eptp = vmx0->eptp; + vmx->eptp_hva = vmx0->eptp_hva; + vmx->eptp_gpa = vmx0->eptp_gpa; + } + + /* + * Override the vCPU to run perf_test_l1_guest_code() which will + * bounce it into L2 before calling perf_test_guest_code(). 
+ */ + vcpu_regs_get(vm, vcpu_id, &regs); + regs.rip = (unsigned long) perf_test_l1_guest_code; + vcpu_regs_set(vm, vcpu_id, &regs); + vcpu_args_set(vm, vcpu_id, 2, vmx_gva, vcpu_id); + } +} diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c index 33ea5e9955d9..ead7011ee8f6 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/processor.c +++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c @@ -158,7 +158,7 @@ static void *virt_get_pte(struct kvm_vm *vm, uint64_t pt_pfn, uint64_t vaddr, int level) { uint64_t *page_table = addr_gpa2hva(vm, pt_pfn << vm->page_shift); - int index = vaddr >> (vm->page_shift + level * 9) & 0x1ffu; + int index = (vaddr >> PG_LEVEL_SHIFT(level)) & 0x1ffu; return &page_table[index]; } @@ -167,14 +167,14 @@ static uint64_t *virt_create_upper_pte(struct kvm_vm *vm, uint64_t pt_pfn, uint64_t vaddr, uint64_t paddr, - int level, - enum x86_page_size page_size) + int current_level, + int target_level) { - uint64_t *pte = virt_get_pte(vm, pt_pfn, vaddr, level); + uint64_t *pte = virt_get_pte(vm, pt_pfn, vaddr, current_level); if (!(*pte & PTE_PRESENT_MASK)) { *pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK; - if (level == page_size) + if (current_level == target_level) *pte |= PTE_LARGE_MASK | (paddr & PHYSICAL_PAGE_MASK); else *pte |= vm_alloc_page_table(vm) & PHYSICAL_PAGE_MASK; @@ -184,20 +184,19 @@ static uint64_t *virt_create_upper_pte(struct kvm_vm *vm, * a hugepage at this level, and that there isn't a hugepage at * this level. */ - TEST_ASSERT(level != page_size, + TEST_ASSERT(current_level != target_level, "Cannot create hugepage at level: %u, vaddr: 0x%lx\n", - page_size, vaddr); + current_level, vaddr); TEST_ASSERT(!(*pte & PTE_LARGE_MASK), "Cannot create page table at level: %u, vaddr: 0x%lx\n", - level, vaddr); + current_level, vaddr); } return pte; } -void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, - enum x86_page_size page_size) +void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level) { - const uint64_t pg_size = 1ull << ((page_size * 9) + 12); + const uint64_t pg_size = PG_LEVEL_SIZE(level); uint64_t *pml4e, *pdpe, *pde; uint64_t *pte; @@ -222,20 +221,20 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, * early if a hugepage was created. */ pml4e = virt_create_upper_pte(vm, vm->pgd >> vm->page_shift, - vaddr, paddr, 3, page_size); + vaddr, paddr, PG_LEVEL_512G, level); if (*pml4e & PTE_LARGE_MASK) return; - pdpe = virt_create_upper_pte(vm, PTE_GET_PFN(*pml4e), vaddr, paddr, 2, page_size); + pdpe = virt_create_upper_pte(vm, PTE_GET_PFN(*pml4e), vaddr, paddr, PG_LEVEL_1G, level); if (*pdpe & PTE_LARGE_MASK) return; - pde = virt_create_upper_pte(vm, PTE_GET_PFN(*pdpe), vaddr, paddr, 1, page_size); + pde = virt_create_upper_pte(vm, PTE_GET_PFN(*pdpe), vaddr, paddr, PG_LEVEL_2M, level); if (*pde & PTE_LARGE_MASK) return; /* Fill in page table entry. 
*/ - pte = virt_get_pte(vm, PTE_GET_PFN(*pde), vaddr, 0); + pte = virt_get_pte(vm, PTE_GET_PFN(*pde), vaddr, PG_LEVEL_4K); TEST_ASSERT(!(*pte & PTE_PRESENT_MASK), "PTE already present for 4k page at vaddr: 0x%lx\n", vaddr); *pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK | (paddr & PHYSICAL_PAGE_MASK); @@ -243,7 +242,7 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr) { - __virt_pg_map(vm, vaddr, paddr, X86_PAGE_SIZE_4K); + __virt_pg_map(vm, vaddr, paddr, PG_LEVEL_4K); } static uint64_t *_vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid, diff --git a/tools/testing/selftests/kvm/lib/x86_64/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c index d089d8b850b5..b77a01d0a271 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/vmx.c +++ b/tools/testing/selftests/kvm/lib/x86_64/vmx.c @@ -198,6 +198,16 @@ bool load_vmcs(struct vmx_pages *vmx) return true; } +static bool ept_vpid_cap_supported(uint64_t mask) +{ + return rdmsr(MSR_IA32_VMX_EPT_VPID_CAP) & mask; +} + +bool ept_1g_pages_supported(void) +{ + return ept_vpid_cap_supported(VMX_EPT_VPID_CAP_1G_PAGES); +} + /* * Initialize the control fields to the most basic settings possible. */ @@ -215,7 +225,7 @@ static inline void init_vmcs_control_fields(struct vmx_pages *vmx) struct eptPageTablePointer eptp = { .memory_type = VMX_BASIC_MEM_TYPE_WB, .page_walk_length = 3, /* + 1 */ - .ad_enabled = !!(rdmsr(MSR_IA32_VMX_EPT_VPID_CAP) & VMX_EPT_VPID_CAP_AD_BITS), + .ad_enabled = ept_vpid_cap_supported(VMX_EPT_VPID_CAP_AD_BITS), .address = vmx->eptp_gpa >> PAGE_SHIFT_4K, }; @@ -392,80 +402,93 @@ void nested_vmx_check_supported(void) } } -void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm, - uint64_t nested_paddr, uint64_t paddr) +static void nested_create_pte(struct kvm_vm *vm, + struct eptPageTableEntry *pte, + uint64_t nested_paddr, + uint64_t paddr, + int current_level, + int target_level) +{ + if (!pte->readable) { + pte->writable = true; + pte->readable = true; + pte->executable = true; + pte->page_size = (current_level == target_level); + if (pte->page_size) + pte->address = paddr >> vm->page_shift; + else + pte->address = vm_alloc_page_table(vm) >> vm->page_shift; + } else { + /* + * Entry already present. Assert that the caller doesn't want + * a hugepage at this level, and that there isn't a hugepage at + * this level. 
+ */ + TEST_ASSERT(current_level != target_level, + "Cannot create hugepage at level: %u, nested_paddr: 0x%lx\n", + current_level, nested_paddr); + TEST_ASSERT(!pte->page_size, + "Cannot create page table at level: %u, nested_paddr: 0x%lx\n", + current_level, nested_paddr); + } +} + + +void __nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm, + uint64_t nested_paddr, uint64_t paddr, int target_level) { - uint16_t index[4]; - struct eptPageTableEntry *pml4e; + const uint64_t page_size = PG_LEVEL_SIZE(target_level); + struct eptPageTableEntry *pt = vmx->eptp_hva, *pte; + uint16_t index; TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use " "unknown or unsupported guest mode, mode: 0x%x", vm->mode); - TEST_ASSERT((nested_paddr % vm->page_size) == 0, + TEST_ASSERT((nested_paddr >> 48) == 0, + "Nested physical address 0x%lx requires 5-level paging", + nested_paddr); + TEST_ASSERT((nested_paddr % page_size) == 0, "Nested physical address not on page boundary,\n" - " nested_paddr: 0x%lx vm->page_size: 0x%x", - nested_paddr, vm->page_size); + " nested_paddr: 0x%lx page_size: 0x%lx", + nested_paddr, page_size); TEST_ASSERT((nested_paddr >> vm->page_shift) <= vm->max_gfn, "Physical address beyond maximum supported,\n" " nested_paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", paddr, vm->max_gfn, vm->page_size); - TEST_ASSERT((paddr % vm->page_size) == 0, + TEST_ASSERT((paddr % page_size) == 0, "Physical address not on page boundary,\n" - " paddr: 0x%lx vm->page_size: 0x%x", - paddr, vm->page_size); + " paddr: 0x%lx page_size: 0x%lx", + paddr, page_size); TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn, "Physical address beyond maximum supported,\n" " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", paddr, vm->max_gfn, vm->page_size); - index[0] = (nested_paddr >> 12) & 0x1ffu; - index[1] = (nested_paddr >> 21) & 0x1ffu; - index[2] = (nested_paddr >> 30) & 0x1ffu; - index[3] = (nested_paddr >> 39) & 0x1ffu; - - /* Allocate page directory pointer table if not present. */ - pml4e = vmx->eptp_hva; - if (!pml4e[index[3]].readable) { - pml4e[index[3]].address = vm_alloc_page_table(vm) >> vm->page_shift; - pml4e[index[3]].writable = true; - pml4e[index[3]].readable = true; - pml4e[index[3]].executable = true; - } + for (int level = PG_LEVEL_512G; level >= PG_LEVEL_4K; level--) { + index = (nested_paddr >> PG_LEVEL_SHIFT(level)) & 0x1ffu; + pte = &pt[index]; - /* Allocate page directory table if not present. */ - struct eptPageTableEntry *pdpe; - pdpe = addr_gpa2hva(vm, pml4e[index[3]].address * vm->page_size); - if (!pdpe[index[2]].readable) { - pdpe[index[2]].address = vm_alloc_page_table(vm) >> vm->page_shift; - pdpe[index[2]].writable = true; - pdpe[index[2]].readable = true; - pdpe[index[2]].executable = true; - } + nested_create_pte(vm, pte, nested_paddr, paddr, level, target_level); - /* Allocate page table if not present. */ - struct eptPageTableEntry *pde; - pde = addr_gpa2hva(vm, pdpe[index[2]].address * vm->page_size); - if (!pde[index[1]].readable) { - pde[index[1]].address = vm_alloc_page_table(vm) >> vm->page_shift; - pde[index[1]].writable = true; - pde[index[1]].readable = true; - pde[index[1]].executable = true; - } + if (pte->page_size) + break; - /* Fill in page table entry. 
*/ - struct eptPageTableEntry *pte; - pte = addr_gpa2hva(vm, pde[index[1]].address * vm->page_size); - pte[index[0]].address = paddr >> vm->page_shift; - pte[index[0]].writable = true; - pte[index[0]].readable = true; - pte[index[0]].executable = true; + pt = addr_gpa2hva(vm, pte->address * vm->page_size); + } /* * For now mark these as accessed and dirty because the only * testcase we have needs that. Can be reconsidered later. */ - pte[index[0]].accessed = true; - pte[index[0]].dirty = true; + pte->accessed = true; + pte->dirty = true; + +} + +void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm, + uint64_t nested_paddr, uint64_t paddr) +{ + __nested_pg_map(vmx, vm, nested_paddr, paddr, PG_LEVEL_4K); } /* @@ -476,7 +499,7 @@ void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm, * nested_paddr - Nested guest physical address to map * paddr - VM Physical Address * size - The size of the range to map - * eptp_memslot - Memory region slot for new virtual translation tables + * level - The level at which to map the range * * Output Args: None * @@ -485,22 +508,29 @@ void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm, * Within the VM given by vm, creates a nested guest translation for the * page range starting at nested_paddr to the page range starting at paddr. */ -void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm, - uint64_t nested_paddr, uint64_t paddr, uint64_t size) +void __nested_map(struct vmx_pages *vmx, struct kvm_vm *vm, + uint64_t nested_paddr, uint64_t paddr, uint64_t size, + int level) { - size_t page_size = vm->page_size; + size_t page_size = PG_LEVEL_SIZE(level); size_t npages = size / page_size; TEST_ASSERT(nested_paddr + size > nested_paddr, "Vaddr overflow"); TEST_ASSERT(paddr + size > paddr, "Paddr overflow"); while (npages--) { - nested_pg_map(vmx, vm, nested_paddr, paddr); + __nested_pg_map(vmx, vm, nested_paddr, paddr, level); nested_paddr += page_size; paddr += page_size; } } +void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm, + uint64_t nested_paddr, uint64_t paddr, uint64_t size) +{ + __nested_map(vmx, vm, nested_paddr, paddr, size, PG_LEVEL_4K); +} + /* Prepare an identity extended page table that maps all the * physical pages in VM. */ @@ -525,6 +555,13 @@ void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm, } } +/* Identity map a region with 1GiB Pages. */ +void nested_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm, + uint64_t addr, uint64_t size) +{ + __nested_map(vmx, vm, addr, addr, size, PG_LEVEL_1G); +} + void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm, uint32_t eptp_memslot) { diff --git a/tools/testing/selftests/kvm/max_guest_memory_test.c b/tools/testing/selftests/kvm/max_guest_memory_test.c index 3875c4b23a04..15f046e19cb2 100644 --- a/tools/testing/selftests/kvm/max_guest_memory_test.c +++ b/tools/testing/selftests/kvm/max_guest_memory_test.c @@ -244,7 +244,7 @@ int main(int argc, char *argv[]) #ifdef __x86_64__ /* Identity map memory in the guest using 1gb pages. 
*/ for (i = 0; i < slot_size; i += size_1gb) - __virt_pg_map(vm, gpa + i, gpa + i, X86_PAGE_SIZE_1G); + __virt_pg_map(vm, gpa + i, gpa + i, PG_LEVEL_1G); #else for (i = 0; i < slot_size; i += vm_get_page_size(vm)) virt_pg_map(vm, gpa + i, gpa + i); diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_clock.c b/tools/testing/selftests/kvm/x86_64/hyperv_clock.c index e0b2bb1339b1..3330fb183c68 100644 --- a/tools/testing/selftests/kvm/x86_64/hyperv_clock.c +++ b/tools/testing/selftests/kvm/x86_64/hyperv_clock.c @@ -44,7 +44,7 @@ static inline void nop_loop(void) { int i; - for (i = 0; i < 1000000; i++) + for (i = 0; i < 100000000; i++) asm volatile("nop"); } @@ -56,12 +56,14 @@ static inline void check_tsc_msr_rdtsc(void) tsc_freq = rdmsr(HV_X64_MSR_TSC_FREQUENCY); GUEST_ASSERT(tsc_freq > 0); - /* First, check MSR-based clocksource */ + /* For increased accuracy, take mean rdtsc() before and after rdmsr() */ r1 = rdtsc(); t1 = rdmsr(HV_X64_MSR_TIME_REF_COUNT); + r1 = (r1 + rdtsc()) / 2; nop_loop(); r2 = rdtsc(); t2 = rdmsr(HV_X64_MSR_TIME_REF_COUNT); + r2 = (r2 + rdtsc()) / 2; GUEST_ASSERT(r2 > r1 && t2 > t1); @@ -181,12 +183,14 @@ static void host_check_tsc_msr_rdtsc(struct kvm_vm *vm) tsc_freq = vcpu_get_msr(vm, VCPU_ID, HV_X64_MSR_TSC_FREQUENCY); TEST_ASSERT(tsc_freq > 0, "TSC frequency must be nonzero"); - /* First, check MSR-based clocksource */ + /* For increased accuracy, take mean rdtsc() before and after ioctl */ r1 = rdtsc(); t1 = vcpu_get_msr(vm, VCPU_ID, HV_X64_MSR_TIME_REF_COUNT); + r1 = (r1 + rdtsc()) / 2; nop_loop(); r2 = rdtsc(); t2 = vcpu_get_msr(vm, VCPU_ID, HV_X64_MSR_TIME_REF_COUNT); + r2 = (r2 + rdtsc()) / 2; TEST_ASSERT(t2 > t1, "Time reference MSR is not monotonic (%ld <= %ld)", t1, t2); diff --git a/tools/testing/selftests/kvm/x86_64/mmu_role_test.c b/tools/testing/selftests/kvm/x86_64/mmu_role_test.c index da2325fcad87..bdecd532f935 100644 --- a/tools/testing/selftests/kvm/x86_64/mmu_role_test.c +++ b/tools/testing/selftests/kvm/x86_64/mmu_role_test.c @@ -35,7 +35,7 @@ static void mmu_role_test(u32 *cpuid_reg, u32 evil_cpuid_val) run = vcpu_state(vm, VCPU_ID); /* Map 1gb page without a backing memslot. 
*/ - __virt_pg_map(vm, MMIO_GPA, MMIO_GPA, X86_PAGE_SIZE_1G); + __virt_pg_map(vm, MMIO_GPA, MMIO_GPA, PG_LEVEL_1G); r = _vcpu_run(vm, VCPU_ID); diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk index 2a2d240cdc1b..1a5cc3cd97ec 100644 --- a/tools/testing/selftests/lib.mk +++ b/tools/testing/selftests/lib.mk @@ -7,10 +7,31 @@ else ifneq ($(filter -%,$(LLVM)),) LLVM_SUFFIX := $(LLVM) endif -CC := $(LLVM_PREFIX)clang$(LLVM_SUFFIX) +CLANG_TARGET_FLAGS_arm := arm-linux-gnueabi +CLANG_TARGET_FLAGS_arm64 := aarch64-linux-gnu +CLANG_TARGET_FLAGS_hexagon := hexagon-linux-musl +CLANG_TARGET_FLAGS_m68k := m68k-linux-gnu +CLANG_TARGET_FLAGS_mips := mipsel-linux-gnu +CLANG_TARGET_FLAGS_powerpc := powerpc64le-linux-gnu +CLANG_TARGET_FLAGS_riscv := riscv64-linux-gnu +CLANG_TARGET_FLAGS_s390 := s390x-linux-gnu +CLANG_TARGET_FLAGS_x86 := x86_64-linux-gnu +CLANG_TARGET_FLAGS := $(CLANG_TARGET_FLAGS_$(ARCH)) + +ifeq ($(CROSS_COMPILE),) +ifeq ($(CLANG_TARGET_FLAGS),) +$(error Specify CROSS_COMPILE or add '--target=' option to lib.mk) +else +CLANG_FLAGS += --target=$(CLANG_TARGET_FLAGS) +endif # CLANG_TARGET_FLAGS +else +CLANG_FLAGS += --target=$(notdir $(CROSS_COMPILE:%-=%)) +endif # CROSS_COMPILE + +CC := $(LLVM_PREFIX)clang$(LLVM_SUFFIX) $(CLANG_FLAGS) -fintegrated-as else CC := $(CROSS_COMPILE)gcc -endif +endif # LLVM ifeq (0,$(MAKELEVEL)) ifeq ($(OUTPUT),) diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore index b984f8c8d523..a29f79618934 100644 --- a/tools/testing/selftests/net/.gitignore +++ b/tools/testing/selftests/net/.gitignore @@ -37,4 +37,3 @@ gro ioam6_parser toeplitz cmsg_sender -bind_bhash_test diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index 464df13831f2..7ea54af55490 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile @@ -59,7 +59,6 @@ TEST_GEN_FILES += toeplitz TEST_GEN_FILES += cmsg_sender TEST_GEN_FILES += stress_reuseport_listen TEST_PROGS += test_vxlan_vnifiltering.sh -TEST_GEN_FILES += bind_bhash_test TEST_FILES := settings @@ -70,5 +69,4 @@ include bpf/Makefile $(OUTPUT)/reuseport_bpf_numa: LDLIBS += -lnuma $(OUTPUT)/tcp_mmap: LDLIBS += -lpthread -$(OUTPUT)/bind_bhash_test: LDLIBS += -lpthread $(OUTPUT)/tcp_inq: LDLIBS += -lpthread diff --git a/tools/testing/selftests/net/bind_bhash_test.c b/tools/testing/selftests/net/bind_bhash_test.c deleted file mode 100644 index 252e73754e76..000000000000 --- a/tools/testing/selftests/net/bind_bhash_test.c +++ /dev/null @@ -1,119 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * This times how long it takes to bind to a port when the port already - * has multiple sockets in its bhash table. - * - * In the setup(), we populate the port's bhash table with - * MAX_THREADS * MAX_CONNECTIONS number of entries. 
- */ - -#include <unistd.h> -#include <stdio.h> -#include <netdb.h> -#include <pthread.h> - -#define MAX_THREADS 600 -#define MAX_CONNECTIONS 40 - -static const char *bind_addr = "::1"; -static const char *port; - -static int fd_array[MAX_THREADS][MAX_CONNECTIONS]; - -static int bind_socket(int opt, const char *addr) -{ - struct addrinfo *res, hint = {}; - int sock_fd, reuse = 1, err; - - sock_fd = socket(AF_INET6, SOCK_STREAM, 0); - if (sock_fd < 0) { - perror("socket fd err"); - return -1; - } - - hint.ai_family = AF_INET6; - hint.ai_socktype = SOCK_STREAM; - - err = getaddrinfo(addr, port, &hint, &res); - if (err) { - perror("getaddrinfo failed"); - return -1; - } - - if (opt) { - err = setsockopt(sock_fd, SOL_SOCKET, opt, &reuse, sizeof(reuse)); - if (err) { - perror("setsockopt failed"); - return -1; - } - } - - err = bind(sock_fd, res->ai_addr, res->ai_addrlen); - if (err) { - perror("failed to bind to port"); - return -1; - } - - return sock_fd; -} - -static void *setup(void *arg) -{ - int sock_fd, i; - int *array = (int *)arg; - - for (i = 0; i < MAX_CONNECTIONS; i++) { - sock_fd = bind_socket(SO_REUSEADDR | SO_REUSEPORT, bind_addr); - if (sock_fd < 0) - return NULL; - array[i] = sock_fd; - } - - return NULL; -} - -int main(int argc, const char *argv[]) -{ - int listener_fd, sock_fd, i, j; - pthread_t tid[MAX_THREADS]; - clock_t begin, end; - - if (argc != 2) { - printf("Usage: listener <port>\n"); - return -1; - } - - port = argv[1]; - - listener_fd = bind_socket(SO_REUSEADDR | SO_REUSEPORT, bind_addr); - if (listen(listener_fd, 100) < 0) { - perror("listen failed"); - return -1; - } - - /* Set up threads to populate the bhash table entry for the port */ - for (i = 0; i < MAX_THREADS; i++) - pthread_create(&tid[i], NULL, setup, fd_array[i]); - - for (i = 0; i < MAX_THREADS; i++) - pthread_join(tid[i], NULL); - - begin = clock(); - - /* Bind to the same port on a different address */ - sock_fd = bind_socket(0, "2001:0db8:0:f101::1"); - - end = clock(); - - printf("time spent = %f\n", (double)(end - begin) / CLOCKS_PER_SEC); - - /* clean up */ - close(sock_fd); - close(listener_fd); - for (i = 0; i < MAX_THREADS; i++) { - for (j = 0; i < MAX_THREADS; i++) - close(fd_array[i][j]); - } - - return 0; -} diff --git a/tools/testing/selftests/net/bpf/Makefile b/tools/testing/selftests/net/bpf/Makefile index f91bf14bbee7..8a69c91fcca0 100644 --- a/tools/testing/selftests/net/bpf/Makefile +++ b/tools/testing/selftests/net/bpf/Makefile @@ -2,6 +2,7 @@ CLANG ?= clang CCINCLUDE += -I../../bpf +CCINCLUDE += -I../../../lib CCINCLUDE += -I../../../../../usr/include/ TEST_CUSTOM_PROGS = $(OUTPUT)/bpf/nat6to4.o @@ -10,5 +11,4 @@ all: $(TEST_CUSTOM_PROGS) $(OUTPUT)/%.o: %.c $(CLANG) -O2 -target bpf -c $< $(CCINCLUDE) -o $@ -clean: - rm -f $(TEST_CUSTOM_PROGS) +EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) diff --git a/tools/testing/selftests/net/fcnal-test.sh b/tools/testing/selftests/net/fcnal-test.sh index 54701c8b0cd7..03b586760164 100755 --- a/tools/testing/selftests/net/fcnal-test.sh +++ b/tools/testing/selftests/net/fcnal-test.sh @@ -70,6 +70,10 @@ NSB_LO_IP6=2001:db8:2::2 NL_IP=172.17.1.1 NL_IP6=2001:db8:4::1 +# multicast and broadcast addresses +MCAST_IP=224.0.0.1 +BCAST_IP=255.255.255.255 + MD5_PW=abc123 MD5_WRONG_PW=abc1234 @@ -308,6 +312,9 @@ addr2str() 127.0.0.1) echo "loopback";; ::1) echo "IPv6 loopback";; + ${BCAST_IP}) echo "broadcast";; + ${MCAST_IP}) echo "multicast";; + ${NSA_IP}) echo "ns-A IP";; ${NSA_IP6}) echo "ns-A IPv6";; ${NSA_LO_IP}) echo "ns-A loopback IP";; @@ -1793,12 +1800,33 @@ 
ipv4_addr_bind_novrf() done # - # raw socket with nonlocal bind + # tests for nonlocal bind # a=${NL_IP} log_start - run_cmd nettest -s -R -P icmp -f -l ${a} -I ${NSA_DEV} -b - log_test_addr ${a} $? 0 "Raw socket bind to nonlocal address after device bind" + run_cmd nettest -s -R -f -l ${a} -b + log_test_addr ${a} $? 0 "Raw socket bind to nonlocal address" + + log_start + run_cmd nettest -s -f -l ${a} -b + log_test_addr ${a} $? 0 "TCP socket bind to nonlocal address" + + log_start + run_cmd nettest -s -D -P icmp -f -l ${a} -b + log_test_addr ${a} $? 0 "ICMP socket bind to nonlocal address" + + # + # check that ICMP sockets cannot bind to broadcast and multicast addresses + # + a=${BCAST_IP} + log_start + run_cmd nettest -s -D -P icmp -l ${a} -b + log_test_addr ${a} $? 1 "ICMP socket bind to broadcast address" + + a=${MCAST_IP} + log_start + run_cmd nettest -s -D -P icmp -l ${a} -b + log_test_addr ${a} $? 1 "ICMP socket bind to multicast address" # # tcp sockets @@ -1850,13 +1878,34 @@ ipv4_addr_bind_vrf() log_test_addr ${a} $? 1 "Raw socket bind to out of scope address after VRF bind" # - # raw socket with nonlocal bind + # tests for nonlocal bind # a=${NL_IP} log_start - run_cmd nettest -s -R -P icmp -f -l ${a} -I ${VRF} -b + run_cmd nettest -s -R -f -l ${a} -I ${VRF} -b log_test_addr ${a} $? 0 "Raw socket bind to nonlocal address after VRF bind" + log_start + run_cmd nettest -s -f -l ${a} -I ${VRF} -b + log_test_addr ${a} $? 0 "TCP socket bind to nonlocal address after VRF bind" + + log_start + run_cmd nettest -s -D -P icmp -f -l ${a} -I ${VRF} -b + log_test_addr ${a} $? 0 "ICMP socket bind to nonlocal address after VRF bind" + + # + # check that ICMP sockets cannot bind to broadcast and multicast addresses + # + a=${BCAST_IP} + log_start + run_cmd nettest -s -D -P icmp -l ${a} -I ${VRF} -b + log_test_addr ${a} $? 1 "ICMP socket bind to broadcast address after VRF bind" + + a=${MCAST_IP} + log_start + run_cmd nettest -s -D -P icmp -l ${a} -I ${VRF} -b + log_test_addr ${a} $? 
1 "ICMP socket bind to multicast address after VRF bind" + # # tcp sockets # @@ -1889,10 +1938,12 @@ ipv4_addr_bind() log_subsection "No VRF" setup + set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null ipv4_addr_bind_novrf log_subsection "With VRF" setup "yes" + set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null ipv4_addr_bind_vrf } diff --git a/tools/testing/selftests/netfilter/nft_concat_range.sh b/tools/testing/selftests/netfilter/nft_concat_range.sh index b35010cc7f6a..a6991877e50c 100755 --- a/tools/testing/selftests/netfilter/nft_concat_range.sh +++ b/tools/testing/selftests/netfilter/nft_concat_range.sh @@ -31,7 +31,7 @@ BUGS="flush_remove_add reload" # List of possible paths to pktgen script from kernel tree for performance tests PKTGEN_SCRIPT_PATHS=" - ../../../samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh + ../../../../samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh pktgen/pktgen_bench_xmit_mode_netif_receive.sh" # Definition of set types: diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh index eb8543b9a5c4..924ecb3f1f73 100755 --- a/tools/testing/selftests/netfilter/nft_nat.sh +++ b/tools/testing/selftests/netfilter/nft_nat.sh @@ -374,6 +374,45 @@ EOF return $lret } +test_local_dnat_portonly() +{ + local family=$1 + local daddr=$2 + local lret=0 + local sr_s + local sr_r + +ip netns exec "$ns0" nft -f /dev/stdin <<EOF +table $family nat { + chain output { + type nat hook output priority 0; policy accept; + meta l4proto tcp dnat to :2000 + + } +} +EOF + if [ $? -ne 0 ]; then + if [ $family = "inet" ];then + echo "SKIP: inet port test" + test_inet_nat=false + return + fi + echo "SKIP: Could not add $family dnat hook" + return + fi + + echo SERVER-$family | ip netns exec "$ns1" timeout 5 socat -u STDIN TCP-LISTEN:2000 & + sc_s=$! 
+ + result=$(ip netns exec "$ns0" timeout 1 socat TCP:$daddr:2000 STDOUT) + + if [ "$result" = "SERVER-inet" ];then + echo "PASS: inet port rewrite without l3 address" + else + echo "ERROR: inet port rewrite" + ret=1 + fi +} test_masquerade6() { @@ -1148,6 +1187,10 @@ fi reset_counters test_local_dnat ip test_local_dnat6 ip6 + +reset_counters +test_local_dnat_portonly inet 10.0.1.99 + reset_counters $test_inet_nat && test_local_dnat inet $test_inet_nat && test_local_dnat6 inet diff --git a/tools/testing/selftests/vm/gup_test.c b/tools/testing/selftests/vm/gup_test.c index 6bb36ca71cb5..a309876d832f 100644 --- a/tools/testing/selftests/vm/gup_test.c +++ b/tools/testing/selftests/vm/gup_test.c @@ -209,7 +209,7 @@ int main(int argc, char **argv) if (write) gup.gup_flags |= FOLL_WRITE; - gup_fd = open("/sys/kernel/debug/gup_test", O_RDWR); + gup_fd = open(GUP_TEST_FILE, O_RDWR); if (gup_fd == -1) { switch (errno) { case EACCES: @@ -224,7 +224,7 @@ int main(int argc, char **argv) printf("check if CONFIG_GUP_TEST is enabled in kernel config\n"); break; default: - perror("failed to open /sys/kernel/debug/gup_test"); + perror("failed to open " GUP_TEST_FILE); break; } exit(KSFT_SKIP); diff --git a/tools/testing/selftests/vm/ksm_tests.c b/tools/testing/selftests/vm/ksm_tests.c index 2fcf24312da8..f5e4e0bbd081 100644 --- a/tools/testing/selftests/vm/ksm_tests.c +++ b/tools/testing/selftests/vm/ksm_tests.c @@ -54,6 +54,7 @@ static int ksm_write_sysfs(const char *file_path, unsigned long val) } if (fprintf(f, "%lu", val) < 0) { perror("fprintf"); + fclose(f); return 1; } fclose(f); @@ -72,6 +73,7 @@ static int ksm_read_sysfs(const char *file_path, unsigned long *val) } if (fscanf(f, "%lu", val) != 1) { perror("fscanf"); + fclose(f); return 1; } fclose(f); diff --git a/tools/testing/selftests/wireguard/qemu/Makefile b/tools/testing/selftests/wireguard/qemu/Makefile index bca07b93eeb0..7d1b80988d8a 100644 --- a/tools/testing/selftests/wireguard/qemu/Makefile +++ b/tools/testing/selftests/wireguard/qemu/Makefile @@ -64,8 +64,8 @@ QEMU_VPORT_RESULT := virtio-serial-device ifeq ($(HOST_ARCH),$(ARCH)) QEMU_MACHINE := -cpu host -machine virt,gic_version=host,accel=kvm else -QEMU_MACHINE := -cpu cortex-a53 -machine virt -CFLAGS += -march=armv8-a -mtune=cortex-a53 +QEMU_MACHINE := -cpu max -machine virt +CFLAGS += -march=armv8-a endif else ifeq ($(ARCH),aarch64_be) CHOST := aarch64_be-linux-musl @@ -76,8 +76,8 @@ QEMU_VPORT_RESULT := virtio-serial-device ifeq ($(HOST_ARCH),$(ARCH)) QEMU_MACHINE := -cpu host -machine virt,gic_version=host,accel=kvm else -QEMU_MACHINE := -cpu cortex-a53 -machine virt -CFLAGS += -march=armv8-a -mtune=cortex-a53 +QEMU_MACHINE := -cpu max -machine virt +CFLAGS += -march=armv8-a endif else ifeq ($(ARCH),arm) CHOST := arm-linux-musleabi @@ -88,8 +88,8 @@ QEMU_VPORT_RESULT := virtio-serial-device ifeq ($(HOST_ARCH),$(ARCH)) QEMU_MACHINE := -cpu host -machine virt,gic_version=host,accel=kvm else -QEMU_MACHINE := -cpu cortex-a15 -machine virt -CFLAGS += -march=armv7-a -mtune=cortex-a15 -mabi=aapcs-linux +QEMU_MACHINE := -cpu max -machine virt +CFLAGS += -march=armv7-a -mabi=aapcs-linux endif else ifeq ($(ARCH),armeb) CHOST := armeb-linux-musleabi @@ -100,8 +100,8 @@ QEMU_VPORT_RESULT := virtio-serial-device ifeq ($(HOST_ARCH),$(ARCH)) QEMU_MACHINE := -cpu host -machine virt,gic_version=host,accel=kvm else -QEMU_MACHINE := -cpu cortex-a15 -machine virt -CFLAGS += -march=armv7-a -mabi=aapcs-linux # We don't pass -mtune=cortex-a15 due to a compiler bug on big endian. 
+QEMU_MACHINE := -cpu max -machine virt +CFLAGS += -march=armv7-a -mabi=aapcs-linux LDFLAGS += -Wl,--be8 endif else ifeq ($(ARCH),x86_64) @@ -112,8 +112,7 @@ KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/x86/boot/bzImage ifeq ($(HOST_ARCH),$(ARCH)) QEMU_MACHINE := -cpu host -machine q35,accel=kvm else -QEMU_MACHINE := -cpu Skylake-Server -machine q35 -CFLAGS += -march=skylake-avx512 +QEMU_MACHINE := -cpu max -machine q35 endif else ifeq ($(ARCH),i686) CHOST := i686-linux-musl @@ -123,8 +122,7 @@ KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/x86/boot/bzImage ifeq ($(subst x86_64,i686,$(HOST_ARCH)),$(ARCH)) QEMU_MACHINE := -cpu host -machine q35,accel=kvm else -QEMU_MACHINE := -cpu coreduo -machine q35 -CFLAGS += -march=prescott +QEMU_MACHINE := -cpu max -machine q35 endif else ifeq ($(ARCH),mips64) CHOST := mips64-linux-musl @@ -182,7 +180,7 @@ KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux ifeq ($(HOST_ARCH),$(ARCH)) QEMU_MACHINE := -cpu host,accel=kvm -machine pseries else -QEMU_MACHINE := -machine pseries +QEMU_MACHINE := -machine pseries -device spapr-rng,rng=rng -object rng-random,id=rng endif else ifeq ($(ARCH),powerpc64le) CHOST := powerpc64le-linux-musl @@ -192,7 +190,7 @@ KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux ifeq ($(HOST_ARCH),$(ARCH)) QEMU_MACHINE := -cpu host,accel=kvm -machine pseries else -QEMU_MACHINE := -machine pseries +QEMU_MACHINE := -machine pseries -device spapr-rng,rng=rng -object rng-random,id=rng endif else ifeq ($(ARCH),powerpc) CHOST := powerpc-linux-musl @@ -247,7 +245,7 @@ QEMU_VPORT_RESULT := virtio-serial-ccw ifeq ($(HOST_ARCH),$(ARCH)) QEMU_MACHINE := -cpu host,accel=kvm -machine s390-ccw-virtio -append $(KERNEL_CMDLINE) else -QEMU_MACHINE := -machine s390-ccw-virtio -append $(KERNEL_CMDLINE) +QEMU_MACHINE := -cpu max -machine s390-ccw-virtio -append $(KERNEL_CMDLINE) endif else $(error I only build: x86_64, i686, arm, armeb, aarch64, aarch64_be, mips, mipsel, mips64, mips64el, powerpc64, powerpc64le, powerpc, m68k, riscv64, riscv32, s390x) diff --git a/tools/testing/selftests/wireguard/qemu/init.c b/tools/testing/selftests/wireguard/qemu/init.c index 2a0f48fac925..c9e128436546 100644 --- a/tools/testing/selftests/wireguard/qemu/init.c +++ b/tools/testing/selftests/wireguard/qemu/init.c @@ -21,6 +21,7 @@ #include <sys/utsname.h> #include <sys/sendfile.h> #include <sys/sysmacros.h> +#include <sys/random.h> #include <linux/random.h> #include <linux/version.h> @@ -58,6 +59,8 @@ static void seed_rng(void) { int bits = 256, fd; + if (!getrandom(NULL, 0, GRND_NONBLOCK)) + return; pretty_message("[+] Fake seeding RNG..."); fd = open("/dev/random", O_WRONLY); if (fd < 0) diff --git a/tools/testing/selftests/wireguard/qemu/kernel.config b/tools/testing/selftests/wireguard/qemu/kernel.config index a9b5a520a1d2..bad88f4b0a03 100644 --- a/tools/testing/selftests/wireguard/qemu/kernel.config +++ b/tools/testing/selftests/wireguard/qemu/kernel.config @@ -31,6 +31,7 @@ CONFIG_TTY=y CONFIG_BINFMT_ELF=y CONFIG_BINFMT_SCRIPT=y CONFIG_VDSO=y +CONFIG_STRICT_KERNEL_RWX=y CONFIG_VIRTUALIZATION=y CONFIG_HYPERVISOR_GUEST=y CONFIG_PARAVIRT=y @@ -65,6 +66,8 @@ CONFIG_PROC_FS=y CONFIG_PROC_SYSCTL=y CONFIG_SYSFS=y CONFIG_TMPFS=y +CONFIG_RANDOM_TRUST_CPU=y +CONFIG_RANDOM_TRUST_BOOTLOADER=y CONFIG_CONSOLE_LOGLEVEL_DEFAULT=15 CONFIG_LOG_BUF_SHIFT=18 CONFIG_PRINTK_TIME=y diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 64ec2222a196..a49df8988cd6 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -3328,9 +3328,11 @@ bool kvm_vcpu_block(struct kvm_vcpu *vcpu) 
vcpu->stat.generic.blocking = 1; + preempt_disable(); kvm_arch_vcpu_blocking(vcpu); - prepare_to_rcuwait(wait); + preempt_enable(); + for (;;) { set_current_state(TASK_INTERRUPTIBLE); @@ -3340,9 +3342,11 @@ bool kvm_vcpu_block(struct kvm_vcpu *vcpu) waited = true; schedule(); } - finish_rcuwait(wait); + preempt_disable(); + finish_rcuwait(wait); kvm_arch_vcpu_unblocking(vcpu); + preempt_enable(); vcpu->stat.generic.blocking = 0; @@ -4300,8 +4304,11 @@ static int kvm_ioctl_create_device(struct kvm *kvm, kvm_put_kvm_no_destroy(kvm); mutex_lock(&kvm->lock); list_del(&dev->vm_node); + if (ops->release) + ops->release(dev); mutex_unlock(&kvm->lock); - ops->destroy(dev); + if (ops->destroy) + ops->destroy(dev); return ret; }
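The final kvm_main.c hunk tightens the failure path of device creation in kvm_ioctl_create_device(): the half-constructed device is now unwound via ops->release(), if the ops provide one, while kvm->lock is still held, and ops->destroy() is only invoked when it exists. A condensed userspace sketch of that shape follows; struct dev_ops, the mutex, and create_device_error_path() are simplified stand-ins of ours, not the kernel's struct kvm_device_ops API.

#include <pthread.h>

/* Simplified stand-in for the optional device callbacks. */
struct dev_ops {
	void (*release)(void *dev);	/* unwind creation; runs under the lock */
	void (*destroy)(void *dev);	/* free the device; runs after unlock */
};

/* Error-path shape established by the hunk above: neither callback is
 * dereferenced blindly, and release() happens before the lock is dropped. */
static void create_device_error_path(pthread_mutex_t *vm_lock, void *dev,
				     const struct dev_ops *ops)
{
	pthread_mutex_lock(vm_lock);
	/* the kernel also unlinks the device from the VM's list here */
	if (ops->release)
		ops->release(dev);
	pthread_mutex_unlock(vm_lock);

	if (ops->destroy)
		ops->destroy(dev);
}

Calling release() under the lock keeps the device from being visible and half-torn-down at the same time, while deferring destroy() until after unlock avoids freeing memory with the lock held.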